1//==--- riscv_vector.td - RISC-V V-ext Builtin function list --------------===//
2//
3//  Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4//  See https://llvm.org/LICENSE.txt for license information.
5//  SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines the builtins for RISC-V V-extension. See:
10//
11//     https://github.com/riscv/rvv-intrinsic-doc
12//
13//===----------------------------------------------------------------------===//
14
15include "riscv_vector_common.td"
16
17//===----------------------------------------------------------------------===//
18// Basic classes with automatic codegen.
19//===----------------------------------------------------------------------===//
20
21class RVVOutBuiltin<string suffix, string prototype, string type_range>
22    : RVVBuiltin<suffix, prototype, type_range> {
23  let IntrinsicTypes = [-1];
24}
25
26class RVVOp0Builtin<string suffix, string prototype, string type_range>
27    : RVVBuiltin<suffix, prototype, type_range> {
28  let IntrinsicTypes = [0];
29}
30
31class RVVOutOp1Builtin<string suffix, string prototype, string type_range>
32    : RVVBuiltin<suffix, prototype, type_range> {
33  let IntrinsicTypes = [-1, 1];
34}
35
36class RVVOutOp0Op1Builtin<string suffix, string prototype, string type_range>
37    : RVVBuiltin<suffix, prototype, type_range> {
38  let IntrinsicTypes = [-1, 0, 1];
39}
40
41multiclass RVVBuiltinSet<string intrinsic_name, string type_range,
42                         list<list<string>> suffixes_prototypes,
43                         list<int> intrinsic_types> {
44  let IRName = intrinsic_name, MaskedIRName = intrinsic_name # "_mask",
45      IntrinsicTypes = intrinsic_types in {
46    foreach s_p = suffixes_prototypes in {
47      let Name = NAME # "_" # s_p[0] in {
48        defvar suffix = s_p[1];
49        defvar prototype = s_p[2];
50        def : RVVBuiltin<suffix, prototype, type_range>;
51      }
52    }
53  }
54}
55
56// IntrinsicTypes is output, op0, op1 [-1, 0, 1]
57multiclass RVVOutOp0Op1BuiltinSet<string intrinsic_name, string type_range,
58                                  list<list<string>> suffixes_prototypes>
59    : RVVBuiltinSet<intrinsic_name, type_range, suffixes_prototypes,
60                            [-1, 0, 1]>;
61
62multiclass RVVOutBuiltinSet<string intrinsic_name, string type_range,
63                            list<list<string>> suffixes_prototypes>
64    : RVVBuiltinSet<intrinsic_name, type_range, suffixes_prototypes, [-1]>;
65
66multiclass RVVOp0BuiltinSet<string intrinsic_name, string type_range,
67                            list<list<string>> suffixes_prototypes>
68    : RVVBuiltinSet<intrinsic_name, type_range, suffixes_prototypes, [0]>;
69
70// IntrinsicTypes is output, op1 [-1, 0]
71multiclass RVVOutOp0BuiltinSet<string intrinsic_name, string type_range,
72                               list<list<string>> suffixes_prototypes>
73    : RVVBuiltinSet<intrinsic_name, type_range, suffixes_prototypes, [-1, 0]>;
74
75// IntrinsicTypes is output, op1 [-1, 1]
76multiclass RVVOutOp1BuiltinSet<string intrinsic_name, string type_range,
77                               list<list<string>> suffixes_prototypes>
78    : RVVBuiltinSet<intrinsic_name, type_range, suffixes_prototypes, [-1, 1]>;
79
80multiclass RVVOp0Op1BuiltinSet<string intrinsic_name, string type_range,
81                               list<list<string>> suffixes_prototypes>
82    : RVVBuiltinSet<intrinsic_name, type_range, suffixes_prototypes, [0, 1]>;
83
84multiclass RVVOutOp1Op2BuiltinSet<string intrinsic_name, string type_range,
85                                  list<list<string>> suffixes_prototypes>
86    : RVVBuiltinSet<intrinsic_name, type_range, suffixes_prototypes, [-1, 1, 2]>;
87
88multiclass RVVSignedBinBuiltinSet
89    : RVVOutOp1BuiltinSet<NAME, "csil",
90                          [["vv", "v", "vvv"],
91                           ["vx", "v", "vve"]]>;
92
93multiclass RVVSignedBinBuiltinSetRoundingMode
94    : RVVOutOp1BuiltinSet<NAME, "csil",
95                          [["vv", "v", "vvvu"],
96                           ["vx", "v", "vveu"]]>;
97
98multiclass RVVUnsignedBinBuiltinSet
99    : RVVOutOp1BuiltinSet<NAME, "csil",
100                          [["vv", "Uv", "UvUvUv"],
101                           ["vx", "Uv", "UvUvUe"]]>;
102
103multiclass RVVUnsignedBinBuiltinSetRoundingMode
104    : RVVOutOp1BuiltinSet<NAME, "csil",
105                          [["vv", "Uv", "UvUvUvu"],
106                           ["vx", "Uv", "UvUvUeu"]]>;
107
108multiclass RVVIntBinBuiltinSet
109    : RVVSignedBinBuiltinSet,
110      RVVUnsignedBinBuiltinSet;
111
112multiclass RVVSlideOneBuiltinSet
113    : RVVOutOp1BuiltinSet<NAME, "csil",
114                          [["vx", "v", "vve"],
115                           ["vx", "Uv", "UvUve"]]>;
116
117multiclass RVVSignedShiftBuiltinSet
118    : RVVOutOp1BuiltinSet<NAME, "csil",
119                          [["vv", "v", "vvUv"],
120                           ["vx", "v", "vvz"]]>;
121
122multiclass RVVSignedShiftBuiltinSetRoundingMode
123    : RVVOutOp1BuiltinSet<NAME, "csil",
124                          [["vv", "v", "vvUvu"],
125                           ["vx", "v", "vvzu"]]>;
126
127multiclass RVVUnsignedShiftBuiltinSet
128    : RVVOutOp1BuiltinSet<NAME, "csil",
129                          [["vv", "Uv", "UvUvUv"],
130                           ["vx", "Uv", "UvUvz"]]>;
131
132multiclass RVVUnsignedShiftBuiltinSetRoundingMode
133    : RVVOutOp1BuiltinSet<NAME, "csil",
134                          [["vv", "Uv", "UvUvUvu"],
135                           ["vx", "Uv", "UvUvzu"]]>;
136
137multiclass RVVShiftBuiltinSet
138    : RVVSignedShiftBuiltinSet,
139      RVVUnsignedShiftBuiltinSet;
140
141let Log2LMUL = [-3, -2, -1, 0, 1, 2] in {
142  multiclass RVVSignedNShiftBuiltinSet
143      : RVVOutOp0Op1BuiltinSet<NAME, "csil",
144                                     [["wv", "v", "vwUv"],
145                                      ["wx", "v", "vwz"]]>;
146
147  multiclass RVVSignedNShiftBuiltinSetRoundingMode
148      : RVVOutOp0Op1BuiltinSet<NAME, "csil",
149                                     [["wv", "v", "vwUvu"],
150                                      ["wx", "v", "vwzu"]]>;
151
152  multiclass RVVUnsignedNShiftBuiltinSet
153      : RVVOutOp0Op1BuiltinSet<NAME, "csil",
154                                     [["wv", "Uv", "UvUwUv"],
155                                      ["wx", "Uv", "UvUwz"]]>;
156
157  multiclass RVVUnsignedNShiftBuiltinSetRoundingMode
158      : RVVOutOp0Op1BuiltinSet<NAME, "csil",
159                                     [["wv", "Uv", "UvUwUvu"],
160                                      ["wx", "Uv", "UvUwzu"]]>;
161
162}
163
164multiclass RVVCarryinBuiltinSet
165    : RVVOutOp1BuiltinSet<NAME, "csil",
166                          [["vvm", "v", "vvvm"],
167                           ["vxm", "v", "vvem"],
168                           ["vvm", "Uv", "UvUvUvm"],
169                           ["vxm", "Uv", "UvUvUem"]]>;
170
171multiclass RVVCarryOutInBuiltinSet<string intrinsic_name>
172    : RVVOp0Op1BuiltinSet<intrinsic_name, "csil",
173                          [["vvm", "vm", "mvvm"],
174                           ["vxm", "vm", "mvem"],
175                           ["vvm", "Uvm", "mUvUvm"],
176                           ["vxm", "Uvm", "mUvUem"]]>;
177
178multiclass RVVSignedMaskOutBuiltinSet
179    : RVVOp0Op1BuiltinSet<NAME, "csil",
180                          [["vv", "vm", "mvv"],
181                           ["vx", "vm", "mve"]]>;
182
183multiclass RVVUnsignedMaskOutBuiltinSet
184    : RVVOp0Op1BuiltinSet<NAME, "csil",
185                          [["vv", "Uvm", "mUvUv"],
186                           ["vx", "Uvm", "mUvUe"]]>;
187
188multiclass RVVIntMaskOutBuiltinSet
189    : RVVSignedMaskOutBuiltinSet,
190      RVVUnsignedMaskOutBuiltinSet;
191
192class RVVIntExt<string intrinsic_name, string suffix, string prototype,
193                string type_range>
194    : RVVBuiltin<suffix, prototype, type_range> {
195  let IRName = intrinsic_name;
196  let MaskedIRName = intrinsic_name # "_mask";
197  let OverloadedName = NAME;
198  let IntrinsicTypes = [-1, 0];
199}
200
201let HasMaskedOffOperand = false in {
202  multiclass RVVIntTerBuiltinSet {
203    defm "" : RVVOutOp1BuiltinSet<NAME, "csil",
204                                  [["vv", "v", "vvvv"],
205                                   ["vx", "v", "vvev"],
206                                   ["vv", "Uv", "UvUvUvUv"],
207                                   ["vx", "Uv", "UvUvUeUv"]]>;
208  }
209  multiclass RVVFloatingTerBuiltinSet {
210    defm "" : RVVOutOp1BuiltinSet<NAME, "xfd",
211                                  [["vv", "v", "vvvv"],
212                                   ["vf", "v", "vvev"]]>;
213  }
214  multiclass RVVFloatingTerBuiltinSetRoundingMode {
215    defm "" : RVVOutOp1BuiltinSet<NAME, "xfd",
216                                  [["vv", "v", "vvvvu"],
217                                   ["vf", "v", "vvevu"]]>;
218  }
219}
220
221let HasMaskedOffOperand = false, Log2LMUL = [-2, -1, 0, 1, 2] in {
222  multiclass RVVFloatingWidenTerBuiltinSet {
223    defm ""  : RVVOutOp1Op2BuiltinSet<NAME, "xf",
224                                      [["vv", "w", "wwvv"],
225                                       ["vf", "w", "wwev"]]>;
226  }
227  multiclass RVVFloatingWidenTerBuiltinSetRoundingMode {
228    defm ""  : RVVOutOp1Op2BuiltinSet<NAME, "xf",
229                                      [["vv", "w", "wwvvu"],
230                                       ["vf", "w", "wwevu"]]>;
231  }
232}
233
234multiclass RVVFloatingBinBuiltinSet
235    : RVVOutOp1BuiltinSet<NAME, "xfd",
236                          [["vv", "v", "vvv"],
237                           ["vf", "v", "vve"]]>;
238
239multiclass RVVFloatingBinBuiltinSetRoundingMode
240    : RVVOutOp1BuiltinSet<NAME, "xfd",
241                          [["vv", "v", "vvvu"],
242                           ["vf", "v", "vveu"]]>;
243
244multiclass RVVFloatingBinVFBuiltinSet
245    : RVVOutOp1BuiltinSet<NAME, "xfd",
246                          [["vf", "v", "vve"]]>;
247
248multiclass RVVFloatingBinVFBuiltinSetRoundingMode
249    : RVVOutOp1BuiltinSet<NAME, "xfd",
250                          [["vf", "v", "vveu"]]>;
251
252multiclass RVVFloatingMaskOutBuiltinSet
253    : RVVOp0Op1BuiltinSet<NAME, "xfd",
254                          [["vv", "vm", "mvv"],
255                           ["vf", "vm", "mve"]]>;
256
257multiclass RVVFloatingMaskOutVFBuiltinSet
258    : RVVOp0Op1BuiltinSet<NAME, "fd",
259                          [["vf", "vm", "mve"]]>;
260
261multiclass RVVConvBuiltinSet<string intrinsic_name, string type_range,
262                         list<list<string>> suffixes_prototypes> {
263let Name = intrinsic_name,
264    IRName = intrinsic_name,
265    MaskedIRName = intrinsic_name # "_mask",
266    IntrinsicTypes = [-1, 0] in {
267  foreach s_p = suffixes_prototypes in {
268      defvar suffix = s_p[0];
269      defvar prototype = s_p[1];
270      def : RVVBuiltin<suffix, prototype, type_range>;
271    }
272  }
273}
274
275
276class RVVMaskBinBuiltin : RVVOutBuiltin<"m", "mmm", "c"> {
277  let Name = NAME # "_mm";
278  let HasMasked = false;
279}
280
281class RVVMaskUnaryBuiltin : RVVOutBuiltin<"m", "mm", "c"> {
282  let Name = NAME # "_m";
283}
284
285class RVVMaskNullaryBuiltin : RVVOutBuiltin<"m", "m", "c"> {
286  let Name = NAME # "_m";
287  let HasMasked = false;
288  let SupportOverloading = false;
289}
290
291class RVVMaskOp0Builtin<string prototype> : RVVOp0Builtin<"m", prototype, "c"> {
292  let Name = NAME # "_m";
293  let HasMaskedOffOperand = false;
294}
295
296let UnMaskedPolicyScheme = HasPolicyOperand,
297    HasMaskedOffOperand = false in {
298  multiclass RVVSlideUpBuiltinSet {
299    defm "" : RVVOutBuiltinSet<NAME, "csilxfd",
300                               [["vx","v", "vvvz"]]>;
301    defm "" : RVVOutBuiltinSet<NAME, "csil",
302                               [["vx","Uv", "UvUvUvz"]]>;
303  }
304}
305
306let UnMaskedPolicyScheme = HasPassthruOperand,
307    ManualCodegen = [{
308      if (IsMasked) {
309        std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
310        if ((PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA))
311          Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
312      } else {
313        if (PolicyAttrs & RVV_VTA)
314          Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
315      }
316
317      Ops.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
318      IntrinsicTypes = {ResultType, Ops.back()->getType()};
319    }] in {
320  multiclass RVVSlideDownBuiltinSet {
321    defm "" : RVVOutBuiltinSet<NAME, "csilxfd",
322                               [["vx","v", "vvz"]]>;
323    defm "" : RVVOutBuiltinSet<NAME, "csil",
324                               [["vx","Uv", "UvUvz"]]>;
325  }
326}
327
328class RVVFloatingUnaryBuiltin<string builtin_suffix, string ir_suffix,
329                              string prototype>
330    : RVVOutBuiltin<ir_suffix, prototype, "xfd"> {
331  let Name = NAME # "_" # builtin_suffix;
332}
333
334class RVVFloatingUnaryVVBuiltin : RVVFloatingUnaryBuiltin<"v", "v", "vv">;
335
336class RVVConvBuiltin<string suffix, string prototype, string type_range,
337                     string overloaded_name>
338    : RVVBuiltin<suffix, prototype, type_range> {
339  let IntrinsicTypes = [-1, 0];
340  let OverloadedName = overloaded_name;
341}
342
343class RVVConvToSignedBuiltin<string overloaded_name>
344    : RVVConvBuiltin<"Iv", "Ivv", "xfd", overloaded_name>;
345
346class RVVConvToUnsignedBuiltin<string overloaded_name>
347    : RVVConvBuiltin<"Uv", "Uvv", "xfd", overloaded_name>;
348
349class RVVConvToWidenSignedBuiltin<string overloaded_name>
350    : RVVConvBuiltin<"Iw", "Iwv", "xf", overloaded_name>;
351
352class RVVConvToWidenUnsignedBuiltin<string overloaded_name>
353    : RVVConvBuiltin<"Uw", "Uwv", "xf", overloaded_name>;
354
355class RVVConvToNarrowingSignedBuiltin<string overloaded_name>
356    : RVVConvBuiltin<"Iv", "IvFw", "csi", overloaded_name>;
357
358class RVVConvToNarrowingUnsignedBuiltin<string overloaded_name>
359    : RVVConvBuiltin<"Uv", "UvFw", "csi", overloaded_name>;
360
361let HasMaskedOffOperand = true in {
362  multiclass RVVSignedReductionBuiltin {
363    defm "" : RVVOutOp0BuiltinSet<NAME, "csil",
364                                  [["vs", "vSv", "SvvSv"]]>;
365  }
366  multiclass RVVUnsignedReductionBuiltin {
367    defm "" : RVVOutOp0BuiltinSet<NAME, "csil",
368                                  [["vs", "UvUSv", "USvUvUSv"]]>;
369  }
370  multiclass RVVFloatingReductionBuiltin {
371    defm "" : RVVOutOp0BuiltinSet<NAME, "xfd",
372                                  [["vs", "vSv", "SvvSv"]]>;
373  }
374  multiclass RVVFloatingReductionBuiltinRoundingMode {
375    defm "" : RVVOutOp0BuiltinSet<NAME, "xfd",
376                                  [["vs", "vSv", "SvvSvu"]]>;
377  }
378  multiclass RVVFloatingWidenReductionBuiltin {
379    defm "" : RVVOutOp0BuiltinSet<NAME, "xf",
380                                  [["vs", "vSw", "SwvSw"]]>;
381  }
382  multiclass RVVFloatingWidenReductionBuiltinRoundingMode {
383    defm "" : RVVOutOp0BuiltinSet<NAME, "xf",
384                                  [["vs", "vSw", "SwvSwu"]]>;
385  }
386}
387
388multiclass RVVIntReductionBuiltinSet
389    : RVVSignedReductionBuiltin,
390      RVVUnsignedReductionBuiltin;
391
392// For widen operation which has different mangling name.
393multiclass RVVWidenBuiltinSet<string intrinsic_name, string type_range,
394                              list<list<string>> suffixes_prototypes> {
395  let Log2LMUL = [-3, -2, -1, 0, 1, 2],
396      IRName = intrinsic_name, MaskedIRName = intrinsic_name # "_mask" in {
397    foreach s_p = suffixes_prototypes in {
398      let Name = NAME # "_" # s_p[0],
399          OverloadedName = NAME # "_" # s_p[0] in {
400        defvar suffix = s_p[1];
401        defvar prototype = s_p[2];
402        def : RVVOutOp0Op1Builtin<suffix, prototype, type_range>;
403      }
404    }
405  }
406}
407
408// For widen operation with widen operand which has different mangling name.
409multiclass RVVWidenWOp0BuiltinSet<string intrinsic_name, string type_range,
410                                  list<list<string>> suffixes_prototypes> {
411  let Log2LMUL = [-3, -2, -1, 0, 1, 2],
412      IRName = intrinsic_name, MaskedIRName = intrinsic_name # "_mask" in {
413    foreach s_p = suffixes_prototypes in {
414      let Name = NAME # "_" # s_p[0],
415          OverloadedName = NAME # "_" # s_p[0] in {
416        defvar suffix = s_p[1];
417        defvar prototype = s_p[2];
418        def : RVVOutOp1Builtin<suffix, prototype, type_range>;
419      }
420    }
421  }
422}
423
424multiclass RVVSignedWidenBinBuiltinSet
425    : RVVWidenBuiltinSet<NAME, "csi",
426                         [["vv", "w", "wvv"],
427                          ["vx", "w", "wve"]]>;
428
429multiclass RVVSignedWidenOp0BinBuiltinSet
430    : RVVWidenWOp0BuiltinSet<NAME # "_w", "csi",
431                             [["wv", "w", "wwv"],
432                              ["wx", "w", "wwe"]]>;
433
434multiclass RVVUnsignedWidenBinBuiltinSet
435    : RVVWidenBuiltinSet<NAME, "csi",
436                         [["vv", "Uw", "UwUvUv"],
437                          ["vx", "Uw", "UwUvUe"]]>;
438
439multiclass RVVUnsignedWidenOp0BinBuiltinSet
440    : RVVWidenWOp0BuiltinSet<NAME # "_w", "csi",
441                             [["wv", "Uw", "UwUwUv"],
442                              ["wx", "Uw", "UwUwUe"]]>;
443
444multiclass RVVFloatingWidenBinBuiltinSet
445    : RVVWidenBuiltinSet<NAME, "xf",
446                         [["vv", "w", "wvv"],
447                          ["vf", "w", "wve"]]>;
448
449multiclass RVVFloatingWidenBinBuiltinSetRoundingMode
450    : RVVWidenBuiltinSet<NAME, "xf",
451                         [["vv", "w", "wvvu"],
452                          ["vf", "w", "wveu"]]>;
453
454multiclass RVVFloatingWidenOp0BinBuiltinSet
455    : RVVWidenWOp0BuiltinSet<NAME # "_w", "xf",
456                             [["wv", "w", "wwv"],
457                              ["wf", "w", "wwe"]]>;
458
459multiclass RVVFloatingWidenOp0BinBuiltinSetRoundingMode
460    : RVVWidenWOp0BuiltinSet<NAME # "_w", "xf",
461                             [["wv", "w", "wwvu"],
462                              ["wf", "w", "wweu"]]>;
463
464defvar TypeList = ["c","s","i","l","x","f","d"];
465defvar EEWList = [["8", "(Log2EEW:3)"],
466                  ["16", "(Log2EEW:4)"],
467                  ["32", "(Log2EEW:5)"],
468                  ["64", "(Log2EEW:6)"]];
469
470class IsFloat<string type> {
471  bit val = !or(!eq(type, "x"), !eq(type, "f"), !eq(type, "d"));
472}
473
474let SupportOverloading = false,
475    MaskedPolicyScheme = NonePolicy in {
476  class RVVVLEMaskBuiltin : RVVOutBuiltin<"m", "mPCUe", "c"> {
477    let Name = "vlm_v";
478    let IRName = "vlm";
479    let HasMasked = false;
480  }
481}
482
483let SupportOverloading = false,
484    UnMaskedPolicyScheme = HasPassthruOperand in {
485  multiclass RVVVLEBuiltin<list<string> types> {
486    let Name = NAME # "_v",
487        IRName = "vle",
488        MaskedIRName ="vle_mask" in {
489      foreach type = types in {
490        def : RVVOutBuiltin<"v", "vPCe", type>;
491        if !not(IsFloat<type>.val) then {
492          def : RVVOutBuiltin<"Uv", "UvPCUe", type>;
493        }
494      }
495    }
496  }
497}
498
499multiclass RVVVLEFFBuiltin<list<string> types> {
500  let Name = NAME # "_v",
501      IRName = "vleff",
502      MaskedIRName = "vleff_mask",
503      SupportOverloading = false,
504      UnMaskedPolicyScheme = HasPassthruOperand,
505      ManualCodegen = [{
506      {
507        if (IsMasked) {
508          // Move mask to right before vl.
509          std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
510          if ((PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA))
511            Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
512          Ops.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
513          IntrinsicTypes = {ResultType, Ops[4]->getType()};
514        } else {
515          if (PolicyAttrs & RVV_VTA)
516            Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
517          IntrinsicTypes = {ResultType, Ops[3]->getType()};
518        }
519        Ops[1] = Builder.CreateBitCast(Ops[1], ResultType->getPointerTo());
520        Value *NewVL = Ops[2];
521        Ops.erase(Ops.begin() + 2);
522        llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
523        llvm::Value *LoadValue = Builder.CreateCall(F, Ops, "");
524        llvm::Value *V = Builder.CreateExtractValue(LoadValue, {0});
525        // Store new_vl.
526        clang::CharUnits Align;
527        if (IsMasked)
528          Align = CGM.getNaturalPointeeTypeAlignment(E->getArg(E->getNumArgs()-2)->getType());
529        else
530          Align = CGM.getNaturalPointeeTypeAlignment(E->getArg(1)->getType());
531        llvm::Value *Val = Builder.CreateExtractValue(LoadValue, {1});
532        Builder.CreateStore(Val, Address(NewVL, Val->getType(), Align));
533        return V;
534      }
535      }] in {
536    foreach type = types in {
537      def : RVVBuiltin<"v", "vPCePz", type>;
538      // Skip floating types for unsigned versions.
539      if !not(IsFloat<type>.val) then {
540        def : RVVBuiltin<"Uv", "UvPCUePz", type>;
541      }
542    }
543  }
544}
545
546multiclass RVVVLSEBuiltin<list<string> types> {
547  let Name = NAME # "_v",
548      IRName = "vlse",
549      MaskedIRName ="vlse_mask",
550      SupportOverloading = false,
551      UnMaskedPolicyScheme = HasPassthruOperand in {
552    foreach type = types in {
553      def : RVVOutBuiltin<"v", "vPCet", type>;
554      if !not(IsFloat<type>.val) then {
555        def : RVVOutBuiltin<"Uv", "UvPCUet", type>;
556      }
557    }
558  }
559}
560
561multiclass RVVIndexedLoad<string op> {
562  let UnMaskedPolicyScheme = HasPassthruOperand in {
563    foreach type = TypeList in {
564      foreach eew_list = EEWList[0-2] in {
565        defvar eew = eew_list[0];
566        defvar eew_type = eew_list[1];
567        let Name = op # eew # "_v", IRName = op, MaskedIRName = op # "_mask" in {
568          def: RVVOutOp1Builtin<"v", "vPCe" # eew_type # "Uv", type>;
569            if !not(IsFloat<type>.val) then {
570              def: RVVOutOp1Builtin<"Uv", "UvPCUe" # eew_type # "Uv", type>;
571            }
572        }
573      }
574      defvar eew64 = "64";
575      defvar eew64_type = "(Log2EEW:6)";
576      let Name = op # eew64 # "_v", IRName = op, MaskedIRName = op # "_mask",
577          RequiredFeatures = ["RV64"] in {
578          def: RVVOutOp1Builtin<"v", "vPCe" # eew64_type # "Uv", type>;
579            if !not(IsFloat<type>.val) then {
580              def: RVVOutOp1Builtin<"Uv", "UvPCUe" # eew64_type # "Uv", type>;
581            }
582        }
583    }
584  }
585}
586
587let HasMaskedOffOperand = false,
588    MaskedPolicyScheme = NonePolicy,
589    ManualCodegen = [{
590      if (IsMasked) {
591        // Builtin: (mask, ptr, value, vl). Intrinsic: (value, ptr, mask, vl)
592        std::swap(Ops[0], Ops[2]);
593      } else {
594        // Builtin: (ptr, value, vl). Intrinsic: (value, ptr, vl)
595        std::swap(Ops[0], Ops[1]);
596      }
597      Ops[1] = Builder.CreateBitCast(Ops[1], Ops[0]->getType()->getPointerTo());
598      if (IsMasked)
599        IntrinsicTypes = {Ops[0]->getType(), Ops[3]->getType()};
600      else
601        IntrinsicTypes = {Ops[0]->getType(), Ops[2]->getType()};
602    }] in {
603  class RVVVSEMaskBuiltin : RVVBuiltin<"m", "0PUem", "c"> {
604    let Name = "vsm_v";
605    let IRName = "vsm";
606    let HasMasked = false;
607  }
608  multiclass RVVVSEBuiltin<list<string> types> {
609    let Name = NAME # "_v",
610        IRName = "vse",
611        MaskedIRName = "vse_mask" in {
612      foreach type = types in {
613        def : RVVBuiltin<"v", "0Pev", type>;
614        if !not(IsFloat<type>.val) then {
615          def : RVVBuiltin<"Uv", "0PUeUv", type>;
616        }
617      }
618    }
619  }
620}
621
622multiclass RVVVSSEBuiltin<list<string> types> {
623  let Name = NAME # "_v",
624      IRName = "vsse",
625      MaskedIRName = "vsse_mask",
626      HasMaskedOffOperand = false,
627      MaskedPolicyScheme = NonePolicy,
628      ManualCodegen = [{
629        if (IsMasked) {
630          // Builtin: (mask, ptr, stride, value, vl). Intrinsic: (value, ptr, stride, mask, vl)
631          std::swap(Ops[0], Ops[3]);
632        } else {
633          // Builtin: (ptr, stride, value, vl). Intrinsic: (value, ptr, stride, vl)
634          std::rotate(Ops.begin(), Ops.begin() + 2, Ops.begin() + 3);
635        }
636        Ops[1] = Builder.CreateBitCast(Ops[1], Ops[0]->getType()->getPointerTo());
637        if (IsMasked)
638          IntrinsicTypes = {Ops[0]->getType(), Ops[4]->getType()};
639        else
640          IntrinsicTypes = {Ops[0]->getType(), Ops[3]->getType()};
641      }] in {
642    foreach type = types in {
643      def : RVVBuiltin<"v", "0Petv", type>;
644      if !not(IsFloat<type>.val) then {
645        def : RVVBuiltin<"Uv", "0PUetUv", type>;
646      }
647    }
648  }
649}
650
651multiclass RVVIndexedStore<string op> {
652  let HasMaskedOffOperand = false,
653      MaskedPolicyScheme = NonePolicy,
654      ManualCodegen = [{
655        if (IsMasked) {
656          // Builtin: (mask, ptr, index, value, vl). Intrinsic: (value, ptr, index, mask, vl)
657          std::swap(Ops[0], Ops[3]);
658        } else {
659          // Builtin: (ptr, index, value, vl). Intrinsic: (value, ptr, index, vl)
660          std::rotate(Ops.begin(), Ops.begin() + 2, Ops.begin() + 3);
661        }
662        Ops[1] = Builder.CreateBitCast(Ops[1], Ops[0]->getType()->getPointerTo());
663        if (IsMasked)
664          IntrinsicTypes = {Ops[0]->getType(), Ops[2]->getType(), Ops[4]->getType()};
665        else
666          IntrinsicTypes = {Ops[0]->getType(), Ops[2]->getType(), Ops[3]->getType()};
667      }] in {
668      foreach type = TypeList in {
669        foreach eew_list = EEWList[0-2] in {
670          defvar eew = eew_list[0];
671          defvar eew_type = eew_list[1];
672          let Name = op # eew  # "_v", IRName = op, MaskedIRName = op # "_mask" in  {
673            def : RVVBuiltin<"v", "0Pe" # eew_type # "Uvv", type>;
674            if !not(IsFloat<type>.val) then {
675              def : RVVBuiltin<"Uv", "0PUe" # eew_type # "UvUv", type>;
676            }
677          }
678        }
679        defvar eew64 = "64";
680        defvar eew64_type = "(Log2EEW:6)";
681        let Name = op # eew64  # "_v", IRName = op, MaskedIRName = op # "_mask",
682            RequiredFeatures = ["RV64"]  in  {
683          def : RVVBuiltin<"v", "0Pe" # eew64_type # "Uvv", type>;
684          if !not(IsFloat<type>.val) then {
685            def : RVVBuiltin<"Uv", "0PUe" # eew64_type # "UvUv", type>;
686          }
687        }
688      }
689  }
690}
691
692defvar NFList = [2, 3, 4, 5, 6, 7, 8];
693/*
694A segment load builtin has different variants.
695
696Therefore a segment unit-stride load builtin can have 4 variants,
6971. When unmasked and the policies are all specified as agnostic:
698(Address0, ..., Address{NF - 1}, Ptr, VL)
6992. When masked and the policies are all specified as agnostic:
700(Address0, ..., Address{NF - 1}, Mask, Ptr, VL)
7013. When unmasked and one of the policies is specified as undisturbed:
702(Address0, ..., Address{NF - 1}, Maskedoff0, ..., Maskedoff{NF - 1},
703  Ptr, VL)
7044. When masked and one of the policies is specified as undisturbed:
705(Address0, ..., Address{NF - 1}, Mask, Maskedoff0, ..., Maskedoff{NF - 1},
706  Ptr, VL)
707
708Other variants of segment load builtin share the same structure, but they
709have their own extra parameter.
710
711The segment unit-stride fault-only-first load builtin has a 'NewVL'
712operand after the 'Ptr' operand.
7131. When unmasked and the policies are all specified as agnostic:
714(Address0, ..., Address{NF - 1}, Ptr, NewVL, VL)
7152. When masked and the policies are all specified as agnostic:
716(Address0, ..., Address{NF - 1}, Mask, Ptr, NewVL, VL)
7173. When unmasked and one of the policies is specified as undisturbed:
718(Address0, ..., Address{NF - 1}, Maskedoff0, ..., Maskedoff{NF - 1},
719  Ptr, NewVL, VL)
7204. When masked and one of the policies is specified as undisturbed:
721(Address0, ..., Address{NF - 1}, Mask, Maskedoff0, ..., Maskedoff{NF - 1},
722  Ptr, NewVL, VL)
723
724The segment strided load builtin has a 'Stride' operand after the 'Ptr'
725operand.
7261. When unmasked and the policies are all specified as agnostic:
727(Address0, ..., Address{NF - 1}, Ptr, Stride, VL)
7282. When masked and the policies are all specified as agnostic:
729(Address0, ..., Address{NF - 1}, Mask, Ptr, Stride, VL)
7303. When unmasked and one of the policies is specified as undisturbed:
731(Address0, ..., Address{NF - 1}, Maskedoff0, ..., Maskedoff{NF - 1},
732  Ptr, Stride, VL)
7334. When masked and one of the policies is specified as undisturbed:
734(Address0, ..., Address{NF - 1}, Mask, Maskedoff0, ..., Maskedoff{NF - 1},
735  Ptr, Stride, VL)
736
737The segment indexed load builtin has a 'Idx' operand after the 'Ptr' operand.
7381. When unmasked and the policies are all specified as agnostic:
739(Address0, ..., Address{NF - 1}, Ptr, Idx, VL)
7402. When masked and the policies are all specified as agnostic:
741(Address0, ..., Address{NF - 1}, Mask, Ptr, Idx, VL)
7423. When unmasked and one of the policies is specified as undisturbed:
743(Address0, ..., Address{NF - 1}, Maskedoff0, ..., Maskedoff{NF - 1},
744  Ptr, Idx, VL)
7454. When masked and one of the policies is specified as undisturbed:
746(Address0, ..., Address{NF - 1}, Mask, Maskedoff0, ..., Maskedoff{NF - 1},
747  Ptr, Idx, VL)
748
749Segment load intrinsics has different variants similar to their builtins.
750
751Segment unit-stride load intrinsic,
752  Masked: (Vector0, ..., Vector{NF - 1}, Ptr, Mask, VL, Policy)
753  Unmasked: (Vector0, ..., Vector{NF - 1}, Ptr, VL)
754Segment unit-stride fault-only-first load intrinsic,
755  Masked: (Vector0, ..., Vector{NF - 1}, Ptr, Mask, VL, Policy)
756  Unmasked: (Vector0, ..., Vector{NF - 1}, Ptr, VL)
757Segment strided load intrinsic,
758  Masked: (Vector0, ..., Vector{NF - 1}, Ptr, Stride, Mask, VL, Policy)
759  Unmasked: (Vector0, ..., Vector{NF - 1}, Ptr, Stride, VL)
760Segment indexed load intrinsic,
761  Masked: (Vector0, ..., Vector{NF - 1}, Ptr, Index, Mask, VL, Policy)
762  Unmasked: (Vector0, ..., Vector{NF - 1}, Ptr, Index, VL)
763
764The Vector(s) is poison when the policy behavior allows us to not care
765about any masked-off elements.
766*/
767
768class PVString<int nf, bit signed> {
769  string S =
770    !cond(!eq(nf, 2): !if(signed, "PvPv", "PUvPUv"),
771          !eq(nf, 3): !if(signed, "PvPvPv", "PUvPUvPUv"),
772          !eq(nf, 4): !if(signed, "PvPvPvPv", "PUvPUvPUvPUv"),
773          !eq(nf, 5): !if(signed, "PvPvPvPvPv", "PUvPUvPUvPUvPUv"),
774          !eq(nf, 6): !if(signed, "PvPvPvPvPvPv", "PUvPUvPUvPUvPUvPUv"),
775          !eq(nf, 7): !if(signed, "PvPvPvPvPvPvPv", "PUvPUvPUvPUvPUvPUvPUv"),
776          !eq(nf, 8): !if(signed, "PvPvPvPvPvPvPvPv", "PUvPUvPUvPUvPUvPUvPUvPUv"));
777}
778
779class VString<int nf, bit signed> {
780  string S = !cond(!eq(nf, 2): !if(signed, "vv", "UvUv"),
781                   !eq(nf, 3): !if(signed, "vvv", "UvUvUv"),
782                   !eq(nf, 4): !if(signed, "vvvv", "UvUvUvUv"),
783                   !eq(nf, 5): !if(signed, "vvvvv", "UvUvUvUvUv"),
784                   !eq(nf, 6): !if(signed, "vvvvvv", "UvUvUvUvUvUv"),
785                   !eq(nf, 7): !if(signed, "vvvvvvv", "UvUvUvUvUvUvUv"),
786                   !eq(nf, 8): !if(signed, "vvvvvvvv", "UvUvUvUvUvUvUvUv"));
787}
788
789multiclass RVVPseudoUnaryBuiltin<string IR, string type_range> {
790  let Name = NAME,
791      IRName = IR,
792      MaskedIRName = IR # "_mask",
793      UnMaskedPolicyScheme = HasPassthruOperand,
794      ManualCodegen = [{
795      {
796        if (IsMasked) {
797          std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
798          if ((PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA))
799            Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
800        } else {
801          if (PolicyAttrs & RVV_VTA)
802            Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
803        }
804        auto ElemTy = cast<llvm::VectorType>(ResultType)->getElementType();
805        Ops.insert(Ops.begin() + 2, llvm::Constant::getNullValue(ElemTy));
806
807        if (IsMasked) {
808          Ops.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
809          // maskedoff, op1, op2, mask, vl, policy
810          IntrinsicTypes = {ResultType, ElemTy, Ops[4]->getType()};
811        } else {
812          // passthru, op1, op2, vl
813          IntrinsicTypes = {ResultType, ElemTy, Ops[3]->getType()};
814        }
815        break;
816      }
817      }] in {
818        def : RVVBuiltin<"v", "vv", type_range>;
819  }
820}
821
822multiclass RVVPseudoVNotBuiltin<string IR, string type_range> {
823  let Name = NAME,
824      IRName = IR,
825      MaskedIRName = IR # "_mask",
826      UnMaskedPolicyScheme = HasPassthruOperand,
827      ManualCodegen = [{
828      {
829        if (IsMasked) {
830          std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
831          if ((PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA))
832            Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
833        } else {
834          if (PolicyAttrs & RVV_VTA)
835            Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
836        }
837        auto ElemTy = cast<llvm::VectorType>(ResultType)->getElementType();
838        Ops.insert(Ops.begin() + 2,
839                   llvm::Constant::getAllOnesValue(ElemTy));
840        if (IsMasked) {
841          Ops.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
842          // maskedoff, op1, po2, mask, vl, policy
843          IntrinsicTypes = {ResultType,
844                            ElemTy,
845                            Ops[4]->getType()};
846        } else {
847          // passthru, op1, op2, vl
848          IntrinsicTypes = {ResultType,
849                            ElemTy,
850                            Ops[3]->getType()};
851        }
852        break;
853      }
854      }] in {
855        def : RVVBuiltin<"v", "vv", type_range>;
856        def : RVVBuiltin<"Uv", "UvUv", type_range>;
857  }
858}
859
860multiclass RVVPseudoMaskBuiltin<string IR, string type_range> {
861  let Name = NAME,
862      IRName = IR,
863      HasMasked = false,
864      ManualCodegen = [{
865      {
866        // op1, vl
867        IntrinsicTypes = {ResultType,
868                          Ops[1]->getType()};
869        Ops.insert(Ops.begin() + 1, Ops[0]);
870        break;
871      }
872      }] in {
873        def : RVVBuiltin<"m", "mm", type_range>;
874  }
875}
876
877multiclass RVVPseudoVFUnaryBuiltin<string IR, string type_range> {
878  let Name = NAME,
879      IRName = IR,
880      MaskedIRName = IR # "_mask",
881      UnMaskedPolicyScheme = HasPassthruOperand,
882      ManualCodegen = [{
883      {
884        if (IsMasked) {
885          std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
886          if ((PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA))
887            Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
888          Ops.insert(Ops.begin() + 2, Ops[1]);
889          Ops.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
890          // maskedoff, op1, op2, mask, vl
891          IntrinsicTypes = {ResultType,
892                            Ops[2]->getType(),
893                            Ops.back()->getType()};
894        } else {
895          if (PolicyAttrs & RVV_VTA)
896            Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
897          // op1, po2, vl
898          IntrinsicTypes = {ResultType,
899                            Ops[1]->getType(), Ops[2]->getType()};
900          Ops.insert(Ops.begin() + 2, Ops[1]);
901          break;
902        }
903        break;
904      }
905      }] in {
906        def : RVVBuiltin<"v", "vv", type_range>;
907  }
908}
909
910multiclass RVVPseudoVWCVTBuiltin<string IR, string MName, string type_range,
911                                 list<list<string>> suffixes_prototypes> {
912  let Name = NAME,
913      OverloadedName = MName,
914      IRName = IR,
915      MaskedIRName = IR # "_mask",
916      UnMaskedPolicyScheme = HasPassthruOperand,
917      ManualCodegen = [{
918      {
919        if (IsMasked) {
920          std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
921          if ((PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA))
922            Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
923        } else {
924          if (PolicyAttrs & RVV_VTA)
925            Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
926        }
927        auto ElemTy = cast<llvm::VectorType>(ResultType)->getElementType();
928        Ops.insert(Ops.begin() + 2, llvm::Constant::getNullValue(ElemTy));
929        if (IsMasked) {
930          Ops.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
931          // maskedoff, op1, op2, mask, vl, policy
932          IntrinsicTypes = {ResultType,
933                            Ops[1]->getType(),
934                            ElemTy,
935                            Ops[4]->getType()};
936        } else {
937          // passtru, op1, op2, vl
938          IntrinsicTypes = {ResultType,
939                            Ops[1]->getType(),
940                            ElemTy,
941                            Ops[3]->getType()};
942        }
943        break;
944      }
945      }] in {
946        foreach s_p = suffixes_prototypes in {
947          def : RVVBuiltin<s_p[0], s_p[1], type_range>;
948        }
949  }
950}
951
952multiclass RVVPseudoVNCVTBuiltin<string IR, string MName, string type_range,
953                                 list<list<string>> suffixes_prototypes> {
954  let Name = NAME,
955      OverloadedName = MName,
956      IRName = IR,
957      MaskedIRName = IR # "_mask",
958      UnMaskedPolicyScheme = HasPassthruOperand,
959      ManualCodegen = [{
960      {
961        if (IsMasked) {
962          std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
963          if ((PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA))
964            Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
965        } else {
966          if (PolicyAttrs & RVV_VTA)
967            Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
968        }
969        Ops.insert(Ops.begin() + 2, llvm::Constant::getNullValue(Ops.back()->getType()));
970        if (IsMasked) {
971          Ops.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
972          // maskedoff, op1, xlen, mask, vl
973          IntrinsicTypes = {ResultType,
974                            Ops[1]->getType(),
975                            Ops[4]->getType(),
976                            Ops[4]->getType()};
977        } else {
978          // passthru, op1, xlen, vl
979          IntrinsicTypes = {ResultType,
980                  Ops[1]->getType(),
981                  Ops[3]->getType(),
982                  Ops[3]->getType()};
983        }
984        break;
985      }
986      }] in {
987        foreach s_p = suffixes_prototypes in {
988          def : RVVBuiltin<s_p[0], s_p[1], type_range>;
989        }
990  }
991}
992
993// Define vread_csr&vwrite_csr described in RVV intrinsics doc.
994let HeaderCode =
995[{
996enum RVV_CSR {
997  RVV_VSTART = 0,
998  RVV_VXSAT,
999  RVV_VXRM,
1000  RVV_VCSR,
1001};
1002
1003static __inline__ __attribute__((__always_inline__, __nodebug__))
1004unsigned long __riscv_vread_csr(enum RVV_CSR __csr) {
1005  unsigned long __rv = 0;
1006  switch (__csr) {
1007    case RVV_VSTART:
1008      __asm__ __volatile__ ("csrr\t%0, vstart" : "=r"(__rv) : : "memory");
1009      break;
1010    case RVV_VXSAT:
1011      __asm__ __volatile__ ("csrr\t%0, vxsat" : "=r"(__rv) : : "memory");
1012      break;
1013    case RVV_VXRM:
1014      __asm__ __volatile__ ("csrr\t%0, vxrm" : "=r"(__rv) : : "memory");
1015      break;
1016    case RVV_VCSR:
1017      __asm__ __volatile__ ("csrr\t%0, vcsr" : "=r"(__rv) : : "memory");
1018      break;
1019  }
1020  return __rv;
1021}
1022
1023static __inline__ __attribute__((__always_inline__, __nodebug__))
1024void __riscv_vwrite_csr(enum RVV_CSR __csr, unsigned long __value) {
1025  switch (__csr) {
1026    case RVV_VSTART:
1027      __asm__ __volatile__ ("csrw\tvstart, %z0" : : "rJ"(__value) : "memory");
1028      break;
1029    case RVV_VXSAT:
1030      __asm__ __volatile__ ("csrw\tvxsat, %z0" : : "rJ"(__value) : "memory");
1031      break;
1032    case RVV_VXRM:
1033      __asm__ __volatile__ ("csrw\tvxrm, %z0" : : "rJ"(__value) : "memory");
1034      break;
1035    case RVV_VCSR:
1036      __asm__ __volatile__ ("csrw\tvcsr, %z0" : : "rJ"(__value) : "memory");
1037      break;
1038  }
1039}
1040}] in
1041def vread_vwrite_csr: RVVHeader;
1042
1043let HeaderCode =
1044[{
1045#define __riscv_vlenb() __builtin_rvv_vlenb()
1046}] in
1047def vlenb_macro: RVVHeader;
1048
1049let HasBuiltinAlias = false, HasVL = false, HasMasked = false,
1050    UnMaskedPolicyScheme = NonePolicy, MaskedPolicyScheme = NonePolicy,
1051    Log2LMUL = [0], IRName = "",
1052    ManualCodegen = [{
1053    {
1054      LLVMContext &Context = CGM.getLLVMContext();
1055      llvm::MDBuilder MDHelper(Context);
1056
1057      llvm::Metadata *Ops[] = {llvm::MDString::get(Context, "vlenb")};
1058      llvm::MDNode *RegName = llvm::MDNode::get(Context, Ops);
1059      llvm::Value *Metadata = llvm::MetadataAsValue::get(Context, RegName);
1060      llvm::Function *F =
1061        CGM.getIntrinsic(llvm::Intrinsic::read_register, {SizeTy});
1062      return Builder.CreateCall(F, Metadata);
1063    }
1064    }] in
1065{
1066  def vlenb : RVVBuiltin<"", "u", "i">;
1067}
1068
1069// 6. Configuration-Setting Instructions
1070// 6.1. vsetvli/vsetvl instructions
1071
1072// vsetvl/vsetvlmax are a macro because they require constant integers in SEW
1073// and LMUL.
1074let HeaderCode =
1075[{
1076#define __riscv_vsetvl_e8mf4(avl) __builtin_rvv_vsetvli((size_t)(avl), 0, 6)
1077#define __riscv_vsetvl_e8mf2(avl) __builtin_rvv_vsetvli((size_t)(avl), 0, 7)
1078#define __riscv_vsetvl_e8m1(avl) __builtin_rvv_vsetvli((size_t)(avl), 0, 0)
1079#define __riscv_vsetvl_e8m2(avl) __builtin_rvv_vsetvli((size_t)(avl), 0, 1)
1080#define __riscv_vsetvl_e8m4(avl) __builtin_rvv_vsetvli((size_t)(avl), 0, 2)
1081#define __riscv_vsetvl_e8m8(avl) __builtin_rvv_vsetvli((size_t)(avl), 0, 3)
1082
1083#define __riscv_vsetvl_e16mf2(avl) __builtin_rvv_vsetvli((size_t)(avl), 1, 7)
1084#define __riscv_vsetvl_e16m1(avl) __builtin_rvv_vsetvli((size_t)(avl), 1, 0)
1085#define __riscv_vsetvl_e16m2(avl) __builtin_rvv_vsetvli((size_t)(avl), 1, 1)
1086#define __riscv_vsetvl_e16m4(avl) __builtin_rvv_vsetvli((size_t)(avl), 1, 2)
1087#define __riscv_vsetvl_e16m8(avl) __builtin_rvv_vsetvli((size_t)(avl), 1, 3)
1088
1089#define __riscv_vsetvl_e32m1(avl) __builtin_rvv_vsetvli((size_t)(avl), 2, 0)
1090#define __riscv_vsetvl_e32m2(avl) __builtin_rvv_vsetvli((size_t)(avl), 2, 1)
1091#define __riscv_vsetvl_e32m4(avl) __builtin_rvv_vsetvli((size_t)(avl), 2, 2)
1092#define __riscv_vsetvl_e32m8(avl) __builtin_rvv_vsetvli((size_t)(avl), 2, 3)
1093
1094#if __riscv_v_elen >= 64
1095#define __riscv_vsetvl_e8mf8(avl) __builtin_rvv_vsetvli((size_t)(avl), 0, 5)
1096#define __riscv_vsetvl_e16mf4(avl) __builtin_rvv_vsetvli((size_t)(avl), 1, 6)
1097#define __riscv_vsetvl_e32mf2(avl) __builtin_rvv_vsetvli((size_t)(avl), 2, 7)
1098
1099#define __riscv_vsetvl_e64m1(avl) __builtin_rvv_vsetvli((size_t)(avl), 3, 0)
1100#define __riscv_vsetvl_e64m2(avl) __builtin_rvv_vsetvli((size_t)(avl), 3, 1)
1101#define __riscv_vsetvl_e64m4(avl) __builtin_rvv_vsetvli((size_t)(avl), 3, 2)
1102#define __riscv_vsetvl_e64m8(avl) __builtin_rvv_vsetvli((size_t)(avl), 3, 3)
1103#endif
1104
1105#define __riscv_vsetvlmax_e8mf4() __builtin_rvv_vsetvlimax(0, 6)
1106#define __riscv_vsetvlmax_e8mf2() __builtin_rvv_vsetvlimax(0, 7)
1107#define __riscv_vsetvlmax_e8m1() __builtin_rvv_vsetvlimax(0, 0)
1108#define __riscv_vsetvlmax_e8m2() __builtin_rvv_vsetvlimax(0, 1)
1109#define __riscv_vsetvlmax_e8m4() __builtin_rvv_vsetvlimax(0, 2)
1110#define __riscv_vsetvlmax_e8m8() __builtin_rvv_vsetvlimax(0, 3)
1111
1112#define __riscv_vsetvlmax_e16mf2() __builtin_rvv_vsetvlimax(1, 7)
1113#define __riscv_vsetvlmax_e16m1() __builtin_rvv_vsetvlimax(1, 0)
1114#define __riscv_vsetvlmax_e16m2() __builtin_rvv_vsetvlimax(1, 1)
1115#define __riscv_vsetvlmax_e16m4() __builtin_rvv_vsetvlimax(1, 2)
1116#define __riscv_vsetvlmax_e16m8() __builtin_rvv_vsetvlimax(1, 3)
1117
1118#define __riscv_vsetvlmax_e32m1() __builtin_rvv_vsetvlimax(2, 0)
1119#define __riscv_vsetvlmax_e32m2() __builtin_rvv_vsetvlimax(2, 1)
1120#define __riscv_vsetvlmax_e32m4() __builtin_rvv_vsetvlimax(2, 2)
1121#define __riscv_vsetvlmax_e32m8() __builtin_rvv_vsetvlimax(2, 3)
1122
1123#if __riscv_v_elen >= 64
1124#define __riscv_vsetvlmax_e8mf8() __builtin_rvv_vsetvlimax(0, 5)
1125#define __riscv_vsetvlmax_e16mf4() __builtin_rvv_vsetvlimax(1, 6)
1126#define __riscv_vsetvlmax_e32mf2() __builtin_rvv_vsetvlimax(2, 7)
1127
1128#define __riscv_vsetvlmax_e64m1() __builtin_rvv_vsetvlimax(3, 0)
1129#define __riscv_vsetvlmax_e64m2() __builtin_rvv_vsetvlimax(3, 1)
1130#define __riscv_vsetvlmax_e64m4() __builtin_rvv_vsetvlimax(3, 2)
1131#define __riscv_vsetvlmax_e64m8() __builtin_rvv_vsetvlimax(3, 3)
1132#endif
1133
1134}] in
1135def vsetvl_macro: RVVHeader;
1136
1137let HasBuiltinAlias = false,
1138    HasVL = false,
1139    HasMasked = false,
1140    MaskedPolicyScheme = NonePolicy,
1141    Log2LMUL = [0],
1142    ManualCodegen = [{IntrinsicTypes = {ResultType};}] in // Set XLEN type
1143{
1144  def vsetvli : RVVBuiltin<"", "zzKzKz", "i">;
1145  def vsetvlimax : RVVBuiltin<"", "zKzKz", "i">;
1146}
1147
1148// 7. Vector Loads and Stores
1149// 7.4. Vector Unit-Stride Instructions
1150def vlm: RVVVLEMaskBuiltin;
1151defm vle8: RVVVLEBuiltin<["c"]>;
1152defm vle16: RVVVLEBuiltin<["s","x"]>;
1153defm vle32: RVVVLEBuiltin<["i","f"]>;
1154defm vle64: RVVVLEBuiltin<["l","d"]>;
1155
1156def vsm : RVVVSEMaskBuiltin;
1157defm vse8 : RVVVSEBuiltin<["c"]>;
1158defm vse16: RVVVSEBuiltin<["s","x"]>;
1159defm vse32: RVVVSEBuiltin<["i","f"]>;
1160defm vse64: RVVVSEBuiltin<["l","d"]>;
1161
1162// 7.5. Vector Strided Instructions
1163defm vlse8: RVVVLSEBuiltin<["c"]>;
1164defm vlse16: RVVVLSEBuiltin<["s","x"]>;
1165defm vlse32: RVVVLSEBuiltin<["i","f"]>;
1166defm vlse64: RVVVLSEBuiltin<["l","d"]>;
1167
1168defm vsse8 : RVVVSSEBuiltin<["c"]>;
1169defm vsse16: RVVVSSEBuiltin<["s","x"]>;
1170defm vsse32: RVVVSSEBuiltin<["i","f"]>;
1171defm vsse64: RVVVSSEBuiltin<["l","d"]>;
1172
1173// 7.6. Vector Indexed Instructions
1174defm : RVVIndexedLoad<"vluxei">;
1175defm : RVVIndexedLoad<"vloxei">;
1176
1177defm : RVVIndexedStore<"vsuxei">;
1178defm : RVVIndexedStore<"vsoxei">;
1179
1180// 7.7. Unit-stride Fault-Only-First Loads
1181defm vle8ff: RVVVLEFFBuiltin<["c"]>;
1182defm vle16ff: RVVVLEFFBuiltin<["s","x"]>;
1183defm vle32ff: RVVVLEFFBuiltin<["i", "f"]>;
1184defm vle64ff: RVVVLEFFBuiltin<["l", "d"]>;
1185
1186multiclass RVVUnitStridedSegLoadTuple<string op> {
1187  foreach type = TypeList in {
1188    defvar eew = !cond(!eq(type, "c") : "8",
1189                       !eq(type, "s") : "16",
1190                       !eq(type, "i") : "32",
1191                       !eq(type, "l") : "64",
1192                       !eq(type, "x") : "16",
1193                       !eq(type, "f") : "32",
1194                       !eq(type, "d") : "64");
1195      foreach nf = NFList in {
1196        let Name = op # nf # "e" # eew # "_v",
1197            IRName = op # nf,
1198            MaskedIRName = op # nf # "_mask",
1199            NF = nf,
1200            ManualCodegen = [{
1201    {
1202      llvm::Type *ElementVectorType = cast<StructType>(ResultType)->elements()[0];
1203      IntrinsicTypes = {ElementVectorType, Ops.back()->getType()};
1204      SmallVector<llvm::Value*, 12> Operands;
1205
1206      bool NoPassthru =
1207        (IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) |
1208        (!IsMasked && (PolicyAttrs & RVV_VTA));
1209      unsigned Offset = IsMasked ? NoPassthru ? 1 : 2 : NoPassthru ? 0 : 1;
1210
1211      if (NoPassthru) { // Push poison into passthru
1212        Operands.append(NF, llvm::PoisonValue::get(ElementVectorType));
1213      } else { // Push intrinsics operands into passthru
1214        llvm::Value *PassthruOperand = IsMasked ? Ops[1] : Ops[0];
1215        for (unsigned I = 0; I < NF; ++I)
1216          Operands.push_back(Builder.CreateExtractValue(PassthruOperand, {I}));
1217      }
1218
1219      Operands.push_back(Ops[Offset]); // Ptr
1220      if (IsMasked)
1221        Operands.push_back(Ops[0]);
1222      Operands.push_back(Ops[Offset + 1]); // VL
1223      if (IsMasked)
1224        Operands.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
1225
1226      llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
1227
1228      llvm::Value *LoadValue = Builder.CreateCall(F, Operands, "");
1229      if (ReturnValue.isNull())
1230        return LoadValue;
1231      else
1232        return Builder.CreateStore(LoadValue, ReturnValue.getValue());
1233    }
1234    }] in {
1235        defvar T = "(Tuple:" # nf # ")";
1236        def : RVVBuiltin<T # "v", T # "vPCe", type>;
1237        if !not(IsFloat<type>.val) then {
1238          def : RVVBuiltin<T # "Uv", T # "UvPCUe", type>;
1239        }
1240      }
1241    }
1242  }
1243}
1244
1245multiclass RVVUnitStridedSegStoreTuple<string op> {
1246  foreach type = TypeList in {
1247    defvar eew = !cond(!eq(type, "c") : "8",
1248                       !eq(type, "s") : "16",
1249                       !eq(type, "i") : "32",
1250                       !eq(type, "l") : "64",
1251                       !eq(type, "x") : "16",
1252                       !eq(type, "f") : "32",
1253                       !eq(type, "d") : "64");
1254      foreach nf = NFList in {
1255      let Name = op # nf # "e" # eew # "_v",
1256          IRName = op # nf,
1257          MaskedIRName = op # nf # "_mask",
1258          NF = nf,
1259          HasMaskedOffOperand = false,
1260          ManualCodegen = [{
1261    {
1262      // Masked
1263      // Builtin: (mask, ptr, v_tuple, vl)
1264      // Intrinsic: (val0, val1, ..., ptr, mask, vl)
1265      // Unmasked
1266      // Builtin: (ptr, v_tuple, vl)
1267      // Intrinsic: (val0, val1, ..., ptr, vl)
1268      unsigned Offset = IsMasked ? 1 : 0;
1269      llvm::Value *VTupleOperand = Ops[Offset + 1];
1270
1271      SmallVector<llvm::Value*, 12> Operands;
1272      for (unsigned I = 0; I < NF; ++I) {
1273        llvm::Value *V = Builder.CreateExtractValue(VTupleOperand, {I});
1274        Operands.push_back(V);
1275      }
1276      Operands.push_back(Ops[Offset]); // Ptr
1277      if (IsMasked)
1278        Operands.push_back(Ops[0]);
1279      Operands.push_back(Ops[Offset + 2]); // VL
1280
1281      IntrinsicTypes = {Operands[0]->getType(), Operands.back()->getType()};
1282      llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
1283      return Builder.CreateCall(F, Operands, "");
1284   }
1285      }] in {
1286        defvar T = "(Tuple:" # nf # ")";
1287        def : RVVBuiltin<T # "v", "0Pe" # T # "v", type>;
1288        if !not(IsFloat<type>.val) then {
1289          def : RVVBuiltin<T # "Uv", "0PUe" # T # "Uv", type>;
1290        }
1291      }
1292    }
1293  }
1294}
1295
1296multiclass RVVUnitStridedSegLoadFFTuple<string op> {
1297  foreach type = TypeList in {
1298    defvar eew = !cond(!eq(type, "c") : "8",
1299                       !eq(type, "s") : "16",
1300                       !eq(type, "i") : "32",
1301                       !eq(type, "l") : "64",
1302                       !eq(type, "x") : "16",
1303                       !eq(type, "f") : "32",
1304                       !eq(type, "d") : "64");
1305      foreach nf = NFList in {
1306        let Name = op # nf # "e" # eew # "ff_v",
1307            IRName = op # nf # "ff",
1308            MaskedIRName = op # nf # "ff_mask",
1309            NF = nf,
1310            ManualCodegen = [{
1311    {
1312      llvm::Type *ElementVectorType = cast<StructType>(ResultType)->elements()[0];
1313      IntrinsicTypes = {ElementVectorType, Ops.back()->getType()};
1314      SmallVector<llvm::Value*, 12> Operands;
1315
1316      bool NoPassthru =
1317        (IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) |
1318        (!IsMasked && (PolicyAttrs & RVV_VTA));
1319      unsigned Offset = IsMasked ? NoPassthru ? 1 : 2 : NoPassthru ? 0 : 1;
1320
1321      if (NoPassthru) { // Push poison into passthru
1322        Operands.append(NF, llvm::PoisonValue::get(ElementVectorType));
1323      } else { // Push intrinsics operands into passthru
1324        llvm::Value *PassthruOperand = IsMasked ? Ops[1] : Ops[0];
1325        for (unsigned I = 0; I < NF; ++I)
1326          Operands.push_back(Builder.CreateExtractValue(PassthruOperand, {I}));
1327      }
1328
1329      Operands.push_back(Ops[Offset]); // Ptr
1330      if (IsMasked)
1331        Operands.push_back(Ops[0]);
1332      Operands.push_back(Ops[Offset + 2]); // vl
1333      if (IsMasked)
1334        Operands.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
1335
1336      llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
1337
1338      llvm::Value *LoadValue = Builder.CreateCall(F, Operands, "");
1339      // Get alignment from the new vl operand
1340      clang::CharUnits Align =
1341          CGM.getNaturalPointeeTypeAlignment(E->getArg(Offset + 1)->getType());
1342
1343      llvm::Value *ReturnTuple = llvm::PoisonValue::get(ResultType);
1344      for (unsigned I = 0; I < NF; ++I) {
1345        llvm::Value *V = Builder.CreateExtractValue(LoadValue, {I});
1346        ReturnTuple = Builder.CreateInsertValue(ReturnTuple, V, {I});
1347      }
1348
1349      // Store new_vl
1350      llvm::Value *V = Builder.CreateExtractValue(LoadValue, {NF});
1351      Builder.CreateStore(V, Address(Ops[Offset + 1], V->getType(), Align));
1352
1353      if (ReturnValue.isNull())
1354        return ReturnTuple;
1355      else
1356        return Builder.CreateStore(ReturnTuple, ReturnValue.getValue());
1357    }
1358    }] in {
1359        defvar T = "(Tuple:" # nf # ")";
1360        def : RVVBuiltin<T # "v", T # "vPCePz", type>;
1361        if !not(IsFloat<type>.val) then {
1362          def : RVVBuiltin<T # "Uv", T # "UvPCUePz", type>;
1363        }
1364      }
1365    }
1366  }
1367}
1368
1369multiclass RVVStridedSegLoadTuple<string op> {
1370  foreach type = TypeList in {
1371    defvar eew = !cond(!eq(type, "c") : "8",
1372                       !eq(type, "s") : "16",
1373                       !eq(type, "i") : "32",
1374                       !eq(type, "l") : "64",
1375                       !eq(type, "x") : "16",
1376                       !eq(type, "f") : "32",
1377                       !eq(type, "d") : "64");
1378      foreach nf = NFList in {
1379        let Name = op # nf # "e" # eew # "_v",
1380            IRName = op # nf,
1381            MaskedIRName = op # nf # "_mask",
1382            NF = nf,
1383            ManualCodegen = [{
1384    {
1385      llvm::Type *ElementVectorType = cast<StructType>(ResultType)->elements()[0];
1386      IntrinsicTypes = {ElementVectorType, Ops.back()->getType()};
1387      SmallVector<llvm::Value*, 12> Operands;
1388
1389      bool NoPassthru =
1390        (IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) |
1391        (!IsMasked && (PolicyAttrs & RVV_VTA));
1392      unsigned Offset = IsMasked ? NoPassthru ? 1 : 2 : NoPassthru ? 0 : 1;
1393
1394      if (NoPassthru) { // Push poison into passthru
1395        Operands.append(NF, llvm::PoisonValue::get(ElementVectorType));
1396      } else { // Push intrinsics operands into passthru
1397        llvm::Value *PassthruOperand = IsMasked ? Ops[1] : Ops[0];
1398        for (unsigned I = 0; I < NF; ++I)
1399          Operands.push_back(Builder.CreateExtractValue(PassthruOperand, {I}));
1400      }
1401
1402      Operands.push_back(Ops[Offset]); // Ptr
1403      Operands.push_back(Ops[Offset + 1]); // Stride
1404      if (IsMasked)
1405        Operands.push_back(Ops[0]);
1406      Operands.push_back(Ops[Offset + 2]); // VL
1407      if (IsMasked)
1408        Operands.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
1409
1410      llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
1411      llvm::Value *LoadValue = Builder.CreateCall(F, Operands, "");
1412
1413      if (ReturnValue.isNull())
1414        return LoadValue;
1415      else
1416        return Builder.CreateStore(LoadValue, ReturnValue.getValue());
1417    }
1418    }] in {
1419        defvar T = "(Tuple:" # nf # ")";
1420        def : RVVBuiltin<T # "v", T # "vPCet", type>;
1421        if !not(IsFloat<type>.val) then {
1422          def : RVVBuiltin<T # "Uv", T # "UvPCUet", type>;
1423        }
1424      }
1425    }
1426  }
1427}
1428
1429multiclass RVVStridedSegStoreTuple<string op> {
1430  foreach type = TypeList in {
1431    defvar eew = !cond(!eq(type, "c") : "8",
1432                       !eq(type, "s") : "16",
1433                       !eq(type, "i") : "32",
1434                       !eq(type, "l") : "64",
1435                       !eq(type, "x") : "16",
1436                       !eq(type, "f") : "32",
1437                       !eq(type, "d") : "64");
1438      foreach nf = NFList in {
1439        let Name = op # nf # "e" # eew # "_v",
1440            IRName = op # nf,
1441            MaskedIRName = op # nf # "_mask",
1442            NF = nf,
1443            HasMaskedOffOperand = false,
1444            MaskedPolicyScheme = NonePolicy,
1445            ManualCodegen = [{
1446    {
1447      // Masked
1448      // Builtin: (mask, ptr, stride, v_tuple, vl)
1449      // Intrinsic: (val0, val1, ..., ptr, stride, mask, vl)
1450      // Unmasked
1451      // Builtin: (ptr, stride, v_tuple, vl)
1452      // Intrinsic: (val0, val1, ..., ptr, stride, vl)
1453      unsigned Offset = IsMasked ? 1 : 0;
1454      llvm::Value *VTupleOperand = Ops[Offset + 2];
1455
1456      SmallVector<llvm::Value*, 12> Operands;
1457      for (unsigned I = 0; I < NF; ++I) {
1458        llvm::Value *V = Builder.CreateExtractValue(VTupleOperand, {I});
1459        Operands.push_back(V);
1460      }
1461      Operands.push_back(Ops[Offset]); // Ptr
1462      Operands.push_back(Ops[Offset + 1]); // Stride
1463      if (IsMasked)
1464        Operands.push_back(Ops[0]);
1465      Operands.push_back(Ops[Offset + 3]); // VL
1466
1467      IntrinsicTypes = {Operands[0]->getType(), Operands.back()->getType()};
1468      llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
1469      return Builder.CreateCall(F, Operands, "");
1470    }
1471            }] in {
1472        defvar T = "(Tuple:" # nf # ")";
1473        def : RVVBuiltin<T # "v", "0Pet" # T # "v", type>;
1474        if !not(IsFloat<type>.val) then {
1475          def : RVVBuiltin<T # "Uv", "0PUet" # T # "Uv", type>;
1476        }
1477      }
1478    }
1479  }
1480}
1481
1482multiclass RVVIndexedSegLoadTuple<string op> {
1483  foreach type = TypeList in {
1484    foreach eew_info = EEWList in {
1485      defvar eew = eew_info[0];
1486      defvar eew_type = eew_info[1];
1487      foreach nf = NFList in {
1488        let Name = op # nf # "ei" # eew # "_v",
1489            IRName = op # nf,
1490            MaskedIRName = op # nf # "_mask",
1491            NF = nf,
1492            ManualCodegen = [{
1493    {
1494      llvm::Type *ElementVectorType = cast<StructType>(ResultType)->elements()[0];
1495      IntrinsicTypes = {ElementVectorType, Ops.back()->getType()};
1496      SmallVector<llvm::Value*, 12> Operands;
1497
1498      bool NoPassthru =
1499        (IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) |
1500        (!IsMasked && (PolicyAttrs & RVV_VTA));
1501      unsigned Offset = IsMasked ? NoPassthru ? 1 : 2 : NoPassthru ? 0 : 1;
1502
1503      if (NoPassthru) { // Push poison into passthru
1504        Operands.append(NF, llvm::PoisonValue::get(ElementVectorType));
1505      } else { // Push intrinsics operands into passthru
1506        llvm::Value *PassthruOperand = IsMasked ? Ops[1] : Ops[0];
1507        for (unsigned I = 0; I < NF; ++I)
1508          Operands.push_back(Builder.CreateExtractValue(PassthruOperand, {I}));
1509      }
1510
1511      Operands.push_back(Ops[Offset]); // Ptr
1512      Operands.push_back(Ops[Offset + 1]); // Idx
1513      if (IsMasked)
1514        Operands.push_back(Ops[0]);
1515      Operands.push_back(Ops[Offset + 2]); // VL
1516      if (IsMasked)
1517        Operands.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
1518
1519      IntrinsicTypes = {ElementVectorType, Ops[Offset + 1]->getType(),
1520                        Ops.back()->getType()};
1521      llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
1522      llvm::Value *LoadValue = Builder.CreateCall(F, Operands, "");
1523
1524      if (ReturnValue.isNull())
1525        return LoadValue;
1526      else
1527        return Builder.CreateStore(LoadValue, ReturnValue.getValue());
1528    }
1529    }] in {
1530          defvar T = "(Tuple:" # nf # ")";
1531          def : RVVBuiltin<T # "v", T # "vPCe" # eew_type # "Uv", type>;
1532          if !not(IsFloat<type>.val) then {
1533            def : RVVBuiltin<T # "Uv", T # "UvPCUe" # eew_type # "Uv", type>;
1534          }
1535        }
1536      }
1537    }
1538  }
1539}
1540
1541multiclass RVVIndexedSegStoreTuple<string op> {
1542  foreach type = TypeList in {
1543    foreach eew_info = EEWList in {
1544      defvar eew = eew_info[0];
1545      defvar eew_type = eew_info[1];
1546      foreach nf = NFList in {
1547        let Name = op # nf # "ei" # eew # "_v",
1548            IRName = op # nf,
1549            MaskedIRName = op # nf # "_mask",
1550            NF = nf,
1551            HasMaskedOffOperand = false,
1552            MaskedPolicyScheme = NonePolicy,
1553            ManualCodegen = [{
1554    {
1555      // Masked
1556      // Builtin: (mask, ptr, index, v_tuple, vl)
1557      // Intrinsic: (val0, val1, ..., ptr, index, mask, vl)
1558      // Unmasked
1559      // Builtin: (ptr, index, v_tuple, vl)
1560      // Intrinsic: (val0, val1, ..., ptr, index, vl)
1561      unsigned Offset = IsMasked ? 1 : 0;
1562      llvm::Value *VTupleOperand = Ops[Offset + 2];
1563
1564      SmallVector<llvm::Value*, 12> Operands;
1565      for (unsigned I = 0; I < NF; ++I) {
1566        llvm::Value *V = Builder.CreateExtractValue(VTupleOperand, {I});
1567        Operands.push_back(V);
1568      }
1569      Operands.push_back(Ops[Offset]); // Ptr
1570      Operands.push_back(Ops[Offset + 1]); // Idx
1571      if (IsMasked)
1572        Operands.push_back(Ops[0]);
1573      Operands.push_back(Ops[Offset + 3]); // VL
1574
1575      IntrinsicTypes = {Operands[0]->getType(), Ops[Offset + 1]->getType(),
1576                        Operands.back()->getType()};
1577      llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
1578      return Builder.CreateCall(F, Operands, "");
1579    }
1580            }] in {
1581          defvar T = "(Tuple:" # nf # ")";
1582          def : RVVBuiltin<T # "v", "0Pe" # eew_type # "Uv" # T # "v", type>;
1583          if !not(IsFloat<type>.val) then {
1584            def : RVVBuiltin<T # "Uv", "0PUe" # eew_type # "Uv" # T # "Uv", type>;
1585          }
1586        }
1587      }
1588    }
1589  }
1590}
1591
1592// 7.8 Vector Load/Store Segment Instructions
1593let UnMaskedPolicyScheme = HasPassthruOperand,
1594    IsTuple = true in {
1595  defm : RVVUnitStridedSegLoadTuple<"vlseg">;
1596  defm : RVVUnitStridedSegLoadFFTuple<"vlseg">;
1597  defm : RVVStridedSegLoadTuple<"vlsseg">;
1598  defm : RVVIndexedSegLoadTuple<"vluxseg">;
1599  defm : RVVIndexedSegLoadTuple<"vloxseg">;
1600}
1601
1602let UnMaskedPolicyScheme = NonePolicy,
1603    MaskedPolicyScheme = NonePolicy,
1604    IsTuple = true in {
1605defm : RVVUnitStridedSegStoreTuple<"vsseg">;
1606defm : RVVStridedSegStoreTuple<"vssseg">;
1607defm : RVVIndexedSegStoreTuple<"vsuxseg">;
1608defm : RVVIndexedSegStoreTuple<"vsoxseg">;
1609}
1610
1611// 12. Vector Integer Arithmetic Instructions
1612// 12.1. Vector Single-Width Integer Add and Subtract
1613let UnMaskedPolicyScheme = HasPassthruOperand in {
1614defm vadd : RVVIntBinBuiltinSet;
1615defm vsub : RVVIntBinBuiltinSet;
1616defm vrsub : RVVOutOp1BuiltinSet<"vrsub", "csil",
1617                                 [["vx", "v", "vve"],
1618                                  ["vx", "Uv", "UvUvUe"]]>;
1619}
1620defm vneg_v : RVVPseudoUnaryBuiltin<"vrsub", "csil">;
1621
1622// 12.2. Vector Widening Integer Add/Subtract
1623// Widening unsigned integer add/subtract, 2*SEW = SEW +/- SEW
1624let UnMaskedPolicyScheme = HasPassthruOperand in {
1625defm vwaddu : RVVUnsignedWidenBinBuiltinSet;
1626defm vwsubu : RVVUnsignedWidenBinBuiltinSet;
1627// Widening signed integer add/subtract, 2*SEW = SEW +/- SEW
1628defm vwadd : RVVSignedWidenBinBuiltinSet;
1629defm vwsub : RVVSignedWidenBinBuiltinSet;
1630// Widening unsigned integer add/subtract, 2*SEW = 2*SEW +/- SEW
1631defm vwaddu : RVVUnsignedWidenOp0BinBuiltinSet;
1632defm vwsubu : RVVUnsignedWidenOp0BinBuiltinSet;
1633// Widening signed integer add/subtract, 2*SEW = 2*SEW +/- SEW
1634defm vwadd : RVVSignedWidenOp0BinBuiltinSet;
1635defm vwsub : RVVSignedWidenOp0BinBuiltinSet;
1636}
1637defm vwcvtu_x_x_v : RVVPseudoVWCVTBuiltin<"vwaddu", "vwcvtu_x", "csi",
1638                                          [["Uw", "UwUv"]]>;
1639defm vwcvt_x_x_v : RVVPseudoVWCVTBuiltin<"vwadd", "vwcvt_x", "csi",
1640                                         [["w", "wv"]]>;
1641
1642// 12.3. Vector Integer Extension
1643let UnMaskedPolicyScheme = HasPassthruOperand in {
1644let Log2LMUL = [-3, -2, -1, 0, 1, 2] in {
1645  def vsext_vf2 : RVVIntExt<"vsext", "w", "wv", "csi">;
1646  def vzext_vf2 : RVVIntExt<"vzext", "Uw", "UwUv", "csi">;
1647}
1648let Log2LMUL = [-3, -2, -1, 0, 1] in {
1649  def vsext_vf4 : RVVIntExt<"vsext", "q", "qv", "cs">;
1650  def vzext_vf4 : RVVIntExt<"vzext", "Uq", "UqUv", "cs">;
1651}
1652let Log2LMUL = [-3, -2, -1, 0] in {
1653  def vsext_vf8 : RVVIntExt<"vsext", "o", "ov", "c">;
1654  def vzext_vf8 : RVVIntExt<"vzext", "Uo", "UoUv", "c">;
1655}
1656}
1657
1658// 12.4. Vector Integer Add-with-Carry / Subtract-with-Borrow Instructions
1659let HasMasked = false, MaskedPolicyScheme = NonePolicy in {
1660  let UnMaskedPolicyScheme = HasPassthruOperand in {
1661    defm vadc : RVVCarryinBuiltinSet;
1662    defm vsbc : RVVCarryinBuiltinSet;
1663  }
1664  defm vmadc : RVVCarryOutInBuiltinSet<"vmadc_carry_in">;
1665  defm vmadc : RVVIntMaskOutBuiltinSet;
1666  defm vmsbc : RVVCarryOutInBuiltinSet<"vmsbc_borrow_in">;
1667  defm vmsbc : RVVIntMaskOutBuiltinSet;
1668}
1669
1670// 12.5. Vector Bitwise Logical Instructions
1671let UnMaskedPolicyScheme = HasPassthruOperand in {
1672defm vand : RVVIntBinBuiltinSet;
1673defm vxor : RVVIntBinBuiltinSet;
1674defm vor : RVVIntBinBuiltinSet;
1675}
1676defm vnot_v : RVVPseudoVNotBuiltin<"vxor", "csil">;
1677
1678// 12.6. Vector Single-Width Bit Shift Instructions
1679let UnMaskedPolicyScheme = HasPassthruOperand in {
1680defm vsll : RVVShiftBuiltinSet;
1681defm vsrl : RVVUnsignedShiftBuiltinSet;
1682defm vsra : RVVSignedShiftBuiltinSet;
1683
1684// 12.7. Vector Narrowing Integer Right Shift Instructions
1685defm vnsrl : RVVUnsignedNShiftBuiltinSet;
1686defm vnsra : RVVSignedNShiftBuiltinSet;
1687}
1688defm vncvt_x_x_w : RVVPseudoVNCVTBuiltin<"vnsrl", "vncvt_x", "csi",
1689                                         [["v", "vw"],
1690                                          ["Uv", "UvUw"]]>;
1691
1692// 12.8. Vector Integer Comparison Instructions
1693let MaskedPolicyScheme = HasPassthruOperand,
1694    HasTailPolicy = false in {
1695defm vmseq : RVVIntMaskOutBuiltinSet;
1696defm vmsne : RVVIntMaskOutBuiltinSet;
1697defm vmsltu : RVVUnsignedMaskOutBuiltinSet;
1698defm vmslt : RVVSignedMaskOutBuiltinSet;
1699defm vmsleu : RVVUnsignedMaskOutBuiltinSet;
1700defm vmsle : RVVSignedMaskOutBuiltinSet;
1701defm vmsgtu : RVVUnsignedMaskOutBuiltinSet;
1702defm vmsgt : RVVSignedMaskOutBuiltinSet;
1703defm vmsgeu : RVVUnsignedMaskOutBuiltinSet;
1704defm vmsge : RVVSignedMaskOutBuiltinSet;
1705}
1706
1707// 12.9. Vector Integer Min/Max Instructions
1708let UnMaskedPolicyScheme = HasPassthruOperand in {
1709defm vminu : RVVUnsignedBinBuiltinSet;
1710defm vmin : RVVSignedBinBuiltinSet;
1711defm vmaxu : RVVUnsignedBinBuiltinSet;
1712defm vmax : RVVSignedBinBuiltinSet;
1713
1714// 12.10. Vector Single-Width Integer Multiply Instructions
1715defm vmul : RVVIntBinBuiltinSet;
1716defm vmulh : RVVSignedBinBuiltinSet;
1717defm vmulhu : RVVUnsignedBinBuiltinSet;
1718defm vmulhsu : RVVOutOp1BuiltinSet<"vmulhsu", "csil",
1719                                   [["vv", "v", "vvUv"],
1720                                    ["vx", "v", "vvUe"]]>;
1721
1722// 12.11. Vector Integer Divide Instructions
1723defm vdivu : RVVUnsignedBinBuiltinSet;
1724defm vdiv : RVVSignedBinBuiltinSet;
1725defm vremu : RVVUnsignedBinBuiltinSet;
1726defm vrem : RVVSignedBinBuiltinSet;
1727}
1728
1729// 12.12. Vector Widening Integer Multiply Instructions
1730let Log2LMUL = [-3, -2, -1, 0, 1, 2], UnMaskedPolicyScheme = HasPassthruOperand in {
1731defm vwmul : RVVOutOp0Op1BuiltinSet<"vwmul", "csi",
1732                                    [["vv", "w", "wvv"],
1733                                     ["vx", "w", "wve"]]>;
1734defm vwmulu : RVVOutOp0Op1BuiltinSet<"vwmulu", "csi",
1735                                     [["vv", "Uw", "UwUvUv"],
1736                                      ["vx", "Uw", "UwUvUe"]]>;
1737defm vwmulsu : RVVOutOp0Op1BuiltinSet<"vwmulsu", "csi",
1738                                      [["vv", "w", "wvUv"],
1739                                       ["vx", "w", "wvUe"]]>;
1740}
1741
1742// 12.13. Vector Single-Width Integer Multiply-Add Instructions
1743let UnMaskedPolicyScheme = HasPolicyOperand in {
1744defm vmacc  : RVVIntTerBuiltinSet;
1745defm vnmsac : RVVIntTerBuiltinSet;
1746defm vmadd  : RVVIntTerBuiltinSet;
1747defm vnmsub : RVVIntTerBuiltinSet;
1748
1749// 12.14. Vector Widening Integer Multiply-Add Instructions
1750let HasMaskedOffOperand = false,
1751    Log2LMUL = [-3, -2, -1, 0, 1, 2] in {
1752defm vwmaccu : RVVOutOp1Op2BuiltinSet<"vwmaccu", "csi",
1753                                      [["vv", "Uw", "UwUwUvUv"],
1754                                       ["vx", "Uw", "UwUwUeUv"]]>;
1755defm vwmacc : RVVOutOp1Op2BuiltinSet<"vwmacc", "csi",
1756                                     [["vv", "w", "wwvv"],
1757                                      ["vx", "w", "wwev"]]>;
1758defm vwmaccsu : RVVOutOp1Op2BuiltinSet<"vwmaccsu", "csi",
1759                                       [["vv", "w", "wwvUv"],
1760                                        ["vx", "w", "wweUv"]]>;
1761defm vwmaccus : RVVOutOp1Op2BuiltinSet<"vwmaccus", "csi",
1762                                       [["vx", "w", "wwUev"]]>;
1763}
1764}
1765
1766// 12.15. Vector Integer Merge Instructions
1767// C/C++ Operand: (mask, op1, op2, vl), Intrinsic: (passthru, op1, op2, mask, vl)
1768let HasMasked = false,
1769    UnMaskedPolicyScheme = HasPassthruOperand,
1770    MaskedPolicyScheme = NonePolicy,
1771    ManualCodegen = [{
1772      // insert poison passthru
1773      if (PolicyAttrs & RVV_VTA)
1774        Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
1775      IntrinsicTypes = {ResultType, Ops[2]->getType(), Ops.back()->getType()};
1776    }] in {
1777  defm vmerge : RVVOutOp1BuiltinSet<"vmerge", "csil",
1778                                    [["vvm", "v", "vvvm"],
1779                                     ["vxm", "v", "vvem"],
1780                                     ["vvm", "Uv", "UvUvUvm"],
1781                                     ["vxm", "Uv", "UvUvUem"]]>;
1782}
1783
1784// 12.16. Vector Integer Move Instructions
1785let HasMasked = false,
1786    UnMaskedPolicyScheme = HasPassthruOperand,
1787    MaskedPolicyScheme = NonePolicy,
1788    OverloadedName = "vmv_v" in {
1789    defm vmv_v : RVVOutBuiltinSet<"vmv_v_v", "csil",
1790                                   [["v", "Uv", "UvUv"]]>;
1791    defm vmv_v : RVVOutBuiltinSet<"vmv_v_v", "csilxfd",
1792                                   [["v", "v", "vv"]]>;
1793  let SupportOverloading = false in
1794    defm vmv_v : RVVOutBuiltinSet<"vmv_v_x", "csil",
1795                                   [["x", "v", "ve"],
1796                                    ["x", "Uv", "UvUe"]]>;
1797}
1798
1799// 13. Vector Fixed-Point Arithmetic Instructions
1800let HeaderCode =
1801[{
1802enum __RISCV_VXRM {
1803  __RISCV_VXRM_RNU = 0,
1804  __RISCV_VXRM_RNE = 1,
1805  __RISCV_VXRM_RDN = 2,
1806  __RISCV_VXRM_ROD = 3,
1807};
1808}] in
1809def vxrm_enum : RVVHeader;
1810
1811// 13.1. Vector Single-Width Saturating Add and Subtract
1812let UnMaskedPolicyScheme = HasPassthruOperand in {
1813defm vsaddu : RVVUnsignedBinBuiltinSet;
1814defm vsadd : RVVSignedBinBuiltinSet;
1815defm vssubu : RVVUnsignedBinBuiltinSet;
1816defm vssub : RVVSignedBinBuiltinSet;
1817
1818let ManualCodegen = [{
1819  {
1820    // LLVM intrinsic
1821    // Unmasked: (passthru, op0, op1, round_mode, vl)
1822    // Masked:   (passthru, vector_in, vector_in/scalar_in, mask, vxrm, vl, policy)
1823
1824    SmallVector<llvm::Value*, 7> Operands;
1825    bool HasMaskedOff = !(
1826        (IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) ||
1827        (!IsMasked && PolicyAttrs & RVV_VTA));
1828    unsigned Offset = IsMasked ?
1829        (HasMaskedOff ? 2 : 1) : (HasMaskedOff ? 1 : 0);
1830
1831    if (!HasMaskedOff)
1832      Operands.push_back(llvm::PoisonValue::get(ResultType));
1833    else
1834      Operands.push_back(Ops[IsMasked ? 1 : 0]);
1835
1836    Operands.push_back(Ops[Offset]); // op0
1837    Operands.push_back(Ops[Offset + 1]); // op1
1838
1839    if (IsMasked)
1840      Operands.push_back(Ops[0]); // mask
1841
1842    Operands.push_back(Ops[Offset + 2]); // vxrm
1843    Operands.push_back(Ops[Offset + 3]); // vl
1844
1845    if (IsMasked)
1846      Operands.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
1847
1848    IntrinsicTypes = {ResultType, Ops[Offset + 1]->getType(), Ops.back()->getType()};
1849    llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
1850    return Builder.CreateCall(F, Operands, "");
1851  }
1852}] in {
1853  // 13.2. Vector Single-Width Averaging Add and Subtract
1854  defm vaaddu : RVVUnsignedBinBuiltinSetRoundingMode;
1855  defm vaadd : RVVSignedBinBuiltinSetRoundingMode;
1856  defm vasubu : RVVUnsignedBinBuiltinSetRoundingMode;
1857  defm vasub : RVVSignedBinBuiltinSetRoundingMode;
1858
1859  // 13.3. Vector Single-Width Fractional Multiply with Rounding and Saturation
1860  defm vsmul : RVVSignedBinBuiltinSetRoundingMode;
1861
1862  // 13.4. Vector Single-Width Scaling Shift Instructions
1863  defm vssrl : RVVUnsignedShiftBuiltinSetRoundingMode;
1864  defm vssra : RVVSignedShiftBuiltinSetRoundingMode;
1865}
1866
1867let ManualCodegen = [{
1868  {
1869    // LLVM intrinsic
1870    // Unmasked: (passthru, op0, op1, round_mode, vl)
1871    // Masked:   (passthru, vector_in, vector_in/scalar_in, mask, vxrm, vl, policy)
1872
1873    SmallVector<llvm::Value*, 7> Operands;
1874    bool HasMaskedOff = !(
1875        (IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) ||
1876        (!IsMasked && PolicyAttrs & RVV_VTA));
1877    unsigned Offset = IsMasked ?
1878        (HasMaskedOff ? 2 : 1) : (HasMaskedOff ? 1 : 0);
1879
1880    if (!HasMaskedOff)
1881      Operands.push_back(llvm::PoisonValue::get(ResultType));
1882    else
1883      Operands.push_back(Ops[IsMasked ? 1 : 0]);
1884
1885    Operands.push_back(Ops[Offset]); // op0
1886    Operands.push_back(Ops[Offset + 1]); // op1
1887
1888    if (IsMasked)
1889      Operands.push_back(Ops[0]); // mask
1890
1891    Operands.push_back(Ops[Offset + 2]); // vxrm
1892    Operands.push_back(Ops[Offset + 3]); // vl
1893
1894    if (IsMasked)
1895      Operands.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
1896
1897    IntrinsicTypes = {ResultType, Ops[Offset]->getType(), Ops[Offset + 1]->getType(),
1898                      Ops.back()->getType()};
1899    llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
1900    return Builder.CreateCall(F, Operands, "");
1901  }
1902}] in {
1903  // 13.5. Vector Narrowing Fixed-Point Clip Instructions
1904  defm vnclipu : RVVUnsignedNShiftBuiltinSetRoundingMode;
1905  defm vnclip : RVVSignedNShiftBuiltinSetRoundingMode;
1906}
1907}
1908
1909// 14. Vector Floating-Point Instructions
1910let HeaderCode =
1911[{
1912enum __RISCV_FRM {
1913  __RISCV_FRM_RNE = 0,
1914  __RISCV_FRM_RTZ = 1,
1915  __RISCV_FRM_RDN = 2,
1916  __RISCV_FRM_RUP = 3,
1917  __RISCV_FRM_RMM = 4,
1918};
1919}] in def frm_enum : RVVHeader;
1920
1921let UnMaskedPolicyScheme = HasPassthruOperand in {
1922let ManualCodegen = [{
1923  {
1924    // LLVM intrinsic
1925    // Unmasked: (passthru, op0, op1, round_mode, vl)
1926    // Masked:   (passthru, vector_in, vector_in/scalar_in, mask, frm, vl, policy)
1927
1928    SmallVector<llvm::Value*, 7> Operands;
1929    bool HasMaskedOff = !(
1930        (IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) ||
1931        (!IsMasked && PolicyAttrs & RVV_VTA));
1932    bool HasRoundModeOp = IsMasked ?
1933      (HasMaskedOff ? Ops.size() == 6 : Ops.size() == 5) :
1934      (HasMaskedOff ? Ops.size() == 5 : Ops.size() == 4);
1935
1936    unsigned Offset = IsMasked ?
1937        (HasMaskedOff ? 2 : 1) : (HasMaskedOff ? 1 : 0);
1938
1939    if (!HasMaskedOff)
1940      Operands.push_back(llvm::PoisonValue::get(ResultType));
1941    else
1942      Operands.push_back(Ops[IsMasked ? 1 : 0]);
1943
1944    Operands.push_back(Ops[Offset]); // op0
1945    Operands.push_back(Ops[Offset + 1]); // op1
1946
1947    if (IsMasked)
1948      Operands.push_back(Ops[0]); // mask
1949
1950    if (HasRoundModeOp) {
1951      Operands.push_back(Ops[Offset + 2]); // frm
1952      Operands.push_back(Ops[Offset + 3]); // vl
1953    } else {
1954      Operands.push_back(ConstantInt::get(Ops[Offset + 2]->getType(), 7)); // frm
1955      Operands.push_back(Ops[Offset + 2]); // vl
1956    }
1957
1958    if (IsMasked)
1959      Operands.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
1960
1961    IntrinsicTypes = {ResultType, Ops[Offset + 1]->getType(),
1962                      Operands.back()->getType()};
1963    llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
1964    return Builder.CreateCall(F, Operands, "");
1965  }
1966}] in {
1967  let HasFRMRoundModeOp = true in {
1968    // 14.2. Vector Single-Width Floating-Point Add/Subtract Instructions
1969    defm vfadd  : RVVFloatingBinBuiltinSetRoundingMode;
1970    defm vfsub  : RVVFloatingBinBuiltinSetRoundingMode;
1971    defm vfrsub : RVVFloatingBinVFBuiltinSetRoundingMode;
1972
1973    // 14.3. Vector Widening Floating-Point Add/Subtract Instructions
1974    // Widening FP add/subtract, 2*SEW = 2*SEW +/- SEW
1975    defm vfwadd : RVVFloatingWidenOp0BinBuiltinSetRoundingMode;
1976    defm vfwsub : RVVFloatingWidenOp0BinBuiltinSetRoundingMode;
1977
1978    // 14.4. Vector Single-Width Floating-Point Multiply/Divide Instructions
1979    defm vfmul  : RVVFloatingBinBuiltinSetRoundingMode;
1980    defm vfdiv  : RVVFloatingBinBuiltinSetRoundingMode;
1981    defm vfrdiv : RVVFloatingBinVFBuiltinSetRoundingMode;
1982  }
1983  // 14.2. Vector Single-Width Floating-Point Add/Subtract Instructions
1984  defm vfadd  : RVVFloatingBinBuiltinSet;
1985  defm vfsub  : RVVFloatingBinBuiltinSet;
1986  defm vfrsub : RVVFloatingBinVFBuiltinSet;
1987
1988  // 14.3. Vector Widening Floating-Point Add/Subtract Instructions
1989  // Widening FP add/subtract, 2*SEW = 2*SEW +/- SEW
1990  defm vfwadd : RVVFloatingWidenOp0BinBuiltinSet;
1991  defm vfwsub : RVVFloatingWidenOp0BinBuiltinSet;
1992
1993  // 14.4. Vector Single-Width Floating-Point Multiply/Divide Instructions
1994  defm vfmul  : RVVFloatingBinBuiltinSet;
1995  defm vfdiv  : RVVFloatingBinBuiltinSet;
1996  defm vfrdiv : RVVFloatingBinVFBuiltinSet;
1997}
1998
1999let ManualCodegen = [{
2000  {
2001    // LLVM intrinsic
2002    // Unmasked: (passthru, op0, op1, round_mode, vl)
2003    // Masked:   (passthru, vector_in, vector_in/scalar_in, mask, frm, vl, policy)
2004
2005    SmallVector<llvm::Value*, 7> Operands;
2006    bool HasMaskedOff = !(
2007        (IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) ||
2008        (!IsMasked && PolicyAttrs & RVV_VTA));
2009    bool HasRoundModeOp = IsMasked ?
2010      (HasMaskedOff ? Ops.size() == 6 : Ops.size() == 5) :
2011      (HasMaskedOff ? Ops.size() == 5 : Ops.size() == 4);
2012
2013    unsigned Offset = IsMasked ?
2014        (HasMaskedOff ? 2 : 1) : (HasMaskedOff ? 1 : 0);
2015
2016    if (!HasMaskedOff)
2017      Operands.push_back(llvm::PoisonValue::get(ResultType));
2018    else
2019      Operands.push_back(Ops[IsMasked ? 1 : 0]);
2020
2021    Operands.push_back(Ops[Offset]); // op0
2022    Operands.push_back(Ops[Offset + 1]); // op1
2023
2024    if (IsMasked)
2025      Operands.push_back(Ops[0]); // mask
2026
2027    if (HasRoundModeOp) {
2028      Operands.push_back(Ops[Offset + 2]); // frm
2029      Operands.push_back(Ops[Offset + 3]); // vl
2030    } else {
2031      Operands.push_back(ConstantInt::get(Ops[Offset + 2]->getType(), 7)); // frm
2032      Operands.push_back(Ops[Offset + 2]); // vl
2033    }
2034
2035    if (IsMasked)
2036      Operands.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
2037
2038    IntrinsicTypes = {ResultType, Ops[Offset]->getType(), Ops[Offset + 1]->getType(),
2039                      Ops.back()->getType()};
2040    llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
2041    return Builder.CreateCall(F, Operands, "");
2042  }
2043}] in {
2044  let HasFRMRoundModeOp = true in {
2045    // 14.3. Vector Widening Floating-Point Add/Subtract Instructions
2046    // Widening FP add/subtract, 2*SEW = SEW +/- SEW
2047    defm vfwadd : RVVFloatingWidenBinBuiltinSetRoundingMode;
2048    defm vfwsub : RVVFloatingWidenBinBuiltinSetRoundingMode;
2049
2050    // 14.5. Vector Widening Floating-Point Multiply
2051    let Log2LMUL = [-2, -1, 0, 1, 2] in {
2052      defm vfwmul : RVVOutOp0Op1BuiltinSet<"vfwmul", "xf",
2053                                          [["vv", "w", "wvvu"],
2054                                            ["vf", "w", "wveu"]]>;
2055    }
2056  }
2057  // 14.3. Vector Widening Floating-Point Add/Subtract Instructions
2058  // Widening FP add/subtract, 2*SEW = SEW +/- SEW
2059  defm vfwadd : RVVFloatingWidenBinBuiltinSet;
2060  defm vfwsub : RVVFloatingWidenBinBuiltinSet;
2061
2062  // 14.5. Vector Widening Floating-Point Multiply
2063  let Log2LMUL = [-2, -1, 0, 1, 2] in {
2064    defm vfwmul : RVVOutOp0Op1BuiltinSet<"vfwmul", "xf",
2065                                        [["vv", "w", "wvv"],
2066                                          ["vf", "w", "wve"]]>;
2067  }
2068}
2069}
2070
2071
2072let UnMaskedPolicyScheme = HasPolicyOperand in {
2073let ManualCodegen = [{
2074  {
2075    // LLVM intrinsic
2076    // Unmasked: (passthru, op0, op1, round_mode, vl)
2077    // Masked:   (passthru, vector_in, vector_in/scalar_in, mask, frm, vl, policy)
2078
2079    SmallVector<llvm::Value*, 7> Operands;
2080    bool HasRoundModeOp = IsMasked ? Ops.size() == 6 : Ops.size() == 5;
2081
2082    unsigned Offset = IsMasked ? 2 : 1;
2083
2084    Operands.push_back(Ops[IsMasked ? 1 : 0]); // passthrough
2085
2086    Operands.push_back(Ops[Offset]); // op0
2087    Operands.push_back(Ops[Offset + 1]); // op1
2088
2089    if (IsMasked)
2090      Operands.push_back(Ops[0]); // mask
2091
2092    if (HasRoundModeOp) {
2093      Operands.push_back(Ops[Offset + 2]); // frm
2094      Operands.push_back(Ops[Offset + 3]); // vl
2095    } else {
2096      Operands.push_back(ConstantInt::get(Ops[Offset + 2]->getType(), 7)); // frm
2097      Operands.push_back(Ops[Offset + 2]); // vl
2098    }
2099
2100    Operands.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
2101
2102    IntrinsicTypes = {ResultType, Ops[Offset]->getType(),
2103                      Operands.back()->getType()};
2104
2105    llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
2106
2107    return Builder.CreateCall(F, Operands, "");
2108  }
2109}] in {
2110  let HasFRMRoundModeOp = 1 in {
2111    // 14.6. Vector Single-Width Floating-Point Fused Multiply-Add Instructions
2112    defm vfmacc  : RVVFloatingTerBuiltinSetRoundingMode;
2113    defm vfnmacc : RVVFloatingTerBuiltinSetRoundingMode;
2114    defm vfmsac  : RVVFloatingTerBuiltinSetRoundingMode;
2115    defm vfnmsac : RVVFloatingTerBuiltinSetRoundingMode;
2116    defm vfmadd  : RVVFloatingTerBuiltinSetRoundingMode;
2117    defm vfnmadd : RVVFloatingTerBuiltinSetRoundingMode;
2118    defm vfmsub  : RVVFloatingTerBuiltinSetRoundingMode;
2119    defm vfnmsub : RVVFloatingTerBuiltinSetRoundingMode;
2120  }
2121  // 14.6. Vector Single-Width Floating-Point Fused Multiply-Add Instructions
2122  defm vfmacc  : RVVFloatingTerBuiltinSet;
2123  defm vfnmacc : RVVFloatingTerBuiltinSet;
2124  defm vfmsac  : RVVFloatingTerBuiltinSet;
2125  defm vfnmsac : RVVFloatingTerBuiltinSet;
2126  defm vfmadd  : RVVFloatingTerBuiltinSet;
2127  defm vfnmadd : RVVFloatingTerBuiltinSet;
2128  defm vfmsub  : RVVFloatingTerBuiltinSet;
2129  defm vfnmsub : RVVFloatingTerBuiltinSet;
2130}
2131
2132let ManualCodegen = [{
2133  {
2134    // LLVM intrinsic
2135    // Unmasked: (passthru, op0, op1, round_mode, vl)
2136    // Masked:   (passthru, vector_in, vector_in/scalar_in, mask, frm, vl, policy)
2137
2138    SmallVector<llvm::Value*, 7> Operands;
2139    bool HasRoundModeOp = IsMasked ? Ops.size() == 6 : Ops.size() == 5;
2140
2141    unsigned Offset = IsMasked ? 2 : 1;
2142
2143    Operands.push_back(Ops[IsMasked ? 1 : 0]); // passthrough
2144
2145    Operands.push_back(Ops[Offset]); // op0
2146    Operands.push_back(Ops[Offset + 1]); // op1
2147
2148    if (IsMasked)
2149      Operands.push_back(Ops[0]); // mask
2150
2151    if (HasRoundModeOp) {
2152      Operands.push_back(Ops[Offset + 2]); // frm
2153      Operands.push_back(Ops[Offset + 3]); // vl
2154    } else {
2155      Operands.push_back(ConstantInt::get(Ops[Offset + 2]->getType(), 7)); // frm
2156      Operands.push_back(Ops[Offset + 2]); // vl
2157    }
2158
2159    Operands.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
2160
2161    IntrinsicTypes = {ResultType, Ops[Offset]->getType(), Ops[Offset + 1]->getType(),
2162                      Operands.back()->getType()};
2163
2164    llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
2165
2166    return Builder.CreateCall(F, Operands, "");
2167  }
2168}] in {
2169  let HasFRMRoundModeOp = 1 in {
2170    // 14.7. Vector Widening Floating-Point Fused Multiply-Add Instructions
2171    defm vfwmacc  : RVVFloatingWidenTerBuiltinSetRoundingMode;
2172    defm vfwnmacc : RVVFloatingWidenTerBuiltinSetRoundingMode;
2173    defm vfwmsac  : RVVFloatingWidenTerBuiltinSetRoundingMode;
2174    defm vfwnmsac : RVVFloatingWidenTerBuiltinSetRoundingMode;
2175  }
2176  // 14.7. Vector Widening Floating-Point Fused Multiply-Add Instructions
2177  defm vfwmacc  : RVVFloatingWidenTerBuiltinSet;
2178  defm vfwnmacc : RVVFloatingWidenTerBuiltinSet;
2179  defm vfwmsac  : RVVFloatingWidenTerBuiltinSet;
2180  defm vfwnmsac : RVVFloatingWidenTerBuiltinSet;
2181}
2182
2183}
2184
2185let UnMaskedPolicyScheme = HasPassthruOperand in {
2186let ManualCodegen = [{
2187  {
2188    // LLVM intrinsic
2189    // Unmasked: (passthru, op0, round_mode, vl)
2190    // Masked:   (passthru, op0, mask, frm, vl, policy)
2191
2192    SmallVector<llvm::Value*, 7> Operands;
2193    bool HasMaskedOff = !(
2194        (IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) ||
2195        (!IsMasked && PolicyAttrs & RVV_VTA));
2196    bool HasRoundModeOp = IsMasked ?
2197      (HasMaskedOff ? Ops.size() == 5 : Ops.size() == 4) :
2198      (HasMaskedOff ? Ops.size() == 4 : Ops.size() == 3);
2199
2200    unsigned Offset = IsMasked ?
2201        (HasMaskedOff ? 2 : 1) : (HasMaskedOff ? 1 : 0);
2202
2203    if (!HasMaskedOff)
2204      Operands.push_back(llvm::PoisonValue::get(ResultType));
2205    else
2206      Operands.push_back(Ops[IsMasked ? 1 : 0]);
2207
2208    Operands.push_back(Ops[Offset]); // op0
2209
2210    if (IsMasked)
2211      Operands.push_back(Ops[0]); // mask
2212
2213    if (HasRoundModeOp) {
2214      Operands.push_back(Ops[Offset + 1]); // frm
2215      Operands.push_back(Ops[Offset + 2]); // vl
2216    } else {
2217      Operands.push_back(ConstantInt::get(Ops[Offset + 1]->getType(), 7)); // frm
2218      Operands.push_back(Ops[Offset + 1]); // vl
2219    }
2220
2221    if (IsMasked)
2222      Operands.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
2223
2224    IntrinsicTypes = {ResultType, Operands.back()->getType()};
2225    llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
2226    return Builder.CreateCall(F, Operands, "");
2227  }
2228}] in {
2229  let HasFRMRoundModeOp = 1 in {
2230    // 14.8. Vector Floating-Point Square-Root Instruction
2231    defm vfsqrt : RVVOutBuiltinSet<"vfsqrt", "xfd", [["v", "v", "vvu"]]>;
2232
2233    // 14.10. Vector Floating-Point Reciprocal Estimate Instruction
2234    defm vfrec7 : RVVOutBuiltinSet<"vfrec7", "xfd", [["v", "v", "vvu"]]>;
2235  }
2236  // 14.8. Vector Floating-Point Square-Root Instruction
2237  defm vfsqrt : RVVOutBuiltinSet<"vfsqrt", "xfd", [["v", "v", "vv"]]>;
2238
2239  // 14.10. Vector Floating-Point Reciprocal Estimate Instruction
2240  defm vfrec7 : RVVOutBuiltinSet<"vfrec7", "xfd", [["v", "v", "vv"]]>;
2241}
2242
2243// 14.9. Vector Floating-Point Reciprocal Square-Root Estimate Instruction
2244def vfrsqrt7 : RVVFloatingUnaryVVBuiltin;
2245
2246// 14.11. Vector Floating-Point MIN/MAX Instructions
2247defm vfmin : RVVFloatingBinBuiltinSet;
2248defm vfmax : RVVFloatingBinBuiltinSet;
2249
2250// 14.12. Vector Floating-Point Sign-Injection Instructions
2251defm vfsgnj  : RVVFloatingBinBuiltinSet;
2252defm vfsgnjn : RVVFloatingBinBuiltinSet;
2253defm vfsgnjx : RVVFloatingBinBuiltinSet;
2254}
2255defm vfneg_v : RVVPseudoVFUnaryBuiltin<"vfsgnjn", "xfd">;
2256defm vfabs_v : RVVPseudoVFUnaryBuiltin<"vfsgnjx", "xfd">;
2257
2258// 14.13. Vector Floating-Point Compare Instructions
2259let MaskedPolicyScheme = HasPassthruOperand,
2260    HasTailPolicy = false in {
2261defm vmfeq : RVVFloatingMaskOutBuiltinSet;
2262defm vmfne : RVVFloatingMaskOutBuiltinSet;
2263defm vmflt : RVVFloatingMaskOutBuiltinSet;
2264defm vmfle : RVVFloatingMaskOutBuiltinSet;
2265defm vmfgt : RVVFloatingMaskOutBuiltinSet;
2266defm vmfge : RVVFloatingMaskOutBuiltinSet;
2267}
2268
2269// 14.14. Vector Floating-Point Classify Instruction
2270let Name = "vfclass_v", UnMaskedPolicyScheme = HasPassthruOperand in
2271  def vfclass : RVVOp0Builtin<"Uv", "Uvv", "xfd">;
2272
2273// 14.15. Vector Floating-Point Merge Instructio
2274// C/C++ Operand: (mask, op1, op2, vl), Builtin: (op1, op2, mask, vl)
2275let HasMasked = false,
2276    UnMaskedPolicyScheme = HasPassthruOperand,
2277    MaskedPolicyScheme = NonePolicy,
2278    ManualCodegen = [{
2279      // insert poison passthru
2280      if (PolicyAttrs & RVV_VTA)
2281        Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
2282      IntrinsicTypes = {ResultType, Ops[2]->getType(), Ops.back()->getType()};
2283    }] in {
2284  defm vmerge : RVVOutOp1BuiltinSet<"vmerge", "xfd",
2285                                    [["vvm", "v", "vvvm"]]>;
2286  defm vfmerge : RVVOutOp1BuiltinSet<"vfmerge", "xfd",
2287                                     [["vfm", "v", "vvem"]]>;
2288}
2289
2290// 14.16. Vector Floating-Point Move Instruction
2291let HasMasked = false,
2292    UnMaskedPolicyScheme = HasPassthruOperand,
2293    SupportOverloading = false,
2294    MaskedPolicyScheme = NonePolicy,
2295    OverloadedName = "vfmv_v" in
2296  defm vfmv_v : RVVOutBuiltinSet<"vfmv_v_f", "xfd",
2297                                  [["f", "v", "ve"]]>;
2298
2299// 14.17. Single-Width Floating-Point/Integer Type-Convert Instructions
2300let UnMaskedPolicyScheme = HasPassthruOperand in {
2301def vfcvt_rtz_xu_f_v : RVVConvToUnsignedBuiltin<"vfcvt_rtz_xu">;
2302def vfcvt_rtz_x_f_v : RVVConvToSignedBuiltin<"vfcvt_rtz_x">;
2303
2304// 14.18. Widening Floating-Point/Integer Type-Convert Instructions
2305let Log2LMUL = [-3, -2, -1, 0, 1, 2] in {
2306  def vfwcvt_rtz_xu_f_v : RVVConvToWidenUnsignedBuiltin<"vfwcvt_rtz_xu">;
2307  def vfwcvt_rtz_x_f_v : RVVConvToWidenSignedBuiltin<"vfwcvt_rtz_x">;
2308  def vfwcvt_f_xu_v : RVVConvBuiltin<"Fw", "FwUv", "csi", "vfwcvt_f">;
2309  def vfwcvt_f_x_v : RVVConvBuiltin<"Fw", "Fwv", "csi", "vfwcvt_f">;
2310  def vfwcvt_f_f_v : RVVConvBuiltin<"w", "wv", "xf", "vfwcvt_f">;
2311}
2312
2313// 14.19. Narrowing Floating-Point/Integer Type-Convert Instructions
2314let Log2LMUL = [-3, -2, -1, 0, 1, 2] in {
2315  def vfncvt_rtz_xu_f_w : RVVConvToNarrowingUnsignedBuiltin<"vfncvt_rtz_xu">;
2316  def vfncvt_rtz_x_f_w : RVVConvToNarrowingSignedBuiltin<"vfncvt_rtz_x">;
2317  def vfncvt_rod_f_f_w : RVVConvBuiltin<"v", "vw", "xf", "vfncvt_rod_f">;
2318}
2319let ManualCodegen = [{
2320  {
2321    // LLVM intrinsic
2322    // Unmasked: (passthru, op0, frm, vl)
2323    // Masked:   (passthru, op0, mask, frm, vl, policy)
2324    SmallVector<llvm::Value*, 7> Operands;
2325    bool HasMaskedOff = !(
2326        (IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) ||
2327        (!IsMasked && PolicyAttrs & RVV_VTA));
2328    bool HasRoundModeOp = IsMasked ?
2329      (HasMaskedOff ? Ops.size() == 5 : Ops.size() == 4) :
2330      (HasMaskedOff ? Ops.size() == 4 : Ops.size() == 3);
2331
2332    unsigned Offset = IsMasked ?
2333        (HasMaskedOff ? 2 : 1) : (HasMaskedOff ? 1 : 0);
2334
2335    if (!HasMaskedOff)
2336      Operands.push_back(llvm::PoisonValue::get(ResultType));
2337    else
2338      Operands.push_back(Ops[IsMasked ? 1 : 0]);
2339
2340    Operands.push_back(Ops[Offset]); // op0
2341
2342    if (IsMasked)
2343      Operands.push_back(Ops[0]); // mask
2344
2345    if (HasRoundModeOp) {
2346      Operands.push_back(Ops[Offset + 1]); // frm
2347      Operands.push_back(Ops[Offset + 2]); // vl
2348    } else {
2349      Operands.push_back(ConstantInt::get(Ops[Offset + 1]->getType(), 7)); // frm
2350      Operands.push_back(Ops[Offset + 1]); // vl
2351    }
2352
2353    if (IsMasked)
2354      Operands.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
2355
2356    IntrinsicTypes = {ResultType, Ops[Offset]->getType(),
2357                      Operands.back()->getType()};
2358    llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
2359    return Builder.CreateCall(F, Operands, "");
2360  }
2361}] in {
2362  let HasFRMRoundModeOp = 1 in {
2363    // 14.17. Single-Width Floating-Point/Integer Type-Convert Instructions
2364    let OverloadedName = "vfcvt_x" in
2365      defm :
2366        RVVConvBuiltinSet<"vfcvt_x_f_v", "xfd", [["Iv", "Ivvu"]]>;
2367    let OverloadedName = "vfcvt_xu" in
2368      defm :
2369        RVVConvBuiltinSet<"vfcvt_xu_f_v", "xfd", [["Uv", "Uvvu"]]>;
2370    let OverloadedName = "vfcvt_f" in {
2371      defm :
2372        RVVConvBuiltinSet<"vfcvt_f_x_v", "sil", [["Fv", "Fvvu"]]>;
2373      defm :
2374        RVVConvBuiltinSet<"vfcvt_f_xu_v", "sil", [["Fv", "FvUvu"]]>;
2375    }
2376
2377    // 14.18. Widening Floating-Point/Integer Type-Convert Instructions
2378    let Log2LMUL = [-3, -2, -1, 0, 1, 2] in {
2379      let OverloadedName = "vfwcvt_x" in
2380        defm :
2381          RVVConvBuiltinSet<"vfwcvt_x_f_v", "xf", [["Iw", "Iwvu"]]>;
2382      let OverloadedName = "vfwcvt_xu" in
2383        defm :
2384          RVVConvBuiltinSet<"vfwcvt_xu_f_v", "xf", [["Uw", "Uwvu"]]>;
2385    }
2386    // 14.19. Narrowing Floating-Point/Integer Type-Convert Instructions
2387    let Log2LMUL = [-3, -2, -1, 0, 1, 2] in {
2388      let OverloadedName = "vfncvt_x" in
2389        defm :
2390          RVVConvBuiltinSet<"vfncvt_x_f_w", "csi", [["Iv", "IvFwu"]]>;
2391      let OverloadedName = "vfncvt_xu" in
2392        defm :
2393          RVVConvBuiltinSet<"vfncvt_xu_f_w", "csi", [["Uv", "UvFwu"]]>;
2394      let OverloadedName = "vfncvt_f" in {
2395        defm :
2396          RVVConvBuiltinSet<"vfncvt_f_x_w", "csi", [["Fv", "Fvwu"]]>;
2397        defm :
2398          RVVConvBuiltinSet<"vfncvt_f_xu_w", "csi", [["Fv", "FvUwu"]]>;
2399      }
2400      let OverloadedName = "vfncvt_f" in
2401        defm :
2402          RVVConvBuiltinSet<"vfncvt_f_f_w", "xf", [["v", "vwu"]]>;
2403    }
2404  }
2405
2406  // 14.17. Single-Width Floating-Point/Integer Type-Convert Instructions
2407  let OverloadedName = "vfcvt_x" in
2408    defm :
2409      RVVConvBuiltinSet<"vfcvt_x_f_v", "xfd", [["Iv", "Ivv"]]>;
2410  let OverloadedName = "vfcvt_xu" in
2411    defm :
2412      RVVConvBuiltinSet<"vfcvt_xu_f_v", "xfd", [["Uv", "Uvv"]]>;
2413  let OverloadedName = "vfcvt_f" in {
2414    defm :
2415      RVVConvBuiltinSet<"vfcvt_f_x_v", "sil", [["Fv", "Fvv"]]>;
2416    defm :
2417      RVVConvBuiltinSet<"vfcvt_f_xu_v", "sil", [["Fv", "FvUv"]]>;
2418  }
2419
2420  // 14.18. Widening Floating-Point/Integer Type-Convert Instructions
2421  let Log2LMUL = [-3, -2, -1, 0, 1, 2] in {
2422    let OverloadedName = "vfwcvt_x" in
2423      defm :
2424        RVVConvBuiltinSet<"vfwcvt_x_f_v", "xf", [["Iw", "Iwv"]]>;
2425    let OverloadedName = "vfwcvt_xu" in
2426      defm :
2427        RVVConvBuiltinSet<"vfwcvt_xu_f_v", "xf", [["Uw", "Uwv"]]>;
2428  }
2429  // 14.19. Narrowing Floating-Point/Integer Type-Convert Instructions
2430  let Log2LMUL = [-3, -2, -1, 0, 1, 2] in {
2431    let OverloadedName = "vfncvt_x" in
2432      defm :
2433        RVVConvBuiltinSet<"vfncvt_x_f_w", "csi", [["Iv", "IvFw"]]>;
2434    let OverloadedName = "vfncvt_xu" in
2435      defm :
2436        RVVConvBuiltinSet<"vfncvt_xu_f_w", "csi", [["Uv", "UvFw"]]>;
2437    let OverloadedName = "vfncvt_f" in {
2438      defm :
2439        RVVConvBuiltinSet<"vfncvt_f_x_w", "csi", [["Fv", "Fvw"]]>;
2440      defm :
2441        RVVConvBuiltinSet<"vfncvt_f_xu_w", "csi", [["Fv", "FvUw"]]>;
2442    }
2443    let OverloadedName = "vfncvt_f" in
2444      defm :
2445        RVVConvBuiltinSet<"vfncvt_f_f_w", "xf", [["v", "vw"]]>;
2446  }
2447}
2448}
2449
2450// 15. Vector Reduction Operations
2451// 15.1. Vector Single-Width Integer Reduction Instructions
2452let UnMaskedPolicyScheme = HasPassthruOperand,
2453    MaskedPolicyScheme = HasPassthruOperand,
2454    HasMaskPolicy = false in {
2455defm vredsum : RVVIntReductionBuiltinSet;
2456defm vredmaxu : RVVUnsignedReductionBuiltin;
2457defm vredmax : RVVSignedReductionBuiltin;
2458defm vredminu : RVVUnsignedReductionBuiltin;
2459defm vredmin : RVVSignedReductionBuiltin;
2460defm vredand : RVVIntReductionBuiltinSet;
2461defm vredor : RVVIntReductionBuiltinSet;
2462defm vredxor : RVVIntReductionBuiltinSet;
2463
2464// 15.2. Vector Widening Integer Reduction Instructions
2465// Vector Widening Integer Reduction Operations
2466let HasMaskedOffOperand = true in {
2467  defm vwredsum : RVVOutOp0BuiltinSet<"vwredsum", "csi",
2468                                      [["vs", "vSw", "SwvSw"]]>;
2469  defm vwredsumu : RVVOutOp0BuiltinSet<"vwredsumu", "csi",
2470                                       [["vs", "UvUSw", "USwUvUSw"]]>;
2471}
2472
2473// 15.3. Vector Single-Width Floating-Point Reduction Instructions
2474defm vfredmax : RVVFloatingReductionBuiltin;
2475defm vfredmin : RVVFloatingReductionBuiltin;
2476let ManualCodegen = [{
2477  {
2478    // LLVM intrinsic
2479    // Unmasked: (passthru, op0, op1, round_mode, vl)
2480    // Masked:   (passthru, vector_in, vector_in/scalar_in, mask, frm, vl, policy)
2481
2482    SmallVector<llvm::Value*, 7> Operands;
2483    bool HasMaskedOff = !(
2484        (IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) ||
2485        (!IsMasked && PolicyAttrs & RVV_VTA));
2486    bool HasRoundModeOp = IsMasked ?
2487      (HasMaskedOff ? Ops.size() == 6 : Ops.size() == 5) :
2488      (HasMaskedOff ? Ops.size() == 5 : Ops.size() == 4);
2489
2490    unsigned Offset = IsMasked ?
2491        (HasMaskedOff ? 2 : 1) : (HasMaskedOff ? 1 : 0);
2492
2493    if (!HasMaskedOff)
2494      Operands.push_back(llvm::PoisonValue::get(ResultType));
2495    else
2496      Operands.push_back(Ops[IsMasked ? 1 : 0]);
2497
2498    Operands.push_back(Ops[Offset]); // op0
2499    Operands.push_back(Ops[Offset + 1]); // op1
2500
2501    if (IsMasked)
2502      Operands.push_back(Ops[0]); // mask
2503
2504    if (HasRoundModeOp) {
2505      Operands.push_back(Ops[Offset + 2]); // frm
2506      Operands.push_back(Ops[Offset + 3]); // vl
2507    } else {
2508      Operands.push_back(ConstantInt::get(Ops[Offset + 2]->getType(), 7)); // frm
2509      Operands.push_back(Ops[Offset + 2]); // vl
2510    }
2511
2512    IntrinsicTypes = {ResultType, Ops[Offset]->getType(),
2513                      Ops.back()->getType()};
2514    llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
2515    return Builder.CreateCall(F, Operands, "");
2516  }
2517}] in {
2518  let HasFRMRoundModeOp = 1 in {
2519    // 15.3. Vector Single-Width Floating-Point Reduction Instructions
2520    defm vfredusum : RVVFloatingReductionBuiltinRoundingMode;
2521    defm vfredosum : RVVFloatingReductionBuiltinRoundingMode;
2522
2523    // 15.4. Vector Widening Floating-Point Reduction Instructions
2524    defm vfwredusum : RVVFloatingWidenReductionBuiltinRoundingMode;
2525    defm vfwredosum : RVVFloatingWidenReductionBuiltinRoundingMode;
2526  }
2527  // 15.3. Vector Single-Width Floating-Point Reduction Instructions
2528  defm vfredusum : RVVFloatingReductionBuiltin;
2529  defm vfredosum : RVVFloatingReductionBuiltin;
2530
2531  // 15.4. Vector Widening Floating-Point Reduction Instructions
2532  defm vfwredusum : RVVFloatingWidenReductionBuiltin;
2533  defm vfwredosum : RVVFloatingWidenReductionBuiltin;
2534}
2535}
2536
2537// 16. Vector Mask Instructions
2538// 16.1. Vector Mask-Register Logical Instructions
2539def vmand    : RVVMaskBinBuiltin;
2540def vmnand   : RVVMaskBinBuiltin;
2541def vmandn   : RVVMaskBinBuiltin;
2542def vmxor    : RVVMaskBinBuiltin;
2543def vmor     : RVVMaskBinBuiltin;
2544def vmnor    : RVVMaskBinBuiltin;
2545def vmorn    : RVVMaskBinBuiltin;
2546def vmxnor   : RVVMaskBinBuiltin;
2547// pseudoinstructions
2548def vmclr    : RVVMaskNullaryBuiltin;
2549def vmset    : RVVMaskNullaryBuiltin;
2550defm vmmv_m : RVVPseudoMaskBuiltin<"vmand", "c">;
2551defm vmnot_m : RVVPseudoMaskBuiltin<"vmnand", "c">;
2552
2553let MaskedPolicyScheme = NonePolicy in {
2554// 16.2. Vector count population in mask vcpop.m
2555def vcpop : RVVMaskOp0Builtin<"um">;
2556
2557// 16.3. vfirst find-first-set mask bit
2558def vfirst : RVVMaskOp0Builtin<"lm">;
2559}
2560
2561let MaskedPolicyScheme = HasPassthruOperand,
2562    HasTailPolicy = false in {
2563// 16.4. vmsbf.m set-before-first mask bit
2564def vmsbf : RVVMaskUnaryBuiltin;
2565
2566// 16.5. vmsif.m set-including-first mask bit
2567def vmsif : RVVMaskUnaryBuiltin;
2568
2569// 16.6. vmsof.m set-only-first mask bit
2570def vmsof : RVVMaskUnaryBuiltin;
2571}
2572
2573let UnMaskedPolicyScheme = HasPassthruOperand, SupportOverloading = false in {
2574  // 16.8. Vector Iota Instruction
2575  defm viota : RVVOutBuiltinSet<"viota", "csil", [["m", "Uv", "Uvm"]]>;
2576
2577  // 16.9. Vector Element Index Instruction
2578  defm vid : RVVOutBuiltinSet<"vid", "csil", [["v", "v", "v"],
2579                                              ["v", "Uv", "Uv"]]>;
2580}
2581
2582// 17. Vector Permutation Instructions
2583// 17.1. Integer Scalar Move Instructions
2584let HasMasked = false, MaskedPolicyScheme = NonePolicy in {
2585  let HasVL = false, OverloadedName = "vmv_x" in
2586    defm vmv_x : RVVOp0BuiltinSet<"vmv_x_s", "csil",
2587                                   [["s", "ve", "ev"],
2588                                    ["s", "UvUe", "UeUv"]]>;
2589  let OverloadedName = "vmv_s",
2590      UnMaskedPolicyScheme = HasPassthruOperand,
2591      SupportOverloading = false in
2592    defm vmv_s : RVVOutBuiltinSet<"vmv_s_x", "csil",
2593                                   [["x", "v", "ve"],
2594                                    ["x", "Uv", "UvUe"]]>;
2595}
2596
2597// 17.2. Floating-Point Scalar Move Instructions
2598let HasMasked = false, MaskedPolicyScheme = NonePolicy in {
2599  let HasVL = false, OverloadedName = "vfmv_f" in
2600    defm vfmv_f : RVVOp0BuiltinSet<"vfmv_f_s", "xfd",
2601                                     [["s", "ve", "ev"]]>;
2602  let OverloadedName = "vfmv_s",
2603      UnMaskedPolicyScheme = HasPassthruOperand,
2604      SupportOverloading = false in
2605    defm vfmv_s : RVVOutBuiltinSet<"vfmv_s_f", "xfd",
2606                                     [["f", "v", "ve"],
2607                                      ["x", "Uv", "UvUe"]]>;
2608}
2609
2610// 17.3. Vector Slide Instructions
2611// 17.3.1. Vector Slideup Instructions
2612defm vslideup   : RVVSlideUpBuiltinSet;
2613// 17.3.2. Vector Slidedown Instructions
2614defm vslidedown : RVVSlideDownBuiltinSet;
2615
2616// 17.3.3. Vector Slide1up Instructions
2617let UnMaskedPolicyScheme = HasPassthruOperand in {
2618defm vslide1up : RVVSlideOneBuiltinSet;
2619defm vfslide1up : RVVFloatingBinVFBuiltinSet;
2620
2621// 17.3.4. Vector Slide1down Instruction
2622defm vslide1down : RVVSlideOneBuiltinSet;
2623defm vfslide1down : RVVFloatingBinVFBuiltinSet;
2624
2625// 17.4. Vector Register Gather Instructions
2626// signed and floating type
2627defm vrgather : RVVOutBuiltinSet<"vrgather_vv", "csilxfd",
2628                                 [["vv", "v", "vvUv"]]>;
2629defm vrgather : RVVOutBuiltinSet<"vrgather_vx", "csilxfd",
2630                                 [["vx", "v", "vvz"]]>;
2631defm vrgatherei16 : RVVOutBuiltinSet<"vrgatherei16_vv", "csilxfd",
2632                                     [["vv", "v", "vv(Log2EEW:4)Uv"]]>;
2633// unsigned type
2634defm vrgather : RVVOutBuiltinSet<"vrgather_vv", "csil",
2635                                 [["vv", "Uv", "UvUvUv"]]>;
2636defm vrgather : RVVOutBuiltinSet<"vrgather_vx", "csil",
2637                                 [["vx", "Uv", "UvUvz"]]>;
2638defm vrgatherei16 : RVVOutBuiltinSet<"vrgatherei16_vv", "csil",
2639                                     [["vv", "Uv", "UvUv(Log2EEW:4)Uv"]]>;
2640}
2641
2642// 17.5. Vector Compress Instruction
2643let HasMasked = false,
2644    UnMaskedPolicyScheme = HasPassthruOperand,
2645    MaskedPolicyScheme = NonePolicy,
2646    ManualCodegen = [{
2647      // insert poison passthru
2648      if (PolicyAttrs & RVV_VTA)
2649        Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
2650      IntrinsicTypes = {ResultType, Ops.back()->getType()};
2651    }] in {
2652  // signed and floating type
2653  defm vcompress : RVVOutBuiltinSet<"vcompress", "csilxfd",
2654                                    [["vm", "v", "vvm"]]>;
2655  // unsigned type
2656  defm vcompress : RVVOutBuiltinSet<"vcompress", "csil",
2657                                    [["vm", "Uv", "UvUvm"]]>;
2658}
2659
2660// Miscellaneous
2661let HasMasked = false, HasVL = false, IRName = "" in {
2662  let Name = "vreinterpret_v", MaskedPolicyScheme = NonePolicy,
2663      ManualCodegen = [{
2664        if (ResultType->isIntOrIntVectorTy(1) ||
2665            Ops[0]->getType()->isIntOrIntVectorTy(1)) {
2666          assert(isa<ScalableVectorType>(ResultType) &&
2667                 isa<ScalableVectorType>(Ops[0]->getType()));
2668
2669          LLVMContext &Context = CGM.getLLVMContext();
2670          ScalableVectorType *Boolean64Ty =
2671              ScalableVectorType::get(llvm::Type::getInt1Ty(Context), 64);
2672
2673          if (ResultType->isIntOrIntVectorTy(1)) {
2674            // Casting from m1 vector integer -> vector boolean
2675            // Ex: <vscale x 8 x i8>
2676            //     --(bitcast)--------> <vscale x 64 x i1>
2677            //     --(vector_extract)-> <vscale x  8 x i1>
2678            llvm::Value *BitCast = Builder.CreateBitCast(Ops[0], Boolean64Ty);
2679            return Builder.CreateExtractVector(ResultType, BitCast,
2680                                               ConstantInt::get(Int64Ty, 0));
2681          } else {
2682            // Casting from vector boolean -> m1 vector integer
2683            // Ex: <vscale x  1 x i1>
2684            //       --(vector_insert)-> <vscale x 64 x i1>
2685            //       --(bitcast)-------> <vscale x  8 x i8>
2686            llvm::Value *Boolean64Val =
2687              Builder.CreateInsertVector(Boolean64Ty,
2688                                         llvm::PoisonValue::get(Boolean64Ty),
2689                                         Ops[0],
2690                                         ConstantInt::get(Int64Ty, 0));
2691            return Builder.CreateBitCast(Boolean64Val, ResultType);
2692          }
2693        }
2694        return Builder.CreateBitCast(Ops[0], ResultType);
2695      }] in {
2696    // Reinterpret between different type under the same SEW and LMUL
2697    def vreinterpret_i_u : RVVBuiltin<"Uvv", "vUv", "csil", "v">;
2698    def vreinterpret_i_f : RVVBuiltin<"Fvv", "vFv", "sil", "v">;
2699    def vreinterpret_u_i : RVVBuiltin<"vUv", "Uvv", "csil", "Uv">;
2700    def vreinterpret_u_f : RVVBuiltin<"FvUv", "UvFv", "sil", "Uv">;
2701    def vreinterpret_f_i : RVVBuiltin<"vFv", "Fvv", "sil", "Fv">;
2702    def vreinterpret_f_u : RVVBuiltin<"UvFv", "FvUv", "sil", "Fv">;
2703
2704    // Reinterpret between different SEW under the same LMUL
2705    foreach dst_sew = ["(FixedSEW:8)", "(FixedSEW:16)", "(FixedSEW:32)",
2706                       "(FixedSEW:64)"] in {
2707      def vreinterpret_i_ # dst_sew : RVVBuiltin<"v" # dst_sew # "v",
2708                                                 dst_sew # "vv", "csil", dst_sew # "v">;
2709      def vreinterpret_u_ # dst_sew : RVVBuiltin<"Uv" # dst_sew # "Uv",
2710                                                 dst_sew # "UvUv", "csil", dst_sew # "Uv">;
2711    }
2712
2713    // Existing users of FixedSEW - the reinterpretation between different SEW
2714    // and same LMUL has the implicit assumption that if FixedSEW is set to the
2715    // given element width, then the type will be identified as invalid, thus
2716    // skipping definition of reinterpret of SEW=8 to SEW=8. However this blocks
2717    // our usage here of defining all possible combinations of a fixed SEW to
2718    // any boolean. So we need to separately define SEW=8 here.
2719    // Reinterpret from LMUL=1 integer type to vector boolean type
2720    def vreintrepret_m1_b8_signed :
2721        RVVBuiltin<"Svm",
2722                    "mSv",
2723                    "c", "m">;
2724    def vreintrepret_m1_b8_usigned :
2725        RVVBuiltin<"USvm",
2726                    "mUSv",
2727                    "c", "m">;
2728
2729    // Reinterpret from vector boolean type to LMUL=1 integer type
2730    def vreintrepret_b8_m1_signed :
2731        RVVBuiltin<"mSv",
2732                    "Svm",
2733                    "c", "Sv">;
2734    def vreintrepret_b8_m1_usigned :
2735        RVVBuiltin<"mUSv",
2736                    "USvm",
2737                    "c", "USv">;
2738
2739    foreach dst_sew = ["16", "32", "64"] in {
2740      // Reinterpret from LMUL=1 integer type to vector boolean type
2741      def vreinterpret_m1_b # dst_sew # _signed:
2742        RVVBuiltin<"(FixedSEW:" # dst_sew # ")Svm",
2743                    "m(FixedSEW:" # dst_sew # ")Sv",
2744                    "c", "m">;
2745      def vreinterpret_m1_b # dst_sew # _unsigned:
2746        RVVBuiltin<"(FixedSEW:" # dst_sew # ")USvm",
2747                    "m(FixedSEW:" # dst_sew # ")USv",
2748                    "c", "m">;
2749      // Reinterpret from vector boolean type to LMUL=1 integer type
2750      def vreinterpret_b # dst_sew # _m1_signed:
2751        RVVBuiltin<"m(FixedSEW:" # dst_sew # ")Sv",
2752                    "(FixedSEW:" # dst_sew # ")Svm",
2753                    "c", "(FixedSEW:" # dst_sew # ")Sv">;
2754      def vreinterpret_b # dst_sew # _m1_unsigned:
2755        RVVBuiltin<"m(FixedSEW:" # dst_sew # ")USv",
2756                    "(FixedSEW:" # dst_sew # ")USvm",
2757                    "c", "(FixedSEW:" # dst_sew # ")USv">;
2758    }
2759  }
2760
2761  let Name = "vundefined", SupportOverloading = false,
2762      MaskedPolicyScheme = NonePolicy,
2763      ManualCodegen = [{
2764        return llvm::PoisonValue::get(ResultType);
2765      }] in {
2766    def vundefined : RVVBuiltin<"v", "v", "csilxfd">;
2767    def vundefined_u : RVVBuiltin<"Uv", "Uv", "csil">;
2768  }
2769
2770  // LMUL truncation
2771  // C/C++ Operand: VecTy, IR Operand: VecTy, Index
2772  let Name = "vlmul_trunc_v", OverloadedName = "vlmul_trunc",
2773      MaskedPolicyScheme = NonePolicy,
2774      ManualCodegen = [{ {
2775        return Builder.CreateExtractVector(ResultType, Ops[0],
2776                                           ConstantInt::get(Int64Ty, 0));
2777      } }] in {
2778    foreach dst_lmul = ["(SFixedLog2LMUL:-3)", "(SFixedLog2LMUL:-2)", "(SFixedLog2LMUL:-1)",
2779                        "(SFixedLog2LMUL:0)", "(SFixedLog2LMUL:1)", "(SFixedLog2LMUL:2)"] in {
2780      def vlmul_trunc # dst_lmul : RVVBuiltin<"v" # dst_lmul # "v",
2781                                              dst_lmul # "vv", "csilxfd", dst_lmul # "v">;
2782      def vlmul_trunc_u # dst_lmul : RVVBuiltin<"Uv" # dst_lmul # "Uv",
2783                                                dst_lmul # "UvUv", "csil", dst_lmul # "Uv">;
2784    }
2785  }
2786
2787  // LMUL extension
2788  // C/C++ Operand: SubVecTy, IR Operand: VecTy, SubVecTy, Index
2789  let Name = "vlmul_ext_v", OverloadedName = "vlmul_ext",
2790      MaskedPolicyScheme = NonePolicy,
2791      ManualCodegen = [{
2792        return Builder.CreateInsertVector(ResultType,
2793                                          llvm::PoisonValue::get(ResultType),
2794                                          Ops[0], ConstantInt::get(Int64Ty, 0));
2795      }] in {
2796    foreach dst_lmul = ["(LFixedLog2LMUL:-2)", "(LFixedLog2LMUL:-1)", "(LFixedLog2LMUL:-0)",
2797                        "(LFixedLog2LMUL:1)", "(LFixedLog2LMUL:2)", "(LFixedLog2LMUL:3)"] in {
2798      def vlmul_ext # dst_lmul : RVVBuiltin<"v" # dst_lmul # "v",
2799                                            dst_lmul # "vv", "csilxfd", dst_lmul # "v">;
2800      def vlmul_ext_u # dst_lmul : RVVBuiltin<"Uv" # dst_lmul # "Uv",
2801                                              dst_lmul # "UvUv", "csil", dst_lmul # "Uv">;
2802    }
2803  }
2804
2805  let Name = "vget_v", MaskedPolicyScheme = NonePolicy,
2806      ManualCodegen = [{
2807      {
2808        if (isa<StructType>(Ops[0]->getType())) // For tuple type
2809          // Extract value from index (operand 1) of vtuple (operand 0)
2810          return Builder.CreateExtractValue(
2811            Ops[0],
2812            {(unsigned)cast<ConstantInt>(Ops[1])->getZExtValue()});
2813        auto *VecTy = cast<ScalableVectorType>(ResultType);
2814        auto *OpVecTy = cast<ScalableVectorType>(Ops[0]->getType());
2815        // Mask to only valid indices.
2816        unsigned MaxIndex = OpVecTy->getMinNumElements() / VecTy->getMinNumElements();
2817        assert(isPowerOf2_32(MaxIndex));
2818        Ops[1] = Builder.CreateZExt(Ops[1], Builder.getInt64Ty());
2819        Ops[1] = Builder.CreateAnd(Ops[1], MaxIndex - 1);
2820        Ops[1] = Builder.CreateMul(Ops[1],
2821                                   ConstantInt::get(Ops[1]->getType(),
2822                                                    VecTy->getMinNumElements()));
2823        return Builder.CreateExtractVector(ResultType, Ops[0], Ops[1]);
2824      }
2825      }] in {
2826    foreach dst_lmul = ["(SFixedLog2LMUL:0)", "(SFixedLog2LMUL:1)", "(SFixedLog2LMUL:2)"] in {
2827      def : RVVBuiltin<"v" # dst_lmul # "v", dst_lmul # "vvKz", "csilxfd", dst_lmul # "v">;
2828      def : RVVBuiltin<"Uv" # dst_lmul # "Uv", dst_lmul # "UvUvKz", "csil", dst_lmul # "Uv">;
2829    }
2830    foreach nf = NFList in {
2831      defvar T = "(Tuple:" # nf # ")";
2832      def : RVVBuiltin<T # "vv", "v" # T # "vKz", "csilxfd", "v">;
2833      def : RVVBuiltin<T # "UvUv", "Uv" # T # "UvKz", "csil", "Uv">;
2834    }
2835  }
2836
2837  let Name = "vset_v", MaskedPolicyScheme = NonePolicy,
2838      ManualCodegen = [{
2839      {
2840        if (isa<StructType>(ResultType)) // For tuple type
2841          // Insert value (operand 2) into index (operand 1) of vtuple (operand 0)
2842          return Builder.CreateInsertValue(
2843            Ops[0], Ops[2],
2844            {(unsigned)cast<ConstantInt>(Ops[1])->getZExtValue()});
2845        auto *ResVecTy = cast<ScalableVectorType>(ResultType);
2846        auto *VecTy = cast<ScalableVectorType>(Ops[2]->getType());
2847        // Mask to only valid indices.
2848        unsigned MaxIndex = ResVecTy->getMinNumElements() / VecTy->getMinNumElements();
2849        assert(isPowerOf2_32(MaxIndex));
2850        Ops[1] = Builder.CreateZExt(Ops[1], Builder.getInt64Ty());
2851        Ops[1] = Builder.CreateAnd(Ops[1], MaxIndex - 1);
2852        Ops[1] = Builder.CreateMul(Ops[1],
2853                                   ConstantInt::get(Ops[1]->getType(),
2854                                                    VecTy->getMinNumElements()));
2855        return Builder.CreateInsertVector(ResultType, Ops[0], Ops[2], Ops[1]);
2856      }
2857      }] in {
2858    let Log2LMUL = [0, 1, 2] in {
2859      foreach dst_lmul = ["(LFixedLog2LMUL:1)", "(LFixedLog2LMUL:2)", "(LFixedLog2LMUL:3)"] in {
2860        def : RVVBuiltin<"v" # dst_lmul # "v", dst_lmul # "v" # dst_lmul # "vKzv", "csilxfd">;
2861        def : RVVBuiltin<"Uv" # dst_lmul # "Uv", dst_lmul # "Uv" # dst_lmul #"UvKzUv", "csil">;
2862      }
2863    }
2864    foreach nf = NFList in {
2865      defvar T = "(Tuple:" # nf # ")";
2866      def : RVVBuiltin<"v" # T # "v", T # "v" # T # "vKzv", "csilxfd">;
2867      def : RVVBuiltin<"Uv" # T # "Uv", T # "Uv" # T # "UvKzUv", "csil">;
2868    }
2869  }
2870}
2871