//===---- AMDGPUCallingConv.td - Calling Conventions for Radeon GPUs ------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This describes the calling conventions for the AMD Radeon GPUs.
//
//===----------------------------------------------------------------------===//

// Inversion of CCIfInReg
class CCIfNotInReg<CCAction A> : CCIf<"!ArgFlags.isInReg()", A> {}
class CCIfExtend<CCAction A>
  : CCIf<"ArgFlags.isSExt() || ArgFlags.isZExt()", A>;
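
// CCIfNotInReg matches arguments that do not carry the 'inreg' attribute, and
// CCIfExtend matches arguments marked 'signext' or 'zeroext'. For example, the
// rule CCIfType<[i1, i8, i16], CCIfExtend<CCPromoteToType<i32>>> in
// CC_AMDGPU_Func below promotes small integer arguments to i32 only when they
// carry one of those extension attributes.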

// Calling convention for SI
def CC_SI : CallingConv<[

  CCIfInReg<CCIfType<[f32, i32, f16, v2i16, v2f16], CCAssignToReg<[
    SGPR0, SGPR1, SGPR2, SGPR3, SGPR4, SGPR5, SGPR6, SGPR7,
    SGPR8, SGPR9, SGPR10, SGPR11, SGPR12, SGPR13, SGPR14, SGPR15,
    SGPR16, SGPR17, SGPR18, SGPR19, SGPR20, SGPR21, SGPR22, SGPR23,
    SGPR24, SGPR25, SGPR26, SGPR27, SGPR28, SGPR29, SGPR30, SGPR31,
    SGPR32, SGPR33, SGPR34, SGPR35, SGPR36, SGPR37, SGPR38, SGPR39,
    SGPR40, SGPR41, SGPR42, SGPR43
  ]>>>,

  // 32*4 + 4 is the minimum for a fetch shader consumer with 32 inputs.
  CCIfNotInReg<CCIfType<[f32, i32, f16, v2i16, v2f16], CCAssignToReg<[
    VGPR0, VGPR1, VGPR2, VGPR3, VGPR4, VGPR5, VGPR6, VGPR7,
    VGPR8, VGPR9, VGPR10, VGPR11, VGPR12, VGPR13, VGPR14, VGPR15,
    VGPR16, VGPR17, VGPR18, VGPR19, VGPR20, VGPR21, VGPR22, VGPR23,
    VGPR24, VGPR25, VGPR26, VGPR27, VGPR28, VGPR29, VGPR30, VGPR31,
    VGPR32, VGPR33, VGPR34, VGPR35, VGPR36, VGPR37, VGPR38, VGPR39,
    VGPR40, VGPR41, VGPR42, VGPR43, VGPR44, VGPR45, VGPR46, VGPR47,
    VGPR48, VGPR49, VGPR50, VGPR51, VGPR52, VGPR53, VGPR54, VGPR55,
    VGPR56, VGPR57, VGPR58, VGPR59, VGPR60, VGPR61, VGPR62, VGPR63,
    VGPR64, VGPR65, VGPR66, VGPR67, VGPR68, VGPR69, VGPR70, VGPR71,
    VGPR72, VGPR73, VGPR74, VGPR75, VGPR76, VGPR77, VGPR78, VGPR79,
    VGPR80, VGPR81, VGPR82, VGPR83, VGPR84, VGPR85, VGPR86, VGPR87,
    VGPR88, VGPR89, VGPR90, VGPR91, VGPR92, VGPR93, VGPR94, VGPR95,
    VGPR96, VGPR97, VGPR98, VGPR99, VGPR100, VGPR101, VGPR102, VGPR103,
    VGPR104, VGPR105, VGPR106, VGPR107, VGPR108, VGPR109, VGPR110, VGPR111,
    VGPR112, VGPR113, VGPR114, VGPR115, VGPR116, VGPR117, VGPR118, VGPR119,
    VGPR120, VGPR121, VGPR122, VGPR123, VGPR124, VGPR125, VGPR126, VGPR127,
    VGPR128, VGPR129, VGPR130, VGPR131, VGPR132, VGPR133, VGPR134, VGPR135
  ]>>>
]>;
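
// Under CC_SI, a value of one of the types listed above is assigned to the
// next free SGPR from the list when the argument is marked 'inreg', and to the
// next free VGPR otherwise. As an illustrative sketch (names are
// hypothetical), for a shader such as
//   define amdgpu_ps float @example(i32 inreg %desc, float %uv)
// these rules place %desc in an SGPR and %uv in a VGPR; the exact registers
// also depend on inputs the target reserves before user arguments.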

def RetCC_SI_Shader : CallingConv<[
  CCIfType<[i32], CCAssignToReg<[
    SGPR0, SGPR1, SGPR2, SGPR3, SGPR4, SGPR5, SGPR6, SGPR7,
    SGPR8, SGPR9, SGPR10, SGPR11, SGPR12, SGPR13, SGPR14, SGPR15,
    SGPR16, SGPR17, SGPR18, SGPR19, SGPR20, SGPR21, SGPR22, SGPR23,
    SGPR24, SGPR25, SGPR26, SGPR27, SGPR28, SGPR29, SGPR30, SGPR31,
    SGPR32, SGPR33, SGPR34, SGPR35, SGPR36, SGPR37, SGPR38, SGPR39,
    SGPR40, SGPR41, SGPR42, SGPR43
  ]>>,

  // 32*4 + 4 is the minimum for a fetch shader with 32 outputs.
  CCIfType<[f32, f16, v2f16], CCAssignToReg<[
    VGPR0, VGPR1, VGPR2, VGPR3, VGPR4, VGPR5, VGPR6, VGPR7,
    VGPR8, VGPR9, VGPR10, VGPR11, VGPR12, VGPR13, VGPR14, VGPR15,
    VGPR16, VGPR17, VGPR18, VGPR19, VGPR20, VGPR21, VGPR22, VGPR23,
    VGPR24, VGPR25, VGPR26, VGPR27, VGPR28, VGPR29, VGPR30, VGPR31,
    VGPR32, VGPR33, VGPR34, VGPR35, VGPR36, VGPR37, VGPR38, VGPR39,
    VGPR40, VGPR41, VGPR42, VGPR43, VGPR44, VGPR45, VGPR46, VGPR47,
    VGPR48, VGPR49, VGPR50, VGPR51, VGPR52, VGPR53, VGPR54, VGPR55,
    VGPR56, VGPR57, VGPR58, VGPR59, VGPR60, VGPR61, VGPR62, VGPR63,
    VGPR64, VGPR65, VGPR66, VGPR67, VGPR68, VGPR69, VGPR70, VGPR71,
    VGPR72, VGPR73, VGPR74, VGPR75, VGPR76, VGPR77, VGPR78, VGPR79,
    VGPR80, VGPR81, VGPR82, VGPR83, VGPR84, VGPR85, VGPR86, VGPR87,
    VGPR88, VGPR89, VGPR90, VGPR91, VGPR92, VGPR93, VGPR94, VGPR95,
    VGPR96, VGPR97, VGPR98, VGPR99, VGPR100, VGPR101, VGPR102, VGPR103,
    VGPR104, VGPR105, VGPR106, VGPR107, VGPR108, VGPR109, VGPR110, VGPR111,
    VGPR112, VGPR113, VGPR114, VGPR115, VGPR116, VGPR117, VGPR118, VGPR119,
    VGPR120, VGPR121, VGPR122, VGPR123, VGPR124, VGPR125, VGPR126, VGPR127,
    VGPR128, VGPR129, VGPR130, VGPR131, VGPR132, VGPR133, VGPR134, VGPR135
  ]>>
]>;
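
// For shader return values, i32 results are returned in SGPRs while f32, f16,
// and v2f16 results are returned in VGPRs, using the same register order as
// the argument lists above. For instance, a single f32 return value would be
// placed in VGPR0 under these rules.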

def CSR_AMDGPU_VGPRs_24_255 : CalleeSavedRegs<
  (sequence "VGPR%u", 24, 255)
>;

def CSR_AMDGPU_VGPRs_32_255 : CalleeSavedRegs<
  (sequence "VGPR%u", 32, 255)
>;

def CSR_AMDGPU_SGPRs_32_105 : CalleeSavedRegs<
  (sequence "SGPR%u", 32, 105)
>;

// Just to get the regmask, not for calling convention purposes.
def CSR_AMDGPU_AllVGPRs : CalleeSavedRegs<
  (sequence "VGPR%u", 0, 255)
>;

// Just to get the regmask, not for calling convention purposes.
def CSR_AMDGPU_AllAllocatableSRegs : CalleeSavedRegs<
  (add (sequence "SGPR%u", 0, 105), VCC_LO, VCC_HI)
>;

def CSR_AMDGPU_HighRegs : CalleeSavedRegs<
  (add CSR_AMDGPU_VGPRs_32_255, CSR_AMDGPU_SGPRs_32_105)
>;
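
// CSR_AMDGPU_HighRegs combines the two ranges above: callees preserve
// VGPR32-VGPR255 and SGPR32-SGPR105, leaving VGPR0-31 and SGPR0-31
// call-clobbered (the VGPR argument/return registers used below fall in that
// clobbered range).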

// Calling convention for callable (non-entry) functions
def CC_AMDGPU_Func : CallingConv<[
  CCIfByVal<CCPassByVal<4, 4>>,
  CCIfType<[i1], CCPromoteToType<i32>>,
  CCIfType<[i1, i8, i16], CCIfExtend<CCPromoteToType<i32>>>,
  CCIfType<[i32, f32, i16, f16, v2i16, v2f16, i1], CCAssignToReg<[
    VGPR0, VGPR1, VGPR2, VGPR3, VGPR4, VGPR5, VGPR6, VGPR7,
    VGPR8, VGPR9, VGPR10, VGPR11, VGPR12, VGPR13, VGPR14, VGPR15,
    VGPR16, VGPR17, VGPR18, VGPR19, VGPR20, VGPR21, VGPR22, VGPR23,
    VGPR24, VGPR25, VGPR26, VGPR27, VGPR28, VGPR29, VGPR30, VGPR31]>>,
  CCIfType<[i32, f32, v2i16, v2f16, i16, f16, i1], CCAssignToStack<4, 4>>,
  CCIfType<[i64, f64, v2i32, v2f32], CCAssignToStack<8, 4>>,
  CCIfType<[v3i32, v3f32], CCAssignToStack<12, 4>>,
  CCIfType<[v4i32, v4f32, v2i64, v2f64], CCAssignToStack<16, 4>>,
  CCIfType<[v5i32, v5f32], CCAssignToStack<20, 4>>,
  CCIfType<[v8i32, v8f32], CCAssignToStack<32, 4>>,
  CCIfType<[v16i32, v16f32], CCAssignToStack<64, 4>>
]>;
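
// Under CC_AMDGPU_Func, byval arguments are passed on the stack, small integer
// arguments with an extension attribute are promoted to i32, the first 32
// eligible 32-bit (or packed 16-bit) values are assigned to VGPR0-VGPR31, and
// the remaining listed types fall back to 4-byte-aligned stack slots of the
// given sizes. Illustrative sketch (hypothetical signature):
//   define void @callee(i8 zeroext %a, <2 x half> %b)
// passes %a promoted to i32 in one VGPR and %b packed into another.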

// Return value calling convention for callable (non-entry) functions
def RetCC_AMDGPU_Func : CallingConv<[
  CCIfType<[i1], CCPromoteToType<i32>>,
  CCIfType<[i1, i16], CCIfExtend<CCPromoteToType<i32>>>,
  CCIfType<[i32, f32, i16, f16, v2i16, v2f16], CCAssignToReg<[
    VGPR0, VGPR1, VGPR2, VGPR3, VGPR4, VGPR5, VGPR6, VGPR7,
    VGPR8, VGPR9, VGPR10, VGPR11, VGPR12, VGPR13, VGPR14, VGPR15,
    VGPR16, VGPR17, VGPR18, VGPR19, VGPR20, VGPR21, VGPR22, VGPR23,
    VGPR24, VGPR25, VGPR26, VGPR27, VGPR28, VGPR29, VGPR30, VGPR31]>>
]>;
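
// Return values follow the same pattern: for example, a signext/zeroext i16
// result is promoted to i32 and returned in the first available VGPR under the
// rules above.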

def CC_AMDGPU : CallingConv<[
   CCIf<"static_cast<const GCNSubtarget&>"
         "(State.getMachineFunction().getSubtarget()).getGeneration() >= "
           "AMDGPUSubtarget::SOUTHERN_ISLANDS",
        CCDelegateTo<CC_SI>>,
   CCIf<"static_cast<const GCNSubtarget&>"
         "(State.getMachineFunction().getSubtarget()).getGeneration() >= "
           "AMDGPUSubtarget::SOUTHERN_ISLANDS && State.getCallingConv() == CallingConv::C",
        CCDelegateTo<CC_AMDGPU_Func>>
]>;
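
// CC_AMDGPU first delegates to the shader convention CC_SI on SOUTHERN_ISLANDS
// and newer subtargets; values that CC_SI does not assign fall through, and if
// the function uses the C calling convention they are handled by
// CC_AMDGPU_Func instead.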