//===-- PPCRegisterInfo.td - The PowerPC Register File -----*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file describes the PowerPC register file.
//
//===----------------------------------------------------------------------===//

let Namespace = "PPC" in {
def sub_lt : SubRegIndex<1>;
def sub_gt : SubRegIndex<1, 1>;
def sub_eq : SubRegIndex<1, 2>;
def sub_un : SubRegIndex<1, 3>;
def sub_32 : SubRegIndex<32>;
def sub_64 : SubRegIndex<64>;
def sub_vsx0 : SubRegIndex<128>;
def sub_vsx1 : SubRegIndex<128, 128>;
def sub_pair0 : SubRegIndex<256>;
def sub_pair1 : SubRegIndex<256, 256>;
def sub_gp8_x0 : SubRegIndex<64>;
def sub_gp8_x1 : SubRegIndex<64, 64>;
}
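
// Note: SubRegIndex<Size, Offset> describes a sub-register of Size bits at bit
// offset Offset within its super-register. For example, sub_gt above is a
// 1-bit index at offset 1 (the GT bit within a 4-bit CR field), and sub_vsx1
// is the 128-bit half at offset 128 within a 256-bit VSR pair.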


class PPCReg<string n> : Register<n> {
  let Namespace = "PPC";
}

// We identify all our registers with a 5-bit ID, for consistency's sake.

// GPR - One of the 32 32-bit general-purpose registers
class GPR<bits<5> num, string n> : PPCReg<n> {
  let HWEncoding{4-0} = num;
}

// GP8 - One of the 32 64-bit general-purpose registers
class GP8<GPR SubReg, string n> : PPCReg<n> {
  let HWEncoding = SubReg.HWEncoding;
  let SubRegs = [SubReg];
  let SubRegIndices = [sub_32];
}

// SPE - One of the 32 64-bit general-purpose registers (SPE)
class SPE<GPR SubReg, string n> : PPCReg<n> {
  let HWEncoding = SubReg.HWEncoding;
  let SubRegs = [SubReg];
  let SubRegIndices = [sub_32];
}

// SPR - One of the 32-bit special-purpose registers
class SPR<bits<10> num, string n> : PPCReg<n> {
  let HWEncoding{9-0} = num;
}

// FPR - One of the 32 64-bit floating-point registers
class FPR<bits<5> num, string n> : PPCReg<n> {
  let HWEncoding{4-0} = num;
}

// VF - One of the 32 64-bit floating-point subregisters of the vector
// registers (used by VSX).
class VF<bits<5> num, string n> : PPCReg<n> {
  let HWEncoding{4-0} = num;
  let HWEncoding{5} = 1;
}
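// For illustration, VF0 thus gets HWEncoding 32 (0b100000), matching its VSX
// name vs32, and VF31 gets 63 (vs63).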

// VR - One of the 32 128-bit vector registers
class VR<VF SubReg, string n> : PPCReg<n> {
  let HWEncoding{4-0} = SubReg.HWEncoding{4-0};
  let HWEncoding{5} = 0;
  let SubRegs = [SubReg];
  let SubRegIndices = [sub_64];
}

// VSRL - One of the 32 128-bit VSX registers that overlap with the scalar
// floating-point registers.
class VSRL<FPR SubReg, string n> : PPCReg<n> {
  let HWEncoding = SubReg.HWEncoding;
  let SubRegs = [SubReg];
  let SubRegIndices = [sub_64];
}

// VSXReg - One of the VSX registers in the range vs32-vs63 with numbering
// and encoding to match.
class VSXReg<bits<6> num, string n> : PPCReg<n> {
  let HWEncoding{5-0} = num;
}
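
// As defined by the Power ISA, the 64 VSX registers overlay the older register
// files: vs0-vs31 contain the scalar FPRs f0-f31 in their first doubleword
// (modeled by VSRL/FPR above), while vs32-vs63 are the Altivec VRs v0-v31
// (modeled by VR/VF above).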

// CR - One of the 8 4-bit condition registers
class CR<bits<3> num, string n, list<Register> subregs> : PPCReg<n> {
  let HWEncoding{2-0} = num;
  let SubRegs = subregs;
}

// CRBIT - One of the 32 1-bit condition register bits
class CRBIT<bits<5> num, string n> : PPCReg<n> {
  let HWEncoding{4-0} = num;
}

// ACC - One of the 8 512-bit VSX accumulators.
class ACC<bits<3> num, string n, list<Register> subregs> : PPCReg<n> {
  let HWEncoding{2-0} = num;
  let SubRegs = subregs;
}

// UACC - One of the 8 512-bit VSX accumulators prior to being primed.
// Without using this register class, the register allocator has no way to
// differentiate a primed accumulator from an unprimed accumulator.
// This may result in invalid copies between primed and unprimed accumulators.
class UACC<bits<3> num, string n, list<Register> subregs> : PPCReg<n> {
  let HWEncoding{2-0} = num;
  let SubRegs = subregs;
}

// VSR Pairs - One of the 32 pairs of even-odd consecutive VSRs.
class VSRPair<bits<5> num, string n, list<Register> subregs> : PPCReg<n> {
  let HWEncoding{4-0} = num;
  let SubRegs = subregs;
}

// GP8Pair - Consecutive even-odd paired GP8.
class GP8Pair<string n, bits<5> EvenIndex> : PPCReg<n> {
  assert !eq(EvenIndex{0}, 0), "Index should be even.";
  let HWEncoding{4-0} = EvenIndex;
  let SubRegs = [!cast<GP8>("X"#EvenIndex), !cast<GP8>("X"#!add(EvenIndex, 1))];
  let DwarfNumbers = [-1, -1];
  let SubRegIndices = [sub_gp8_x0, sub_gp8_x1];
}

// General-purpose registers
foreach Index = 0-31 in {
  def R#Index : GPR<Index, "r"#Index>, DwarfRegNum<[-2, Index]>;
}

// 64-bit General-purpose registers
foreach Index = 0-31 in {
  def X#Index : GP8<!cast<GPR>("R"#Index), "r"#Index>,
                    DwarfRegNum<[Index, -2]>;
}

// SPE registers
foreach Index = 0-31 in {
  def S#Index : SPE<!cast<GPR>("R"#Index), "r"#Index>,
                    DwarfRegNum<[!add(Index, 1200), !add(Index, 1200)]>;
}

// Floating-point registers
foreach Index = 0-31 in {
  def F#Index : FPR<Index, "f"#Index>,
                DwarfRegNum<[!add(Index, 32), !add(Index, 32)]>;
}

// 64-bit Floating-point subregisters of Altivec registers
// Note: the register names are v0-v31 or vs32-vs63 depending on the use.
//       Custom C++ code is used to produce the correct name and encoding.
foreach Index = 0-31 in {
  def VF#Index : VF<Index, "v" #Index>,
                 DwarfRegNum<[!add(Index, 77), !add(Index, 77)]>;
}

// Vector registers
foreach Index = 0-31 in {
  def V#Index : VR<!cast<VF>("VF"#Index), "v"#Index>,
                DwarfRegNum<[!add(Index, 77), !add(Index, 77)]>;
}

// VSX registers
foreach Index = 0-31 in {
  def VSL#Index : VSRL<!cast<FPR>("F"#Index), "vs"#Index>,
                  DwarfRegAlias<!cast<FPR>("F"#Index)>;
}

// Dummy VSX registers: these define the names "vs32"-"vs63" and are used only
// for asm printing.
foreach Index = 32-63 in {
  def VSX#Index : VSXReg<Index, "vs"#Index>;
}

let SubRegIndices = [sub_vsx0, sub_vsx1] in {
  // VSR pairs 0 - 15 (corresponding to VSRs 0 - 30 paired with 1 - 31).
  foreach Index = { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30 } in {
    def VSRp#!srl(Index, 1) : VSRPair<!srl(Index, 1), "vsp"#Index,
                                      [!cast<VSRL>("VSL"#Index), !cast<VSRL>("VSL"#!add(Index, 1))]>,
                              DwarfRegNum<[-1, -1]>;
  }

  // VSR pairs 16 - 31 (corresponding to VSRs 32 - 62 paired with 33 - 63).
  foreach Index = { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30 } in {
    def VSRp#!add(!srl(Index, 1), 16) :
      VSRPair<!add(!srl(Index, 1), 16), "vsp"#!add(Index, 32),
              [!cast<VR>("V"#Index), !cast<VR>("V"#!add(Index, 1))]>,
      DwarfRegNum<[-1, -1]>;
  }
}
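
// For illustration, the Index = 2 iteration of the second loop above expands
// to roughly:
//   def VSRp17 : VSRPair<17, "vsp34", [V2, V3]>, DwarfRegNum<[-1, -1]>;
// i.e. VSR pair 17 covers VSRs 34 and 35 (Altivec v2 and v3).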

// The 16 even-odd pairs of consecutive GP8s.
foreach Index = { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30 } in {
  def G8p#!srl(Index, 1) : GP8Pair<"r"#Index, Index>;
}
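
// For illustration, the Index = 30 iteration above expands to roughly:
//   def G8p15 : GP8Pair<"r30", 30>;
// i.e. SubRegs = [X30, X31] with HWEncoding{4-0} = 30.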

// The representation of r0 when treated as the constant 0.
def ZERO  : GPR<0, "0">,    DwarfRegAlias<R0>;
def ZERO8 : GP8<ZERO, "0">, DwarfRegAlias<X0>;

// Representations of the frame pointer used by ISD::FRAMEADDR.
def FP   : GPR<0 /* arbitrary */, "**FRAME POINTER**">;
def FP8  : GP8<FP, "**FRAME POINTER**">;

// Representations of the base pointer used by setjmp.
def BP   : GPR<0 /* arbitrary */, "**BASE POINTER**">;
def BP8  : GP8<BP, "**BASE POINTER**">;

// Condition register bits
def CR0LT : CRBIT< 0, "0">;
def CR0GT : CRBIT< 1, "1">;
def CR0EQ : CRBIT< 2, "2">;
def CR0UN : CRBIT< 3, "3">;
def CR1LT : CRBIT< 4, "4">;
def CR1GT : CRBIT< 5, "5">;
def CR1EQ : CRBIT< 6, "6">;
def CR1UN : CRBIT< 7, "7">;
def CR2LT : CRBIT< 8, "8">;
def CR2GT : CRBIT< 9, "9">;
def CR2EQ : CRBIT<10, "10">;
def CR2UN : CRBIT<11, "11">;
def CR3LT : CRBIT<12, "12">;
def CR3GT : CRBIT<13, "13">;
def CR3EQ : CRBIT<14, "14">;
def CR3UN : CRBIT<15, "15">;
def CR4LT : CRBIT<16, "16">;
def CR4GT : CRBIT<17, "17">;
def CR4EQ : CRBIT<18, "18">;
def CR4UN : CRBIT<19, "19">;
def CR5LT : CRBIT<20, "20">;
def CR5GT : CRBIT<21, "21">;
def CR5EQ : CRBIT<22, "22">;
def CR5UN : CRBIT<23, "23">;
def CR6LT : CRBIT<24, "24">;
def CR6GT : CRBIT<25, "25">;
def CR6EQ : CRBIT<26, "26">;
def CR6UN : CRBIT<27, "27">;
def CR7LT : CRBIT<28, "28">;
def CR7GT : CRBIT<29, "29">;
def CR7EQ : CRBIT<30, "30">;
def CR7UN : CRBIT<31, "31">;
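// For illustration, the encodings follow CRn{LT,GT,EQ,UN} = 4*n + {0,1,2,3};
// e.g. CR6EQ above is 4*6 + 2 = 26.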

// Condition registers
let SubRegIndices = [sub_lt, sub_gt, sub_eq, sub_un] in {
def CR0 : CR<0, "cr0", [CR0LT, CR0GT, CR0EQ, CR0UN]>, DwarfRegNum<[68, 68]>;
def CR1 : CR<1, "cr1", [CR1LT, CR1GT, CR1EQ, CR1UN]>, DwarfRegNum<[69, 69]>;
def CR2 : CR<2, "cr2", [CR2LT, CR2GT, CR2EQ, CR2UN]>, DwarfRegNum<[70, 70]>;
def CR3 : CR<3, "cr3", [CR3LT, CR3GT, CR3EQ, CR3UN]>, DwarfRegNum<[71, 71]>;
def CR4 : CR<4, "cr4", [CR4LT, CR4GT, CR4EQ, CR4UN]>, DwarfRegNum<[72, 72]>;
def CR5 : CR<5, "cr5", [CR5LT, CR5GT, CR5EQ, CR5UN]>, DwarfRegNum<[73, 73]>;
def CR6 : CR<6, "cr6", [CR6LT, CR6GT, CR6EQ, CR6UN]>, DwarfRegNum<[74, 74]>;
def CR7 : CR<7, "cr7", [CR7LT, CR7GT, CR7EQ, CR7UN]>, DwarfRegNum<[75, 75]>;
}

// Link register
def LR  : SPR<8, "lr">, DwarfRegNum<[-2, 65]>;
//let Aliases = [LR] in
def LR8 : SPR<8, "lr">, DwarfRegNum<[65, -2]>;

// Count register
def CTR  : SPR<9, "ctr">, DwarfRegNum<[-2, 66]>;
def CTR8 : SPR<9, "ctr">, DwarfRegNum<[66, -2]>;

// VRsave register
def VRSAVE: SPR<256, "vrsave">, DwarfRegNum<[109]>;

// SPE extra registers
// SPE Accumulator for multiply-accumulate SPE operations.  Never directly
// accessed, so there's no real encoding for it.
def SPEACC: DwarfRegNum<[99, 111]>;
def SPEFSCR: SPR<512, "spefscr">, DwarfRegNum<[612, 112]>;

def XER: SPR<1, "xer">, DwarfRegNum<[76]>;

// Carry bit. In the architecture this is really bit 0 of the XER register
// (which really is SPR 1); it is the only bit of XER that is interesting to
// the compiler.
def CARRY: SPR<1, "xer">, DwarfRegNum<[76]> {
  let Aliases = [XER];
}

// FP rounding mode: bits 30 and 31 of the FP status and control register.
// This is not allocated as a normal register; it appears only in Uses and
// Defs. The ABI says it needs to be preserved by a function, but this is not
// achieved by saving and restoring it as with most registers; it has to be
// done in code. To make this work, all return and call instructions are
// described as Uses of RM, so instructions that do nothing but change RM will
// not get deleted.
def RM: PPCReg<"**ROUNDING MODE**">;

/// Register classes
// Allocate volatiles first
// then nonvolatiles in reverse order since stmw/lmw save from rN to r31
def GPRC : RegisterClass<"PPC", [i32,f32], 32, (add (sequence "R%u", 2, 12),
                                                    (sequence "R%u", 30, 13),
                                                    R31, R0, R1, FP, BP)> {
  // On non-Darwin PPC64 systems, R2 can be allocated, but must be restored, so
  // put it at the end of the list.
  // On AIX, CSRs are allocated starting from R31 according to:
  // https://www.ibm.com/docs/en/ssw_aix_72/assembler/assembler_pdf.pdf.
  // This also helps set the correct `NumOfGPRsSaved' in the traceback table.
  let AltOrders = [(add (sub GPRC, R2), R2),
                   (add (sequence "R%u", 2, 12),
                        (sequence "R%u", 31, 13), R0, R1, FP, BP)];
  let AltOrderSelect = [{
    return MF.getSubtarget<PPCSubtarget>().getGPRAllocationOrderIdx();
  }];
}
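
// Note: AltOrderSelect returns an index into the allocation orders, where 0
// selects the default order given in the class definition and 1 selects the
// first AltOrders entry; here the subtarget returns the index appropriate for
// its ABI.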

def G8RC : RegisterClass<"PPC", [i64], 64, (add (sequence "X%u", 2, 12),
                                                (sequence "X%u", 30, 14),
                                                X31, X13, X0, X1, FP8, BP8)> {
  // On non-Darwin PPC64 systems, R2 can be allocated, but must be restored, so
  // put it at the end of the list.
  let AltOrders = [(add (sub G8RC, X2), X2),
                   (add (sequence "X%u", 2, 12),
                        (sequence "X%u", 31, 13), X0, X1, FP8, BP8)];
  let AltOrderSelect = [{
    return MF.getSubtarget<PPCSubtarget>().getGPRAllocationOrderIdx();
  }];
}

// For some instructions r0 is special (representing the value 0 instead of
// the value in the r0 register), and we use these register subclasses to
// prevent r0 from being allocated for use by those instructions.
def GPRC_NOR0 : RegisterClass<"PPC", [i32,f32], 32, (add (sub GPRC, R0), ZERO)> {
  // On non-Darwin PPC64 systems, R2 can be allocated, but must be restored, so
  // put it at the end of the list.
  let AltOrders = [(add (sub GPRC_NOR0, R2), R2),
                   (add (sequence "R%u", 2, 12),
                        (sequence "R%u", 31, 13), R1, FP, BP, ZERO)];
  let AltOrderSelect = [{
    return MF.getSubtarget<PPCSubtarget>().getGPRAllocationOrderIdx();
  }];
}

def G8RC_NOX0 : RegisterClass<"PPC", [i64], 64, (add (sub G8RC, X0), ZERO8)> {
  // On non-Darwin PPC64 systems, R2 can be allocated, but must be restored, so
  // put it at the end of the list.
  let AltOrders = [(add (sub G8RC_NOX0, X2), X2),
                   (add (sequence "X%u", 2, 12),
                        (sequence "X%u", 31, 13), X1, FP8, BP8, ZERO8)];
  let AltOrderSelect = [{
    return MF.getSubtarget<PPCSubtarget>().getGPRAllocationOrderIdx();
  }];
}

def SPERC : RegisterClass<"PPC", [f64], 64, (add (sequence "S%u", 2, 12),
                                                (sequence "S%u", 30, 13),
                                                S31, S0, S1)>;

// Allocate volatiles first, then non-volatiles in reverse order. With the SVR4
// ABI the size of the Floating-point register save area is determined by the
// allocated non-volatile register with the lowest register number, as FP
// register N is spilled to offset 8 * (32 - N) below the back chain word of the
// previous stack frame. By allocating non-volatiles in reverse order we make
// sure that the Floating-point register save area is always as small as
// possible because there aren't any unused spill slots.
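// For example, if F14 is the lowest-numbered non-volatile FPR that gets
// allocated, F14-F31 are saved at offsets 8*(32-14) = 144 down to 8 bytes
// below the back chain word, i.e. a 144-byte save area (18 registers * 8 bytes).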
def F8RC : RegisterClass<"PPC", [f64], 64, (add (sequence "F%u", 0, 13),
                                                (sequence "F%u", 31, 14))>;
def F4RC : RegisterClass<"PPC", [f32], 32, (add F8RC)>;

def VRRC : RegisterClass<"PPC",
                         [v16i8,v8i16,v4i32,v2i64,v1i128,v4f32,v2f64, f128],
                         128,
                         (add V2, V3, V4, V5, V0, V1, V6, V7, V8, V9, V10, V11,
                             V12, V13, V14, V15, V16, V17, V18, V19, V31, V30,
                             V29, V28, V27, V26, V25, V24, V23, V22, V21, V20)>;

// VSX register classes (the allocation order mirrors that of the corresponding
// subregister classes).
def VSLRC : RegisterClass<"PPC", [v4i32,v4f32,v2f64,v2i64], 128,
                          (add (sequence "VSL%u", 0, 13),
                               (sequence "VSL%u", 31, 14))>;
def VSRC  : RegisterClass<"PPC", [v4i32,v4f32,v2f64,v2i64], 128,
                          (add VSLRC, VRRC)>;

// Register classes for the 64-bit "scalar" VSX subregisters.
def VFRC :  RegisterClass<"PPC", [f64], 64,
                          (add VF2, VF3, VF4, VF5, VF0, VF1, VF6, VF7,
                               VF8, VF9, VF10, VF11, VF12, VF13, VF14,
                               VF15, VF16, VF17, VF18, VF19, VF31, VF30,
                               VF29, VF28, VF27, VF26, VF25, VF24, VF23,
                               VF22, VF21, VF20)>;
def VSFRC : RegisterClass<"PPC", [f64], 64, (add F8RC, VFRC)>;

// Allow spilling GPRs into caller-saved VSRs.
def SPILLTOVSRRC : RegisterClass<"PPC", [i64, f64], 64, (add G8RC, (sub VSFRC,
                                 (sequence "VF%u", 31, 20),
                                 (sequence "F%u", 31, 14)))>;

// Register class for single precision scalars in VSX registers
def VSSRC : RegisterClass<"PPC", [f32], 32, (add VSFRC)>;

def CRBITRC : RegisterClass<"PPC", [i1], 32,
  (add CR2LT, CR2GT, CR2EQ, CR2UN,
       CR3LT, CR3GT, CR3EQ, CR3UN,
       CR4LT, CR4GT, CR4EQ, CR4UN,
       CR5LT, CR5GT, CR5EQ, CR5UN,
       CR6LT, CR6GT, CR6EQ, CR6UN,
       CR7LT, CR7GT, CR7EQ, CR7UN,
       CR1LT, CR1GT, CR1EQ, CR1UN,
       CR0LT, CR0GT, CR0EQ, CR0UN)> {
  let Size = 32;
  let AltOrders = [(sub CRBITRC, CR2LT, CR2GT, CR2EQ, CR2UN, CR3LT, CR3GT,
                        CR3EQ, CR3UN, CR4LT, CR4GT, CR4EQ, CR4UN)];
  let AltOrderSelect = [{
    return MF.getSubtarget<PPCSubtarget>().isELFv2ABI() &&
           MF.getInfo<PPCFunctionInfo>()->isNonVolatileCRDisabled();
  }];
}

def CRRC : RegisterClass<"PPC", [i32], 32,
  (add CR0, CR1, CR5, CR6,
       CR7, CR2, CR3, CR4)> {
  let AltOrders = [(sub CRRC, CR2, CR3, CR4)];
  let AltOrderSelect = [{
    return MF.getSubtarget<PPCSubtarget>().isELFv2ABI() &&
           MF.getInfo<PPCFunctionInfo>()->isNonVolatileCRDisabled();
  }];
}
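
// Note: in both classes above, CR2-CR4 are the non-volatile condition-register
// fields, so the AltOrder removes them (and their bits) when the ELFv2
// subtarget reports that use of non-volatile CR fields is disabled.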
// The CTR registers are not allocatable because they're used by the
// decrement-and-branch instructions, and thus need to stay live across
// multiple basic blocks.
def CTRRC : RegisterClass<"PPC", [i32], 32, (add CTR)> {
  let isAllocatable = 0;
}
def CTRRC8 : RegisterClass<"PPC", [i64], 64, (add CTR8)> {
  let isAllocatable = 0;
}

def LRRC : RegisterClass<"PPC", [i32], 32, (add LR)> {
  let isAllocatable = 0;
}
def LR8RC : RegisterClass<"PPC", [i64], 64, (add LR8)> {
  let isAllocatable = 0;
}

def VRSAVERC : RegisterClass<"PPC", [i32], 32, (add VRSAVE)>;
def CARRYRC : RegisterClass<"PPC", [i32], 32, (add CARRY, XER)> {
  let CopyCost = -1;
}

let SubRegIndices = [sub_pair0, sub_pair1] in {
  def ACC0 : ACC<0, "acc0", [VSRp0, VSRp1]>, DwarfRegNum<[-1, -1]>;
  def ACC1 : ACC<1, "acc1", [VSRp2, VSRp3]>, DwarfRegNum<[-1, -1]>;
  def ACC2 : ACC<2, "acc2", [VSRp4, VSRp5]>, DwarfRegNum<[-1, -1]>;
  def ACC3 : ACC<3, "acc3", [VSRp6, VSRp7]>, DwarfRegNum<[-1, -1]>;
  def ACC4 : ACC<4, "acc4", [VSRp8, VSRp9]>, DwarfRegNum<[-1, -1]>;
  def ACC5 : ACC<5, "acc5", [VSRp10, VSRp11]>, DwarfRegNum<[-1, -1]>;
  def ACC6 : ACC<6, "acc6", [VSRp12, VSRp13]>, DwarfRegNum<[-1, -1]>;
  def ACC7 : ACC<7, "acc7", [VSRp14, VSRp15]>, DwarfRegNum<[-1, -1]>;
}
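// For illustration, each ACCn above overlaps VSRp(2n) and VSRp(2n+1), i.e. the
// four adjacent VSRs VS(4n)-VS(4n+3); e.g. ACC1 is VS4-VS7.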
def ACCRC : RegisterClass<"PPC", [v512i1], 128, (add ACC0, ACC1, ACC2, ACC3,
                                                     ACC4, ACC5, ACC6, ACC7)> {
  // The AllocationPriority is in the range [0, 63]. Assign the ACC registers
  // the highest possible priority in this range to force the register
  // allocator to assign these registers first. This is done because the ACC
  // registers must represent 4 adjacent vector registers. For example, ACC1
  // must be VS4 - VS7. The value here must be at least 32 as we want to
  // allocate these registers even before we allocate global ranges.
  let AllocationPriority = 63;
  let Size = 512;
}

let SubRegIndices = [sub_pair0, sub_pair1] in {
  def UACC0 : UACC<0, "acc0", [VSRp0, VSRp1]>, DwarfRegNum<[-1, -1]>;
  def UACC1 : UACC<1, "acc1", [VSRp2, VSRp3]>, DwarfRegNum<[-1, -1]>;
  def UACC2 : UACC<2, "acc2", [VSRp4, VSRp5]>, DwarfRegNum<[-1, -1]>;
  def UACC3 : UACC<3, "acc3", [VSRp6, VSRp7]>, DwarfRegNum<[-1, -1]>;
  def UACC4 : UACC<4, "acc4", [VSRp8, VSRp9]>, DwarfRegNum<[-1, -1]>;
  def UACC5 : UACC<5, "acc5", [VSRp10, VSRp11]>, DwarfRegNum<[-1, -1]>;
  def UACC6 : UACC<6, "acc6", [VSRp12, VSRp13]>, DwarfRegNum<[-1, -1]>;
  def UACC7 : UACC<7, "acc7", [VSRp14, VSRp15]>, DwarfRegNum<[-1, -1]>;
}
def UACCRC : RegisterClass<"PPC", [v512i1], 128,
                           (add UACC0, UACC1, UACC2, UACC3,
                                UACC4, UACC5, UACC6, UACC7)> {
  // The AllocationPriority for the UACC registers is still high and must be at
  // least 32 as we want to allocate these registers before we allocate other
  // global ranges. The value must be less than the AllocationPriority of the
  // ACC registers.
  let AllocationPriority = 36;
  let Size = 512;
}

// FIXME: This allocation order may increase stack frame size when allocating
// non-volatile registers.
//
// Place the Altivec registers first and allocate the rest as underlying VSX
// registers, to reduce interference with the accumulator registers (the lower
// 32 VSRs). This reduces copies when loading accumulators, which is a common
// use for paired VSX registers.
def VSRpRC :
  RegisterClass<"PPC", [v256i1], 128,
                (add VSRp17, VSRp18, VSRp16, VSRp19, VSRp20, VSRp21,
                     VSRp22, VSRp23, VSRp24, VSRp25, VSRp31, VSRp30,
                     VSRp29, VSRp28, VSRp27, VSRp26,
                     (sequence "VSRp%u", 0, 6),
                     (sequence "VSRp%u", 15, 7))> {
  // Give the VSRp registers a non-zero AllocationPriority. The value is less
  // than 32, as these registers should not always be allocated before global
  // ranges, and it should also be less than the UACC registers'
  // AllocationPriority minus 32: even global VSRp live ranges should be
  // allocated after the UACC registers have been chosen.
  let AllocationPriority = 2;
  let Size = 256;
}

// Keep the allocation order as similar to G8RC's as possible to avoid
// potential spilling. Similarly, we have an AltOrder for the 64-bit ELF ABI in
// which r2 is allocated last.
def G8pRC :
  RegisterClass<"PPC", [i128], 128,
                (add (sequence "G8p%u", 1, 5),
                     (sequence "G8p%u", 14, 7),
                     G8p15, G8p6, G8p0)> {
  let AltOrders = [(add (sub G8pRC, G8p1), G8p1)];
  let AltOrderSelect = [{
    return MF.getSubtarget<PPCSubtarget>().is64BitELFABI();
  }];
  let Size = 128;
}