;; ARM Cortex-A8 scheduling description.
;; Copyright (C) 2007-2019 Free Software Foundation, Inc.
;; Contributed by CodeSourcery.

;; This file is part of GCC.

;; GCC is free software; you can redistribute it and/or modify it
;; under the terms of the GNU General Public License as published
;; by the Free Software Foundation; either version 3, or (at your
;; option) any later version.

;; GCC is distributed in the hope that it will be useful, but WITHOUT
;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
;; or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
;; License for more details.

;; You should have received a copy of the GNU General Public License
;; along with GCC; see the file COPYING3.  If not see
;; <http://www.gnu.org/licenses/>.

(define_automaton "cortex_a8")

;; Only one load/store instruction can be issued per cycle
;; (although reservation of this unit is only required for single
;; loads and stores -- see below).
(define_cpu_unit "cortex_a8_issue_ls" "cortex_a8")

;; Only one branch instruction can be issued per cycle.
(define_cpu_unit "cortex_a8_issue_branch" "cortex_a8")

;; The two ALU pipelines.
(define_cpu_unit "cortex_a8_alu0" "cortex_a8")
(define_cpu_unit "cortex_a8_alu1" "cortex_a8")

;; The usual flow of an instruction through the pipelines.
(define_reservation "cortex_a8_default"
                    "cortex_a8_alu0|cortex_a8_alu1")

;; The flow of a branch instruction through the pipelines.
(define_reservation "cortex_a8_branch"
                    "(cortex_a8_alu0+cortex_a8_issue_branch)|\
                     (cortex_a8_alu1+cortex_a8_issue_branch)")

;; The flow of a load or store instruction through the pipeline in
;; the case where that instruction consists of only one micro-op...
(define_reservation "cortex_a8_load_store_1"
                    "(cortex_a8_alu0+cortex_a8_issue_ls)|\
                     (cortex_a8_alu1+cortex_a8_issue_ls)")

;; ...and in the case of two micro-ops.  Dual issue is altogether forbidden
;; during the issue cycle of the first micro-op.  (Rather than modelling
;; a separate issue unit, we reserve both alu0 and alu1 to prevent any
;; other instruction from being issued in that first cycle.)  Even
;; though the load/store pipeline is usually available in either ALU
;; pipe, multi-cycle instructions always issue in pipeline 0.
(define_reservation "cortex_a8_load_store_2"
                    "cortex_a8_alu0+cortex_a8_alu1+cortex_a8_issue_ls,\
                     cortex_a8_alu0+cortex_a8_issue_ls")
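
;; As an illustration (hypothetical example): a single-word load such as
;;   ldr r0, [r1]
;; takes the single micro-op path above, whereas a load multiple of
;; three or four registers such as
;;   ldm r1, {r2, r3, r4}
;; takes the two micro-op path (see the load reservations below).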

;; The flow of a single-cycle multiplication.
(define_reservation "cortex_a8_multiply"
                    "cortex_a8_alu0")

;; The flow of a multiplication instruction that gets decomposed into
;; two micro-ops.  The two micro-ops are issued to pipeline 0 on
;; successive cycles.  Dual issue cannot happen during the cycle in
;; which the first of the micro-ops issues.
(define_reservation "cortex_a8_multiply_2"
                    "cortex_a8_alu0+cortex_a8_alu1,\
                     cortex_a8_alu0")

;; Similarly, the flow of a multiplication instruction that gets
;; decomposed into three micro-ops.  Dual issue cannot occur except on
;; the cycle in which the third micro-op is issued.
(define_reservation "cortex_a8_multiply_3"
                    "cortex_a8_alu0+cortex_a8_alu1,\
                     cortex_a8_alu0+cortex_a8_alu1,\
                     cortex_a8_alu0")

;; The model given here assumes that all instructions are unconditional.

;; Data processing instructions, but not move instructions.

;; We include CLZ with these since it has the same execution pattern
;; (source read in E2 and destination available at the end of that cycle).
(define_insn_reservation "cortex_a8_alu" 2
  (and (eq_attr "tune" "cortexa8")
       (eq_attr "type" "alu_imm,alus_imm,logic_imm,logics_imm,\
                        alu_sreg,alus_sreg,logic_reg,logics_reg,\
                        adc_imm,adcs_imm,adc_reg,adcs_reg,\
                        adr,bfm,clz,rbit,rev,alu_dsp_reg,\
                        shift_imm,shift_reg,\
                        multiple,no_insn"))
  "cortex_a8_default")

(define_insn_reservation "cortex_a8_alu_shift" 2
  (and (eq_attr "tune" "cortexa8")
       (eq_attr "type" "alu_shift_imm,alus_shift_imm,\
                        logic_shift_imm,logics_shift_imm,\
                        extend"))
  "cortex_a8_default")

(define_insn_reservation "cortex_a8_alu_shift_reg" 2
  (and (eq_attr "tune" "cortexa8")
       (eq_attr "type" "alu_shift_reg,alus_shift_reg,\
                        logic_shift_reg,logics_shift_reg"))
  "cortex_a8_default")

;; Move instructions.

(define_insn_reservation "cortex_a8_mov" 1
  (and (eq_attr "tune" "cortexa8")
       (eq_attr "type" "mov_imm,mov_reg,mov_shift,mov_shift_reg,\
                        mvn_imm,mvn_reg,mvn_shift,mvn_shift_reg,\
                        mrs"))
  "cortex_a8_default")

;; Exceptions to the default latencies for data processing instructions.

;; A move followed by an ALU instruction with no early dep.
;; (Such a pair can be issued in parallel, hence latency zero.)
(define_bypass 0 "cortex_a8_mov" "cortex_a8_alu")
(define_bypass 0 "cortex_a8_mov" "cortex_a8_alu_shift"
               "arm_no_early_alu_shift_dep")
(define_bypass 0 "cortex_a8_mov" "cortex_a8_alu_shift_reg"
               "arm_no_early_alu_shift_value_dep")
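
;; For example (hypothetical pair), in
;;   mov r0, #42
;;   add r1, r0, r2
;; the add does not need r0 until E2, so the two can issue together.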

;; An ALU instruction followed by an ALU instruction with no early dep.
(define_bypass 1 "cortex_a8_alu,cortex_a8_alu_shift,cortex_a8_alu_shift_reg"
               "cortex_a8_alu")
(define_bypass 1 "cortex_a8_alu,cortex_a8_alu_shift,cortex_a8_alu_shift_reg"
               "cortex_a8_alu_shift"
               "arm_no_early_alu_shift_dep")
(define_bypass 1 "cortex_a8_alu,cortex_a8_alu_shift,cortex_a8_alu_shift_reg"
               "cortex_a8_alu_shift_reg"
               "arm_no_early_alu_shift_value_dep")
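
;; For example (hypothetical sequences),
;;   add r0, r1, r2
;;   add r3, r0, r4
;; can issue on consecutive cycles, whereas in
;;   add r0, r1, r2
;;   add r3, r4, r0, lsl #1
;; r0 is the shifted (early) operand of the second add, so the full
;; two-cycle latency applies.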

;; Multiplication instructions.  These are categorized according to their
;; reservation behavior and the need to distinguish certain varieties
;; for the bypasses below.  Results are available at the E5 stage
;; (but some of these are multi-cycle instructions, which explains the
;; latencies below).

(define_insn_reservation "cortex_a8_mul" 6
  (and (eq_attr "tune" "cortexa8")
       (eq_attr "type" "mul,smulxy,smmul"))
  "cortex_a8_multiply_2")

(define_insn_reservation "cortex_a8_mla" 6
  (and (eq_attr "tune" "cortexa8")
       (eq_attr "type" "mla,smlaxy,smlawy,smmla,smlad,smlsd"))
  "cortex_a8_multiply_2")

(define_insn_reservation "cortex_a8_mull" 7
  (and (eq_attr "tune" "cortexa8")
       (eq_attr "type" "smull,umull,smlal,umlal,umaal,smlalxy"))
  "cortex_a8_multiply_3")

(define_insn_reservation "cortex_a8_smulwy" 5
  (and (eq_attr "tune" "cortexa8")
       (eq_attr "type" "smulwy,smuad,smusd"))
  "cortex_a8_multiply")

;; smlald and smlsld are multiply-accumulate instructions but do not
;; receive bypassed data from other multiplication results; thus, they
;; cannot go in cortex_a8_mla above.  (See below for bypass details.)
(define_insn_reservation "cortex_a8_smlald" 6
  (and (eq_attr "tune" "cortexa8")
       (eq_attr "type" "smlald,smlsld"))
  "cortex_a8_multiply_2")

;; A multiply with a single-register result or an MLA, followed by an
;; MLA with an accumulator dependency, has its result forwarded so two
;; such instructions can issue back-to-back.
(define_bypass 1 "cortex_a8_mul,cortex_a8_mla,cortex_a8_smulwy"
               "cortex_a8_mla"
               "arm_mac_accumulator_is_mul_result")
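
;; As a hypothetical example,
;;   mul r0, r1, r2
;;   mla r3, r4, r5, r0
;; can issue back-to-back because the accumulator operand r0 of the
;; mla is the result of the preceding multiply.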

;; A multiply followed by an ALU instruction needing the multiply
;; result only at E2 has lower latency than one needing it at E1.
(define_bypass 4 "cortex_a8_mul,cortex_a8_mla,cortex_a8_mull,\
                  cortex_a8_smulwy,cortex_a8_smlald"
               "cortex_a8_alu")
(define_bypass 4 "cortex_a8_mul,cortex_a8_mla,cortex_a8_mull,\
                  cortex_a8_smulwy,cortex_a8_smlald"
               "cortex_a8_alu_shift"
               "arm_no_early_alu_shift_dep")
(define_bypass 4 "cortex_a8_mul,cortex_a8_mla,cortex_a8_mull,\
                  cortex_a8_smulwy,cortex_a8_smlald"
               "cortex_a8_alu_shift_reg"
               "arm_no_early_alu_shift_value_dep")
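
;; For instance (hypothetical sequence), in
;;   mul r0, r1, r2
;;   add r3, r0, r4
;; the add reads r0 at E2, so the effective latency is 4 cycles
;; rather than the full 6.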

;; Load instructions.
;; The presence of any register writeback is ignored here.

;; A load result has latency 3 unless the dependent instruction has
;; no early dep, in which case the latency is only 2.
;; We assume 64-bit alignment for doubleword loads.
(define_insn_reservation "cortex_a8_load1_2" 3
  (and (eq_attr "tune" "cortexa8")
       (eq_attr "type" "load_4,load_8,load_byte"))
  "cortex_a8_load_store_1")

(define_bypass 2 "cortex_a8_load1_2"
               "cortex_a8_alu")
(define_bypass 2 "cortex_a8_load1_2"
               "cortex_a8_alu_shift"
               "arm_no_early_alu_shift_dep")
(define_bypass 2 "cortex_a8_load1_2"
               "cortex_a8_alu_shift_reg"
               "arm_no_early_alu_shift_value_dep")
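
;; For example (hypothetical sequence), in
;;   ldr r0, [r1]
;;   add r2, r0, r3
;; the add does not need r0 until E2, so the load-use latency is 2
;; cycles rather than 3.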

;; We do not currently model the fact that loads with scaled register
;; offsets that are not LSL #2 have an extra cycle latency (they issue
;; as two micro-ops).
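;; A hypothetical example of such a load would be
;;   ldr r0, [r1, r2, lsl #3]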

;; A load multiple of three registers is usually issued as two micro-ops.
;; The first register will be available at E3 of the first iteration,
;; the second at E3 of the second iteration, and the third at E4 of
;; the second iteration.  A load multiple of four registers is usually
;; issued as two micro-ops.
(define_insn_reservation "cortex_a8_load3_4" 5
  (and (eq_attr "tune" "cortexa8")
       (eq_attr "type" "load_12,load_16"))
  "cortex_a8_load_store_2")
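
;; As a hypothetical illustration, for
;;   ldm r0, {r1, r2, r3}
;; r1 becomes available at E3 of the first iteration, r2 at E3 of the
;; second and r3 at E4 of the second, as described above.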

(define_bypass 4 "cortex_a8_load3_4"
               "cortex_a8_alu")
(define_bypass 4 "cortex_a8_load3_4"
               "cortex_a8_alu_shift"
               "arm_no_early_alu_shift_dep")
(define_bypass 4 "cortex_a8_load3_4"
               "cortex_a8_alu_shift_reg"
               "arm_no_early_alu_shift_value_dep")

;; Store instructions.
;; Writeback is again ignored.

(define_insn_reservation "cortex_a8_store1_2" 0
  (and (eq_attr "tune" "cortexa8")
       (eq_attr "type" "store_4,store_8"))
  "cortex_a8_load_store_1")

(define_insn_reservation "cortex_a8_store3_4" 0
  (and (eq_attr "tune" "cortexa8")
       (eq_attr "type" "store_12,store_16"))
  "cortex_a8_load_store_2")

;; An ALU instruction acting as a producer for a store instruction
;; that only uses the result as the value to be stored (as opposed to
;; using it to calculate the address) has latency zero; the store
;; reads the value to be stored at the start of E3 and the ALU insn
;; writes it at the end of E2.  Move instructions actually produce the
;; result at the end of E1, but since we don't have delay slots, the
;; scheduling behavior will be the same.
(define_bypass 0 "cortex_a8_alu,cortex_a8_alu_shift,\
                  cortex_a8_alu_shift_reg,cortex_a8_mov"
               "cortex_a8_store1_2,cortex_a8_store3_4"
               "arm_no_early_store_addr_dep")
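
;; For example (hypothetical sequence), in
;;   add r0, r1, r2
;;   str r0, [r3]
;; the store needs r0 only as the value to be stored, so the pair can
;; be scheduled with no stall; if r0 were instead used to form the
;; address, as in
;;   str r4, [r0]
;; the bypass would not apply.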

;; Branch instructions

(define_insn_reservation "cortex_a8_branch" 0
  (and (eq_attr "tune" "cortexa8")
       (eq_attr "type" "branch"))
  "cortex_a8_branch")

;; Call latencies are not predictable.  A semi-arbitrary very large
;; number is used as "positive infinity" so that everything should be
;; finished by the time of return.
(define_insn_reservation "cortex_a8_call" 32
  (and (eq_attr "tune" "cortexa8")
       (eq_attr "type" "call"))
  "cortex_a8_issue_branch")

;; NEON (including VFP) instructions.

(include "cortex-a8-neon.md")