// SPDX-License-Identifier: GPL-2.0+
/*
 * (C) Copyright 2013
 * NVIDIA Corporation <www.nvidia.com>
 */

#include <common.h>
#include <log.h>
#include <asm/io.h>
#include <asm/arch/ahb.h>
#include <asm/arch/clock.h>
#include <asm/arch/flow.h>
#include <asm/arch/pinmux.h>
#include <asm/arch/tegra.h>
#include <asm/arch-tegra/clk_rst.h>
#include <asm/arch-tegra/pmc.h>
#include <asm/arch-tegra/ap.h>
#include <linux/delay.h>
#include "../cpu.h"

/* Tegra124-specific CPU init code */

static void enable_cpu_power_rail(void)
{
	struct pmc_ctlr *pmc = (struct pmc_ctlr *)NV_PA_PMC_BASE;

	debug("%s entry\n", __func__);

	/* un-tristate PWR_I2C SCL/SDA, rest of the defaults are correct */
	pinmux_tristate_disable(PMUX_PINGRP_PWR_I2C_SCL_PZ6);
	pinmux_tristate_disable(PMUX_PINGRP_PWR_I2C_SDA_PZ7);

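	/*
	 * Bring up the CPU VDD rail; pmic_enable_cpu_vdd() is PMIC-specific
	 * code provided elsewhere that talks to the PMIC over the PWR_I2C
	 * pins un-tristated above.
	 */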
	pmic_enable_cpu_vdd();

	/*
	 * Set CPUPWRGOOD_TIMER - APB clock is 1/2 of SCLK (102MHz),
	 * set it for 5ms as per SysEng (102MHz * 5ms = 510000 = 0x7C830).
	 */
	writel(0x7C830, &pmc->pmc_cpupwrgood_timer);

	/* Set polarity to 0 (normal) and enable CPUPWRREQ_OE */
	clrbits_le32(&pmc->pmc_cntrl, CPUPWRREQ_POL);
	setbits_le32(&pmc->pmc_cntrl, CPUPWRREQ_OE);
}

static void enable_cpu_clocks(void)
{
	struct clk_rst_ctlr *clkrst = (struct clk_rst_ctlr *)NV_PA_CLK_RST_BASE;
	struct clk_pll_info *pllinfo = &tegra_pll_info_table[CLOCK_ID_XCPU];
	u32 reg;

	debug("%s entry\n", __func__);

	/* Wait for PLL-X to lock */
	do {
		reg = readl(&clkrst->crc_pll_simple[SIMPLE_PLLX].pll_base);
		debug("%s: PLLX base = 0x%08X\n", __func__, reg);
	} while ((reg & (1 << pllinfo->lock_det)) == 0);

	debug("%s: PLLX locked, delay for stable clocks\n", __func__);
	/* Wait until all clocks are stable */
	udelay(PLL_STABILIZATION_DELAY);

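	/*
	 * Now that PLL-X is locked, switch the CPU-complex clock (cclk) over
	 * to it via the burst policy and program the super-clock divider
	 * (register values come from the Tegra clk_rst definitions).
	 */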
	debug("%s: Setting CCLK_BURST and DIVIDER\n", __func__);
	writel(CCLK_BURST_POLICY, &clkrst->crc_cclk_brst_pol);
	writel(SUPER_CCLK_DIVIDER, &clkrst->crc_super_cclk_div);

	debug("%s: Enabling clock to all CPUs\n", __func__);
	/* Enable the clock to all CPUs */
	reg = CLR_CPU3_CLK_STP | CLR_CPU2_CLK_STP | CLR_CPU1_CLK_STP |
	      CLR_CPU0_CLK_STP;
	writel(reg, &clkrst->crc_clk_cpu_cmplx_clr);

	debug("%s: Enabling main CPU complex clocks\n", __func__);
	/* Always enable the main CPU complex clocks */
	clock_enable(PERIPH_ID_CPU);
	clock_enable(PERIPH_ID_CPULP);
	clock_enable(PERIPH_ID_CPUG);

	debug("%s: Done\n", __func__);
}

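/*
 * Tegra124 pairs a "fast" (G) CPU cluster with a single-core low-power
 * (LP) companion cluster; the LP core and all four G-cluster cores are
 * released from reset here.
 */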
static void remove_cpu_resets(void)
{
	struct clk_rst_ctlr *clkrst = (struct clk_rst_ctlr *)NV_PA_CLK_RST_BASE;
	u32 reg;

	debug("%s entry\n", __func__);

	/* Take the slow and fast partitions out of reset */
	reg = CLR_NONCPURESET;
	writel(reg, &clkrst->crc_rst_cpulp_cmplx_clr);
	writel(reg, &clkrst->crc_rst_cpug_cmplx_clr);

	/* Clear the SW-controlled reset of the slow cluster */
	reg = CLR_CPURESET0 | CLR_DBGRESET0 | CLR_CORERESET0 | CLR_CXRESET0 |
	      CLR_L2RESET | CLR_PRESETDBG;
	writel(reg, &clkrst->crc_rst_cpulp_cmplx_clr);

	/* Clear the SW-controlled reset of the fast cluster */
	reg = CLR_CPURESET0 | CLR_DBGRESET0 | CLR_CORERESET0 | CLR_CXRESET0 |
	      CLR_CPURESET1 | CLR_DBGRESET1 | CLR_CORERESET1 | CLR_CXRESET1 |
	      CLR_CPURESET2 | CLR_DBGRESET2 | CLR_CORERESET2 | CLR_CXRESET2 |
	      CLR_CPURESET3 | CLR_DBGRESET3 | CLR_CORERESET3 | CLR_CXRESET3 |
	      CLR_L2RESET | CLR_PRESETDBG;
	writel(reg, &clkrst->crc_rst_cpug_cmplx_clr);
}

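/*
 * SW-triggered RAM repair for both clusters. Per the ordering in
 * start_cpu(), this must run after the CPU rail partitions have been
 * powered up and before the CPU resets are released.
 */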
static void tegra124_ram_repair(void)
{
	struct flow_ctlr *flow = (struct flow_ctlr *)NV_PA_FLOW_BASE;
	u32 ram_repair_timeout; /* usec */
	u32 val;

	/*
	 * Request that the Flow Controller perform RAM repair whenever it
	 * turns on a power rail that requires RAM repair.
	 */
	clrbits_le32(&flow->ram_repair, RAM_REPAIR_BYPASS_EN);

	/* Request SW-triggered RAM repair by setting the req bit */
	/* cluster 0 */
	setbits_le32(&flow->ram_repair, RAM_REPAIR_REQ);
	/* Wait for completion (STS bit set) */
	ram_repair_timeout = 500;
	do {
		udelay(1);
		val = readl(&flow->ram_repair);
	} while (!(val & RAM_REPAIR_STS) && ram_repair_timeout--);
	if (!ram_repair_timeout)
		debug("Ram Repair cluster0 failed\n");

	/* cluster 1 */
	setbits_le32(&flow->ram_repair_cluster1, RAM_REPAIR_REQ);
	/* Wait for completion (STS bit set) */
	ram_repair_timeout = 500;
	do {
		udelay(1);
		val = readl(&flow->ram_repair_cluster1);
	} while (!(val & RAM_REPAIR_STS) && ram_repair_timeout--);

	if (!ram_repair_timeout)
		debug("Ram Repair cluster1 failed\n");
}

/**
 * Tegra124 requires some special clock initialization, including setting up
 * the DVC I2C, turning on MSELECT and selecting the G CPU cluster
 */
void tegra124_init_clocks(void)
{
	struct flow_ctlr *flow = (struct flow_ctlr *)NV_PA_FLOW_BASE;
	struct pmc_ctlr *pmc = (struct pmc_ctlr *)NV_PA_PMC_BASE;
	struct clk_rst_ctlr *clkrst =
			(struct clk_rst_ctlr *)NV_PA_CLK_RST_BASE;
	u32 val;

	debug("%s entry\n", __func__);

	/* Set active CPU cluster to G */
	clrbits_le32(&flow->cluster_control, 1);
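	/* CLUSTER_CONTROL bit 0 selects the active cluster: 0 = G, 1 = LP */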

	/* Change the oscillator drive strength */
	val = readl(&clkrst->crc_osc_ctrl);
	val &= ~OSC_XOFS_MASK;
	val |= (OSC_DRIVE_STRENGTH << OSC_XOFS_SHIFT);
	writel(val, &clkrst->crc_osc_ctrl);

	/* Update same value in PMC_OSC_EDPD_OVER XOFS field for warmboot */
	val = readl(&pmc->pmc_osc_edpd_over);
	val &= ~PMC_XOFS_MASK;
	val |= (OSC_DRIVE_STRENGTH << PMC_XOFS_SHIFT);
	writel(val, &pmc->pmc_osc_edpd_over);

	/* Set HOLD_CKE_LOW_EN to 1 */
	setbits_le32(&pmc->pmc_cntrl2, HOLD_CKE_LOW_EN);

	debug("Setting up PLLX\n");
	init_pllx();

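	/*
	 * AHB rate divider = 1 gives HCLK = SCLK/2; the APB divider is left
	 * at 0 (PCLK = HCLK), i.e. the 102MHz APB clock assumed for
	 * CPUPWRGOOD_TIMER above.
	 */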
	val = (1 << CLK_SYS_RATE_AHB_RATE_SHIFT);
	writel(val, &clkrst->crc_clk_sys_rate);

	/* Enable clocks to required peripherals. TBD - minimize this list */
	debug("Enabling clocks\n");

	clock_set_enable(PERIPH_ID_CACHE2, 1);
	clock_set_enable(PERIPH_ID_GPIO, 1);
	clock_set_enable(PERIPH_ID_TMR, 1);
	clock_set_enable(PERIPH_ID_CPU, 1);
	clock_set_enable(PERIPH_ID_EMC, 1);
	clock_set_enable(PERIPH_ID_I2C5, 1);
	clock_set_enable(PERIPH_ID_APBDMA, 1);
	clock_set_enable(PERIPH_ID_MEM, 1);
	clock_set_enable(PERIPH_ID_CORESIGHT, 1);
	clock_set_enable(PERIPH_ID_MSELECT, 1);
	clock_set_enable(PERIPH_ID_DVFS, 1);

	/*
	 * Set MSELECT clock source as PLLP (00), and ask for a clock
	 * divider that would set the MSELECT clock at 102MHz for a
	 * PLLP base of 408MHz.
	 */
	clock_ll_set_source_divisor(PERIPH_ID_MSELECT, 0,
				    CLK_DIVIDER(NVBL_PLLP_KHZ, 102000));

	/* Give clock time to stabilize */
	udelay(IO_STABILIZATION_DELAY);

	/* I2C5 (DVC) gets CLK_M and a divisor of 17 */
	clock_ll_set_source_divisor(PERIPH_ID_I2C5, 3, 16);
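	/* (the I2C divisor field is n + 1, so the value 16 divides by 17) */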

	/* Give clock time to stabilize */
	udelay(IO_STABILIZATION_DELAY);

	/* Take required peripherals out of reset */
	debug("Taking periphs out of reset\n");
	reset_set_enable(PERIPH_ID_CACHE2, 0);
	reset_set_enable(PERIPH_ID_GPIO, 0);
	reset_set_enable(PERIPH_ID_TMR, 0);
	reset_set_enable(PERIPH_ID_COP, 0);
	reset_set_enable(PERIPH_ID_EMC, 0);
	reset_set_enable(PERIPH_ID_I2C5, 0);
	reset_set_enable(PERIPH_ID_APBDMA, 0);
	reset_set_enable(PERIPH_ID_MEM, 0);
	reset_set_enable(PERIPH_ID_CORESIGHT, 0);
	reset_set_enable(PERIPH_ID_MSELECT, 0);
	reset_set_enable(PERIPH_ID_DVFS, 0);

	debug("%s exit\n", __func__);
}

static bool is_partition_powered(u32 partid)
{
	struct pmc_ctlr *pmc = (struct pmc_ctlr *)NV_PA_PMC_BASE;
	u32 reg;

	/* Get power gate status */
	reg = readl(&pmc->pmc_pwrgate_status);
	return !!(reg & (1 << partid));
}

static void unpower_partition(u32 partid)
{
	struct pmc_ctlr *pmc = (struct pmc_ctlr *)NV_PA_PMC_BASE;

	debug("%s: part ID = %08X\n", __func__, partid);
	/* Is the partition on? */
	if (is_partition_powered(partid)) {
		/* Yes, toggle the partition power state (ON -> OFF) */
		debug("%s: toggling state\n", __func__);
		writel(START_CP | partid, &pmc->pmc_pwrgate_toggle);

		/* Wait for the power to come down */
		while (is_partition_powered(partid))
			;

		/* Give I/O signals time to stabilize */
		udelay(IO_STABILIZATION_DELAY);
	}
}

void unpower_cpus(void)
{
	debug("%s entry: G cluster\n", __func__);

	/* Power down the fast cluster rail partition */
	debug("%s: CRAIL\n", __func__);
	unpower_partition(CRAIL);

	/* Power down the fast cluster non-CPU partition */
	debug("%s: C0NC\n", __func__);
	unpower_partition(C0NC);

	/* Power down the fast cluster CPU0 partition */
	debug("%s: CE0\n", __func__);
	unpower_partition(CE0);

	debug("%s: done\n", __func__);
}

static void power_partition(u32 partid)
{
	struct pmc_ctlr *pmc = (struct pmc_ctlr *)NV_PA_PMC_BASE;

	debug("%s: part ID = %08X\n", __func__, partid);
	/* Is the partition already on? */
	if (!is_partition_powered(partid)) {
		/* No, toggle the partition power state (OFF -> ON) */
		debug("%s: toggling state\n", __func__);
		writel(START_CP | partid, &pmc->pmc_pwrgate_toggle);

		/* Wait for the power to come up */
		while (!is_partition_powered(partid))
			;

		/* Give I/O signals time to stabilize */
		udelay(IO_STABILIZATION_DELAY);
	}
}

void powerup_cpus(void)
{
	/* We boot to the fast cluster */
	debug("%s entry: G cluster\n", __func__);

	/* Power up the fast cluster rail partition */
	debug("%s: CRAIL\n", __func__);
	power_partition(CRAIL);

	/* Power up the fast cluster non-CPU partition */
	debug("%s: C0NC\n", __func__);
	power_partition(C0NC);

	/* Power up the fast cluster CPU0 partition */
	debug("%s: CE0\n", __func__);
	power_partition(CE0);

	debug("%s: done\n", __func__);
}

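/*
 * start_cpu() - power up the G CPU cluster and release it from reset.
 * reset_vector is the address the CPUs will begin executing at once their
 * resets are removed. The sequence below: power down partitions left on
 * after a software reset, set up clocks, enable the CPU power rail, power
 * the partitions back up, run RAM repair, enable CPU clocks, program the
 * reset vector, then remove the CPU resets.
 */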
void start_cpu(u32 reset_vector)
{
	struct pmc_ctlr *pmc = (struct pmc_ctlr *)NV_PA_PMC_BASE;

	debug("%s entry, reset_vector = %x\n", __func__, reset_vector);

	/*
	 * High-power clusters are on after a software reset and may
	 * interfere with tegra124_ram_repair, so power them down first.
	 */
	unpower_cpus();
	tegra124_init_clocks();

	/* Set power-gating timer multiplier */
	writel((MULT_8 << TIMER_MULT_SHIFT) | (MULT_8 << TIMER_MULT_CPU_SHIFT),
	       &pmc->pmc_pwrgate_timer_mult);

	enable_cpu_power_rail();
	powerup_cpus();
	tegra124_ram_repair();
	enable_cpu_clocks();
	clock_enable_coresight(1);
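	/* Point the CPU reset vector at the entry code before releasing resets */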
	writel(reset_vector, EXCEP_VECTOR_CPU_RESET_VECTOR);
	remove_cpu_resets();
	debug("%s exit, should continue @ reset_vector\n", __func__);
}