// SPDX-License-Identifier: GPL-2.0+
/*
 * (C) Copyright 2015
 * Texas Instruments Incorporated, <www.ti.com>
 *
 * Lokesh Vutla <lokeshvutla@ti.com>
 */

#include <common.h>
#include <hang.h>
#include <log.h>
#include <asm/utils.h>
#include <asm/arch/dra7xx_iodelay.h>
#include <asm/arch/omap.h>
#include <asm/arch/sys_proto.h>
#include <asm/arch/clock.h>
#include <asm/arch/mux_dra7xx.h>
#include <asm/omap_common.h>

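/*
 * isolate_io() - Isolate or deisolate the device IO pads.
 * @isolate: ISOLATE_IO to isolate the IOs, DEISOLATE_IO to release them.
 *
 * Descriptive summary of the sequence below: when isolating, the SDCARD
 * PWRDNZ/BIAS_PWRDNZ bits in CONTROL_PBIAS are set first; then software
 * takes override control of the ISOCLKIN signal to the IO pad ring,
 * toggles the isolation bit in CTRL_CORE_SMA_SW_0, and hands ISOCLKIN
 * back to hardware. Returns 0 on success or a non-zero IODELAY error
 * code if either ISOCLKIN handshake times out.
 */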
static int isolate_io(u32 isolate)
{
	if (isolate) {
		clrsetbits_le32((*ctrl)->control_pbias, SDCARD_PWRDNZ,
				SDCARD_PWRDNZ);
		clrsetbits_le32((*ctrl)->control_pbias, SDCARD_BIAS_PWRDNZ,
				SDCARD_BIAS_PWRDNZ);
	}

	/* Override control on ISOCLKIN signal to IO pad ring. */
	clrsetbits_le32((*prcm)->prm_io_pmctrl, PMCTRL_ISOCLK_OVERRIDE_MASK,
			PMCTRL_ISOCLK_OVERRIDE_CTRL);
	if (!wait_on_value(PMCTRL_ISOCLK_STATUS_MASK, PMCTRL_ISOCLK_STATUS_MASK,
			   (u32 *)(*prcm)->prm_io_pmctrl, LDELAY))
		return ERR_DEISOLATE_IO << isolate;

	/* Isolate/Deisolate IO */
	clrsetbits_le32((*ctrl)->ctrl_core_sma_sw_0, CTRL_ISOLATE_MASK,
			isolate << CTRL_ISOLATE_SHIFT);
	/* Dummy read to add delay t > 10ns */
	readl((*ctrl)->ctrl_core_sma_sw_0);

	/* Return control on ISOCLKIN to hardware */
	clrsetbits_le32((*prcm)->prm_io_pmctrl, PMCTRL_ISOCLK_OVERRIDE_MASK,
			PMCTRL_ISOCLK_NOT_OVERRIDE_CTRL);
	if (!wait_on_value(PMCTRL_ISOCLK_STATUS_MASK,
			   0 << PMCTRL_ISOCLK_STATUS_SHIFT,
			   (u32 *)(*prcm)->prm_io_pmctrl, LDELAY))
		return ERR_DEISOLATE_IO << isolate;

	return 0;
}

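/*
 * calibrate_iodelay() - Run the IODELAY recalibration sequence.
 * @base: Base address of the IODELAYCONFIG module.
 *
 * Programs the REFCLK period into CFG_REG_2 and sets the calibration
 * start bit in CFG_REG_0, then polls until the hardware signals the end
 * of calibration (CFG_REG_CALIB_END). Returns 0 on success or
 * ERR_CALIBRATE_IODELAY on timeout.
 */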
static int calibrate_iodelay(u32 base)
{
	u32 reg;

	/* Configure REFCLK period */
	reg = readl(base + CFG_REG_2_OFFSET);
	reg &= ~CFG_REG_REFCLK_PERIOD_MASK;
	reg |= CFG_REG_REFCLK_PERIOD;
	writel(reg, base + CFG_REG_2_OFFSET);

	/* Initiate Calibration */
	clrsetbits_le32(base + CFG_REG_0_OFFSET, CFG_REG_CALIB_STRT_MASK,
			CFG_REG_CALIB_STRT << CFG_REG_CALIB_STRT_SHIFT);
	if (!wait_on_value(CFG_REG_CALIB_STRT_MASK, CFG_REG_CALIB_END,
			   (u32 *)(base + CFG_REG_0_OFFSET), LDELAY))
		return ERR_CALIBRATE_IODELAY;

	return 0;
}

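/*
 * update_delay_mechanism() - Reload the newly calibrated delay values.
 * @base: Base address of the IODELAYCONFIG module.
 *
 * Triggers the ROM read in CFG_REG_0 so the calibrated values are loaded
 * into the delay mechanism and waits for the read to complete. Returns 0
 * on success or ERR_UPDATE_DELAY on timeout.
 */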
static int update_delay_mechanism(u32 base)
{
	/* Initiate the reload of calibrated values. */
	clrsetbits_le32(base + CFG_REG_0_OFFSET, CFG_REG_ROM_READ_MASK,
			CFG_REG_ROM_READ_START);
	if (!wait_on_value(CFG_REG_ROM_READ_MASK, CFG_REG_ROM_READ_END,
			   (u32 *)(base + CFG_REG_0_OFFSET), LDELAY))
		return ERR_UPDATE_DELAY;

	return 0;
}

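/*
 * calculate_delay() - Derive the delay contributed by one delay element.
 * @base: Base address of the IODELAYCONFIG module.
 * @offset: Offset of the CFG register holding the DLY_CNT/REF_CNT pair.
 * @den: Denominator for the element type (88 for coarse and 264 for fine,
 *       as passed in by do_set_iodelay()).
 *
 * Effectively computes 5 * (ref_cnt * refclk_period) / (dly_cnt * den),
 * split into a quotient plus a scaled remainder so the intermediate
 * values neither overflow nor lose too much precision to integer
 * truncation. Returns 0 if the delay count read back is zero.
 */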
static u32 calculate_delay(u32 base, u16 offset, u16 den)
{
	u16 refclk_period, dly_cnt, ref_cnt;
	u32 reg, q, r;

	refclk_period = readl(base + CFG_REG_2_OFFSET) &
			CFG_REG_REFCLK_PERIOD_MASK;

	reg = readl(base + offset);
	dly_cnt = (reg & CFG_REG_DLY_CNT_MASK) >> CFG_REG_DLY_CNT_SHIFT;
	ref_cnt = (reg & CFG_REG_REF_CNT_MASK) >> CFG_REG_REF_CNT_SHIFT;

	if (!dly_cnt || !den)
		return 0;

	/*
	 * To avoid overflow and integer truncation, the delay value
	 * is calculated as quotient + remainder.
	 */
	q = 5 * ((ref_cnt * refclk_period) / (dly_cnt * den));
	r = (10 * ((ref_cnt * refclk_period) % (dly_cnt * den))) /
	    (2 * dly_cnt * den);

	return q + r;
}

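/*
 * get_cfg_reg() - Encode a CFG_x register value for one pad.
 * @a_delay: Manual mode "a" delay for the pad.
 * @g_delay: Manual mode "g" (gate) delay for the pad.
 * @cpde: Calibrated coarse delay per element.
 * @fpde: Calibrated fine delay per element.
 *
 * The gate delay is decomposed using fixed per-element factors (920
 * coarse, 60 fine, in the same units as @g_delay), while the "a" delay
 * uses the calibrated @cpde/@fpde values; the fine parts are kept scaled
 * by 10 to preserve one extra digit before the final division. If more
 * than 22 fine elements would be needed, the total delay is redistributed
 * into coarse elements first. The result is OR'ed with the signature and
 * lock fields expected by the CFG_x registers.
 */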
static u32 get_cfg_reg(u16 a_delay, u16 g_delay, u32 cpde, u32 fpde)
{
	u32 g_delay_coarse, g_delay_fine;
	u32 a_delay_coarse, a_delay_fine;
	u32 c_elements, f_elements;
	u32 total_delay, reg = 0;

	g_delay_coarse = g_delay / 920;
	g_delay_fine = ((g_delay % 920) * 10) / 60;

	a_delay_coarse = a_delay / cpde;
	a_delay_fine = ((a_delay % cpde) * 10) / fpde;

	c_elements = g_delay_coarse + a_delay_coarse;
	f_elements = (g_delay_fine + a_delay_fine) / 10;

	if (f_elements > 22) {
		total_delay = c_elements * cpde + f_elements * fpde;

		c_elements = total_delay / cpde;
		f_elements = (total_delay % cpde) / fpde;
	}

	reg = (c_elements << CFG_X_COARSE_DLY_SHIFT) & CFG_X_COARSE_DLY_MASK;
	reg |= (f_elements << CFG_X_FINE_DLY_SHIFT) & CFG_X_FINE_DLY_MASK;
	reg |= CFG_X_SIGNATURE << CFG_X_SIGNATURE_SHIFT;
	reg |= CFG_X_LOCK << CFG_X_LOCK_SHIFT;

	return reg;
}

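/*
 * do_set_iodelay() - Program manual IO timing modes for a set of pads.
 * @base: Base address of the IODELAYCONFIG module.
 * @array: Array of {offset, a_delay, g_delay} entries, normally taken
 *         from the board pad configuration data.
 * @niodelays: Number of entries in @array.
 *
 * Derives the coarse and fine delay-per-element values from the
 * calibration count registers (CFG_REG_3/CFG_REG_4) and then writes one
 * encoded CFG_x value per entry. Returns 0 on success, or ERR_CPDE /
 * ERR_FPDE if either per-element delay evaluates to zero.
 */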
int do_set_iodelay(u32 base, struct iodelay_cfg_entry const *array,
		   int niodelays)
{
	struct iodelay_cfg_entry *iodelay = (struct iodelay_cfg_entry *)array;
	u32 reg, cpde, fpde, i;

	if (!niodelays)
		return 0;

	cpde = calculate_delay((*ctrl)->iodelay_config_base, CFG_REG_3_OFFSET,
			       88);
	if (!cpde)
		return ERR_CPDE;

	fpde = calculate_delay((*ctrl)->iodelay_config_base, CFG_REG_4_OFFSET,
			       264);
	if (!fpde)
		return ERR_FPDE;

	for (i = 0; i < niodelays; i++, iodelay++) {
		reg = get_cfg_reg(iodelay->a_delay, iodelay->g_delay, cpde,
				  fpde);
		writel(reg, base + iodelay->offset);
	}

	return 0;
}

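/*
 * __recalibrate_iodelay_start() - First half of the recalibration sequence.
 *
 * Unlocks the IODELAYCONFIG registers, runs the calibration and isolates
 * the IOs before the delay values are updated. Must only be called from
 * the SPL init context (the sequence is intended to run from SRAM).
 * Returns 0 on success or one of the IODELAY error codes; callers are
 * expected to finish with __recalibrate_iodelay_end() in either case.
 */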
int __recalibrate_iodelay_start(void)
{
	int ret = 0;

	/* IO recalibration should be done only from SRAM */
	if (OMAP_INIT_CONTEXT_SPL != omap_hw_init_context()) {
		puts("IODELAY recalibration called from invalid context - use only from SPL in SRAM\n");
		return -1;
	}

	/* Unlock IODELAY CONFIG registers */
	writel(CFG_IODELAY_UNLOCK_KEY, (*ctrl)->iodelay_config_base +
	       CFG_REG_8_OFFSET);

	ret = calibrate_iodelay((*ctrl)->iodelay_config_base);
	if (ret)
		goto err;

	ret = isolate_io(ISOLATE_IO);
	if (ret)
		goto err;

	ret = update_delay_mechanism((*ctrl)->iodelay_config_base);

err:
	return ret;
}

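/*
 * __recalibrate_iodelay_end() - Second half of the recalibration sequence.
 * @ret: Result of the preceding steps (0 or an IODELAY error code).
 *
 * Deisolates the IOs if they are still isolated, relocks the
 * IODELAYCONFIG registers and only then reports any error, since the
 * UART pads are unusable while the IOs are in isolation. A non-zero
 * @ret ends in hang(), as the SoC is not expected to be usable after a
 * failed recalibration.
 */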
void __recalibrate_iodelay_end(int ret)
{
	/* IO recalibration should be done only from SRAM */
	if (OMAP_INIT_CONTEXT_SPL != omap_hw_init_context()) {
		puts("IODELAY recalibration called from invalid context - use only from SPL in SRAM\n");
		return;
	}

	/* Deisolate IO if it is already isolated */
	if (readl((*ctrl)->ctrl_core_sma_sw_0) & CTRL_ISOLATE_MASK)
		isolate_io(DEISOLATE_IO);

	/* Lock IODELAY CONFIG registers */
	writel(CFG_IODELAY_LOCK_KEY, (*ctrl)->iodelay_config_base +
	       CFG_REG_8_OFFSET);

	/*
	 * UART cannot be used during the IO recalibration sequence as the
	 * IOs are in isolation, so error handling and debug prints are done
	 * after the complete IO delay recalibration sequence.
	 */
	switch (ret) {
	case ERR_CALIBRATE_IODELAY:
		puts("IODELAY: IO delay calibration sequence failed\n");
		break;
	case ERR_ISOLATE_IO:
		puts("IODELAY: Isolation of Device IOs failed\n");
		break;
	case ERR_UPDATE_DELAY:
		puts("IODELAY: Delay mechanism update with new calibrated values failed\n");
		break;
	case ERR_DEISOLATE_IO:
		puts("IODELAY: De-isolation of Device IOs failed\n");
		break;
	case ERR_CPDE:
		puts("IODELAY: CPDE calculation failed\n");
		break;
	case ERR_FPDE:
		puts("IODELAY: FPDE calculation failed\n");
		break;
	case -1:
		puts("IODELAY: Wrong Context call?\n");
		break;
	default:
		debug("IODELAY: IO delay recalibration successfully completed\n");
	}

	/*
	 * If there is an error during iodelay recalibration, the SoC is in a
	 * bad state. Do not progress any further.
	 */
	if (ret)
		hang();
}

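/*
 * __recalibrate_iodelay() - Full recalibration with pad mux and manual mode.
 * @pad: Pad mux entries to apply while the IOs are isolated.
 * @npads: Number of entries in @pad.
 * @iodelay: Manual IO timing entries to program.
 * @niodelays: Number of entries in @iodelay.
 *
 * Convenience wrapper for board code early in SPL: runs
 * __recalibrate_iodelay_start(), applies the pad mux and manual IO
 * timings, and always finishes with __recalibrate_iodelay_end() so that
 * isolation is released and errors are reported (or hang() is called).
 *
 * A typical board-level call might look like the following (the array
 * names here are purely illustrative, not part of this driver):
 *
 *	__recalibrate_iodelay(core_padconf_array,
 *			      ARRAY_SIZE(core_padconf_array),
 *			      iodelay_cfg_array,
 *			      ARRAY_SIZE(iodelay_cfg_array));
 */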
void __recalibrate_iodelay(struct pad_conf_entry const *pad, int npads,
			   struct iodelay_cfg_entry const *iodelay,
			   int niodelays)
{
	int ret = 0;

	/* IO recalibration should be done only from SRAM */
	if (OMAP_INIT_CONTEXT_SPL != omap_hw_init_context()) {
		puts("IODELAY recalibration called from invalid context - use only from SPL in SRAM\n");
		return;
	}

	ret = __recalibrate_iodelay_start();
	if (ret)
		goto err;

	/* Configure Mux settings */
	do_set_mux32((*ctrl)->control_padconf_core_base, pad, npads);

	/* Configure Manual IO timing modes */
	ret = do_set_iodelay((*ctrl)->iodelay_config_base, iodelay, niodelays);
	if (ret)
		goto err;

err:
	__recalibrate_iodelay_end(ret);
}

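/*
 * late_recalibrate_iodelay() - Recalibrate IODELAY without IO isolation.
 * @pad: Additional pad mux entries to apply.
 * @npads: Number of entries in @pad.
 * @iodelay: Manual IO timing entries to program.
 * @niodelays: Number of entries in @iodelay.
 *
 * Unlike __recalibrate_iodelay(), this variant neither checks the init
 * context nor isolates the IOs, so it can be used for later adjustments
 * of individual pads after the initial recalibration. Errors from the
 * individual steps are not reported; the CONFIG registers are simply
 * relocked at the end.
 */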
void late_recalibrate_iodelay(struct pad_conf_entry const *pad, int npads,
			      struct iodelay_cfg_entry const *iodelay,
			      int niodelays)
{
	int ret = 0;

	/* Unlock IODELAY CONFIG registers */
	writel(CFG_IODELAY_UNLOCK_KEY, (*ctrl)->iodelay_config_base +
	       CFG_REG_8_OFFSET);

	ret = calibrate_iodelay((*ctrl)->iodelay_config_base);
	if (ret)
		goto err;

	ret = update_delay_mechanism((*ctrl)->iodelay_config_base);

	/* Configure Mux settings */
	do_set_mux32((*ctrl)->control_padconf_core_base, pad, npads);

	/* Configure Manual IO timing modes */
	ret = do_set_iodelay((*ctrl)->iodelay_config_base, iodelay, niodelays);
	if (ret)
		goto err;

err:
	/* Lock IODELAY CONFIG registers */
	writel(CFG_IODELAY_LOCK_KEY, (*ctrl)->iodelay_config_base +
	       CFG_REG_8_OFFSET);
}