xref: /linux/drivers/clk/imx/clk-pll14xx.c (revision 9a6b55ac)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright 2017-2018 NXP.
4  */
5 
6 #include <linux/bitops.h>
7 #include <linux/clk-provider.h>
8 #include <linux/err.h>
9 #include <linux/io.h>
10 #include <linux/iopoll.h>
11 #include <linux/slab.h>
12 #include <linux/jiffies.h>
13 
14 #include "clk.h"
15 
/* Register offsets, relative to the PLL base address */
#define GNRL_CTL	0x0
#define DIV_CTL		0x4

/* GNRL_CTL bit fields */
#define LOCK_STATUS	BIT(31)
#define LOCK_SEL_MASK	BIT(29)
#define CLKE_MASK	BIT(11)
#define RST_MASK	BIT(9)
#define BYPASS_MASK	BIT(4)

/*
 * DIV_CTL divider fields. Note the *_MASK macros are already positioned
 * at the field's bit offset (e.g. MDIV_MASK covers bits 21:12), so a
 * field is extracted with (val & MASK) >> SHIFT and cleared with ~MASK.
 */
#define MDIV_SHIFT	12
#define MDIV_MASK	GENMASK(21, 12)
#define PDIV_SHIFT	4
#define PDIV_MASK	GENMASK(9, 4)
#define SDIV_SHIFT	0
#define SDIV_MASK	GENMASK(2, 0)
/* Fractional (DSM) divider, in the second divider control word */
#define KDIV_SHIFT	0
#define KDIV_MASK	GENMASK(15, 0)

/* Maximum time to wait for the PLL to report lock */
#define LOCK_TIMEOUT_US		10000
33 
/* Per-instance state for one PLL1416x/PLL1443x block */
struct clk_pll14xx {
	struct clk_hw			hw;	/* clk framework handle */
	void __iomem			*base;	/* MMIO base of this PLL */
	enum imx_pll14xx_type		type;	/* PLL_1416X or PLL_1443X */
	const struct imx_pll14xx_rate_table *rate_table; /* supported rates, descending */
	int rate_count;				/* entries in rate_table */
};

#define to_clk_pll14xx(_hw) container_of(_hw, struct clk_pll14xx, hw)
43 
/*
 * PLL1416x (integer PLL) settings: rate, mdiv, pdiv, sdiv.
 * Must be kept in descending rate order — clk_pll14xx_round_rate
 * relies on that ordering.
 */
static const struct imx_pll14xx_rate_table imx_pll1416x_tbl[] = {
	PLL_1416X_RATE(1800000000U, 225, 3, 0),
	PLL_1416X_RATE(1600000000U, 200, 3, 0),
	PLL_1416X_RATE(1500000000U, 375, 3, 1),
	PLL_1416X_RATE(1400000000U, 350, 3, 1),
	PLL_1416X_RATE(1200000000U, 300, 3, 1),
	PLL_1416X_RATE(1000000000U, 250, 3, 1),
	PLL_1416X_RATE(800000000U,  200, 3, 1),
	PLL_1416X_RATE(750000000U,  250, 2, 2),
	PLL_1416X_RATE(700000000U,  350, 3, 2),
	PLL_1416X_RATE(600000000U,  300, 3, 2),
};
56 
/*
 * PLL1443x (fractional PLL) settings: rate, mdiv, pdiv, sdiv, kdiv.
 * Must be kept in descending rate order — clk_pll14xx_round_rate
 * relies on that ordering.
 */
static const struct imx_pll14xx_rate_table imx_pll1443x_tbl[] = {
	PLL_1443X_RATE(650000000U, 325, 3, 2, 0),
	PLL_1443X_RATE(594000000U, 198, 2, 2, 0),
	PLL_1443X_RATE(393216000U, 262, 2, 3, 9437),
	PLL_1443X_RATE(361267200U, 361, 3, 3, 17511),
};
63 
/* Default PLL1443x descriptor, exported for SoC clock drivers */
struct imx_pll14xx_clk imx_1443x_pll = {
	.type = PLL_1443X,
	.rate_table = imx_pll1443x_tbl,
	.rate_count = ARRAY_SIZE(imx_pll1443x_tbl),
};

/* Default PLL1416x descriptor, exported for SoC clock drivers */
struct imx_pll14xx_clk imx_1416x_pll = {
	.type = PLL_1416X,
	.rate_table = imx_pll1416x_tbl,
	.rate_count = ARRAY_SIZE(imx_pll1416x_tbl),
};
75 
76 static const struct imx_pll14xx_rate_table *imx_get_pll_settings(
77 		struct clk_pll14xx *pll, unsigned long rate)
78 {
79 	const struct imx_pll14xx_rate_table *rate_table = pll->rate_table;
80 	int i;
81 
82 	for (i = 0; i < pll->rate_count; i++)
83 		if (rate == rate_table[i].rate)
84 			return &rate_table[i];
85 
86 	return NULL;
87 }
88 
89 static long clk_pll14xx_round_rate(struct clk_hw *hw, unsigned long rate,
90 			unsigned long *prate)
91 {
92 	struct clk_pll14xx *pll = to_clk_pll14xx(hw);
93 	const struct imx_pll14xx_rate_table *rate_table = pll->rate_table;
94 	int i;
95 
96 	/* Assumming rate_table is in descending order */
97 	for (i = 0; i < pll->rate_count; i++)
98 		if (rate >= rate_table[i].rate)
99 			return rate_table[i].rate;
100 
101 	/* return minimum supported value */
102 	return rate_table[i - 1].rate;
103 }
104 
105 static unsigned long clk_pll1416x_recalc_rate(struct clk_hw *hw,
106 						  unsigned long parent_rate)
107 {
108 	struct clk_pll14xx *pll = to_clk_pll14xx(hw);
109 	u32 mdiv, pdiv, sdiv, pll_div;
110 	u64 fvco = parent_rate;
111 
112 	pll_div = readl_relaxed(pll->base + 4);
113 	mdiv = (pll_div & MDIV_MASK) >> MDIV_SHIFT;
114 	pdiv = (pll_div & PDIV_MASK) >> PDIV_SHIFT;
115 	sdiv = (pll_div & SDIV_MASK) >> SDIV_SHIFT;
116 
117 	fvco *= mdiv;
118 	do_div(fvco, pdiv << sdiv);
119 
120 	return fvco;
121 }
122 
123 static unsigned long clk_pll1443x_recalc_rate(struct clk_hw *hw,
124 						  unsigned long parent_rate)
125 {
126 	struct clk_pll14xx *pll = to_clk_pll14xx(hw);
127 	u32 mdiv, pdiv, sdiv, pll_div_ctl0, pll_div_ctl1;
128 	short int kdiv;
129 	u64 fvco = parent_rate;
130 
131 	pll_div_ctl0 = readl_relaxed(pll->base + 4);
132 	pll_div_ctl1 = readl_relaxed(pll->base + 8);
133 	mdiv = (pll_div_ctl0 & MDIV_MASK) >> MDIV_SHIFT;
134 	pdiv = (pll_div_ctl0 & PDIV_MASK) >> PDIV_SHIFT;
135 	sdiv = (pll_div_ctl0 & SDIV_MASK) >> SDIV_SHIFT;
136 	kdiv = pll_div_ctl1 & KDIV_MASK;
137 
138 	/* fvco = (m * 65536 + k) * Fin / (p * 65536) */
139 	fvco *= (mdiv * 65536 + kdiv);
140 	pdiv *= 65536;
141 
142 	do_div(fvco, pdiv << sdiv);
143 
144 	return fvco;
145 }
146 
147 static inline bool clk_pll14xx_mp_change(const struct imx_pll14xx_rate_table *rate,
148 					  u32 pll_div)
149 {
150 	u32 old_mdiv, old_pdiv;
151 
152 	old_mdiv = (pll_div & MDIV_MASK) >> MDIV_SHIFT;
153 	old_pdiv = (pll_div & PDIV_MASK) >> PDIV_SHIFT;
154 
155 	return rate->mdiv != old_mdiv || rate->pdiv != old_pdiv;
156 }
157 
158 static int clk_pll14xx_wait_lock(struct clk_pll14xx *pll)
159 {
160 	u32 val;
161 
162 	return readl_poll_timeout(pll->base, val, val & LOCK_STATUS, 0,
163 			LOCK_TIMEOUT_US);
164 }
165 
166 static int clk_pll1416x_set_rate(struct clk_hw *hw, unsigned long drate,
167 				 unsigned long prate)
168 {
169 	struct clk_pll14xx *pll = to_clk_pll14xx(hw);
170 	const struct imx_pll14xx_rate_table *rate;
171 	u32 tmp, div_val;
172 	int ret;
173 
174 	rate = imx_get_pll_settings(pll, drate);
175 	if (!rate) {
176 		pr_err("%s: Invalid rate : %lu for pll clk %s\n", __func__,
177 		       drate, clk_hw_get_name(hw));
178 		return -EINVAL;
179 	}
180 
181 	tmp = readl_relaxed(pll->base + 4);
182 
183 	if (!clk_pll14xx_mp_change(rate, tmp)) {
184 		tmp &= ~(SDIV_MASK) << SDIV_SHIFT;
185 		tmp |= rate->sdiv << SDIV_SHIFT;
186 		writel_relaxed(tmp, pll->base + 4);
187 
188 		return 0;
189 	}
190 
191 	/* Bypass clock and set lock to pll output lock */
192 	tmp = readl_relaxed(pll->base);
193 	tmp |= LOCK_SEL_MASK;
194 	writel_relaxed(tmp, pll->base);
195 
196 	/* Enable RST */
197 	tmp &= ~RST_MASK;
198 	writel_relaxed(tmp, pll->base);
199 
200 	/* Enable BYPASS */
201 	tmp |= BYPASS_MASK;
202 	writel(tmp, pll->base);
203 
204 	div_val = (rate->mdiv << MDIV_SHIFT) | (rate->pdiv << PDIV_SHIFT) |
205 		(rate->sdiv << SDIV_SHIFT);
206 	writel_relaxed(div_val, pll->base + 0x4);
207 
208 	/*
209 	 * According to SPEC, t3 - t2 need to be greater than
210 	 * 1us and 1/FREF, respectively.
211 	 * FREF is FIN / Prediv, the prediv is [1, 63], so choose
212 	 * 3us.
213 	 */
214 	udelay(3);
215 
216 	/* Disable RST */
217 	tmp |= RST_MASK;
218 	writel_relaxed(tmp, pll->base);
219 
220 	/* Wait Lock */
221 	ret = clk_pll14xx_wait_lock(pll);
222 	if (ret)
223 		return ret;
224 
225 	/* Bypass */
226 	tmp &= ~BYPASS_MASK;
227 	writel_relaxed(tmp, pll->base);
228 
229 	return 0;
230 }
231 
232 static int clk_pll1443x_set_rate(struct clk_hw *hw, unsigned long drate,
233 				 unsigned long prate)
234 {
235 	struct clk_pll14xx *pll = to_clk_pll14xx(hw);
236 	const struct imx_pll14xx_rate_table *rate;
237 	u32 tmp, div_val;
238 	int ret;
239 
240 	rate = imx_get_pll_settings(pll, drate);
241 	if (!rate) {
242 		pr_err("%s: Invalid rate : %lu for pll clk %s\n", __func__,
243 			drate, clk_hw_get_name(hw));
244 		return -EINVAL;
245 	}
246 
247 	tmp = readl_relaxed(pll->base + 4);
248 
249 	if (!clk_pll14xx_mp_change(rate, tmp)) {
250 		tmp &= ~(SDIV_MASK) << SDIV_SHIFT;
251 		tmp |= rate->sdiv << SDIV_SHIFT;
252 		writel_relaxed(tmp, pll->base + 4);
253 
254 		tmp = rate->kdiv << KDIV_SHIFT;
255 		writel_relaxed(tmp, pll->base + 8);
256 
257 		return 0;
258 	}
259 
260 	/* Enable RST */
261 	tmp = readl_relaxed(pll->base);
262 	tmp &= ~RST_MASK;
263 	writel_relaxed(tmp, pll->base);
264 
265 	/* Enable BYPASS */
266 	tmp |= BYPASS_MASK;
267 	writel_relaxed(tmp, pll->base);
268 
269 	div_val = (rate->mdiv << MDIV_SHIFT) | (rate->pdiv << PDIV_SHIFT) |
270 		(rate->sdiv << SDIV_SHIFT);
271 	writel_relaxed(div_val, pll->base + 0x4);
272 	writel_relaxed(rate->kdiv << KDIV_SHIFT, pll->base + 0x8);
273 
274 	/*
275 	 * According to SPEC, t3 - t2 need to be greater than
276 	 * 1us and 1/FREF, respectively.
277 	 * FREF is FIN / Prediv, the prediv is [1, 63], so choose
278 	 * 3us.
279 	 */
280 	udelay(3);
281 
282 	/* Disable RST */
283 	tmp |= RST_MASK;
284 	writel_relaxed(tmp, pll->base);
285 
286 	/* Wait Lock*/
287 	ret = clk_pll14xx_wait_lock(pll);
288 	if (ret)
289 		return ret;
290 
291 	/* Bypass */
292 	tmp &= ~BYPASS_MASK;
293 	writel_relaxed(tmp, pll->base);
294 
295 	return 0;
296 }
297 
/*
 * Power up the PLL: raise RESETB while bypassed, wait for lock, then
 * remove the bypass. A no-op if the PLL is already out of reset.
 * The write ordering here follows the hardware's required sequence;
 * do not reorder.
 */
static int clk_pll14xx_prepare(struct clk_hw *hw)
{
	struct clk_pll14xx *pll = to_clk_pll14xx(hw);
	u32 val;
	int ret;

	/*
	 * RESETB = 1 from 0, PLL starts its normal
	 * operation after lock time
	 */
	val = readl_relaxed(pll->base + GNRL_CTL);
	if (val & RST_MASK)
		return 0;	/* already running */
	/* bypass first so downstream consumers keep a clock during relock */
	val |= BYPASS_MASK;
	writel_relaxed(val, pll->base + GNRL_CTL);
	val |= RST_MASK;
	writel_relaxed(val, pll->base + GNRL_CTL);

	ret = clk_pll14xx_wait_lock(pll);
	if (ret)
		return ret;

	val &= ~BYPASS_MASK;
	writel_relaxed(val, pll->base + GNRL_CTL);

	return 0;
}
325 
326 static int clk_pll14xx_is_prepared(struct clk_hw *hw)
327 {
328 	struct clk_pll14xx *pll = to_clk_pll14xx(hw);
329 	u32 val;
330 
331 	val = readl_relaxed(pll->base + GNRL_CTL);
332 
333 	return (val & RST_MASK) ? 1 : 0;
334 }
335 
336 static void clk_pll14xx_unprepare(struct clk_hw *hw)
337 {
338 	struct clk_pll14xx *pll = to_clk_pll14xx(hw);
339 	u32 val;
340 
341 	/*
342 	 * Set RST to 0, power down mode is enabled and
343 	 * every digital block is reset
344 	 */
345 	val = readl_relaxed(pll->base + GNRL_CTL);
346 	val &= ~RST_MASK;
347 	writel_relaxed(val, pll->base + GNRL_CTL);
348 }
349 
/* Full-featured ops for a PLL1416x with a rate table */
static const struct clk_ops clk_pll1416x_ops = {
	.prepare	= clk_pll14xx_prepare,
	.unprepare	= clk_pll14xx_unprepare,
	.is_prepared	= clk_pll14xx_is_prepared,
	.recalc_rate	= clk_pll1416x_recalc_rate,
	.round_rate	= clk_pll14xx_round_rate,
	.set_rate	= clk_pll1416x_set_rate,
};

/* Read-only ops, used when no rate table is provided (rate is fixed) */
static const struct clk_ops clk_pll1416x_min_ops = {
	.recalc_rate	= clk_pll1416x_recalc_rate,
};

/* Full-featured ops for a PLL1443x (fractional) */
static const struct clk_ops clk_pll1443x_ops = {
	.prepare	= clk_pll14xx_prepare,
	.unprepare	= clk_pll14xx_unprepare,
	.is_prepared	= clk_pll14xx_is_prepared,
	.recalc_rate	= clk_pll1443x_recalc_rate,
	.round_rate	= clk_pll14xx_round_rate,
	.set_rate	= clk_pll1443x_set_rate,
};
371 
372 struct clk *imx_clk_pll14xx(const char *name, const char *parent_name,
373 			    void __iomem *base,
374 			    const struct imx_pll14xx_clk *pll_clk)
375 {
376 	struct clk_pll14xx *pll;
377 	struct clk *clk;
378 	struct clk_init_data init;
379 	u32 val;
380 
381 	pll = kzalloc(sizeof(*pll), GFP_KERNEL);
382 	if (!pll)
383 		return ERR_PTR(-ENOMEM);
384 
385 	init.name = name;
386 	init.flags = pll_clk->flags;
387 	init.parent_names = &parent_name;
388 	init.num_parents = 1;
389 
390 	switch (pll_clk->type) {
391 	case PLL_1416X:
392 		if (!pll_clk->rate_table)
393 			init.ops = &clk_pll1416x_min_ops;
394 		else
395 			init.ops = &clk_pll1416x_ops;
396 		break;
397 	case PLL_1443X:
398 		init.ops = &clk_pll1443x_ops;
399 		break;
400 	default:
401 		pr_err("%s: Unknown pll type for pll clk %s\n",
402 		       __func__, name);
403 	};
404 
405 	pll->base = base;
406 	pll->hw.init = &init;
407 	pll->type = pll_clk->type;
408 	pll->rate_table = pll_clk->rate_table;
409 	pll->rate_count = pll_clk->rate_count;
410 
411 	val = readl_relaxed(pll->base + GNRL_CTL);
412 	val &= ~BYPASS_MASK;
413 	writel_relaxed(val, pll->base + GNRL_CTL);
414 
415 	clk = clk_register(NULL, &pll->hw);
416 	if (IS_ERR(clk)) {
417 		pr_err("%s: failed to register pll %s %lu\n",
418 			__func__, name, PTR_ERR(clk));
419 		kfree(pll);
420 	}
421 
422 	return clk;
423 }
424