xref: /linux/drivers/clk/imx/clk-pll14xx.c (revision 44f57d78)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2017-2018 NXP.
 */

#include <linux/bitops.h>
#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/slab.h>
#include <linux/jiffies.h>

#include "clk.h"

#define GNRL_CTL	0x0
#define DIV_CTL		0x4
#define LOCK_STATUS	BIT(31)
#define LOCK_SEL_MASK	BIT(29)
#define CLKE_MASK	BIT(11)
#define RST_MASK	BIT(9)
#define BYPASS_MASK	BIT(4)
#define MDIV_SHIFT	12
#define MDIV_MASK	GENMASK(21, 12)
#define PDIV_SHIFT	4
#define PDIV_MASK	GENMASK(9, 4)
#define SDIV_SHIFT	0
#define SDIV_MASK	GENMASK(2, 0)
#define KDIV_SHIFT	0
#define KDIV_MASK	GENMASK(15, 0)
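
/*
 * Register layout, as implied by the definitions above and the accesses
 * below (a reader's summary, not taken from a datasheet): GNRL_CTL at
 * offset 0x0 holds the LOCK_STATUS, LOCK_SEL, CLKE, RST (RESETB) and
 * BYPASS control bits; the divider register at offset 0x4 holds the
 * MDIV, PDIV and SDIV fields; on the fractional PLL1443x a second
 * divider register at offset 0x8 holds the signed 16-bit KDIV field.
 */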

#define LOCK_TIMEOUT_US		10000

struct clk_pll14xx {
	struct clk_hw			hw;
	void __iomem			*base;
	enum imx_pll14xx_type		type;
	const struct imx_pll14xx_rate_table *rate_table;
	int rate_count;
};

#define to_clk_pll14xx(_hw) container_of(_hw, struct clk_pll14xx, hw)

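/*
 * Rates are resolved by exact match against the SoC-supplied rate table:
 * set_rate only accepts rates that appear in the table, and round_rate
 * below picks the nearest table entry (the table is expected to be sorted
 * in descending order).
 */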
static const struct imx_pll14xx_rate_table *imx_get_pll_settings(
		struct clk_pll14xx *pll, unsigned long rate)
{
	const struct imx_pll14xx_rate_table *rate_table = pll->rate_table;
	int i;

	for (i = 0; i < pll->rate_count; i++)
		if (rate == rate_table[i].rate)
			return &rate_table[i];

	return NULL;
}

static long clk_pll14xx_round_rate(struct clk_hw *hw, unsigned long rate,
			unsigned long *prate)
{
	struct clk_pll14xx *pll = to_clk_pll14xx(hw);
	const struct imx_pll14xx_rate_table *rate_table = pll->rate_table;
	int i;

	/* Assuming rate_table is in descending order */
	for (i = 0; i < pll->rate_count; i++)
		if (rate >= rate_table[i].rate)
			return rate_table[i].rate;

	/* return minimum supported value */
	return rate_table[i - 1].rate;
}

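/*
 * Integer PLL (PLL1416x) output, as computed below:
 *
 *	Fout = (mdiv * Fin) / (pdiv << sdiv)
 *
 * Illustrative arithmetic only (the divider values here are hypothetical):
 * with a 24 MHz parent, mdiv = 300, pdiv = 9 and sdiv = 0 give
 * 24 MHz * 300 / (9 << 0) = 800 MHz.
 */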
static unsigned long clk_pll1416x_recalc_rate(struct clk_hw *hw,
						  unsigned long parent_rate)
{
	struct clk_pll14xx *pll = to_clk_pll14xx(hw);
	u32 mdiv, pdiv, sdiv, pll_div;
	u64 fvco = parent_rate;

	pll_div = readl_relaxed(pll->base + 4);
	mdiv = (pll_div & MDIV_MASK) >> MDIV_SHIFT;
	pdiv = (pll_div & PDIV_MASK) >> PDIV_SHIFT;
	sdiv = (pll_div & SDIV_MASK) >> SDIV_SHIFT;

	fvco *= mdiv;
	do_div(fvco, pdiv << sdiv);

	return fvco;
}

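/*
 * Fractional PLL (PLL1443x) output, as computed below:
 *
 *	Fout = ((mdiv * 65536 + kdiv) * Fin) / ((pdiv * 65536) << sdiv)
 *
 * kdiv is read back into a signed 16-bit variable, so raw values with bit
 * 15 set are treated as negative and pull the rate slightly below the
 * integer-only value. Illustrative arithmetic only (hypothetical divider
 * values): with a 24 MHz parent, mdiv = 30, pdiv = 1, sdiv = 0 and
 * kdiv = 16384 give (30 + 16384/65536) * 24 MHz = 726 MHz.
 */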
static unsigned long clk_pll1443x_recalc_rate(struct clk_hw *hw,
						  unsigned long parent_rate)
{
	struct clk_pll14xx *pll = to_clk_pll14xx(hw);
	u32 mdiv, pdiv, sdiv, pll_div_ctl0, pll_div_ctl1;
	short int kdiv;
	u64 fvco = parent_rate;

	pll_div_ctl0 = readl_relaxed(pll->base + 4);
	pll_div_ctl1 = readl_relaxed(pll->base + 8);
	mdiv = (pll_div_ctl0 & MDIV_MASK) >> MDIV_SHIFT;
	pdiv = (pll_div_ctl0 & PDIV_MASK) >> PDIV_SHIFT;
	sdiv = (pll_div_ctl0 & SDIV_MASK) >> SDIV_SHIFT;
	kdiv = pll_div_ctl1 & KDIV_MASK;

	/* fvco = (m * 65536 + k) * Fin / (p * 65536) */
	fvco *= (mdiv * 65536 + kdiv);
	pdiv *= 65536;

	do_div(fvco, pdiv << sdiv);

	return fvco;
}

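/*
 * Only sdiv (the post scaler) can be changed while the PLL keeps running;
 * the helpers below detect whether mdiv/pdiv (and kdiv for the 1443x)
 * differ from the requested settings, in which case set_rate has to put
 * the PLL into reset, reprogram the dividers and wait for it to re-lock.
 */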
static inline bool clk_pll1416x_mp_change(const struct imx_pll14xx_rate_table *rate,
					  u32 pll_div)
{
	u32 old_mdiv, old_pdiv;

	old_mdiv = (pll_div & MDIV_MASK) >> MDIV_SHIFT;
	old_pdiv = (pll_div & PDIV_MASK) >> PDIV_SHIFT;

	return rate->mdiv != old_mdiv || rate->pdiv != old_pdiv;
}

static inline bool clk_pll1443x_mpk_change(const struct imx_pll14xx_rate_table *rate,
					  u32 pll_div_ctl0, u32 pll_div_ctl1)
{
	u32 old_mdiv, old_pdiv, old_kdiv;

	old_mdiv = (pll_div_ctl0 & MDIV_MASK) >> MDIV_SHIFT;
	old_pdiv = (pll_div_ctl0 & PDIV_MASK) >> PDIV_SHIFT;
	old_kdiv = (pll_div_ctl1 & KDIV_MASK) >> KDIV_SHIFT;

	return rate->mdiv != old_mdiv || rate->pdiv != old_pdiv ||
		rate->kdiv != old_kdiv;
}

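/*
 * Wait for the PLL to report lock: poll the LOCK_STATUS bit (bit 31 of
 * GNRL_CTL) until it is set, giving up after LOCK_TIMEOUT_US.
 */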
static int clk_pll14xx_wait_lock(struct clk_pll14xx *pll)
{
	u32 val;

	return readl_poll_timeout(pll->base, val, val & LOCK_STATUS, 0,
			LOCK_TIMEOUT_US);
}

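/*
 * Reprogramming sequence used by both set_rate implementations below: if
 * only sdiv differs from the current settings, the new post scaler is
 * written directly and the PLL keeps running; otherwise RESETB is driven
 * low, the dividers are rewritten, and after the spec-mandated delay the
 * PLL is released from reset, the code waits for lock and finally clears
 * the bypass bit so the PLL output is used.
 */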
static int clk_pll1416x_set_rate(struct clk_hw *hw, unsigned long drate,
				 unsigned long prate)
{
	struct clk_pll14xx *pll = to_clk_pll14xx(hw);
	const struct imx_pll14xx_rate_table *rate;
	u32 tmp, div_val;
	int ret;

	rate = imx_get_pll_settings(pll, drate);
	if (!rate) {
		pr_err("%s: Invalid rate : %lu for pll clk %s\n", __func__,
		       drate, clk_hw_get_name(hw));
		return -EINVAL;
	}

	tmp = readl_relaxed(pll->base + 4);

	if (!clk_pll1416x_mp_change(rate, tmp)) {
		tmp &= ~SDIV_MASK;
		tmp |= rate->sdiv << SDIV_SHIFT;
		writel_relaxed(tmp, pll->base + 4);

		return 0;
	}

	/* Bypass clock and set lock to pll output lock */
	tmp = readl_relaxed(pll->base);
	tmp |= LOCK_SEL_MASK;
	writel_relaxed(tmp, pll->base);

	/* Enable RST (drive RESETB low) */
	tmp &= ~RST_MASK;
	writel_relaxed(tmp, pll->base);

	div_val = (rate->mdiv << MDIV_SHIFT) | (rate->pdiv << PDIV_SHIFT) |
		(rate->sdiv << SDIV_SHIFT);
	writel_relaxed(div_val, pll->base + 0x4);

	/*
	 * According to the spec, t3 - t2 needs to be greater than
	 * both 1us and 1/FREF.
	 * FREF is FIN / Prediv; Prediv is in [1, 63], so choose
	 * 3us.
	 */
	udelay(3);

	/* Disable RST (release RESETB) */
	tmp |= RST_MASK;
	writel_relaxed(tmp, pll->base);

	/* Wait Lock */
	ret = clk_pll14xx_wait_lock(pll);
	if (ret)
		return ret;

	/* Clear bypass */
	tmp &= ~BYPASS_MASK;
	writel_relaxed(tmp, pll->base);

	return 0;
}

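/*
 * The PLL1443x variant follows the same sequence but also reprograms the
 * 16-bit fractional divider (KDIV) in the second divider register, so the
 * "no reset needed" shortcut is only taken when mdiv, pdiv and kdiv all
 * already match the requested table entry.
 */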
static int clk_pll1443x_set_rate(struct clk_hw *hw, unsigned long drate,
				 unsigned long prate)
{
	struct clk_pll14xx *pll = to_clk_pll14xx(hw);
	const struct imx_pll14xx_rate_table *rate;
	u32 tmp, div_val;
	int ret;

	rate = imx_get_pll_settings(pll, drate);
	if (!rate) {
		pr_err("%s: Invalid rate : %lu for pll clk %s\n", __func__,
			drate, clk_hw_get_name(hw));
		return -EINVAL;
	}

	tmp = readl_relaxed(pll->base + 4);
	div_val = readl_relaxed(pll->base + 8);

	if (!clk_pll1443x_mpk_change(rate, tmp, div_val)) {
		tmp &= ~SDIV_MASK;
		tmp |= rate->sdiv << SDIV_SHIFT;
		writel_relaxed(tmp, pll->base + 4);

		return 0;
	}

	/* Enable RST (drive RESETB low) */
	tmp = readl_relaxed(pll->base);
	tmp &= ~RST_MASK;
	writel_relaxed(tmp, pll->base);

	div_val = (rate->mdiv << MDIV_SHIFT) | (rate->pdiv << PDIV_SHIFT) |
		(rate->sdiv << SDIV_SHIFT);
	writel_relaxed(div_val, pll->base + 0x4);
	writel_relaxed(rate->kdiv << KDIV_SHIFT, pll->base + 0x8);

	/*
	 * According to the spec, t3 - t2 needs to be greater than
	 * both 1us and 1/FREF.
	 * FREF is FIN / Prediv; Prediv is in [1, 63], so choose
	 * 3us.
	 */
	udelay(3);

	/* Disable RST (release RESETB) */
	tmp |= RST_MASK;
	writel_relaxed(tmp, pll->base);

	/* Wait Lock */
	ret = clk_pll14xx_wait_lock(pll);
	if (ret)
		return ret;

	/* Clear bypass */
	tmp &= ~BYPASS_MASK;
	writel_relaxed(tmp, pll->base);

	return 0;
}

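/*
 * prepare/unprepare toggle the RESETB bit in GNRL_CTL: setting it lets the
 * PLL run (prepare then waits for lock), clearing it puts the PLL into
 * power-down with every digital block held in reset, and is_prepared
 * simply reports the current state of that bit.
 */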
static int clk_pll14xx_prepare(struct clk_hw *hw)
{
	struct clk_pll14xx *pll = to_clk_pll14xx(hw);
	u32 val;

	/*
	 * RESETB = 1 from 0, PLL starts its normal
	 * operation after lock time
	 */
	val = readl_relaxed(pll->base + GNRL_CTL);
	val |= RST_MASK;
	writel_relaxed(val, pll->base + GNRL_CTL);

	return clk_pll14xx_wait_lock(pll);
}

static int clk_pll14xx_is_prepared(struct clk_hw *hw)
{
	struct clk_pll14xx *pll = to_clk_pll14xx(hw);
	u32 val;

	val = readl_relaxed(pll->base + GNRL_CTL);

	return (val & RST_MASK) ? 1 : 0;
}

static void clk_pll14xx_unprepare(struct clk_hw *hw)
{
	struct clk_pll14xx *pll = to_clk_pll14xx(hw);
	u32 val;

	/*
	 * Set RST to 0, power down mode is enabled and
	 * every digital block is reset
	 */
	val = readl_relaxed(pll->base + GNRL_CTL);
	val &= ~RST_MASK;
	writel_relaxed(val, pll->base + GNRL_CTL);
}

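/*
 * Three ops tables: full ops for the integer 1416x and fractional 1443x
 * PLLs, plus a read-only "min" variant (recalc_rate only) that the
 * registration code below selects for a 1416x PLL registered without a
 * rate table.
 */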
static const struct clk_ops clk_pll1416x_ops = {
	.prepare	= clk_pll14xx_prepare,
	.unprepare	= clk_pll14xx_unprepare,
	.is_prepared	= clk_pll14xx_is_prepared,
	.recalc_rate	= clk_pll1416x_recalc_rate,
	.round_rate	= clk_pll14xx_round_rate,
	.set_rate	= clk_pll1416x_set_rate,
};

static const struct clk_ops clk_pll1416x_min_ops = {
	.recalc_rate	= clk_pll1416x_recalc_rate,
};

static const struct clk_ops clk_pll1443x_ops = {
	.prepare	= clk_pll14xx_prepare,
	.unprepare	= clk_pll14xx_unprepare,
	.is_prepared	= clk_pll14xx_is_prepared,
	.recalc_rate	= clk_pll1443x_recalc_rate,
	.round_rate	= clk_pll14xx_round_rate,
	.set_rate	= clk_pll1443x_set_rate,
};

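/*
 * Registration usage sketch (illustrative only: the parent name, register
 * base and table values below are hypothetical, not taken from an actual
 * SoC clock driver):
 *
 *	static const struct imx_pll14xx_rate_table example_tbl[] = {
 *		{ .rate = 800000000U, .mdiv = 300, .pdiv = 9, .sdiv = 0, .kdiv = 0 },
 *	};
 *
 *	static const struct imx_pll14xx_clk example_pll = {
 *		.type = PLL_1416X,
 *		.rate_table = example_tbl,
 *		.rate_count = ARRAY_SIZE(example_tbl),
 *	};
 *
 *	struct clk *clk = imx_clk_pll14xx("example_pll", "osc_24m", base,
 *					  &example_pll);
 *
 * With a 24 MHz parent, that single entry yields
 * 24 MHz * 300 / (9 << 0) = 800 MHz.
 */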
struct clk *imx_clk_pll14xx(const char *name, const char *parent_name,
			    void __iomem *base,
			    const struct imx_pll14xx_clk *pll_clk)
{
	struct clk_pll14xx *pll;
	struct clk *clk;
	struct clk_init_data init;

	pll = kzalloc(sizeof(*pll), GFP_KERNEL);
	if (!pll)
		return ERR_PTR(-ENOMEM);

	init.name = name;
	init.flags = pll_clk->flags;
	init.parent_names = &parent_name;
	init.num_parents = 1;

	switch (pll_clk->type) {
	case PLL_1416X:
		if (!pll_clk->rate_table)
			init.ops = &clk_pll1416x_min_ops;
		else
			init.ops = &clk_pll1416x_ops;
		break;
	case PLL_1443X:
		init.ops = &clk_pll1443x_ops;
		break;
	default:
		pr_err("%s: Unknown pll type for pll clk %s\n",
		       __func__, name);
		kfree(pll);
		return ERR_PTR(-EINVAL);
	}

	pll->base = base;
	pll->hw.init = &init;
	pll->type = pll_clk->type;
	pll->rate_table = pll_clk->rate_table;
	pll->rate_count = pll_clk->rate_count;

	clk = clk_register(NULL, &pll->hw);
	if (IS_ERR(clk)) {
		pr_err("%s: failed to register pll %s %ld\n",
			__func__, name, PTR_ERR(clk));
		kfree(pll);
	}

	return clk;
}