xref: /linux/drivers/clk/st/clkgen-fsyn.c (revision 44f57d78)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2014 STMicroelectronics R&D Ltd
4  */
5 
6 /*
7  * Authors:
8  * Stephen Gallimore <stephen.gallimore@st.com>,
9  * Pankaj Dev <pankaj.dev@st.com>.
10  */
11 
12 #include <linux/slab.h>
13 #include <linux/of_address.h>
14 #include <linux/clk.h>
15 #include <linux/clk-provider.h>
16 
17 #include "clkgen.h"
18 
19 /*
20  * Maximum input clock to the PLL before we divide it down by 2
21  * although in reality in actual systems this has never been seen to
22  * be used.
23  */
24 #define QUADFS_NDIV_THRESHOLD 30000000
25 
26 #define PLL_BW_GOODREF   (0L)
27 #define PLL_BW_VBADREF   (1L)
28 #define PLL_BW_BADREF    (2L)
29 #define PLL_BW_VGOODREF  (3L)
30 
31 #define QUADFS_MAX_CHAN 4
32 
33 struct stm_fs {
34 	unsigned long ndiv;
35 	unsigned long mdiv;
36 	unsigned long pe;
37 	unsigned long sdiv;
38 	unsigned long nsdiv;
39 };
40 
41 struct clkgen_quadfs_data {
42 	bool reset_present;
43 	bool bwfilter_present;
44 	bool lockstatus_present;
45 	bool powerup_polarity;
46 	bool standby_polarity;
47 	bool nsdiv_present;
48 	bool nrst_present;
49 	struct clkgen_field ndiv;
50 	struct clkgen_field ref_bw;
51 	struct clkgen_field nreset;
52 	struct clkgen_field npda;
53 	struct clkgen_field lock_status;
54 
55 	struct clkgen_field nrst[QUADFS_MAX_CHAN];
56 	struct clkgen_field nsb[QUADFS_MAX_CHAN];
57 	struct clkgen_field en[QUADFS_MAX_CHAN];
58 	struct clkgen_field mdiv[QUADFS_MAX_CHAN];
59 	struct clkgen_field pe[QUADFS_MAX_CHAN];
60 	struct clkgen_field sdiv[QUADFS_MAX_CHAN];
61 	struct clkgen_field nsdiv[QUADFS_MAX_CHAN];
62 
63 	const struct clk_ops *pll_ops;
64 	int  (*get_params)(unsigned long, unsigned long, struct stm_fs *);
65 	int  (*get_rate)(unsigned long, const struct stm_fs *,
66 			unsigned long *);
67 };
68 
69 static const struct clk_ops st_quadfs_pll_c32_ops;
70 static const struct clk_ops st_quadfs_fs660c32_ops;
71 
72 static int clk_fs660c32_dig_get_params(unsigned long input,
73 		unsigned long output, struct stm_fs *fs);
74 static int clk_fs660c32_dig_get_rate(unsigned long, const struct stm_fs *,
75 		unsigned long *);
76 
77 static const struct clkgen_quadfs_data st_fs660c32_C = {
78 	.nrst_present = true,
79 	.nrst	= { CLKGEN_FIELD(0x2f0, 0x1, 0),
80 		    CLKGEN_FIELD(0x2f0, 0x1, 1),
81 		    CLKGEN_FIELD(0x2f0, 0x1, 2),
82 		    CLKGEN_FIELD(0x2f0, 0x1, 3) },
83 	.npda	= CLKGEN_FIELD(0x2f0, 0x1, 12),
84 	.nsb	= { CLKGEN_FIELD(0x2f0, 0x1, 8),
85 		    CLKGEN_FIELD(0x2f0, 0x1, 9),
86 		    CLKGEN_FIELD(0x2f0, 0x1, 10),
87 		    CLKGEN_FIELD(0x2f0, 0x1, 11) },
88 	.nsdiv_present = true,
89 	.nsdiv	= { CLKGEN_FIELD(0x304, 0x1, 24),
90 		    CLKGEN_FIELD(0x308, 0x1, 24),
91 		    CLKGEN_FIELD(0x30c, 0x1, 24),
92 		    CLKGEN_FIELD(0x310, 0x1, 24) },
93 	.mdiv	= { CLKGEN_FIELD(0x304, 0x1f, 15),
94 		    CLKGEN_FIELD(0x308, 0x1f, 15),
95 		    CLKGEN_FIELD(0x30c, 0x1f, 15),
96 		    CLKGEN_FIELD(0x310, 0x1f, 15) },
97 	.en	= { CLKGEN_FIELD(0x2fc, 0x1, 0),
98 		    CLKGEN_FIELD(0x2fc, 0x1, 1),
99 		    CLKGEN_FIELD(0x2fc, 0x1, 2),
100 		    CLKGEN_FIELD(0x2fc, 0x1, 3) },
101 	.ndiv	= CLKGEN_FIELD(0x2f4, 0x7, 16),
102 	.pe	= { CLKGEN_FIELD(0x304, 0x7fff, 0),
103 		    CLKGEN_FIELD(0x308, 0x7fff, 0),
104 		    CLKGEN_FIELD(0x30c, 0x7fff, 0),
105 		    CLKGEN_FIELD(0x310, 0x7fff, 0) },
106 	.sdiv	= { CLKGEN_FIELD(0x304, 0xf, 20),
107 		    CLKGEN_FIELD(0x308, 0xf, 20),
108 		    CLKGEN_FIELD(0x30c, 0xf, 20),
109 		    CLKGEN_FIELD(0x310, 0xf, 20) },
110 	.lockstatus_present = true,
111 	.lock_status = CLKGEN_FIELD(0x2f0, 0x1, 24),
112 	.powerup_polarity = 1,
113 	.standby_polarity = 1,
114 	.pll_ops	= &st_quadfs_pll_c32_ops,
115 	.get_params	= clk_fs660c32_dig_get_params,
116 	.get_rate	= clk_fs660c32_dig_get_rate,
117 };
118 
119 static const struct clkgen_quadfs_data st_fs660c32_D = {
120 	.nrst_present = true,
121 	.nrst	= { CLKGEN_FIELD(0x2a0, 0x1, 0),
122 		    CLKGEN_FIELD(0x2a0, 0x1, 1),
123 		    CLKGEN_FIELD(0x2a0, 0x1, 2),
124 		    CLKGEN_FIELD(0x2a0, 0x1, 3) },
125 	.ndiv	= CLKGEN_FIELD(0x2a4, 0x7, 16),
126 	.pe	= { CLKGEN_FIELD(0x2b4, 0x7fff, 0),
127 		    CLKGEN_FIELD(0x2b8, 0x7fff, 0),
128 		    CLKGEN_FIELD(0x2bc, 0x7fff, 0),
129 		    CLKGEN_FIELD(0x2c0, 0x7fff, 0) },
130 	.sdiv	= { CLKGEN_FIELD(0x2b4, 0xf, 20),
131 		    CLKGEN_FIELD(0x2b8, 0xf, 20),
132 		    CLKGEN_FIELD(0x2bc, 0xf, 20),
133 		    CLKGEN_FIELD(0x2c0, 0xf, 20) },
134 	.npda	= CLKGEN_FIELD(0x2a0, 0x1, 12),
135 	.nsb	= { CLKGEN_FIELD(0x2a0, 0x1, 8),
136 		    CLKGEN_FIELD(0x2a0, 0x1, 9),
137 		    CLKGEN_FIELD(0x2a0, 0x1, 10),
138 		    CLKGEN_FIELD(0x2a0, 0x1, 11) },
139 	.nsdiv_present = true,
140 	.nsdiv	= { CLKGEN_FIELD(0x2b4, 0x1, 24),
141 		    CLKGEN_FIELD(0x2b8, 0x1, 24),
142 		    CLKGEN_FIELD(0x2bc, 0x1, 24),
143 		    CLKGEN_FIELD(0x2c0, 0x1, 24) },
144 	.mdiv	= { CLKGEN_FIELD(0x2b4, 0x1f, 15),
145 		    CLKGEN_FIELD(0x2b8, 0x1f, 15),
146 		    CLKGEN_FIELD(0x2bc, 0x1f, 15),
147 		    CLKGEN_FIELD(0x2c0, 0x1f, 15) },
148 	.en	= { CLKGEN_FIELD(0x2ac, 0x1, 0),
149 		    CLKGEN_FIELD(0x2ac, 0x1, 1),
150 		    CLKGEN_FIELD(0x2ac, 0x1, 2),
151 		    CLKGEN_FIELD(0x2ac, 0x1, 3) },
152 	.lockstatus_present = true,
153 	.lock_status = CLKGEN_FIELD(0x2a0, 0x1, 24),
154 	.powerup_polarity = 1,
155 	.standby_polarity = 1,
156 	.pll_ops	= &st_quadfs_pll_c32_ops,
157 	.get_params	= clk_fs660c32_dig_get_params,
158 	.get_rate	= clk_fs660c32_dig_get_rate,
};
159 
160 /**
161  * DOC: A Frequency Synthesizer that multiplies its input clock by a fixed factor
162  *
163  * Traits of this clock:
164  * prepare - clk_(un)prepare only ensures parent is (un)prepared
165  * enable - clk_enable and clk_disable are functional & control the Fsyn
166  * rate - set_rate/round_rate/recalc_rate are functional
167  * parent - fixed parent.  No clk_set_parent support
168  */
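
/*
 * Note: for the FS660C32 PLL handled below, the output rate is derived from
 * the parent rate as
 *
 *     rate = parent_rate * (ndiv + 16)
 *
 * where ndiv is the 3-bit register field programmed by set_rate().  For
 * example (illustrative numbers), a 30 MHz input with ndiv = 6 gives
 * 30 MHz * 22 = 660 MHz.
 */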
169 
170 /**
171  * struct st_clk_quadfs_pll - A pll which outputs a fixed multiplier of
172  *                                  its parent clock, found inside a type of
173  *                                  ST quad channel frequency synthesizer block
174  *
175  * @hw: handle between common and hardware-specific interfaces.
176  * @regs_base: base address of the configuration registers.
177  * @lock: spinlock protecting register access.
178  * @data: SoC-specific register layout and ops for this quadfs variant.
179  * @ndiv: cached ndiv value programmed into the hardware on enable/set_rate.
180  */
181 struct st_clk_quadfs_pll {
182 	struct clk_hw	hw;
183 	void __iomem	*regs_base;
184 	spinlock_t	*lock;
185 	struct clkgen_quadfs_data *data;
186 	u32 ndiv;
187 };
188 
189 #define to_quadfs_pll(_hw) container_of(_hw, struct st_clk_quadfs_pll, hw)
190 
191 static int quadfs_pll_enable(struct clk_hw *hw)
192 {
193 	struct st_clk_quadfs_pll *pll = to_quadfs_pll(hw);
194 	unsigned long flags = 0, timeout = jiffies + msecs_to_jiffies(10);
195 
196 	if (pll->lock)
197 		spin_lock_irqsave(pll->lock, flags);
198 
199 	/*
200 	 * Bring block out of reset if we have reset control.
201 	 */
202 	if (pll->data->reset_present)
203 		CLKGEN_WRITE(pll, nreset, 1);
204 
205 	/*
206 	 * Use a fixed input clock noise bandwidth filter for the moment
207 	 */
208 	if (pll->data->bwfilter_present)
209 		CLKGEN_WRITE(pll, ref_bw, PLL_BW_GOODREF);
210 
211 
212 	CLKGEN_WRITE(pll, ndiv, pll->ndiv);
213 
214 	/*
215 	 * Power up the PLL
216 	 */
217 	CLKGEN_WRITE(pll, npda, !pll->data->powerup_polarity);
218 
219 	if (pll->lock)
220 		spin_unlock_irqrestore(pll->lock, flags);
221 
222 	if (pll->data->lockstatus_present)
223 		while (!CLKGEN_READ(pll, lock_status)) {
224 			if (time_after(jiffies, timeout))
225 				return -ETIMEDOUT;
226 			cpu_relax();
227 		}
228 
229 	return 0;
230 }
231 
232 static void quadfs_pll_disable(struct clk_hw *hw)
233 {
234 	struct st_clk_quadfs_pll *pll = to_quadfs_pll(hw);
235 	unsigned long flags = 0;
236 
237 	if (pll->lock)
238 		spin_lock_irqsave(pll->lock, flags);
239 
240 	/*
241 	 * Powerdown the PLL and then put block into soft reset if we have
242 	 * reset control.
243 	 */
244 	CLKGEN_WRITE(pll, npda, pll->data->powerup_polarity);
245 
246 	if (pll->data->reset_present)
247 		CLKGEN_WRITE(pll, nreset, 0);
248 
249 	if (pll->lock)
250 		spin_unlock_irqrestore(pll->lock, flags);
251 }
252 
253 static int quadfs_pll_is_enabled(struct clk_hw *hw)
254 {
255 	struct st_clk_quadfs_pll *pll = to_quadfs_pll(hw);
256 	u32 npda = CLKGEN_READ(pll, npda);
257 
258 	return pll->data->powerup_polarity ? !npda : !!npda;
259 }
260 
261 static int clk_fs660c32_vco_get_rate(unsigned long input, struct stm_fs *fs,
262 			   unsigned long *rate)
263 {
264 	unsigned long nd = fs->ndiv + 16; /* ndiv value */
265 
266 	*rate = input * nd;
267 
268 	return 0;
269 }
270 
271 static unsigned long quadfs_pll_fs660c32_recalc_rate(struct clk_hw *hw,
272 					unsigned long parent_rate)
273 {
274 	struct st_clk_quadfs_pll *pll = to_quadfs_pll(hw);
275 	unsigned long rate = 0;
276 	struct stm_fs params;
277 
278 	params.ndiv = CLKGEN_READ(pll, ndiv);
279 	if (clk_fs660c32_vco_get_rate(parent_rate, &params, &rate))
280 		pr_err("%s:%s error calculating rate\n",
281 		       clk_hw_get_name(hw), __func__);
282 
283 	pll->ndiv = params.ndiv;
284 
285 	return rate;
286 }
287 
288 static int clk_fs660c32_vco_get_params(unsigned long input,
289 				unsigned long output, struct stm_fs *fs)
290 {
291 	/*
292 	 * Formula: VCO frequency = (fin x ndiv) / pdiv
293 	 *          ndiv = VCO frequency * pdiv / fin
294 	 */
295 	unsigned long pdiv = 1, n;
296 
297 	/* Output clock range: 384 MHz to 660 MHz */
298 	if (output < 384000000 || output > 660000000)
299 		return -EINVAL;
300 
301 	if (input > 40000000)
302 		/* This means that PDIV would be 2 instead of 1,
303 		 * which is not supported today. */
304 		return -EINVAL;
305 
306 	input /= 1000;
307 	output /= 1000;
308 
309 	n = output * pdiv / input;
310 	if (n < 16)
311 		n = 16;
312 	fs->ndiv = n - 16; /* Converting formula value to reg value */
313 
314 	return 0;
315 }
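
/*
 * Illustrative example (numbers chosen for illustration, not taken from a
 * real board): with a 30 MHz input and a requested 600 MHz VCO output the
 * code above computes n = 600000 * 1 / 30000 = 20, so fs->ndiv = 20 - 16 = 4.
 * Feeding ndiv = 4 back into clk_fs660c32_vco_get_rate() gives
 * (4 + 16) * 30 MHz = 600 MHz.
 */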
316 
317 static long quadfs_pll_fs660c32_round_rate(struct clk_hw *hw,
318 					   unsigned long rate,
319 					   unsigned long *prate)
320 {
321 	struct stm_fs params;
322 
323 	if (clk_fs660c32_vco_get_params(*prate, rate, &params))
324 		return rate;
325 
326 	clk_fs660c32_vco_get_rate(*prate, &params, &rate);
327 
328 	pr_debug("%s: %s new rate %ld [ndiv=%u]\n",
329 		 __func__, clk_hw_get_name(hw),
330 		 rate, (unsigned int)params.ndiv);
331 
332 	return rate;
333 }
334 
335 static int quadfs_pll_fs660c32_set_rate(struct clk_hw *hw, unsigned long rate,
336 				unsigned long parent_rate)
337 {
338 	struct st_clk_quadfs_pll *pll = to_quadfs_pll(hw);
339 	struct stm_fs params;
340 	unsigned long hwrate = 0;
341 	unsigned long flags = 0;
342 	int ret;
343 
344 	if (!rate || !parent_rate)
345 		return -EINVAL;
346 
347 	ret = clk_fs660c32_vco_get_params(parent_rate, rate, &params);
348 	if (ret)
349 		return ret;
350 
351 	clk_fs660c32_vco_get_rate(parent_rate, &params, &hwrate);
352 
353 	pr_debug("%s: %s new rate %lu [ndiv=0x%x]\n",
354 		 __func__, clk_hw_get_name(hw),
355 		 hwrate, (unsigned int)params.ndiv);
356 
357 	if (!hwrate)
358 		return -EINVAL;
359 
360 	pll->ndiv = params.ndiv;
361 
362 	if (pll->lock)
363 		spin_lock_irqsave(pll->lock, flags);
364 
365 	CLKGEN_WRITE(pll, ndiv, pll->ndiv);
366 
367 	if (pll->lock)
368 		spin_unlock_irqrestore(pll->lock, flags);
369 
370 	return 0;
371 }
372 
373 static const struct clk_ops st_quadfs_pll_c32_ops = {
374 	.enable		= quadfs_pll_enable,
375 	.disable	= quadfs_pll_disable,
376 	.is_enabled	= quadfs_pll_is_enabled,
377 	.recalc_rate	= quadfs_pll_fs660c32_recalc_rate,
378 	.round_rate	= quadfs_pll_fs660c32_round_rate,
379 	.set_rate	= quadfs_pll_fs660c32_set_rate,
380 };
381 
382 static struct clk * __init st_clk_register_quadfs_pll(
383 		const char *name, const char *parent_name,
384 		struct clkgen_quadfs_data *quadfs, void __iomem *reg,
385 		spinlock_t *lock)
386 {
387 	struct st_clk_quadfs_pll *pll;
388 	struct clk *clk;
389 	struct clk_init_data init;
390 
391 	/*
392 	 * Sanity check required pointers.
393 	 */
394 	if (WARN_ON(!name || !parent_name))
395 		return ERR_PTR(-EINVAL);
396 
397 	pll = kzalloc(sizeof(*pll), GFP_KERNEL);
398 	if (!pll)
399 		return ERR_PTR(-ENOMEM);
400 
401 	init.name = name;
402 	init.ops = quadfs->pll_ops;
403 	init.flags = CLK_GET_RATE_NOCACHE;
404 	init.parent_names = &parent_name;
405 	init.num_parents = 1;
406 
407 	pll->data = quadfs;
408 	pll->regs_base = reg;
409 	pll->lock = lock;
410 	pll->hw.init = &init;
411 
412 	clk = clk_register(NULL, &pll->hw);
413 
414 	if (IS_ERR(clk))
415 		kfree(pll);
416 
417 	return clk;
418 }
419 
420 /**
421  * DOC: A digital frequency synthesizer
422  *
423  * Traits of this clock:
424  * prepare - clk_(un)prepare only ensures parent is (un)prepared
425  * enable - clk_enable and clk_disable are functional
426  * rate - set rate is functional
427  * parent - fixed parent.  No clk_set_parent support
428  */
429 
430 /**
431  * struct st_clk_quadfs_fsynth - One clock output from a four channel digital
432  *                                  frequency synthesizer (fsynth) block.
433  *
434  * @hw: handle between common and hardware-specific interfaces
435  *
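 * @regs_base: base address of the fsynth configuration registers
 * @lock: spinlock protecting register read-modify-write sequences
 * @data: SoC-specific register layout and ops for this quadfs variant
 * @chan: index (0..3) of this output channel within the quadfs block
 *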
436  * @nsb: regmap field in the output control register for the digital
437  *       standby of this fsynth channel. This control is active low so
438  *       the channel is in standby when the control bit is cleared.
439  *
440  * @nsdiv: regmap field in the output control register for
441  *          the optional divide by 3 of this fsynth channel. This control
442  *          is active low so the divide by 3 is active when the control bit is
443  *          cleared and the divide is bypassed when the bit is set.
444  */
445 struct st_clk_quadfs_fsynth {
446 	struct clk_hw	hw;
447 	void __iomem	*regs_base;
448 	spinlock_t	*lock;
449 	struct clkgen_quadfs_data *data;
450 
451 	u32 chan;
452 	/*
453 	 * Cached hardware values from set_rate so we can program the
454 	 * hardware in enable. There are two reasons for this:
455 	 *
456 	 *  1. The registers may not be writable until the parent has been
457 	 *     enabled.
458 	 *
459 	 *  2. It restores the clock rate when a driver does an enable
460 	 *     on PM restore, after a suspend to RAM has lost the hardware
461 	 *     setup.
462 	 */
463 	u32 md;
464 	u32 pe;
465 	u32 sdiv;
466 	u32 nsdiv;
467 };
468 
469 #define to_quadfs_fsynth(_hw) \
470 	container_of(_hw, struct st_clk_quadfs_fsynth, hw)
471 
472 static void quadfs_fsynth_program_enable(struct st_clk_quadfs_fsynth *fs)
473 {
474 	/*
475 	 * Pulse the program enable register lsb to make the hardware take
476 	 * notice of the new md/pe values with a glitchless transition.
477 	 */
478 	CLKGEN_WRITE(fs, en[fs->chan], 1);
479 	CLKGEN_WRITE(fs, en[fs->chan], 0);
480 }
481 
482 static void quadfs_fsynth_program_rate(struct st_clk_quadfs_fsynth *fs)
483 {
484 	unsigned long flags = 0;
485 
486 	/*
487 	 * Ensure the md/pe parameters are ignored while we are
488 	 * reprogramming them so we can get a glitchless change
489 	 * when fine tuning the speed of a running clock.
490 	 */
491 	CLKGEN_WRITE(fs, en[fs->chan], 0);
492 
493 	CLKGEN_WRITE(fs, mdiv[fs->chan], fs->md);
494 	CLKGEN_WRITE(fs, pe[fs->chan], fs->pe);
495 	CLKGEN_WRITE(fs, sdiv[fs->chan], fs->sdiv);
496 
497 	if (fs->lock)
498 		spin_lock_irqsave(fs->lock, flags);
499 
500 	if (fs->data->nsdiv_present)
501 		CLKGEN_WRITE(fs, nsdiv[fs->chan], fs->nsdiv);
502 
503 	if (fs->lock)
504 		spin_unlock_irqrestore(fs->lock, flags);
505 }
506 
507 static int quadfs_fsynth_enable(struct clk_hw *hw)
508 {
509 	struct st_clk_quadfs_fsynth *fs = to_quadfs_fsynth(hw);
510 	unsigned long flags = 0;
511 
512 	pr_debug("%s: %s\n", __func__, clk_hw_get_name(hw));
513 
514 	quadfs_fsynth_program_rate(fs);
515 
516 	if (fs->lock)
517 		spin_lock_irqsave(fs->lock, flags);
518 
519 	CLKGEN_WRITE(fs, nsb[fs->chan], !fs->data->standby_polarity);
520 
521 	if (fs->data->nrst_present)
522 		CLKGEN_WRITE(fs, nrst[fs->chan], 0);
523 
524 	if (fs->lock)
525 		spin_unlock_irqrestore(fs->lock, flags);
526 
527 	quadfs_fsynth_program_enable(fs);
528 
529 	return 0;
530 }
531 
532 static void quadfs_fsynth_disable(struct clk_hw *hw)
533 {
534 	struct st_clk_quadfs_fsynth *fs = to_quadfs_fsynth(hw);
535 	unsigned long flags = 0;
536 
537 	pr_debug("%s: %s\n", __func__, clk_hw_get_name(hw));
538 
539 	if (fs->lock)
540 		spin_lock_irqsave(fs->lock, flags);
541 
542 	CLKGEN_WRITE(fs, nsb[fs->chan], fs->data->standby_polarity);
543 
544 	if (fs->lock)
545 		spin_unlock_irqrestore(fs->lock, flags);
546 }
547 
548 static int quadfs_fsynth_is_enabled(struct clk_hw *hw)
549 {
550 	struct st_clk_quadfs_fsynth *fs = to_quadfs_fsynth(hw);
551 	u32 nsb = CLKGEN_READ(fs, nsb[fs->chan]);
552 
553 	pr_debug("%s: %s enable bit = 0x%x\n",
554 		 __func__, clk_hw_get_name(hw), nsb);
555 
556 	return fs->data->standby_polarity ? !nsb : !!nsb;
557 }
558 
559 #define P20		((uint64_t)(1 << 20))
560 
561 static int clk_fs660c32_dig_get_rate(unsigned long input,
562 				const struct stm_fs *fs, unsigned long *rate)
563 {
564 	unsigned long s = (1 << fs->sdiv);
565 	unsigned long ns;
566 	uint64_t res;
567 
568 	/*
569 	 * 'nsdiv' is a register value ('BIN') which is translated
570 	 * to a decimal value according to following rules.
571 	 *
572 	 *     nsdiv      ns.dec
573 	 *       0        3
574 	 *       1        1
575 	 */
576 	ns = (fs->nsdiv == 1) ? 1 : 3;
577 
578 	res = (P20 * (32 + fs->mdiv) + 32 * fs->pe) * s * ns;
579 	*rate = (unsigned long)div64_u64(input * P20 * 32, res);
580 
581 	return 0;
582 }
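
/*
 * The computation above implements the FS660C32 digital output formula
 *
 *                          input * 32 * 2^20
 *     rate = ----------------------------------------------
 *            ((32 + md) * 2^20 + 32 * pe) * 2^sdiv * ns
 *
 * with ns = 1 or 3 depending on the nsdiv bit.  Illustrative example
 * (parameters assumed, not from a real configuration): input = 660 MHz,
 * md = 0, pe = 0, sdiv = 2, nsdiv = 1 gives 660 MHz * 32 / (32 * 4) = 165 MHz.
 */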
583 
584 
585 static int clk_fs660c32_get_pe(int m, int si, unsigned long *deviation,
586 		signed long input, unsigned long output, uint64_t *p,
587 		struct stm_fs *fs)
588 {
589 	unsigned long new_freq, new_deviation;
590 	struct stm_fs fs_tmp;
591 	uint64_t val;
592 
593 	val = (uint64_t)output << si;
594 
595 	*p = (uint64_t)input * P20 - (32LL  + (uint64_t)m) * val * (P20 / 32LL);
596 
597 	*p = div64_u64(*p, val);
598 
599 	if (*p > 32767LL)
600 		return 1;
601 
602 	fs_tmp.mdiv = (unsigned long) m;
603 	fs_tmp.pe = (unsigned long)*p;
604 	fs_tmp.sdiv = si;
605 	fs_tmp.nsdiv = 1;
606 
607 	clk_fs660c32_dig_get_rate(input, &fs_tmp, &new_freq);
608 
609 	new_deviation = abs(output - new_freq);
610 
611 	if (new_deviation < *deviation) {
612 		fs->mdiv = m;
613 		fs->pe = (unsigned long)*p;
614 		fs->sdiv = si;
615 		fs->nsdiv = 1;
616 		*deviation = new_deviation;
617 	}
618 	return 0;
619 }
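
/*
 * The pe value computed above comes from solving the digital output formula
 * (see clk_fs660c32_dig_get_rate()) for pe with nsdiv fixed to 1:
 *
 *     pe = (input * 2^20 - (32 + md) * (output << sdiv) * 2^20 / 32)
 *                             / (output << sdiv)
 *
 * Any pe above 32767 does not fit in the 15-bit register field and the
 * candidate is rejected.
 */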
620 
621 static int clk_fs660c32_dig_get_params(unsigned long input,
622 		unsigned long output, struct stm_fs *fs)
623 {
624 	int si;	/* sdiv register value (0 to 8) */
625 	int m; /* md value */
626 	unsigned long new_freq, new_deviation;
627 	/* initial condition to say: "infinite deviation" */
628 	unsigned long deviation = ~0;
629 	uint64_t p, p1, p2;	/* pe value */
630 	int r1, r2;
631 
632 	struct stm_fs fs_tmp;
633 
634 	for (si = 0; (si <= 8) && deviation; si++) {
635 
636 		/* Boundary test to avoid useless iteration */
637 		r1 = clk_fs660c32_get_pe(0, si, &deviation,
638 				input, output, &p1, fs);
639 		r2 = clk_fs660c32_get_pe(31, si, &deviation,
640 				input, output, &p2, fs);
641 
642 		/* No solution */
643 		if (r1 && r2 && (p1 > p2))
644 			continue;
645 
646 		/* Try to find best deviation */
647 		for (m = 1; (m < 31) && deviation; m++)
648 			clk_fs660c32_get_pe(m, si, &deviation,
649 					input, output, &p, fs);
650 
651 	}
652 
653 	if (deviation == ~0) /* No solution found */
654 		return -1;
655 
656 	/* pe fine tuning if deviation not 0: +/- 2 around computed pe value */
657 	if (deviation) {
658 		fs_tmp.mdiv = fs->mdiv;
659 		fs_tmp.sdiv = fs->sdiv;
660 		fs_tmp.nsdiv = fs->nsdiv;
661 
662 		if (fs->pe > 2)
663 			p2 = fs->pe - 2;
664 		else
665 			p2 = 0;
666 
667 		for (; p2 < 32768ll && (p2 <= (fs->pe + 2)); p2++) {
668 			fs_tmp.pe = (unsigned long)p2;
669 
670 			clk_fs660c32_dig_get_rate(input, &fs_tmp, &new_freq);
671 
672 			new_deviation = abs(output - new_freq);
673 
674 			/* Check if this is a better solution */
675 			if (new_deviation < deviation) {
676 				fs->pe = (unsigned long)p2;
677 				deviation = new_deviation;
678 
679 			}
680 		}
681 	}
682 	return 0;
683 }
684 
685 static int quadfs_fsynt_get_hw_value_for_recalc(struct st_clk_quadfs_fsynth *fs,
686 		struct stm_fs *params)
687 {
688 	/*
689 	 * Get the initial hardware values for recalc_rate
690 	 */
691 	params->mdiv	= CLKGEN_READ(fs, mdiv[fs->chan]);
692 	params->pe	= CLKGEN_READ(fs, pe[fs->chan]);
693 	params->sdiv	= CLKGEN_READ(fs, sdiv[fs->chan]);
694 
695 	if (fs->data->nsdiv_present)
696 		params->nsdiv = CLKGEN_READ(fs, nsdiv[fs->chan]);
697 	else
698 		params->nsdiv = 1;
699 
700 	/*
701 	 * If all fields are zero then assume no clock rate is programmed.
702 	 */
703 	if (!params->mdiv && !params->pe && !params->sdiv)
704 		return 1;
705 
706 	fs->md = params->mdiv;
707 	fs->pe = params->pe;
708 	fs->sdiv = params->sdiv;
709 	fs->nsdiv = params->nsdiv;
710 
711 	return 0;
712 }
713 
714 static long quadfs_find_best_rate(struct clk_hw *hw, unsigned long drate,
715 				unsigned long prate, struct stm_fs *params)
716 {
717 	struct st_clk_quadfs_fsynth *fs = to_quadfs_fsynth(hw);
718 	int (*clk_fs_get_rate)(unsigned long,
719 				const struct stm_fs *, unsigned long *);
720 	int (*clk_fs_get_params)(unsigned long, unsigned long, struct stm_fs *);
721 	unsigned long rate = 0;
722 
723 	clk_fs_get_rate = fs->data->get_rate;
724 	clk_fs_get_params = fs->data->get_params;
725 
726 	if (!clk_fs_get_params(prate, drate, params))
727 		clk_fs_get_rate(prate, params, &rate);
728 
729 	return rate;
730 }
731 
732 static unsigned long quadfs_recalc_rate(struct clk_hw *hw,
733 		unsigned long parent_rate)
734 {
735 	struct st_clk_quadfs_fsynth *fs = to_quadfs_fsynth(hw);
736 	unsigned long rate = 0;
737 	struct stm_fs params;
738 	int (*clk_fs_get_rate)(unsigned long,
739 				const struct stm_fs *, unsigned long *);
740 
741 	clk_fs_get_rate = fs->data->get_rate;
742 
743 	if (quadfs_fsynt_get_hw_value_for_recalc(fs, &params))
744 		return 0;
745 
746 	if (clk_fs_get_rate(parent_rate, &params, &rate)) {
747 		pr_err("%s:%s error calculating rate\n",
748 		       clk_hw_get_name(hw), __func__);
749 	}
750 
751 	pr_debug("%s:%s rate %lu\n", clk_hw_get_name(hw), __func__, rate);
752 
753 	return rate;
754 }
755 
756 static long quadfs_round_rate(struct clk_hw *hw, unsigned long rate,
757 				     unsigned long *prate)
758 {
759 	struct stm_fs params;
760 
761 	rate = quadfs_find_best_rate(hw, rate, *prate, &params);
762 
763 	pr_debug("%s: %s new rate %ld [sdiv=0x%x,md=0x%x,pe=0x%x,nsdiv3=%u]\n",
764 		 __func__, clk_hw_get_name(hw),
765 		 rate, (unsigned int)params.sdiv, (unsigned int)params.mdiv,
766 			 (unsigned int)params.pe, (unsigned int)params.nsdiv);
767 
768 	return rate;
769 }
770 
771 
772 static void quadfs_program_and_enable(struct st_clk_quadfs_fsynth *fs,
773 		struct stm_fs *params)
774 {
775 	fs->md = params->mdiv;
776 	fs->pe = params->pe;
777 	fs->sdiv = params->sdiv;
778 	fs->nsdiv = params->nsdiv;
779 
780 	/*
781 	 * In some integrations you can only change the fsynth programming when
782 	 * the parent entity containing it is enabled.
783 	 */
784 	quadfs_fsynth_program_rate(fs);
785 	quadfs_fsynth_program_enable(fs);
786 }
787 
788 static int quadfs_set_rate(struct clk_hw *hw, unsigned long rate,
789 				  unsigned long parent_rate)
790 {
791 	struct st_clk_quadfs_fsynth *fs = to_quadfs_fsynth(hw);
792 	struct stm_fs params;
793 	long hwrate;
795 
796 	if (!rate || !parent_rate)
797 		return -EINVAL;
798 
799 	memset(&params, 0, sizeof(struct stm_fs));
800 
801 	hwrate = quadfs_find_best_rate(hw, rate, parent_rate, &params);
802 	if (!hwrate)
803 		return -EINVAL;
804 
805 	quadfs_program_and_enable(fs, &params);
806 
807 	return 0;
808 }
809 
810 
811 
812 static const struct clk_ops st_quadfs_ops = {
813 	.enable		= quadfs_fsynth_enable,
814 	.disable	= quadfs_fsynth_disable,
815 	.is_enabled	= quadfs_fsynth_is_enabled,
816 	.round_rate	= quadfs_round_rate,
817 	.set_rate	= quadfs_set_rate,
818 	.recalc_rate	= quadfs_recalc_rate,
819 };
820 
821 static struct clk * __init st_clk_register_quadfs_fsynth(
822 		const char *name, const char *parent_name,
823 		struct clkgen_quadfs_data *quadfs, void __iomem *reg, u32 chan,
824 		unsigned long flags, spinlock_t *lock)
825 {
826 	struct st_clk_quadfs_fsynth *fs;
827 	struct clk *clk;
828 	struct clk_init_data init;
829 
830 	/*
831 	 * Sanity check required pointers, note that nsdiv3 is optional.
832 	 */
833 	if (WARN_ON(!name || !parent_name))
834 		return ERR_PTR(-EINVAL);
835 
836 	fs = kzalloc(sizeof(*fs), GFP_KERNEL);
837 	if (!fs)
838 		return ERR_PTR(-ENOMEM);
839 
840 	init.name = name;
841 	init.ops = &st_quadfs_ops;
842 	init.flags = flags | CLK_GET_RATE_NOCACHE;
843 	init.parent_names = &parent_name;
844 	init.num_parents = 1;
845 
846 	fs->data = quadfs;
847 	fs->regs_base = reg;
848 	fs->chan = chan;
849 	fs->lock = lock;
850 	fs->hw.init = &init;
851 
852 	clk = clk_register(NULL, &fs->hw);
853 
854 	if (IS_ERR(clk))
855 		kfree(fs);
856 
857 	return clk;
858 }
859 
860 static void __init st_of_create_quadfs_fsynths(
861 		struct device_node *np, const char *pll_name,
862 		struct clkgen_quadfs_data *quadfs, void __iomem *reg,
863 		spinlock_t *lock)
864 {
865 	struct clk_onecell_data *clk_data;
866 	int fschan;
867 
868 	clk_data = kzalloc(sizeof(*clk_data), GFP_KERNEL);
869 	if (!clk_data)
870 		return;
871 
872 	clk_data->clk_num = QUADFS_MAX_CHAN;
873 	clk_data->clks = kcalloc(QUADFS_MAX_CHAN, sizeof(struct clk *),
874 				 GFP_KERNEL);
875 
876 	if (!clk_data->clks) {
877 		kfree(clk_data);
878 		return;
879 	}
880 
881 	for (fschan = 0; fschan < QUADFS_MAX_CHAN; fschan++) {
882 		struct clk *clk;
883 		const char *clk_name;
884 		unsigned long flags = 0;
885 
886 		if (of_property_read_string_index(np, "clock-output-names",
887 						  fschan, &clk_name)) {
888 			break;
889 		}
890 
891 		/*
892 		 * If we read an empty clock name then the channel is unused
893 		 */
894 		if (*clk_name == '\0')
895 			continue;
896 
897 		of_clk_detect_critical(np, fschan, &flags);
898 
899 		clk = st_clk_register_quadfs_fsynth(clk_name, pll_name,
900 						    quadfs, reg, fschan,
901 						    flags, lock);
902 
903 		/*
904 		 * If there was an error registering this clock output, leave
905 		 * its slot empty and move on to the next one.
906 		 */
907 		if (!IS_ERR(clk)) {
908 			clk_data->clks[fschan] = clk;
909 			pr_debug("%s: parent %s rate %u\n",
910 				__clk_get_name(clk),
911 				__clk_get_name(clk_get_parent(clk)),
912 				(unsigned int)clk_get_rate(clk));
913 		}
914 	}
915 
916 	of_clk_add_provider(np, of_clk_src_onecell_get, clk_data);
917 }
918 
919 static void __init st_of_quadfs_setup(struct device_node *np,
920 		struct clkgen_quadfs_data *data)
921 {
922 	struct clk *clk;
923 	const char *pll_name, *clk_parent_name;
924 	void __iomem *reg;
925 	spinlock_t *lock;
926 
927 	reg = of_iomap(np, 0);
928 	if (!reg)
929 		return;
930 
931 	clk_parent_name = of_clk_get_parent_name(np, 0);
932 	if (!clk_parent_name)
933 		return;
934 
935 	pll_name = kasprintf(GFP_KERNEL, "%pOFn.pll", np);
936 	if (!pll_name)
937 		return;
938 
939 	lock = kzalloc(sizeof(*lock), GFP_KERNEL);
940 	if (!lock)
941 		goto err_exit;
942 
943 	spin_lock_init(lock);
944 
945 	clk = st_clk_register_quadfs_pll(pll_name, clk_parent_name, data,
946 			reg, lock);
947 	if (IS_ERR(clk))
948 		goto err_exit;
949 	else
950 		pr_debug("%s: parent %s rate %u\n",
951 			__clk_get_name(clk),
952 			__clk_get_name(clk_get_parent(clk)),
953 			(unsigned int)clk_get_rate(clk));
954 
955 	st_of_create_quadfs_fsynths(np, pll_name, data, reg, lock);
956 
957 err_exit:
958 	kfree(pll_name); /* No longer need local copy of the PLL name */
959 }
960 
961 static void __init st_of_quadfs660C_setup(struct device_node *np)
962 {
963 	st_of_quadfs_setup(np, (struct clkgen_quadfs_data *) &st_fs660c32_C);
964 }
965 CLK_OF_DECLARE(quadfs660C, "st,quadfs-pll", st_of_quadfs660C_setup);
966 
967 static void __init st_of_quadfs660D_setup(struct device_node *np)
968 {
969 	st_of_quadfs_setup(np, (struct clkgen_quadfs_data *) &st_fs660c32_D);
970 }
971 CLK_OF_DECLARE(quadfs660D, "st,quadfs", st_of_quadfs660D_setup);
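
/*
 * Illustrative device tree fragment (node name, unit address and output
 * names are examples only, not taken from a real SoC) showing how a quadfs
 * block handled by this driver is typically described:
 *
 *	clockgen_fs0: clock-controller@9104000 {
 *		compatible = "st,quadfs";
 *		#clock-cells = <1>;
 *		reg = <0x9104000 0x1000>;
 *		clocks = <&clk_sysin>;
 *		clock-output-names = "clk-fs-ch0", "", "clk-fs-ch2", "clk-fs-ch3";
 *	};
 *
 * Each non-empty entry in clock-output-names creates one fsynth channel; an
 * empty string leaves that channel unused (see st_of_create_quadfs_fsynths()).
 */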
972