xref: /linux/drivers/clk/qcom/clk-rcg2.c (revision 0be3ff0c)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (c) 2013, 2018, The Linux Foundation. All rights reserved.
4  */
5 
6 #include <linux/kernel.h>
7 #include <linux/bitops.h>
8 #include <linux/err.h>
9 #include <linux/bug.h>
10 #include <linux/export.h>
11 #include <linux/clk-provider.h>
12 #include <linux/delay.h>
13 #include <linux/rational.h>
14 #include <linux/regmap.h>
15 #include <linux/math64.h>
16 #include <linux/slab.h>
17 
18 #include <asm/div64.h>
19 
20 #include "clk-rcg.h"
21 #include "common.h"
22 
/* CMD register: latches new configuration and controls the root clock */
#define CMD_REG			0x0
#define CMD_UPDATE		BIT(0)	/* write 1 to latch CFG/M/N/D; HW clears when done */
#define CMD_ROOT_EN		BIT(1)	/* force the root clock on */
#define CMD_DIRTY_CFG		BIT(4)
#define CMD_DIRTY_N		BIT(5)
#define CMD_DIRTY_M		BIT(6)
#define CMD_DIRTY_D		BIT(7)
#define CMD_ROOT_OFF		BIT(31)	/* set while the root clock is off */

/* CFG register: source select, pre-divider and MND mode fields */
#define CFG_REG			0x4
#define CFG_SRC_DIV_SHIFT	0
#define CFG_SRC_SEL_SHIFT	8
#define CFG_SRC_SEL_MASK	(0x7 << CFG_SRC_SEL_SHIFT)
#define CFG_MODE_SHIFT		12
#define CFG_MODE_MASK		(0x3 << CFG_MODE_SHIFT)
#define CFG_MODE_DUAL_EDGE	(0x2 << CFG_MODE_SHIFT)
#define CFG_HW_CLK_CTRL_MASK	BIT(20)

/* M/N/D counter register offsets (relative to cmd_rcgr + cfg_off) */
#define M_REG			0x8
#define N_REG			0xc
#define D_REG			0x10

#define RCG_CFG_OFFSET(rcg)	((rcg)->cmd_rcgr + (rcg)->cfg_off + CFG_REG)
#define RCG_M_OFFSET(rcg)	((rcg)->cmd_rcgr + (rcg)->cfg_off + M_REG)
#define RCG_N_OFFSET(rcg)	((rcg)->cmd_rcgr + (rcg)->cfg_off + N_REG)
#define RCG_D_OFFSET(rcg)	((rcg)->cmd_rcgr + (rcg)->cfg_off + D_REG)

/* Dynamic Frequency Scaling */
#define MAX_PERF_LEVEL		8
#define SE_CMD_DFSR_OFFSET	0x14
#define SE_CMD_DFS_EN		BIT(0)
#define SE_PERF_DFSR(level)	(0x1c + 0x4 * (level))
#define SE_PERF_M_DFSR(level)	(0x5c + 0x4 * (level))
#define SE_PERF_N_DFSR(level)	(0x9c + 0x4 * (level))
57 
/* Rounding policy used when matching a requested rate to a freq table entry */
enum freq_policy {
	FLOOR,	/* highest table frequency <= request */
	CEIL,	/* lowest table frequency >= request */
};
62 
63 static int clk_rcg2_is_enabled(struct clk_hw *hw)
64 {
65 	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
66 	u32 cmd;
67 	int ret;
68 
69 	ret = regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG, &cmd);
70 	if (ret)
71 		return ret;
72 
73 	return (cmd & CMD_ROOT_OFF) == 0;
74 }
75 
76 static u8 clk_rcg2_get_parent(struct clk_hw *hw)
77 {
78 	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
79 	int num_parents = clk_hw_get_num_parents(hw);
80 	u32 cfg;
81 	int i, ret;
82 
83 	ret = regmap_read(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg), &cfg);
84 	if (ret)
85 		goto err;
86 
87 	cfg &= CFG_SRC_SEL_MASK;
88 	cfg >>= CFG_SRC_SEL_SHIFT;
89 
90 	for (i = 0; i < num_parents; i++)
91 		if (cfg == rcg->parent_map[i].cfg)
92 			return i;
93 
94 err:
95 	pr_debug("%s: Clock %s has invalid parent, using default.\n",
96 		 __func__, clk_hw_get_name(hw));
97 	return 0;
98 }
99 
100 static int update_config(struct clk_rcg2 *rcg)
101 {
102 	int count, ret;
103 	u32 cmd;
104 	struct clk_hw *hw = &rcg->clkr.hw;
105 	const char *name = clk_hw_get_name(hw);
106 
107 	ret = regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG,
108 				 CMD_UPDATE, CMD_UPDATE);
109 	if (ret)
110 		return ret;
111 
112 	/* Wait for update to take effect */
113 	for (count = 500; count > 0; count--) {
114 		ret = regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG, &cmd);
115 		if (ret)
116 			return ret;
117 		if (!(cmd & CMD_UPDATE))
118 			return 0;
119 		udelay(1);
120 	}
121 
122 	WARN(1, "%s: rcg didn't update its configuration.", name);
123 	return -EBUSY;
124 }
125 
126 static int clk_rcg2_set_parent(struct clk_hw *hw, u8 index)
127 {
128 	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
129 	int ret;
130 	u32 cfg = rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT;
131 
132 	ret = regmap_update_bits(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg),
133 				 CFG_SRC_SEL_MASK, cfg);
134 	if (ret)
135 		return ret;
136 
137 	return update_config(rcg);
138 }
139 
140 /*
141  * Calculate m/n:d rate
142  *
143  *          parent_rate     m
144  *   rate = ----------- x  ---
145  *            hid_div       n
146  */
147 static unsigned long
148 calc_rate(unsigned long rate, u32 m, u32 n, u32 mode, u32 hid_div)
149 {
150 	if (hid_div) {
151 		rate *= 2;
152 		rate /= hid_div + 1;
153 	}
154 
155 	if (mode) {
156 		u64 tmp = rate;
157 		tmp *= m;
158 		do_div(tmp, n);
159 		rate = tmp;
160 	}
161 
162 	return rate;
163 }
164 
/*
 * Read back the CFG (and, for MND clocks, M/N) registers and convert
 * them to an output rate relative to @parent_rate.
 */
static unsigned long
clk_rcg2_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	u32 cfg, hid_div, m = 0, n = 0, mode = 0, mask;

	regmap_read(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg), &cfg);

	if (rcg->mnd_width) {
		mask = BIT(rcg->mnd_width) - 1;
		regmap_read(rcg->clkr.regmap, RCG_M_OFFSET(rcg), &m);
		m &= mask;
		/* N register holds ~(n - m); recover n by inverting and adding m */
		regmap_read(rcg->clkr.regmap, RCG_N_OFFSET(rcg), &n);
		n =  ~n;
		n &= mask;
		n += m;
		mode = cfg & CFG_MODE_MASK;
		mode >>= CFG_MODE_SHIFT;
	}

	/* Half-integer pre-divider encoding lives in the low CFG bits */
	mask = BIT(rcg->hid_width) - 1;
	hid_div = cfg >> CFG_SRC_DIV_SHIFT;
	hid_div &= mask;

	return calc_rate(parent_rate, m, n, mode, hid_div);
}
191 
/*
 * Pick a frequency-table entry for req->rate (rounded per @policy) and
 * fill in the rate request: chosen parent, required parent rate and the
 * achievable rate. With CLK_SET_RATE_PARENT, the parent rate is derived
 * by undoing the entry's pre-divider and m/n scaling.
 */
static int _freq_tbl_determine_rate(struct clk_hw *hw, const struct freq_tbl *f,
				    struct clk_rate_request *req,
				    enum freq_policy policy)
{
	unsigned long clk_flags, rate = req->rate;
	struct clk_hw *p;
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	int index;

	switch (policy) {
	case FLOOR:
		f = qcom_find_freq_floor(f, rate);
		break;
	case CEIL:
		f = qcom_find_freq(f, rate);
		break;
	default:
		return -EINVAL;
	}

	if (!f)
		return -EINVAL;

	index = qcom_find_src_index(hw, rcg->parent_map, f->src);
	if (index < 0)
		return index;

	clk_flags = clk_hw_get_flags(hw);
	p = clk_hw_get_parent_by_index(hw, index);
	if (!p)
		return -EINVAL;

	if (clk_flags & CLK_SET_RATE_PARENT) {
		rate = f->freq;
		if (f->pre_div) {
			/* pre_div encodes 2*div-1: parent = rate/2*(pre_div+1) */
			if (!rate)
				rate = req->rate;
			rate /= 2;
			rate *= f->pre_div + 1;
		}

		if (f->n) {
			/* Invert the m/n fraction: parent = rate * n / m */
			u64 tmp = rate;
			tmp = tmp * f->n;
			do_div(tmp, f->m);
			rate = tmp;
		}
	} else {
		rate =  clk_hw_get_rate(p);
	}
	req->best_parent_hw = p;
	req->best_parent_rate = rate;
	req->rate = f->freq;

	return 0;
}
248 
249 static int clk_rcg2_determine_rate(struct clk_hw *hw,
250 				   struct clk_rate_request *req)
251 {
252 	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
253 
254 	return _freq_tbl_determine_rate(hw, rcg->freq_tbl, req, CEIL);
255 }
256 
257 static int clk_rcg2_determine_floor_rate(struct clk_hw *hw,
258 					 struct clk_rate_request *req)
259 {
260 	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
261 
262 	return _freq_tbl_determine_rate(hw, rcg->freq_tbl, req, FLOOR);
263 }
264 
/*
 * Stage the CFG/M/N/D registers for frequency-table entry @f without
 * triggering the update; callers latch via update_config().
 */
static int __clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f)
{
	u32 cfg, mask, d_val, not2d_val, n_minus_m;
	struct clk_hw *hw = &rcg->clkr.hw;
	int ret, index = qcom_find_src_index(hw, rcg->parent_map, f->src);

	if (index < 0)
		return index;

	if (rcg->mnd_width && f->n) {
		mask = BIT(rcg->mnd_width) - 1;
		ret = regmap_update_bits(rcg->clkr.regmap,
				RCG_M_OFFSET(rcg), mask, f->m);
		if (ret)
			return ret;

		/* N register is written as ~(n - m) */
		ret = regmap_update_bits(rcg->clkr.regmap,
				RCG_N_OFFSET(rcg), mask, ~(f->n - f->m));
		if (ret)
			return ret;

		/* Calculate 2d value */
		d_val = f->n;

		n_minus_m = f->n - f->m;
		n_minus_m *= 2;

		/* Keep 2d within [m, 2*(n - m)]; D register stores ~(2d) */
		d_val = clamp_t(u32, d_val, f->m, n_minus_m);
		not2d_val = ~d_val & mask;

		ret = regmap_update_bits(rcg->clkr.regmap,
				RCG_D_OFFSET(rcg), mask, not2d_val);
		if (ret)
			return ret;
	}

	/* Update divider, source select, mode and HW clock control in one go */
	mask = BIT(rcg->hid_width) - 1;
	mask |= CFG_SRC_SEL_MASK | CFG_MODE_MASK | CFG_HW_CLK_CTRL_MASK;
	cfg = f->pre_div << CFG_SRC_DIV_SHIFT;
	cfg |= rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT;
	/* Dual-edge mode only when a real (non-unity) m/n fraction is in use */
	if (rcg->mnd_width && f->n && (f->m != f->n))
		cfg |= CFG_MODE_DUAL_EDGE;
	return regmap_update_bits(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg),
					mask, cfg);
}
310 
/* Stage the registers for @f and immediately latch the new configuration. */
static int clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f)
{
	int ret = __clk_rcg2_configure(rcg, f);

	return ret ? ret : update_config(rcg);
}
321 
322 static int __clk_rcg2_set_rate(struct clk_hw *hw, unsigned long rate,
323 			       enum freq_policy policy)
324 {
325 	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
326 	const struct freq_tbl *f;
327 
328 	switch (policy) {
329 	case FLOOR:
330 		f = qcom_find_freq_floor(rcg->freq_tbl, rate);
331 		break;
332 	case CEIL:
333 		f = qcom_find_freq(rcg->freq_tbl, rate);
334 		break;
335 	default:
336 		return -EINVAL;
337 	}
338 
339 	if (!f)
340 		return -EINVAL;
341 
342 	return clk_rcg2_configure(rcg, f);
343 }
344 
/* set_rate callback for the standard (CEIL-rounding) RCG2 ops. */
static int clk_rcg2_set_rate(struct clk_hw *hw, unsigned long rate,
			    unsigned long parent_rate)
{
	return __clk_rcg2_set_rate(hw, rate, CEIL);
}
350 
/* set_rate callback for the FLOOR-rounding RCG2 ops. */
static int clk_rcg2_set_floor_rate(struct clk_hw *hw, unsigned long rate,
				   unsigned long parent_rate)
{
	return __clk_rcg2_set_rate(hw, rate, FLOOR);
}
356 
/* Combined rate+parent change; the parent is encoded in the freq table. */
static int clk_rcg2_set_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	return __clk_rcg2_set_rate(hw, rate, CEIL);
}
362 
/* FLOOR-rounding variant of the combined rate+parent change. */
static int clk_rcg2_set_floor_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	return __clk_rcg2_set_rate(hw, rate, FLOOR);
}
368 
/*
 * Report the duty cycle as d/n read back from the D, M and N registers.
 * Non-MND RCGs, or MND registers that are all zero, run at 50 %.
 */
static int clk_rcg2_get_duty_cycle(struct clk_hw *hw, struct clk_duty *duty)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	u32 notn_m, n, m, d, not2d, mask;

	if (!rcg->mnd_width) {
		/* 50 % duty-cycle for Non-MND RCGs */
		duty->num = 1;
		duty->den = 2;
		return 0;
	}

	regmap_read(rcg->clkr.regmap, RCG_D_OFFSET(rcg), &not2d);
	regmap_read(rcg->clkr.regmap, RCG_M_OFFSET(rcg), &m);
	regmap_read(rcg->clkr.regmap, RCG_N_OFFSET(rcg), &notn_m);

	if (!not2d && !m && !notn_m) {
		/* 50 % duty-cycle always */
		duty->num = 1;
		duty->den = 2;
		return 0;
	}

	mask = BIT(rcg->mnd_width) - 1;

	/* D register stores ~(2d); recover d */
	d = ~(not2d) & mask;
	d = DIV_ROUND_CLOSEST(d, 2);

	/* N register stores ~(n - m); recover n */
	n = (~(notn_m) + m) & mask;

	duty->num = d;
	duty->den = n;

	return 0;
}
404 
/*
 * Program the D register so the output approximates duty->num/duty->den,
 * then latch the change. Only possible on MND-capable RCGs.
 */
static int clk_rcg2_set_duty_cycle(struct clk_hw *hw, struct clk_duty *duty)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	u32 notn_m, n, m, d, not2d, mask, duty_per;
	int ret;

	/* Duty-cycle cannot be modified for non-MND RCGs */
	if (!rcg->mnd_width)
		return -EINVAL;

	mask = BIT(rcg->mnd_width) - 1;

	regmap_read(rcg->clkr.regmap, RCG_N_OFFSET(rcg), &notn_m);
	regmap_read(rcg->clkr.regmap, RCG_M_OFFSET(rcg), &m);

	/* N register stores ~(n - m); recover n */
	n = (~(notn_m) + m) & mask;

	/* Integer percentage: sub-1% precision is truncated */
	duty_per = (duty->num * 100) / duty->den;

	/* Calculate 2d value */
	d = DIV_ROUND_CLOSEST(n * duty_per * 2, 100);

	 /* Check bit widths of 2d. If D is too big reduce duty cycle. */
	if (d > mask)
		d = mask;

	/* Clamp 2d into the range the hardware supports relative to m and n */
	if ((d / 2) > (n - m))
		d = (n - m) * 2;
	else if ((d / 2) < (m / 2))
		d = m;

	/*
	 * NOTE(review): a very small request can still yield d == 0 here,
	 * writing ~0 to the D register — confirm the HW accepts that.
	 */
	not2d = ~d & mask;

	ret = regmap_update_bits(rcg->clkr.regmap, RCG_D_OFFSET(rcg), mask,
				 not2d);
	if (ret)
		return ret;

	return update_config(rcg);
}
445 
/* Standard RCG2 ops: frequency selection rounds up (CEIL) in the table. */
const struct clk_ops clk_rcg2_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.determine_rate = clk_rcg2_determine_rate,
	.set_rate = clk_rcg2_set_rate,
	.set_rate_and_parent = clk_rcg2_set_rate_and_parent,
	.get_duty_cycle = clk_rcg2_get_duty_cycle,
	.set_duty_cycle = clk_rcg2_set_duty_cycle,
};
EXPORT_SYMBOL_GPL(clk_rcg2_ops);
458 
/* RCG2 ops that round down (FLOOR) when matching a requested frequency. */
const struct clk_ops clk_rcg2_floor_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.determine_rate = clk_rcg2_determine_floor_rate,
	.set_rate = clk_rcg2_set_floor_rate,
	.set_rate_and_parent = clk_rcg2_set_floor_rate_and_parent,
	.get_duty_cycle = clk_rcg2_get_duty_cycle,
	.set_duty_cycle = clk_rcg2_set_duty_cycle,
};
EXPORT_SYMBOL_GPL(clk_rcg2_floor_ops);
471 
/* One m/n fraction (num/den) usable by the pixel/eDP fractional clocks */
struct frac_entry {
	int num;	/* numerator (maps to m) */
	int den;	/* denominator (maps to n) */
};
476 
/* Fractions for a 675 MHz source; zero-terminated */
static const struct frac_entry frac_table_675m[] = {	/* link rate of 270M */
	{ 52, 295 },	/* 119 M */
	{ 11, 57 },	/* 130.25 M */
	{ 63, 307 },	/* 138.50 M */
	{ 11, 50 },	/* 148.50 M */
	{ 47, 206 },	/* 154 M */
	{ 31, 100 },	/* 205.25 M */
	{ 107, 269 },	/* 268.50 M */
	{ },
};
487 
488 static struct frac_entry frac_table_810m[] = { /* Link rate of 162M */
489 	{ 31, 211 },	/* 119 M */
490 	{ 32, 199 },	/* 130.25 M */
491 	{ 63, 307 },	/* 138.50 M */
492 	{ 11, 60 },	/* 148.50 M */
493 	{ 50, 263 },	/* 154 M */
494 	{ 31, 120 },	/* 205.25 M */
495 	{ 119, 359 },	/* 268.50 M */
496 	{ },
497 };
498 
/*
 * Set an eDP pixel rate: scan the fraction table matching the fixed
 * parent rate for an m/n whose required source rate is within tolerance,
 * keeping the pre-divider currently programmed in CFG.
 */
static int clk_edp_pixel_set_rate(struct clk_hw *hw, unsigned long rate,
			      unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	struct freq_tbl f = *rcg->freq_tbl;
	const struct frac_entry *frac;
	int delta = 100000;	/* matching tolerance, in Hz */
	s64 src_rate = parent_rate;
	s64 request;
	u32 mask = BIT(rcg->hid_width) - 1;
	u32 hid_div;

	if (src_rate == 810000000)
		frac = frac_table_810m;
	else
		frac = frac_table_675m;

	for (; frac->num; frac++) {
		/* Source rate this fraction would need to produce @rate */
		request = rate;
		request *= frac->den;
		request = div_s64(request, frac->num);
		if ((src_rate < (request - delta)) ||
		    (src_rate > (request + delta)))
			continue;

		/* Reuse the currently programmed pre-divider */
		regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
				&hid_div);
		f.pre_div = hid_div;
		f.pre_div >>= CFG_SRC_DIV_SHIFT;
		f.pre_div &= mask;
		f.m = frac->num;
		f.n = frac->den;

		return clk_rcg2_configure(rcg, &f);
	}

	return -EINVAL;
}
537 
static int clk_edp_pixel_set_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	/* Parent index is set statically in frequency table */
	return clk_edp_pixel_set_rate(hw, rate, parent_rate);
}
544 
/*
 * Determine the achievable eDP pixel rate. The parent is forced from the
 * frequency table; the fraction table is scanned for an m/n that maps
 * the parent rate to the request within tolerance.
 */
static int clk_edp_pixel_determine_rate(struct clk_hw *hw,
					struct clk_rate_request *req)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	const struct freq_tbl *f = rcg->freq_tbl;
	const struct frac_entry *frac;
	int delta = 100000;	/* matching tolerance, in Hz */
	s64 request;
	u32 mask = BIT(rcg->hid_width) - 1;
	u32 hid_div;
	/* NOTE(review): index is not checked for < 0 before use below */
	int index = qcom_find_src_index(hw, rcg->parent_map, f->src);

	/* Force the correct parent */
	req->best_parent_hw = clk_hw_get_parent_by_index(hw, index);
	req->best_parent_rate = clk_hw_get_rate(req->best_parent_hw);

	if (req->best_parent_rate == 810000000)
		frac = frac_table_810m;
	else
		frac = frac_table_675m;

	for (; frac->num; frac++) {
		/* Parent rate this fraction would need for req->rate */
		request = req->rate;
		request *= frac->den;
		request = div_s64(request, frac->num);
		if ((req->best_parent_rate < (request - delta)) ||
		    (req->best_parent_rate > (request + delta)))
			continue;

		/* Keep the currently programmed pre-divider */
		regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
				&hid_div);
		hid_div >>= CFG_SRC_DIV_SHIFT;
		hid_div &= mask;

		req->rate = calc_rate(req->best_parent_rate,
				      frac->num, frac->den,
				      !!frac->den, hid_div);
		return 0;
	}

	return -EINVAL;
}
587 
/* Ops for the eDP pixel clock (fixed parent, fractional m/n search). */
const struct clk_ops clk_edp_pixel_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.set_rate = clk_edp_pixel_set_rate,
	.set_rate_and_parent = clk_edp_pixel_set_rate_and_parent,
	.determine_rate = clk_edp_pixel_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_edp_pixel_ops);
598 
/*
 * DSI byte clock: parent is fixed by the frequency table; round the
 * parent as close to the request as possible and derive the final rate
 * through the half-integer divider alone (no m/n).
 */
static int clk_byte_determine_rate(struct clk_hw *hw,
				   struct clk_rate_request *req)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	const struct freq_tbl *f = rcg->freq_tbl;
	int index = qcom_find_src_index(hw, rcg->parent_map, f->src);
	unsigned long parent_rate, div;
	u32 mask = BIT(rcg->hid_width) - 1;
	struct clk_hw *p;

	if (req->rate == 0)
		return -EINVAL;

	req->best_parent_hw = p = clk_hw_get_parent_by_index(hw, index);
	req->best_parent_rate = parent_rate = clk_hw_round_rate(p, req->rate);

	/* The divider field encodes 2*divider - 1 */
	div = DIV_ROUND_UP((2 * parent_rate), req->rate) - 1;
	div = min_t(u32, div, mask);

	req->rate = calc_rate(parent_rate, 0, 0, 0, div);

	return 0;
}
622 
623 static int clk_byte_set_rate(struct clk_hw *hw, unsigned long rate,
624 			 unsigned long parent_rate)
625 {
626 	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
627 	struct freq_tbl f = *rcg->freq_tbl;
628 	unsigned long div;
629 	u32 mask = BIT(rcg->hid_width) - 1;
630 
631 	div = DIV_ROUND_UP((2 * parent_rate), rate) - 1;
632 	div = min_t(u32, div, mask);
633 
634 	f.pre_div = div;
635 
636 	return clk_rcg2_configure(rcg, &f);
637 }
638 
static int clk_byte_set_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	/* Parent index is set statically in frequency table */
	return clk_byte_set_rate(hw, rate, parent_rate);
}
645 
/* Ops for the DSI byte clock (fixed parent, divider-only). */
const struct clk_ops clk_byte_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.set_rate = clk_byte_set_rate,
	.set_rate_and_parent = clk_byte_set_rate_and_parent,
	.determine_rate = clk_byte_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_byte_ops);
656 
/*
 * byte2 variant: keep whatever parent the framework proposed
 * (req->best_parent_hw), round it and compute the divider-only rate.
 */
static int clk_byte2_determine_rate(struct clk_hw *hw,
				    struct clk_rate_request *req)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	unsigned long parent_rate, div;
	u32 mask = BIT(rcg->hid_width) - 1;
	struct clk_hw *p;
	unsigned long rate = req->rate;

	if (rate == 0)
		return -EINVAL;

	p = req->best_parent_hw;
	req->best_parent_rate = parent_rate = clk_hw_round_rate(p, rate);

	/* The divider field encodes 2*divider - 1 */
	div = DIV_ROUND_UP((2 * parent_rate), rate) - 1;
	div = min_t(u32, div, mask);

	req->rate = calc_rate(parent_rate, 0, 0, 0, div);

	return 0;
}
679 
/*
 * Set the byte2 rate while keeping the parent currently selected in
 * hardware: read the CFG source field back and map it to the matching
 * frequency-table src before configuring.
 */
static int clk_byte2_set_rate(struct clk_hw *hw, unsigned long rate,
			 unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	struct freq_tbl f = { 0 };
	unsigned long div;
	int i, num_parents = clk_hw_get_num_parents(hw);
	u32 mask = BIT(rcg->hid_width) - 1;
	u32 cfg;

	/* The divider field encodes 2*divider - 1 */
	div = DIV_ROUND_UP((2 * parent_rate), rate) - 1;
	div = min_t(u32, div, mask);

	f.pre_div = div;

	regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);
	cfg &= CFG_SRC_SEL_MASK;
	cfg >>= CFG_SRC_SEL_SHIFT;

	for (i = 0; i < num_parents; i++) {
		if (cfg == rcg->parent_map[i].cfg) {
			f.src = rcg->parent_map[i].src;
			return clk_rcg2_configure(rcg, &f);
		}
	}

	/* Hardware source select matched no parent-map entry */
	return -EINVAL;
}
708 
static int clk_byte2_set_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	/* Read the hardware to determine parent during set_rate */
	return clk_byte2_set_rate(hw, rate, parent_rate);
}
715 
/* Ops for the byte2 clock (parent taken from current HW selection). */
const struct clk_ops clk_byte2_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.set_rate = clk_byte2_set_rate,
	.set_rate_and_parent = clk_byte2_set_rate_and_parent,
	.determine_rate = clk_byte2_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_byte2_ops);
726 
/* m/n fractions tried for the generic pixel clock; zero-terminated */
static const struct frac_entry frac_table_pixel[] = {
	{ 3, 8 },
	{ 2, 9 },
	{ 4, 9 },
	{ 1, 1 },
	{ 2, 3 },
	{ }
};
735 
/*
 * Try each supported pixel fraction, asking the proposed parent to
 * round to the corresponding source rate, until one lands within
 * tolerance of the request.
 */
static int clk_pixel_determine_rate(struct clk_hw *hw,
				    struct clk_rate_request *req)
{
	unsigned long request, src_rate;
	int delta = 100000;	/* matching tolerance, in Hz */
	const struct frac_entry *frac = frac_table_pixel;

	for (; frac->num; frac++) {
		/* Source rate this fraction would need for req->rate */
		request = (req->rate * frac->den) / frac->num;

		src_rate = clk_hw_round_rate(req->best_parent_hw, request);
		if ((src_rate < (request - delta)) ||
			(src_rate > (request + delta)))
			continue;

		req->best_parent_rate = src_rate;
		req->rate = (src_rate * frac->num) / frac->den;
		return 0;
	}

	return -EINVAL;
}
758 
/*
 * Set the pixel clock rate: resolve the freq-table src from the parent
 * currently selected in hardware, then pick the m/n fraction whose
 * required source rate matches @parent_rate within tolerance, keeping
 * the currently programmed pre-divider.
 */
static int clk_pixel_set_rate(struct clk_hw *hw, unsigned long rate,
		unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	struct freq_tbl f = { 0 };
	const struct frac_entry *frac = frac_table_pixel;
	unsigned long request;
	int delta = 100000;	/* matching tolerance, in Hz */
	u32 mask = BIT(rcg->hid_width) - 1;
	u32 hid_div, cfg;
	int i, num_parents = clk_hw_get_num_parents(hw);

	/* Map the HW source-select back to a freq-table src */
	regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);
	cfg &= CFG_SRC_SEL_MASK;
	cfg >>= CFG_SRC_SEL_SHIFT;

	for (i = 0; i < num_parents; i++)
		if (cfg == rcg->parent_map[i].cfg) {
			f.src = rcg->parent_map[i].src;
			break;
		}

	for (; frac->num; frac++) {
		/* Source rate this fraction would need to produce @rate */
		request = (rate * frac->den) / frac->num;

		if ((parent_rate < (request - delta)) ||
			(parent_rate > (request + delta)))
			continue;

		/* Reuse the currently programmed pre-divider */
		regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
				&hid_div);
		f.pre_div = hid_div;
		f.pre_div >>= CFG_SRC_DIV_SHIFT;
		f.pre_div &= mask;
		f.m = frac->num;
		f.n = frac->den;

		return clk_rcg2_configure(rcg, &f);
	}
	return -EINVAL;
}
800 
/* Parent is resolved from hardware inside clk_pixel_set_rate(). */
static int clk_pixel_set_rate_and_parent(struct clk_hw *hw, unsigned long rate,
		unsigned long parent_rate, u8 index)
{
	return clk_pixel_set_rate(hw, rate, parent_rate);
}
806 
/* Ops for the generic pixel clock (fractional m/n search). */
const struct clk_ops clk_pixel_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.set_rate = clk_pixel_set_rate,
	.set_rate_and_parent = clk_pixel_set_rate_and_parent,
	.determine_rate = clk_pixel_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_pixel_ops);
817 
/*
 * GFX3D rate selection. The RCG "ping-pongs" between two variable PLLs
 * (p1/p2) so one can be reprogrammed while the other drives the clock;
 * p0 is a fixed-rate PLL and index 0 is the XO. The chosen parent is
 * always different from the one currently in use, which is why
 * set_rate_and_parent() does all the work.
 */
static int clk_gfx3d_determine_rate(struct clk_hw *hw,
				    struct clk_rate_request *req)
{
	struct clk_rate_request parent_req = { .min_rate = 0, .max_rate = ULONG_MAX };
	struct clk_rcg2_gfx3d *cgfx = to_clk_rcg2_gfx3d(hw);
	struct clk_hw *xo, *p0, *p1, *p2;
	unsigned long p0_rate;
	u8 mux_div = cgfx->div;
	int ret;

	p0 = cgfx->hws[0];
	p1 = cgfx->hws[1];
	p2 = cgfx->hws[2];
	/*
	 * This function does ping-pong the RCG between PLLs: if we don't
	 * have at least one fixed PLL and two variable ones,
	 * then it's not going to work correctly.
	 */
	if (WARN_ON(!p0 || !p1 || !p2))
		return -EINVAL;

	xo = clk_hw_get_parent_by_index(hw, 0);
	if (req->rate == clk_hw_get_rate(xo)) {
		req->best_parent_hw = xo;
		return 0;
	}

	/* Treat an unset divider as divide-by-one */
	if (mux_div == 0)
		mux_div = 1;

	parent_req.rate = req->rate * mux_div;

	/* This has to be a fixed rate PLL */
	p0_rate = clk_hw_get_rate(p0);

	if (parent_req.rate == p0_rate) {
		req->rate = req->best_parent_rate = p0_rate;
		req->best_parent_hw = p0;
		return 0;
	}

	if (req->best_parent_hw == p0) {
		/* Are we going back to a previously used rate? */
		if (clk_hw_get_rate(p2) == parent_req.rate)
			req->best_parent_hw = p2;
		else
			req->best_parent_hw = p1;
	} else if (req->best_parent_hw == p2) {
		req->best_parent_hw = p1;
	} else {
		req->best_parent_hw = p2;
	}

	ret = __clk_determine_rate(req->best_parent_hw, &parent_req);
	if (ret)
		return ret;

	req->rate = req->best_parent_rate = parent_req.rate;
	req->rate /= mux_div;

	return 0;
}
880 
/*
 * Switch the GFX3D RCG to the parent chosen by determine_rate, writing
 * source select (and optional divider) in one CFG write, then latching.
 * Note this is a full register write, not an update_bits.
 */
static int clk_gfx3d_set_rate_and_parent(struct clk_hw *hw, unsigned long rate,
		unsigned long parent_rate, u8 index)
{
	struct clk_rcg2_gfx3d *cgfx = to_clk_rcg2_gfx3d(hw);
	struct clk_rcg2 *rcg = &cgfx->rcg;
	u32 cfg;
	int ret;

	cfg = rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT;
	/* On some targets, the GFX3D RCG may need to divide PLL frequency */
	if (cgfx->div > 1)
		cfg |= ((2 * cgfx->div) - 1) << CFG_SRC_DIV_SHIFT;

	ret = regmap_write(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, cfg);
	if (ret)
		return ret;

	return update_config(rcg);
}
900 
/* Intentional no-op; see comment below. */
static int clk_gfx3d_set_rate(struct clk_hw *hw, unsigned long rate,
			      unsigned long parent_rate)
{
	/*
	 * We should never get here; clk_gfx3d_determine_rate() should always
	 * make us use a different parent than what we're currently using, so
	 * clk_gfx3d_set_rate_and_parent() should always be called.
	 */
	return 0;
}
911 
/* Ops for the GFX3D ping-pong mux RCG. */
const struct clk_ops clk_gfx3d_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.set_rate = clk_gfx3d_set_rate,
	.set_rate_and_parent = clk_gfx3d_set_rate_and_parent,
	.determine_rate = clk_gfx3d_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_gfx3d_ops);
922 
923 static int clk_rcg2_set_force_enable(struct clk_hw *hw)
924 {
925 	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
926 	const char *name = clk_hw_get_name(hw);
927 	int ret, count;
928 
929 	ret = regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG,
930 				 CMD_ROOT_EN, CMD_ROOT_EN);
931 	if (ret)
932 		return ret;
933 
934 	/* wait for RCG to turn ON */
935 	for (count = 500; count > 0; count--) {
936 		if (clk_rcg2_is_enabled(hw))
937 			return 0;
938 
939 		udelay(1);
940 	}
941 
942 	pr_err("%s: RCG did not turn on\n", name);
943 	return -ETIMEDOUT;
944 }
945 
/* Drop the CMD_ROOT_EN force-enable vote. */
static int clk_rcg2_clear_force_enable(struct clk_hw *hw)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);

	return regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG,
					CMD_ROOT_EN, 0);
}
953 
/*
 * Reconfigure the RCG while it is forced on, then drop the force vote.
 * On a configure failure the force-enable vote is deliberately left set
 * (matching the original error path).
 */
static int
clk_rcg2_shared_force_enable_clear(struct clk_hw *hw, const struct freq_tbl *f)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	int ret = clk_rcg2_set_force_enable(hw);

	if (!ret)
		ret = clk_rcg2_configure(rcg, f);
	if (!ret)
		ret = clk_rcg2_clear_force_enable(hw);

	return ret;
}
970 
/*
 * Set the rate of a shared RCG. If the clock is currently disabled the
 * configuration is only staged (no CMD_UPDATE); it is latched later by
 * clk_rcg2_shared_enable(). Otherwise reconfigure under force-enable.
 */
static int clk_rcg2_shared_set_rate(struct clk_hw *hw, unsigned long rate,
				    unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	const struct freq_tbl *f;

	f = qcom_find_freq(rcg->freq_tbl, rate);
	if (!f)
		return -EINVAL;

	/*
	 * In case clock is disabled, update the CFG, M, N and D registers
	 * and don't hit the update bit of CMD register.
	 */
	if (!__clk_is_enabled(hw->clk))
		return __clk_rcg2_configure(rcg, f);

	return clk_rcg2_shared_force_enable_clear(hw, f);
}
990 
/* Parent selection is part of the staged CFG; reuse shared_set_rate. */
static int clk_rcg2_shared_set_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	return clk_rcg2_shared_set_rate(hw, rate, parent_rate);
}
996 
/*
 * Enable a shared RCG: force it on, latch the configuration that
 * clk_rcg2_shared_set_rate() already staged, then drop the force vote.
 * On an intermediate failure the force vote is left set, matching the
 * original error paths.
 */
static int clk_rcg2_shared_enable(struct clk_hw *hw)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	int ret = clk_rcg2_set_force_enable(hw);

	/*
	 * Set the update bit because required configuration has already
	 * been written in clk_rcg2_shared_set_rate()
	 */
	if (!ret)
		ret = update_config(rcg);
	if (!ret)
		ret = clk_rcg2_clear_force_enable(hw);

	return ret;
}
1016 
/*
 * Park a shared RCG on its safe source while keeping the programmed
 * CFG cached so a later enable restores the current rate.
 */
static void clk_rcg2_shared_disable(struct clk_hw *hw)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	u32 cfg;

	/*
	 * Store current configuration as switching to safe source would clear
	 * the SRC and DIV of CFG register
	 */
	regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);

	/*
	 * Park the RCG at a safe configuration - sourced off of safe source.
	 * Force enable and disable the RCG while configuring it to safeguard
	 * against any update signal coming from the downstream clock.
	 * The current parent is still prepared and enabled at this point, and
	 * the safe source is always on while application processor subsystem
	 * is online. Therefore, the RCG can safely switch its parent.
	 */
	clk_rcg2_set_force_enable(hw);

	regmap_write(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
		     rcg->safe_src_index << CFG_SRC_SEL_SHIFT);

	update_config(rcg);

	clk_rcg2_clear_force_enable(hw);

	/* Write back the stored configuration corresponding to current rate */
	regmap_write(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, cfg);
}
1048 
/* Ops for shared RCGs that park on a safe source while disabled. */
const struct clk_ops clk_rcg2_shared_ops = {
	.enable = clk_rcg2_shared_enable,
	.disable = clk_rcg2_shared_disable,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.determine_rate = clk_rcg2_determine_rate,
	.set_rate = clk_rcg2_shared_set_rate,
	.set_rate_and_parent = clk_rcg2_shared_set_rate_and_parent,
};
EXPORT_SYMBOL_GPL(clk_rcg2_shared_ops);
1060 
1061 /* Common APIs to be used for DFS based RCGR */
1062 static void clk_rcg2_dfs_populate_freq(struct clk_hw *hw, unsigned int l,
1063 				       struct freq_tbl *f)
1064 {
1065 	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
1066 	struct clk_hw *p;
1067 	unsigned long prate = 0;
1068 	u32 val, mask, cfg, mode, src;
1069 	int i, num_parents;
1070 
1071 	regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + SE_PERF_DFSR(l), &cfg);
1072 
1073 	mask = BIT(rcg->hid_width) - 1;
1074 	f->pre_div = 1;
1075 	if (cfg & mask)
1076 		f->pre_div = cfg & mask;
1077 
1078 	src = cfg & CFG_SRC_SEL_MASK;
1079 	src >>= CFG_SRC_SEL_SHIFT;
1080 
1081 	num_parents = clk_hw_get_num_parents(hw);
1082 	for (i = 0; i < num_parents; i++) {
1083 		if (src == rcg->parent_map[i].cfg) {
1084 			f->src = rcg->parent_map[i].src;
1085 			p = clk_hw_get_parent_by_index(&rcg->clkr.hw, i);
1086 			prate = clk_hw_get_rate(p);
1087 		}
1088 	}
1089 
1090 	mode = cfg & CFG_MODE_MASK;
1091 	mode >>= CFG_MODE_SHIFT;
1092 	if (mode) {
1093 		mask = BIT(rcg->mnd_width) - 1;
1094 		regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + SE_PERF_M_DFSR(l),
1095 			    &val);
1096 		val &= mask;
1097 		f->m = val;
1098 
1099 		regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + SE_PERF_N_DFSR(l),
1100 			    &val);
1101 		val = ~val;
1102 		val &= mask;
1103 		val += f->m;
1104 		f->n = val;
1105 	}
1106 
1107 	f->freq = calc_rate(prate, f->m, f->n, mode, f->pre_div);
1108 }
1109 
1110 static int clk_rcg2_dfs_populate_freq_table(struct clk_rcg2 *rcg)
1111 {
1112 	struct freq_tbl *freq_tbl;
1113 	int i;
1114 
1115 	/* Allocate space for 1 extra since table is NULL terminated */
1116 	freq_tbl = kcalloc(MAX_PERF_LEVEL + 1, sizeof(*freq_tbl), GFP_KERNEL);
1117 	if (!freq_tbl)
1118 		return -ENOMEM;
1119 	rcg->freq_tbl = freq_tbl;
1120 
1121 	for (i = 0; i < MAX_PERF_LEVEL; i++)
1122 		clk_rcg2_dfs_populate_freq(&rcg->clkr.hw, i, freq_tbl + i);
1123 
1124 	return 0;
1125 }
1126 
1127 static int clk_rcg2_dfs_determine_rate(struct clk_hw *hw,
1128 				   struct clk_rate_request *req)
1129 {
1130 	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
1131 	int ret;
1132 
1133 	if (!rcg->freq_tbl) {
1134 		ret = clk_rcg2_dfs_populate_freq_table(rcg);
1135 		if (ret) {
1136 			pr_err("Failed to update DFS tables for %s\n",
1137 					clk_hw_get_name(hw));
1138 			return ret;
1139 		}
1140 	}
1141 
1142 	return clk_rcg2_determine_rate(hw, req);
1143 }
1144 
static unsigned long
clk_rcg2_dfs_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	u32 level, mask, cfg, m = 0, n = 0, mode, pre_div;

	/* Current DFS perf level lives in bits [4:1] of the DFSR register */
	regmap_read(rcg->clkr.regmap,
		    rcg->cmd_rcgr + SE_CMD_DFSR_OFFSET, &level);
	level &= GENMASK(4, 1);
	level >>= 1;

	/* If the table was already snapshotted, report the cached rate */
	if (rcg->freq_tbl)
		return rcg->freq_tbl[level].freq;

	/*
	 * Assume that parent_rate is actually the parent because
	 * we can't do any better at figuring it out when the table
	 * hasn't been populated yet. We only populate the table
	 * in determine_rate because we can't guarantee the parents
	 * will be registered with the framework until then.
	 */
	regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + SE_PERF_DFSR(level),
		    &cfg);

	/* Low hid_width bits of CFG hold the source divider; 0 means div-1 */
	mask = BIT(rcg->hid_width) - 1;
	pre_div = 1;
	if (cfg & mask)
		pre_div = cfg & mask;

	mode = cfg & CFG_MODE_MASK;
	mode >>= CFG_MODE_SHIFT;
	if (mode) {
		/* MND mode: M is stored directly, N is stored as ~(N - M) */
		mask = BIT(rcg->mnd_width) - 1;
		regmap_read(rcg->clkr.regmap,
			    rcg->cmd_rcgr + SE_PERF_M_DFSR(level), &m);
		m &= mask;

		regmap_read(rcg->clkr.regmap,
			    rcg->cmd_rcgr + SE_PERF_N_DFSR(level), &n);
		n = ~n;
		n &= mask;
		n += m;
	}

	return calc_rate(parent_rate, m, n, mode, pre_div);
}
1191 
/*
 * Ops used once hardware DFS control is active: the rate is selected by
 * hardware per perf level, so only rate read-back, rate validation and
 * parent queries are provided — no set_rate/set_parent hooks.
 */
static const struct clk_ops clk_rcg2_dfs_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.determine_rate = clk_rcg2_dfs_determine_rate,
	.recalc_rate = clk_rcg2_dfs_recalc_rate,
};
1198 
1199 static int clk_rcg2_enable_dfs(const struct clk_rcg_dfs_data *data,
1200 			       struct regmap *regmap)
1201 {
1202 	struct clk_rcg2 *rcg = data->rcg;
1203 	struct clk_init_data *init = data->init;
1204 	u32 val;
1205 	int ret;
1206 
1207 	ret = regmap_read(regmap, rcg->cmd_rcgr + SE_CMD_DFSR_OFFSET, &val);
1208 	if (ret)
1209 		return -EINVAL;
1210 
1211 	if (!(val & SE_CMD_DFS_EN))
1212 		return 0;
1213 
1214 	/*
1215 	 * Rate changes with consumer writing a register in
1216 	 * their own I/O region
1217 	 */
1218 	init->flags |= CLK_GET_RATE_NOCACHE;
1219 	init->ops = &clk_rcg2_dfs_ops;
1220 
1221 	rcg->freq_tbl = NULL;
1222 
1223 	return 0;
1224 }
1225 
1226 int qcom_cc_register_rcg_dfs(struct regmap *regmap,
1227 			     const struct clk_rcg_dfs_data *rcgs, size_t len)
1228 {
1229 	int i, ret;
1230 
1231 	for (i = 0; i < len; i++) {
1232 		ret = clk_rcg2_enable_dfs(&rcgs[i], regmap);
1233 		if (ret)
1234 			return ret;
1235 	}
1236 
1237 	return 0;
1238 }
1239 EXPORT_SYMBOL_GPL(qcom_cc_register_rcg_dfs);
1240 
/*
 * Program the DisplayPort pixel-clock RCG: derive M/N from the ratio
 * rate/parent_rate while preserving the currently selected source and
 * pre-divider read back from the CFG register.
 */
static int clk_rcg2_dp_set_rate(struct clk_hw *hw, unsigned long rate,
			unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	struct freq_tbl f = { 0 };
	u32 mask = BIT(rcg->hid_width) - 1;
	u32 hid_div, cfg;
	int i, num_parents = clk_hw_get_num_parents(hw);
	unsigned long num, den;

	/*
	 * Approximate parent_rate/rate as den/num (note the swapped
	 * output args), so rate/parent_rate ~= num/den = M/N, with both
	 * bounded by the M/N counter width.
	 */
	rational_best_approximation(parent_rate, rate,
			GENMASK(rcg->mnd_width - 1, 0),
			GENMASK(rcg->mnd_width - 1, 0), &den, &num);

	if (!num || !den)
		return -EINVAL;

	/* Keep the currently configured source selection */
	regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);
	hid_div = cfg;
	cfg &= CFG_SRC_SEL_MASK;
	cfg >>= CFG_SRC_SEL_SHIFT;

	for (i = 0; i < num_parents; i++) {
		if (cfg == rcg->parent_map[i].cfg) {
			f.src = rcg->parent_map[i].src;
			break;
		}
	}

	/* Keep the currently configured pre-divider as well */
	f.pre_div = hid_div;
	f.pre_div >>= CFG_SRC_DIV_SHIFT;
	f.pre_div &= mask;

	/* num == den is a 1:1 ratio, i.e. bypass the M/N counter */
	if (num != den) {
		f.m = num;
		f.n = den;
	} else {
		f.m = 0;
		f.n = 0;
	}

	return clk_rcg2_configure(rcg, &f);
}
1284 
/*
 * set_rate_and_parent simply defers to clk_rcg2_dp_set_rate(); @index
 * is ignored, since that function derives the source selection from
 * the current CFG register contents.
 */
static int clk_rcg2_dp_set_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	return clk_rcg2_dp_set_rate(hw, rate, parent_rate);
}
1290 
1291 static int clk_rcg2_dp_determine_rate(struct clk_hw *hw,
1292 				struct clk_rate_request *req)
1293 {
1294 	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
1295 	unsigned long num, den;
1296 	u64 tmp;
1297 
1298 	/* Parent rate is a fixed phy link rate */
1299 	rational_best_approximation(req->best_parent_rate, req->rate,
1300 			GENMASK(rcg->mnd_width - 1, 0),
1301 			GENMASK(rcg->mnd_width - 1, 0), &den, &num);
1302 
1303 	if (!num || !den)
1304 		return -EINVAL;
1305 
1306 	tmp = req->best_parent_rate * num;
1307 	do_div(tmp, den);
1308 	req->rate = tmp;
1309 
1310 	return 0;
1311 }
1312 
/* Ops for the DisplayPort pixel-clock RCG: M/N derived from the link rate */
const struct clk_ops clk_dp_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.set_rate = clk_rcg2_dp_set_rate,
	.set_rate_and_parent = clk_rcg2_dp_set_rate_and_parent,
	.determine_rate = clk_rcg2_dp_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_dp_ops);
1323