xref: /linux/drivers/clk/renesas/rzg2l-cpg.c (revision 908fc4c2)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * RZ/G2L Clock Pulse Generator
4  *
5  * Copyright (C) 2021 Renesas Electronics Corp.
6  *
7  * Based on renesas-cpg-mssr.c
8  *
9  * Copyright (C) 2015 Glider bvba
10  * Copyright (C) 2013 Ideas On Board SPRL
11  * Copyright (C) 2015 Renesas Electronics Corp.
12  */
13 
14 #include <linux/clk.h>
15 #include <linux/clk-provider.h>
16 #include <linux/clk/renesas.h>
17 #include <linux/delay.h>
18 #include <linux/device.h>
19 #include <linux/init.h>
20 #include <linux/iopoll.h>
21 #include <linux/mod_devicetable.h>
22 #include <linux/module.h>
23 #include <linux/of_address.h>
24 #include <linux/of_device.h>
25 #include <linux/platform_device.h>
26 #include <linux/pm_clock.h>
27 #include <linux/pm_domain.h>
28 #include <linux/reset-controller.h>
29 #include <linux/slab.h>
30 #include <linux/units.h>
31 
32 #include <dt-bindings/clock/renesas-cpg-mssr.h>
33 
34 #include "rzg2l-cpg.h"
35 
36 #ifdef DEBUG
37 #define WARN_DEBUG(x)	WARN_ON(x)
38 #else
39 #define WARN_DEBUG(x)	do { } while (0)
40 #endif
41 
42 #define DIV_RSMASK(v, s, m)	((v >> s) & m)
43 #define GET_SHIFT(val)		((val >> 12) & 0xff)
44 #define GET_WIDTH(val)		((val >> 8) & 0xf)
45 
46 #define KDIV(val)		DIV_RSMASK(val, 16, 0xffff)
47 #define MDIV(val)		DIV_RSMASK(val, 6, 0x3ff)
48 #define PDIV(val)		DIV_RSMASK(val, 0, 0x3f)
49 #define SDIV(val)		DIV_RSMASK(val, 0, 0x7)
50 
51 #define CLK_ON_R(reg)		(reg)
52 #define CLK_MON_R(reg)		(0x180 + (reg))
53 #define CLK_RST_R(reg)		(reg)
54 #define CLK_MRST_R(reg)		(0x180 + (reg))
55 
56 #define GET_REG_OFFSET(val)		((val >> 20) & 0xfff)
57 #define GET_REG_SAMPLL_CLK1(val)	((val >> 22) & 0xfff)
58 #define GET_REG_SAMPLL_CLK2(val)	((val >> 12) & 0xfff)
59 
60 #define MAX_VCLK_FREQ		(148500000)
61 
/**
 * struct sd_hw_data - SD clock mux hardware data
 *
 * @hw: handle between common and hardware-specific interfaces
 * @conf: packed mux configuration, decoded with GET_REG_OFFSET(),
 *        GET_SHIFT() and GET_WIDTH()
 * @priv: CPG private data
 */
struct sd_hw_data {
	struct clk_hw hw;
	u32 conf;
	struct rzg2l_cpg_priv *priv;
};

#define to_sd_hw_data(_hw)	container_of(_hw, struct sd_hw_data, hw)
69 
/**
 * struct rzg2l_pll5_param - PLL5 parameters
 *
 * @pl5_fracin: fractional part of the PLL multiplier (24-bit fraction)
 * @pl5_refdiv: reference clock divider
 * @pl5_intin: integer part of the PLL multiplier
 * @pl5_postdiv1: post divider 1
 * @pl5_postdiv2: post divider 2
 * @pl5_spread: SSCG modulation spread setting
 */
struct rzg2l_pll5_param {
	u32 pl5_fracin;
	u8 pl5_refdiv;
	u8 pl5_intin;
	u8 pl5_postdiv1;
	u8 pl5_postdiv2;
	u8 pl5_spread;
};
78 
/**
 * struct rzg2l_pll5_mux_dsi_div_param - PLL5 mux and DSI divider parameters
 *
 * @clksrc: clock source selection for the PLL5_4 mux
 * @dsi_div_a: DSI divider A (power-of-two exponent, see DIV_DSI_A)
 * @dsi_div_b: DSI divider B (divides by dsi_div_b + 1, see DIV_DSI_B)
 */
struct rzg2l_pll5_mux_dsi_div_param {
	u8 clksrc;
	u8 dsi_div_a;
	u8 dsi_div_b;
};
84 
/**
 * struct rzg2l_cpg_priv - Clock Pulse Generator Private Data
 *
 * @rcdev: Reset controller entity
 * @dev: CPG device
 * @base: CPG register block base address
 * @rmw_lock: protects register accesses
 * @clks: Array containing all Core and Module Clocks
 * @num_core_clks: Number of Core Clocks in clks[]
 * @num_mod_clks: Number of Module Clocks in clks[]
 * @num_resets: Number of Module Resets in info->resets[]
 * @last_dt_core_clk: ID of the last Core Clock exported to DT
 * @info: Pointer to platform data
 * @mux_dsi_div_params: pll5 mux and dsi div parameters
 */
struct rzg2l_cpg_priv {
	struct reset_controller_dev rcdev;
	struct device *dev;
	void __iomem *base;
	spinlock_t rmw_lock;

	struct clk **clks;
	unsigned int num_core_clks;
	unsigned int num_mod_clks;
	unsigned int num_resets;
	unsigned int last_dt_core_clk;

	const struct rzg2l_cpg_info *info;

	struct rzg2l_pll5_mux_dsi_div_param mux_dsi_div_params;
};
116 
/*
 * Teardown callback: unregister the OF clock provider for @data (the device
 * node previously passed to of_clk_add_provider()).
 */
static void rzg2l_cpg_del_clk_provider(void *data)
{
	of_clk_del_provider(data);
}
121 
122 static struct clk * __init
123 rzg2l_cpg_div_clk_register(const struct cpg_core_clk *core,
124 			   struct clk **clks,
125 			   void __iomem *base,
126 			   struct rzg2l_cpg_priv *priv)
127 {
128 	struct device *dev = priv->dev;
129 	const struct clk *parent;
130 	const char *parent_name;
131 	struct clk_hw *clk_hw;
132 
133 	parent = clks[core->parent & 0xffff];
134 	if (IS_ERR(parent))
135 		return ERR_CAST(parent);
136 
137 	parent_name = __clk_get_name(parent);
138 
139 	if (core->dtable)
140 		clk_hw = clk_hw_register_divider_table(dev, core->name,
141 						       parent_name, 0,
142 						       base + GET_REG_OFFSET(core->conf),
143 						       GET_SHIFT(core->conf),
144 						       GET_WIDTH(core->conf),
145 						       core->flag,
146 						       core->dtable,
147 						       &priv->rmw_lock);
148 	else
149 		clk_hw = clk_hw_register_divider(dev, core->name,
150 						 parent_name, 0,
151 						 base + GET_REG_OFFSET(core->conf),
152 						 GET_SHIFT(core->conf),
153 						 GET_WIDTH(core->conf),
154 						 core->flag, &priv->rmw_lock);
155 
156 	if (IS_ERR(clk_hw))
157 		return ERR_CAST(clk_hw);
158 
159 	return clk_hw->clk;
160 }
161 
162 static struct clk * __init
163 rzg2l_cpg_mux_clk_register(const struct cpg_core_clk *core,
164 			   void __iomem *base,
165 			   struct rzg2l_cpg_priv *priv)
166 {
167 	const struct clk_hw *clk_hw;
168 
169 	clk_hw = devm_clk_hw_register_mux(priv->dev, core->name,
170 					  core->parent_names, core->num_parents,
171 					  core->flag,
172 					  base + GET_REG_OFFSET(core->conf),
173 					  GET_SHIFT(core->conf),
174 					  GET_WIDTH(core->conf),
175 					  core->mux_flags, &priv->rmw_lock);
176 	if (IS_ERR(clk_hw))
177 		return ERR_CAST(clk_hw);
178 
179 	return clk_hw->clk;
180 }
181 
/* Delegate rate selection to the generic mux helper with no extra flags. */
static int rzg2l_cpg_sd_clk_mux_determine_rate(struct clk_hw *hw,
					       struct clk_rate_request *req)
{
	return clk_mux_determine_rate_flags(hw, req, 0);
}
187 
/*
 * Select the parent of an SD clock mux.  The upper 16 bits of the register
 * are a write-enable mask for the corresponding value bits, hence @bitmask.
 * Switching between the 533 MHz and 400 MHz sources must detour through
 * 266 MHz (see the HW restriction described below), and that intermediate
 * switch is confirmed via the CPG_CLKSTATUS monitor before proceeding.
 */
static int rzg2l_cpg_sd_clk_mux_set_parent(struct clk_hw *hw, u8 index)
{
	struct sd_hw_data *hwdata = to_sd_hw_data(hw);
	struct rzg2l_cpg_priv *priv = hwdata->priv;
	u32 off = GET_REG_OFFSET(hwdata->conf);
	u32 shift = GET_SHIFT(hwdata->conf);
	const u32 clk_src_266 = 2;
	u32 bitmask;

	/*
	 * As per the HW manual, we should not directly switch from 533 MHz to
	 * 400 MHz and vice versa. To change the setting from 2’b01 (533 MHz)
	 * to 2’b10 (400 MHz) or vice versa, Switch to 2’b11 (266 MHz) first,
	 * and then switch to the target setting (2’b01 (533 MHz) or 2’b10
	 * (400 MHz)).
	 * Setting a value of '0' to the SEL_SDHI0_SET or SEL_SDHI1_SET clock
	 * switching register is prohibited.
	 * The clock mux has 3 input clocks(533 MHz, 400 MHz, and 266 MHz), and
	 * the index to value mapping is done by adding 1 to the index.
	 */
	bitmask = (GENMASK(GET_WIDTH(hwdata->conf) - 1, 0) << shift) << 16;
	if (index != clk_src_266) {
		u32 msk, val;
		int ret;

		writel(bitmask | ((clk_src_266 + 1) << shift), priv->base + off);

		/* off != 0 identifies the SDHI1 mux register */
		msk = off ? CPG_CLKSTATUS_SELSDHI1_STS : CPG_CLKSTATUS_SELSDHI0_STS;

		ret = readl_poll_timeout(priv->base + CPG_CLKSTATUS, val,
					 !(val & msk), 100,
					 CPG_SDHI_CLK_SWITCH_STATUS_TIMEOUT_US);
		if (ret) {
			dev_err(priv->dev, "failed to switch clk source\n");
			return ret;
		}
	}

	writel(bitmask | ((index + 1) << shift), priv->base + off);

	return 0;
}
230 
/*
 * Read back the SD mux selection.  Register values are 1-based (0 is
 * prohibited), so subtract 1 to get the parent index.  If the register
 * holds the prohibited value 0, repair it as a side effect by switching to
 * the 533 MHz reset-default source (index 0).
 */
static u8 rzg2l_cpg_sd_clk_mux_get_parent(struct clk_hw *hw)
{
	struct sd_hw_data *hwdata = to_sd_hw_data(hw);
	struct rzg2l_cpg_priv *priv = hwdata->priv;
	u32 val = readl(priv->base + GET_REG_OFFSET(hwdata->conf));

	val >>= GET_SHIFT(hwdata->conf);
	val &= GENMASK(GET_WIDTH(hwdata->conf) - 1, 0);
	if (val) {
		val--;
	} else {
		/* Prohibited clk source, change it to 533 MHz(reset value) */
		rzg2l_cpg_sd_clk_mux_set_parent(hw, 0);
	}

	return val;
}
248 
/* clk_ops for the SD card clock source selectors (SEL_SDHI0/SEL_SDHI1) */
static const struct clk_ops rzg2l_cpg_sd_clk_mux_ops = {
	.determine_rate = rzg2l_cpg_sd_clk_mux_determine_rate,
	.set_parent	= rzg2l_cpg_sd_clk_mux_set_parent,
	.get_parent	= rzg2l_cpg_sd_clk_mux_get_parent,
};
254 
255 static struct clk * __init
256 rzg2l_cpg_sd_mux_clk_register(const struct cpg_core_clk *core,
257 			      void __iomem *base,
258 			      struct rzg2l_cpg_priv *priv)
259 {
260 	struct sd_hw_data *clk_hw_data;
261 	struct clk_init_data init;
262 	struct clk_hw *clk_hw;
263 	int ret;
264 
265 	clk_hw_data = devm_kzalloc(priv->dev, sizeof(*clk_hw_data), GFP_KERNEL);
266 	if (!clk_hw_data)
267 		return ERR_PTR(-ENOMEM);
268 
269 	clk_hw_data->priv = priv;
270 	clk_hw_data->conf = core->conf;
271 
272 	init.name = GET_SHIFT(core->conf) ? "sd1" : "sd0";
273 	init.ops = &rzg2l_cpg_sd_clk_mux_ops;
274 	init.flags = 0;
275 	init.num_parents = core->num_parents;
276 	init.parent_names = core->parent_names;
277 
278 	clk_hw = &clk_hw_data->hw;
279 	clk_hw->init = &init;
280 
281 	ret = devm_clk_hw_register(priv->dev, clk_hw);
282 	if (ret)
283 		return ERR_PTR(ret);
284 
285 	return clk_hw->clk;
286 }
287 
/*
 * Fill @params with the PLL5 settings needed to produce @rate on
 * FOUTPOSTDIV and return the rate those (quantized) settings actually
 * yield:
 *
 *   FOUTPOSTDIV = EXTAL / refdiv * (intin + fracin / 2^24)
 *                 / (postdiv1 * postdiv2)
 *
 * refdiv, postdiv1/2 and the SSCG spread are fixed here; only the
 * integer/fractional multiplier is derived from @rate (the fraction is
 * kept in 24 bits, matching the CLK3 register field).
 */
static unsigned long
rzg2l_cpg_get_foutpostdiv_rate(struct rzg2l_pll5_param *params,
			       unsigned long rate)
{
	unsigned long foutpostdiv_rate;

	params->pl5_intin = rate / MEGA;
	params->pl5_fracin = div_u64(((u64)rate % MEGA) << 24, MEGA);
	params->pl5_refdiv = 2;
	params->pl5_postdiv1 = 1;
	params->pl5_postdiv2 = 1;
	params->pl5_spread = 0x16;

	foutpostdiv_rate =
		EXTAL_FREQ_IN_MEGA_HZ * MEGA / params->pl5_refdiv *
		((((params->pl5_intin << 24) + params->pl5_fracin)) >> 24) /
		(params->pl5_postdiv1 * params->pl5_postdiv2);

	return foutpostdiv_rate;
}
308 
/**
 * struct dsi_div_hw_data - DSI divider clock hardware data
 *
 * @hw: handle between common and hardware-specific interfaces
 * @conf: divider configuration
 * @rate: cached rate set by the last successful set_rate() call
 *        (0 until then)
 * @priv: CPG private data
 */
struct dsi_div_hw_data {
	struct clk_hw hw;
	u32 conf;
	unsigned long rate;
	struct rzg2l_cpg_priv *priv;
};

#define to_dsi_div_hw_data(_hw)	container_of(_hw, struct dsi_div_hw_data, hw)
317 
318 static unsigned long rzg2l_cpg_dsi_div_recalc_rate(struct clk_hw *hw,
319 						   unsigned long parent_rate)
320 {
321 	struct dsi_div_hw_data *dsi_div = to_dsi_div_hw_data(hw);
322 	unsigned long rate = dsi_div->rate;
323 
324 	if (!rate)
325 		rate = parent_rate;
326 
327 	return rate;
328 }
329 
/*
 * Compute the parent (FOUTPOSTDIV) rate needed for VCLK rate @rate.
 * When clksrc is non-zero the mux feeds from the half-rate tap
 * (FOUT1PH0), so the contribution seen downstream is halved.
 * Note: @params is used only as scratch space for the rate computation.
 */
static unsigned long rzg2l_cpg_get_vclk_parent_rate(struct clk_hw *hw,
						    unsigned long rate)
{
	struct dsi_div_hw_data *dsi_div = to_dsi_div_hw_data(hw);
	struct rzg2l_cpg_priv *priv = dsi_div->priv;
	struct rzg2l_pll5_param params;
	unsigned long parent_rate;

	parent_rate = rzg2l_cpg_get_foutpostdiv_rate(&params, rate);

	if (priv->mux_dsi_div_params.clksrc)
		parent_rate /= 2;

	return parent_rate;
}
345 
/*
 * Clamp the requested VCLK rate to the HW maximum and propagate the
 * corresponding FOUTPOSTDIV rate up to the parent.
 */
static int rzg2l_cpg_dsi_div_determine_rate(struct clk_hw *hw,
					    struct clk_rate_request *req)
{
	if (req->rate > MAX_VCLK_FREQ)
		req->rate = MAX_VCLK_FREQ;

	req->best_parent_rate = rzg2l_cpg_get_vclk_parent_rate(hw, req->rate);

	return 0;
}
356 
/*
 * Program the DSI A/B dividers (CPG_PL5_SDIV) for the requested VCLK rate
 * and cache the rate for recalc_rate().  The *_WEN bits are the per-field
 * write enables required by the register layout.
 */
static int rzg2l_cpg_dsi_div_set_rate(struct clk_hw *hw,
				      unsigned long rate,
				      unsigned long parent_rate)
{
	struct dsi_div_hw_data *dsi_div = to_dsi_div_hw_data(hw);
	struct rzg2l_cpg_priv *priv = dsi_div->priv;

	/*
	 * MUX -->DIV_DSI_{A,B} -->M3 -->VCLK
	 *
	 * Based on the dot clock, the DSI divider clock sets the divider value,
	 * calculates the pll parameters for generating FOUTPOSTDIV and the clk
	 * source for the MUX and propagates that info to the parents.
	 */

	if (!rate || rate > MAX_VCLK_FREQ)
		return -EINVAL;

	dsi_div->rate = rate;
	writel(CPG_PL5_SDIV_DIV_DSI_A_WEN | CPG_PL5_SDIV_DIV_DSI_B_WEN |
	       (priv->mux_dsi_div_params.dsi_div_a << 0) |
	       (priv->mux_dsi_div_params.dsi_div_b << 8),
	       priv->base + CPG_PL5_SDIV);

	return 0;
}
383 
/* clk_ops for the DSI divider clock (DIV_DSI_A/B) */
static const struct clk_ops rzg2l_cpg_dsi_div_ops = {
	.recalc_rate = rzg2l_cpg_dsi_div_recalc_rate,
	.determine_rate = rzg2l_cpg_dsi_div_determine_rate,
	.set_rate = rzg2l_cpg_dsi_div_set_rate,
};
389 
390 static struct clk * __init
391 rzg2l_cpg_dsi_div_clk_register(const struct cpg_core_clk *core,
392 			       struct clk **clks,
393 			       struct rzg2l_cpg_priv *priv)
394 {
395 	struct dsi_div_hw_data *clk_hw_data;
396 	const struct clk *parent;
397 	const char *parent_name;
398 	struct clk_init_data init;
399 	struct clk_hw *clk_hw;
400 	int ret;
401 
402 	parent = clks[core->parent & 0xffff];
403 	if (IS_ERR(parent))
404 		return ERR_CAST(parent);
405 
406 	clk_hw_data = devm_kzalloc(priv->dev, sizeof(*clk_hw_data), GFP_KERNEL);
407 	if (!clk_hw_data)
408 		return ERR_PTR(-ENOMEM);
409 
410 	clk_hw_data->priv = priv;
411 
412 	parent_name = __clk_get_name(parent);
413 	init.name = core->name;
414 	init.ops = &rzg2l_cpg_dsi_div_ops;
415 	init.flags = CLK_SET_RATE_PARENT;
416 	init.parent_names = &parent_name;
417 	init.num_parents = 1;
418 
419 	clk_hw = &clk_hw_data->hw;
420 	clk_hw->init = &init;
421 
422 	ret = devm_clk_hw_register(priv->dev, clk_hw);
423 	if (ret)
424 		return ERR_PTR(ret);
425 
426 	return clk_hw->clk;
427 }
428 
/**
 * struct pll5_mux_hw_data - PLL5_4 clock source mux hardware data
 *
 * @hw: handle between common and hardware-specific interfaces
 * @conf: packed register configuration (offset via GET_REG_OFFSET())
 * @rate: cached rate (currently unused by the mux ops below)
 * @priv: CPG private data
 */
struct pll5_mux_hw_data {
	struct clk_hw hw;
	u32 conf;
	unsigned long rate;
	struct rzg2l_cpg_priv *priv;
};

#define to_pll5_mux_hw_data(_hw)	container_of(_hw, struct pll5_mux_hw_data, hw)
437 
/*
 * Steer the mux to the clock source previously chosen by the DSI divider
 * (mux_dsi_div_params.clksrc) and pass the requested rate straight through
 * to that parent.
 */
static int rzg2l_cpg_pll5_4_clk_mux_determine_rate(struct clk_hw *hw,
						   struct clk_rate_request *req)
{
	struct clk_hw *parent;
	struct pll5_mux_hw_data *hwdata = to_pll5_mux_hw_data(hw);
	struct rzg2l_cpg_priv *priv = hwdata->priv;

	parent = clk_hw_get_parent_by_index(hw, priv->mux_dsi_div_params.clksrc);
	req->best_parent_hw = parent;
	req->best_parent_rate = req->rate;

	return 0;
}
451 
/*
 * Select FOUTPOSTDIV (index 0) or FOUT1PH0 (index 1) as the VCLK path
 * source via CPG_OTHERFUNC1_REG; the WEN bit enables the write.
 */
static int rzg2l_cpg_pll5_4_clk_mux_set_parent(struct clk_hw *hw, u8 index)
{
	struct pll5_mux_hw_data *hwdata = to_pll5_mux_hw_data(hw);
	struct rzg2l_cpg_priv *priv = hwdata->priv;

	/*
	 * FOUTPOSTDIV--->|
	 *  |             | -->MUX -->DIV_DSIA_B -->M3 -->VCLK
	 *  |--FOUT1PH0-->|
	 *
	 * Based on the dot clock, the DSI divider clock calculates the parent
	 * rate and clk source for the MUX. It propagates that info to
	 * pll5_4_clk_mux which sets the clock source for DSI divider clock.
	 */

	writel(CPG_OTHERFUNC1_REG_RES0_ON_WEN | index,
	       priv->base + CPG_OTHERFUNC1_REG);

	return 0;
}
472 
/*
 * Read back the mux selection from the register.
 * NOTE(review): the value is returned unmasked; this relies on all other
 * readable bits of the register being zero -- confirm against the HW manual.
 */
static u8 rzg2l_cpg_pll5_4_clk_mux_get_parent(struct clk_hw *hw)
{
	struct pll5_mux_hw_data *hwdata = to_pll5_mux_hw_data(hw);
	struct rzg2l_cpg_priv *priv = hwdata->priv;

	return readl(priv->base + GET_REG_OFFSET(hwdata->conf));
}
480 
/* clk_ops for the PLL5_4 clock source mux (FOUTPOSTDIV / FOUT1PH0) */
static const struct clk_ops rzg2l_cpg_pll5_4_clk_mux_ops = {
	.determine_rate = rzg2l_cpg_pll5_4_clk_mux_determine_rate,
	.set_parent	= rzg2l_cpg_pll5_4_clk_mux_set_parent,
	.get_parent	= rzg2l_cpg_pll5_4_clk_mux_get_parent,
};
486 
487 static struct clk * __init
488 rzg2l_cpg_pll5_4_mux_clk_register(const struct cpg_core_clk *core,
489 				  struct rzg2l_cpg_priv *priv)
490 {
491 	struct pll5_mux_hw_data *clk_hw_data;
492 	struct clk_init_data init;
493 	struct clk_hw *clk_hw;
494 	int ret;
495 
496 	clk_hw_data = devm_kzalloc(priv->dev, sizeof(*clk_hw_data), GFP_KERNEL);
497 	if (!clk_hw_data)
498 		return ERR_PTR(-ENOMEM);
499 
500 	clk_hw_data->priv = priv;
501 	clk_hw_data->conf = core->conf;
502 
503 	init.name = core->name;
504 	init.ops = &rzg2l_cpg_pll5_4_clk_mux_ops;
505 	init.flags = CLK_SET_RATE_PARENT;
506 	init.num_parents = core->num_parents;
507 	init.parent_names = core->parent_names;
508 
509 	clk_hw = &clk_hw_data->hw;
510 	clk_hw->init = &init;
511 
512 	ret = devm_clk_hw_register(priv->dev, clk_hw);
513 	if (ret)
514 		return ERR_PTR(ret);
515 
516 	return clk_hw->clk;
517 }
518 
/**
 * struct sipll5 - SIPLL5 clock hardware data
 *
 * @hw: handle between common and hardware-specific interfaces
 * @conf: register configuration
 * @foutpostdiv_rate: cached FOUTPOSTDIV rate computed by the last
 *                    set_rate() call (0 until then)
 * @priv: CPG private data
 */
struct sipll5 {
	struct clk_hw hw;
	u32 conf;
	unsigned long foutpostdiv_rate;
	struct rzg2l_cpg_priv *priv;
};

#define to_sipll5(_hw)	container_of(_hw, struct sipll5, hw)
527 
528 static unsigned long rzg2l_cpg_get_vclk_rate(struct clk_hw *hw,
529 					     unsigned long rate)
530 {
531 	struct sipll5 *sipll5 = to_sipll5(hw);
532 	struct rzg2l_cpg_priv *priv = sipll5->priv;
533 	unsigned long vclk;
534 
535 	vclk = rate / ((1 << priv->mux_dsi_div_params.dsi_div_a) *
536 		       (priv->mux_dsi_div_params.dsi_div_b + 1));
537 
538 	if (priv->mux_dsi_div_params.clksrc)
539 		vclk /= 2;
540 
541 	return vclk;
542 }
543 
544 static unsigned long rzg2l_cpg_sipll5_recalc_rate(struct clk_hw *hw,
545 						  unsigned long parent_rate)
546 {
547 	struct sipll5 *sipll5 = to_sipll5(hw);
548 	unsigned long pll5_rate = sipll5->foutpostdiv_rate;
549 
550 	if (!pll5_rate)
551 		pll5_rate = parent_rate;
552 
553 	return pll5_rate;
554 }
555 
/* Accept any requested rate as-is; quantization happens in set_rate(). */
static long rzg2l_cpg_sipll5_round_rate(struct clk_hw *hw,
					unsigned long rate,
					unsigned long *parent_rate)
{
	return rate;
}
562 
/*
 * Reprogram PLL5 so FOUTPOSTDIV supports the requested VCLK rate:
 * put the PLL in standby (and wait for the lock bit to clear), write the
 * divider/multiplier/SSCG registers, then return to normal mode and wait
 * for the PLL to lock again.  The achieved FOUTPOSTDIV rate is cached for
 * recalc_rate().
 */
static int rzg2l_cpg_sipll5_set_rate(struct clk_hw *hw,
				     unsigned long rate,
				     unsigned long parent_rate)
{
	struct sipll5 *sipll5 = to_sipll5(hw);
	struct rzg2l_cpg_priv *priv = sipll5->priv;
	struct rzg2l_pll5_param params;
	unsigned long vclk_rate;
	int ret;
	u32 val;

	/*
	 *  OSC --> PLL5 --> FOUTPOSTDIV-->|
	 *                   |             | -->MUX -->DIV_DSIA_B -->M3 -->VCLK
	 *                   |--FOUT1PH0-->|
	 *
	 * Based on the dot clock, the DSI divider clock calculates the parent
	 * rate and the pll5 parameters for generating FOUTPOSTDIV. It propagates
	 * that info to sipll5 which sets parameters for generating FOUTPOSTDIV.
	 *
	 * OSC --> PLL5 --> FOUTPOSTDIV
	 */

	if (!rate)
		return -EINVAL;

	vclk_rate = rzg2l_cpg_get_vclk_rate(hw, rate);
	sipll5->foutpostdiv_rate =
		rzg2l_cpg_get_foutpostdiv_rate(&params, vclk_rate);

	/* Put PLL5 into standby mode */
	writel(CPG_SIPLL5_STBY_RESETB_WEN, priv->base + CPG_SIPLL5_STBY);
	ret = readl_poll_timeout(priv->base + CPG_SIPLL5_MON, val,
				 !(val & CPG_SIPLL5_MON_PLL5_LOCK), 100, 250000);
	if (ret) {
		dev_err(priv->dev, "failed to release pll5 lock");
		return ret;
	}

	/* Output clock setting 1 */
	writel(CPG_SIPLL5_CLK1_POSTDIV1_WEN | CPG_SIPLL5_CLK1_POSTDIV2_WEN |
	       CPG_SIPLL5_CLK1_REFDIV_WEN  | (params.pl5_postdiv1 << 0) |
	       (params.pl5_postdiv2 << 4) | (params.pl5_refdiv << 8),
	       priv->base + CPG_SIPLL5_CLK1);

	/* Output clock setting, SSCG modulation value setting 3 */
	writel((params.pl5_fracin << 8), priv->base + CPG_SIPLL5_CLK3);

	/* Output clock setting 4 */
	writel(CPG_SIPLL5_CLK4_RESV_LSB | (params.pl5_intin << 16),
	       priv->base + CPG_SIPLL5_CLK4);

	/* Output clock setting 5 */
	writel(params.pl5_spread, priv->base + CPG_SIPLL5_CLK5);

	/* PLL normal mode setting */
	writel(CPG_SIPLL5_STBY_DOWNSPREAD_WEN | CPG_SIPLL5_STBY_SSCG_EN_WEN |
	       CPG_SIPLL5_STBY_RESETB_WEN | CPG_SIPLL5_STBY_RESETB,
	       priv->base + CPG_SIPLL5_STBY);

	/* PLL normal mode transition, output clock stability check */
	ret = readl_poll_timeout(priv->base + CPG_SIPLL5_MON, val,
				 (val & CPG_SIPLL5_MON_PLL5_LOCK), 100, 250000);
	if (ret) {
		dev_err(priv->dev, "failed to lock pll5");
		return ret;
	}

	return 0;
}
633 
/* clk_ops for the SIPLL5 (FOUTPOSTDIV generator) */
static const struct clk_ops rzg2l_cpg_sipll5_ops = {
	.recalc_rate = rzg2l_cpg_sipll5_recalc_rate,
	.round_rate = rzg2l_cpg_sipll5_round_rate,
	.set_rate = rzg2l_cpg_sipll5_set_rate,
};
639 
640 static struct clk * __init
641 rzg2l_cpg_sipll5_register(const struct cpg_core_clk *core,
642 			  struct clk **clks,
643 			  struct rzg2l_cpg_priv *priv)
644 {
645 	const struct clk *parent;
646 	struct clk_init_data init;
647 	const char *parent_name;
648 	struct sipll5 *sipll5;
649 	struct clk_hw *clk_hw;
650 	int ret;
651 
652 	parent = clks[core->parent & 0xffff];
653 	if (IS_ERR(parent))
654 		return ERR_CAST(parent);
655 
656 	sipll5 = devm_kzalloc(priv->dev, sizeof(*sipll5), GFP_KERNEL);
657 	if (!sipll5)
658 		return ERR_PTR(-ENOMEM);
659 
660 	init.name = core->name;
661 	parent_name = __clk_get_name(parent);
662 	init.ops = &rzg2l_cpg_sipll5_ops;
663 	init.flags = 0;
664 	init.parent_names = &parent_name;
665 	init.num_parents = 1;
666 
667 	sipll5->hw.init = &init;
668 	sipll5->conf = core->conf;
669 	sipll5->priv = priv;
670 
671 	writel(CPG_SIPLL5_STBY_SSCG_EN_WEN | CPG_SIPLL5_STBY_RESETB_WEN |
672 	       CPG_SIPLL5_STBY_RESETB, priv->base + CPG_SIPLL5_STBY);
673 
674 	clk_hw = &sipll5->hw;
675 	clk_hw->init = &init;
676 
677 	ret = devm_clk_hw_register(priv->dev, clk_hw);
678 	if (ret)
679 		return ERR_PTR(ret);
680 
681 	priv->mux_dsi_div_params.clksrc = 1; /* Use clk src 1 for DSI */
682 	priv->mux_dsi_div_params.dsi_div_a = 1; /* Divided by 2 */
683 	priv->mux_dsi_div_params.dsi_div_b = 2; /* Divided by 3 */
684 
685 	return clk_hw->clk;
686 }
687 
/**
 * struct pll_clk - SAM PLL clock hardware data
 *
 * @hw: handle between common and hardware-specific interfaces
 * @conf: packed register configuration (CLK1/CLK2 offsets, see
 *        GET_REG_SAMPLL_CLK1()/GET_REG_SAMPLL_CLK2())
 * @type: clock type (only CLK_TYPE_SAM_PLL rates are computed)
 * @base: CPG register block base address
 * @priv: CPG private data
 */
struct pll_clk {
	struct clk_hw hw;
	unsigned int conf;
	unsigned int type;
	void __iomem *base;
	struct rzg2l_cpg_priv *priv;
};

#define to_pll(_hw)	container_of(_hw, struct pll_clk, hw)
697 
698 static unsigned long rzg2l_cpg_pll_clk_recalc_rate(struct clk_hw *hw,
699 						   unsigned long parent_rate)
700 {
701 	struct pll_clk *pll_clk = to_pll(hw);
702 	struct rzg2l_cpg_priv *priv = pll_clk->priv;
703 	unsigned int val1, val2;
704 	unsigned int mult = 1;
705 	unsigned int div = 1;
706 
707 	if (pll_clk->type != CLK_TYPE_SAM_PLL)
708 		return parent_rate;
709 
710 	val1 = readl(priv->base + GET_REG_SAMPLL_CLK1(pll_clk->conf));
711 	val2 = readl(priv->base + GET_REG_SAMPLL_CLK2(pll_clk->conf));
712 	mult = MDIV(val1) + KDIV(val1) / 65536;
713 	div = PDIV(val1) << SDIV(val2);
714 
715 	return DIV_ROUND_CLOSEST_ULL((u64)parent_rate * mult, div);
716 }
717 
/* clk_ops for the read-only SAM PLL clocks */
static const struct clk_ops rzg2l_cpg_pll_ops = {
	.recalc_rate = rzg2l_cpg_pll_clk_recalc_rate,
};
721 
722 static struct clk * __init
723 rzg2l_cpg_pll_clk_register(const struct cpg_core_clk *core,
724 			   struct clk **clks,
725 			   void __iomem *base,
726 			   struct rzg2l_cpg_priv *priv)
727 {
728 	struct device *dev = priv->dev;
729 	const struct clk *parent;
730 	struct clk_init_data init;
731 	const char *parent_name;
732 	struct pll_clk *pll_clk;
733 
734 	parent = clks[core->parent & 0xffff];
735 	if (IS_ERR(parent))
736 		return ERR_CAST(parent);
737 
738 	pll_clk = devm_kzalloc(dev, sizeof(*pll_clk), GFP_KERNEL);
739 	if (!pll_clk)
740 		return ERR_PTR(-ENOMEM);
741 
742 	parent_name = __clk_get_name(parent);
743 	init.name = core->name;
744 	init.ops = &rzg2l_cpg_pll_ops;
745 	init.flags = 0;
746 	init.parent_names = &parent_name;
747 	init.num_parents = 1;
748 
749 	pll_clk->hw.init = &init;
750 	pll_clk->conf = core->conf;
751 	pll_clk->base = base;
752 	pll_clk->priv = priv;
753 	pll_clk->type = core->type;
754 
755 	return clk_register(NULL, &pll_clk->hw);
756 }
757 
758 static struct clk
759 *rzg2l_cpg_clk_src_twocell_get(struct of_phandle_args *clkspec,
760 			       void *data)
761 {
762 	unsigned int clkidx = clkspec->args[1];
763 	struct rzg2l_cpg_priv *priv = data;
764 	struct device *dev = priv->dev;
765 	const char *type;
766 	struct clk *clk;
767 
768 	switch (clkspec->args[0]) {
769 	case CPG_CORE:
770 		type = "core";
771 		if (clkidx > priv->last_dt_core_clk) {
772 			dev_err(dev, "Invalid %s clock index %u\n", type, clkidx);
773 			return ERR_PTR(-EINVAL);
774 		}
775 		clk = priv->clks[clkidx];
776 		break;
777 
778 	case CPG_MOD:
779 		type = "module";
780 		if (clkidx >= priv->num_mod_clks) {
781 			dev_err(dev, "Invalid %s clock index %u\n", type,
782 				clkidx);
783 			return ERR_PTR(-EINVAL);
784 		}
785 		clk = priv->clks[priv->num_core_clks + clkidx];
786 		break;
787 
788 	default:
789 		dev_err(dev, "Invalid CPG clock type %u\n", clkspec->args[0]);
790 		return ERR_PTR(-EINVAL);
791 	}
792 
793 	if (IS_ERR(clk))
794 		dev_err(dev, "Cannot get %s clock %u: %ld", type, clkidx,
795 			PTR_ERR(clk));
796 	else
797 		dev_dbg(dev, "clock (%u, %u) is %pC at %lu Hz\n",
798 			clkspec->args[0], clkspec->args[1], clk,
799 			clk_get_rate(clk));
800 	return clk;
801 }
802 
/*
 * Instantiate one core clock described by @core and store it in
 * priv->clks[core->id].  Registration failures are logged but do not abort
 * registration of the remaining clocks.
 */
static void __init
rzg2l_cpg_register_core_clk(const struct cpg_core_clk *core,
			    const struct rzg2l_cpg_info *info,
			    struct rzg2l_cpg_priv *priv)
{
	struct clk *clk = ERR_PTR(-EOPNOTSUPP), *parent;
	struct device *dev = priv->dev;
	unsigned int id = core->id, div = core->div;
	const char *parent_name;

	WARN_DEBUG(id >= priv->num_core_clks);
	WARN_DEBUG(PTR_ERR(priv->clks[id]) != -ENOENT);

	if (!core->name) {
		/* Skip NULLified clock */
		return;
	}

	/* Dispatch on the clock type to the matching registration helper */
	switch (core->type) {
	case CLK_TYPE_IN:
		clk = of_clk_get_by_name(priv->dev->of_node, core->name);
		break;
	case CLK_TYPE_FF:
		WARN_DEBUG(core->parent >= priv->num_core_clks);
		parent = priv->clks[core->parent];
		if (IS_ERR(parent)) {
			clk = parent;
			goto fail;
		}

		parent_name = __clk_get_name(parent);
		clk = clk_register_fixed_factor(NULL, core->name,
						parent_name, CLK_SET_RATE_PARENT,
						core->mult, div);
		break;
	case CLK_TYPE_SAM_PLL:
		clk = rzg2l_cpg_pll_clk_register(core, priv->clks,
						 priv->base, priv);
		break;
	case CLK_TYPE_SIPLL5:
		clk = rzg2l_cpg_sipll5_register(core, priv->clks, priv);
		break;
	case CLK_TYPE_DIV:
		clk = rzg2l_cpg_div_clk_register(core, priv->clks,
						 priv->base, priv);
		break;
	case CLK_TYPE_MUX:
		clk = rzg2l_cpg_mux_clk_register(core, priv->base, priv);
		break;
	case CLK_TYPE_SD_MUX:
		clk = rzg2l_cpg_sd_mux_clk_register(core, priv->base, priv);
		break;
	case CLK_TYPE_PLL5_4_MUX:
		clk = rzg2l_cpg_pll5_4_mux_clk_register(core, priv);
		break;
	case CLK_TYPE_DSI_DIV:
		clk = rzg2l_cpg_dsi_div_clk_register(core, priv->clks, priv);
		break;
	default:
		goto fail;
	}

	if (IS_ERR_OR_NULL(clk))
		goto fail;

	dev_dbg(dev, "Core clock %pC at %lu Hz\n", clk, clk_get_rate(clk));
	priv->clks[id] = clk;
	return;

fail:
	dev_err(dev, "Failed to register %s clock %s: %ld\n", "core",
		core->name, PTR_ERR(clk));
}
876 
/**
 * struct mstp_clock - MSTP gating clock
 *
 * @hw: handle between common and hardware-specific interfaces
 * @off: register offset (0 means the clock has no ON/OFF control)
 * @bit: ON/MON bit
 * @enabled: soft state of the clock, if it is coupled with another clock
 * @priv: CPG/MSTP private data
 * @sibling: pointer to the other coupled clock
 */
struct mstp_clock {
	struct clk_hw hw;
	u16 off;
	u8 bit;
	bool enabled;
	struct rzg2l_cpg_priv *priv;
	struct mstp_clock *sibling;
};

#define to_mod_clock(_hw) container_of(_hw, struct mstp_clock, hw)
897 
/*
 * Set or clear the ON bit of a module clock.  The upper 16 bits of the
 * CLK_ON register are a per-bit write-enable mask, so only this clock's
 * bit is affected.  When enabling, and the SoC provides clock monitor
 * registers, poll the MON bit until the clock is reported as supplied.
 */
static int rzg2l_mod_clock_endisable(struct clk_hw *hw, bool enable)
{
	struct mstp_clock *clock = to_mod_clock(hw);
	struct rzg2l_cpg_priv *priv = clock->priv;
	unsigned int reg = clock->off;
	struct device *dev = priv->dev;
	unsigned long flags;
	unsigned int i;
	u32 bitmask = BIT(clock->bit);
	u32 value;

	if (!clock->off) {
		dev_dbg(dev, "%pC does not support ON/OFF\n",  hw->clk);
		return 0;
	}

	dev_dbg(dev, "CLK_ON %u/%pC %s\n", CLK_ON_R(reg), hw->clk,
		enable ? "ON" : "OFF");
	spin_lock_irqsave(&priv->rmw_lock, flags);

	if (enable)
		value = (bitmask << 16) | bitmask;
	else
		value = bitmask << 16;
	writel(value, priv->base + CLK_ON_R(reg));

	spin_unlock_irqrestore(&priv->rmw_lock, flags);

	if (!enable)
		return 0;

	if (!priv->info->has_clk_mon_regs)
		return 0;

	/* Bounded busy-wait for the monitor bit to report the clock as on */
	for (i = 1000; i > 0; --i) {
		if (((readl(priv->base + CLK_MON_R(reg))) & bitmask))
			break;
		cpu_relax();
	}

	if (!i) {
		dev_err(dev, "Failed to enable CLK_ON %p\n",
			priv->base + CLK_ON_R(reg));
		return -ETIMEDOUT;
	}

	return 0;
}
946 
947 static int rzg2l_mod_clock_enable(struct clk_hw *hw)
948 {
949 	struct mstp_clock *clock = to_mod_clock(hw);
950 
951 	if (clock->sibling) {
952 		struct rzg2l_cpg_priv *priv = clock->priv;
953 		unsigned long flags;
954 		bool enabled;
955 
956 		spin_lock_irqsave(&priv->rmw_lock, flags);
957 		enabled = clock->sibling->enabled;
958 		clock->enabled = true;
959 		spin_unlock_irqrestore(&priv->rmw_lock, flags);
960 		if (enabled)
961 			return 0;
962 	}
963 
964 	return rzg2l_mod_clock_endisable(hw, true);
965 }
966 
967 static void rzg2l_mod_clock_disable(struct clk_hw *hw)
968 {
969 	struct mstp_clock *clock = to_mod_clock(hw);
970 
971 	if (clock->sibling) {
972 		struct rzg2l_cpg_priv *priv = clock->priv;
973 		unsigned long flags;
974 		bool enabled;
975 
976 		spin_lock_irqsave(&priv->rmw_lock, flags);
977 		enabled = clock->sibling->enabled;
978 		clock->enabled = false;
979 		spin_unlock_irqrestore(&priv->rmw_lock, flags);
980 		if (enabled)
981 			return;
982 	}
983 
984 	rzg2l_mod_clock_endisable(hw, false);
985 }
986 
987 static int rzg2l_mod_clock_is_enabled(struct clk_hw *hw)
988 {
989 	struct mstp_clock *clock = to_mod_clock(hw);
990 	struct rzg2l_cpg_priv *priv = clock->priv;
991 	u32 bitmask = BIT(clock->bit);
992 	u32 value;
993 
994 	if (!clock->off) {
995 		dev_dbg(priv->dev, "%pC does not support ON/OFF\n",  hw->clk);
996 		return 1;
997 	}
998 
999 	if (clock->sibling)
1000 		return clock->enabled;
1001 
1002 	if (priv->info->has_clk_mon_regs)
1003 		value = readl(priv->base + CLK_MON_R(clock->off));
1004 	else
1005 		value = readl(priv->base + clock->off);
1006 
1007 	return value & bitmask;
1008 }
1009 
/* clk_ops for MSTP module gate clocks */
static const struct clk_ops rzg2l_mod_clock_ops = {
	.enable = rzg2l_mod_clock_enable,
	.disable = rzg2l_mod_clock_disable,
	.is_enabled = rzg2l_mod_clock_is_enabled,
};
1015 
1016 static struct mstp_clock
1017 *rzg2l_mod_clock__get_sibling(struct mstp_clock *clock,
1018 			      struct rzg2l_cpg_priv *priv)
1019 {
1020 	struct clk_hw *hw;
1021 	unsigned int i;
1022 
1023 	for (i = 0; i < priv->num_mod_clks; i++) {
1024 		struct mstp_clock *clk;
1025 
1026 		if (priv->clks[priv->num_core_clks + i] == ERR_PTR(-ENOENT))
1027 			continue;
1028 
1029 		hw = __clk_get_hw(priv->clks[priv->num_core_clks + i]);
1030 		clk = to_mod_clock(hw);
1031 		if (clock->off == clk->off && clock->bit == clk->bit)
1032 			return clk;
1033 	}
1034 
1035 	return NULL;
1036 }
1037 
/*
 * Instantiate one MSTP module clock described by @mod and store it in
 * priv->clks[mod->id].  Critical clocks listed in info->crit_mod_clks get
 * CLK_IS_CRITICAL.  For coupled clocks the sibling sharing the same ON/MON
 * bit is linked both ways.  Failures are logged but do not abort
 * registration of the remaining clocks.
 */
static void __init
rzg2l_cpg_register_mod_clk(const struct rzg2l_mod_clk *mod,
			   const struct rzg2l_cpg_info *info,
			   struct rzg2l_cpg_priv *priv)
{
	struct mstp_clock *clock = NULL;
	struct device *dev = priv->dev;
	unsigned int id = mod->id;
	struct clk_init_data init;
	struct clk *parent, *clk;
	const char *parent_name;
	unsigned int i;

	WARN_DEBUG(id < priv->num_core_clks);
	WARN_DEBUG(id >= priv->num_core_clks + priv->num_mod_clks);
	WARN_DEBUG(mod->parent >= priv->num_core_clks + priv->num_mod_clks);
	WARN_DEBUG(PTR_ERR(priv->clks[id]) != -ENOENT);

	if (!mod->name) {
		/* Skip NULLified clock */
		return;
	}

	parent = priv->clks[mod->parent];
	if (IS_ERR(parent)) {
		clk = parent;
		goto fail;
	}

	clock = devm_kzalloc(dev, sizeof(*clock), GFP_KERNEL);
	if (!clock) {
		clk = ERR_PTR(-ENOMEM);
		goto fail;
	}

	init.name = mod->name;
	init.ops = &rzg2l_mod_clock_ops;
	init.flags = CLK_SET_RATE_PARENT;
	/* Mark clocks the system cannot run without as critical */
	for (i = 0; i < info->num_crit_mod_clks; i++)
		if (id == info->crit_mod_clks[i]) {
			dev_dbg(dev, "CPG %s setting CLK_IS_CRITICAL\n",
				mod->name);
			init.flags |= CLK_IS_CRITICAL;
			break;
		}

	parent_name = __clk_get_name(parent);
	init.parent_names = &parent_name;
	init.num_parents = 1;

	clock->off = mod->off;
	clock->bit = mod->bit;
	clock->priv = priv;
	clock->hw.init = &init;

	clk = clk_register(NULL, &clock->hw);
	if (IS_ERR(clk))
		goto fail;

	dev_dbg(dev, "Module clock %pC at %lu Hz\n", clk, clk_get_rate(clk));
	priv->clks[id] = clk;

	if (mod->is_coupled) {
		struct mstp_clock *sibling;

		/* Seed the soft state from HW, then link the pair (if found) */
		clock->enabled = rzg2l_mod_clock_is_enabled(&clock->hw);
		sibling = rzg2l_mod_clock__get_sibling(clock, priv);
		if (sibling) {
			clock->sibling = sibling;
			sibling->sibling = clock;
		}
	}

	return;

fail:
	dev_err(dev, "Failed to register %s clock %s: %ld\n", "module",
		mod->name, PTR_ERR(clk));
}
1117 
1118 #define rcdev_to_priv(x)	container_of(x, struct rzg2l_cpg_priv, rcdev)
1119 
1120 static int rzg2l_cpg_reset(struct reset_controller_dev *rcdev,
1121 			   unsigned long id)
1122 {
1123 	struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
1124 	const struct rzg2l_cpg_info *info = priv->info;
1125 	unsigned int reg = info->resets[id].off;
1126 	u32 dis = BIT(info->resets[id].bit);
1127 	u32 we = dis << 16;
1128 
1129 	dev_dbg(rcdev->dev, "reset id:%ld offset:0x%x\n", id, CLK_RST_R(reg));
1130 
1131 	/* Reset module */
1132 	writel(we, priv->base + CLK_RST_R(reg));
1133 
1134 	/* Wait for at least one cycle of the RCLK clock (@ ca. 32 kHz) */
1135 	udelay(35);
1136 
1137 	/* Release module from reset state */
1138 	writel(we | dis, priv->base + CLK_RST_R(reg));
1139 
1140 	return 0;
1141 }
1142 
1143 static int rzg2l_cpg_assert(struct reset_controller_dev *rcdev,
1144 			    unsigned long id)
1145 {
1146 	struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
1147 	const struct rzg2l_cpg_info *info = priv->info;
1148 	unsigned int reg = info->resets[id].off;
1149 	u32 value = BIT(info->resets[id].bit) << 16;
1150 
1151 	dev_dbg(rcdev->dev, "assert id:%ld offset:0x%x\n", id, CLK_RST_R(reg));
1152 
1153 	writel(value, priv->base + CLK_RST_R(reg));
1154 	return 0;
1155 }
1156 
1157 static int rzg2l_cpg_deassert(struct reset_controller_dev *rcdev,
1158 			      unsigned long id)
1159 {
1160 	struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
1161 	const struct rzg2l_cpg_info *info = priv->info;
1162 	unsigned int reg = info->resets[id].off;
1163 	u32 dis = BIT(info->resets[id].bit);
1164 	u32 value = (dis << 16) | dis;
1165 
1166 	dev_dbg(rcdev->dev, "deassert id:%ld offset:0x%x\n", id,
1167 		CLK_RST_R(reg));
1168 
1169 	writel(value, priv->base + CLK_RST_R(reg));
1170 	return 0;
1171 }
1172 
1173 static int rzg2l_cpg_status(struct reset_controller_dev *rcdev,
1174 			    unsigned long id)
1175 {
1176 	struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
1177 	const struct rzg2l_cpg_info *info = priv->info;
1178 	unsigned int reg = info->resets[id].off;
1179 	u32 bitmask = BIT(info->resets[id].bit);
1180 	s8 monbit = info->resets[id].monbit;
1181 
1182 	if (info->has_clk_mon_regs) {
1183 		return !(readl(priv->base + CLK_MRST_R(reg)) & bitmask);
1184 	} else if (monbit >= 0) {
1185 		u32 monbitmask = BIT(monbit);
1186 
1187 		return !!(readl(priv->base + CPG_RST_MON) & monbitmask);
1188 	}
1189 	return -ENOTSUPP;
1190 }
1191 
/*
 * Reset controller callbacks handed to the reset framework; status
 * reporting uses either the CLK_MRST mirrors or the shared CPG_RST_MON
 * register depending on the SoC (see rzg2l_cpg_status()).
 */
static const struct reset_control_ops rzg2l_cpg_reset_ops = {
	.reset = rzg2l_cpg_reset,
	.assert = rzg2l_cpg_assert,
	.deassert = rzg2l_cpg_deassert,
	.status = rzg2l_cpg_status,
};
1198 
1199 static int rzg2l_cpg_reset_xlate(struct reset_controller_dev *rcdev,
1200 				 const struct of_phandle_args *reset_spec)
1201 {
1202 	struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
1203 	const struct rzg2l_cpg_info *info = priv->info;
1204 	unsigned int id = reset_spec->args[0];
1205 
1206 	if (id >= rcdev->nr_resets || !info->resets[id].off) {
1207 		dev_err(rcdev->dev, "Invalid reset index %u\n", id);
1208 		return -EINVAL;
1209 	}
1210 
1211 	return id;
1212 }
1213 
1214 static int rzg2l_cpg_reset_controller_register(struct rzg2l_cpg_priv *priv)
1215 {
1216 	priv->rcdev.ops = &rzg2l_cpg_reset_ops;
1217 	priv->rcdev.of_node = priv->dev->of_node;
1218 	priv->rcdev.dev = priv->dev;
1219 	priv->rcdev.of_reset_n_cells = 1;
1220 	priv->rcdev.of_xlate = rzg2l_cpg_reset_xlate;
1221 	priv->rcdev.nr_resets = priv->num_resets;
1222 
1223 	return devm_reset_controller_register(priv->dev, &priv->rcdev);
1224 }
1225 
1226 static bool rzg2l_cpg_is_pm_clk(const struct of_phandle_args *clkspec)
1227 {
1228 	if (clkspec->args_count != 2)
1229 		return false;
1230 
1231 	switch (clkspec->args[0]) {
1232 	case CPG_MOD:
1233 		return true;
1234 
1235 	default:
1236 		return false;
1237 	}
1238 }
1239 
/*
 * Clock-domain attach callback: walk the device's "clocks" DT property
 * and register every CPG_MOD clock with the pm_clk framework so it gets
 * gated along with the device's runtime PM state.
 *
 * Returns 0 on success (including when the device references no PM
 * clocks at all, in which case no pm_clk list is created) or a negative
 * error code after undoing any partial setup.
 */
static int rzg2l_cpg_attach_dev(struct generic_pm_domain *unused, struct device *dev)
{
	struct device_node *np = dev->of_node;
	struct of_phandle_args clkspec;
	bool once = true;
	struct clk *clk;
	int error;
	int i = 0;

	/* of_parse_phandle_with_args() returns non-zero past the last entry */
	while (!of_parse_phandle_with_args(np, "clocks", "#clock-cells", i,
					   &clkspec)) {
		if (rzg2l_cpg_is_pm_clk(&clkspec)) {
			/* Create the pm_clk list lazily, on the first PM clock */
			if (once) {
				once = false;
				error = pm_clk_create(dev);
				if (error) {
					/* drop the node ref taken by the parse */
					of_node_put(clkspec.np);
					goto err;
				}
			}
			clk = of_clk_get_from_provider(&clkspec);
			/* node reference no longer needed once clk is looked up */
			of_node_put(clkspec.np);
			if (IS_ERR(clk)) {
				error = PTR_ERR(clk);
				goto fail_destroy;
			}

			error = pm_clk_add_clk(dev, clk);
			if (error) {
				dev_err(dev, "pm_clk_add_clk failed %d\n",
					error);
				/* pm_clk_add_clk() did not take ownership */
				goto fail_put;
			}
		} else {
			of_node_put(clkspec.np);
		}
		i++;
	}

	return 0;

fail_put:
	clk_put(clk);

fail_destroy:
	pm_clk_destroy(dev);
err:
	return error;
}
1289 
/* Domain detach callback: destroy the pm_clk list only if attach made one. */
static void rzg2l_cpg_detach_dev(struct generic_pm_domain *unused, struct device *dev)
{
	if (pm_clk_no_clocks(dev))
		return;

	pm_clk_destroy(dev);
}
1295 
/* devm action: undo pm_genpd_init() when the CPG device goes away. */
static void rzg2l_cpg_genpd_remove(void *data)
{
	struct generic_pm_domain *genpd = data;

	pm_genpd_remove(genpd);
}
1300 
1301 static int __init rzg2l_cpg_add_clk_domain(struct device *dev)
1302 {
1303 	struct device_node *np = dev->of_node;
1304 	struct generic_pm_domain *genpd;
1305 	int ret;
1306 
1307 	genpd = devm_kzalloc(dev, sizeof(*genpd), GFP_KERNEL);
1308 	if (!genpd)
1309 		return -ENOMEM;
1310 
1311 	genpd->name = np->name;
1312 	genpd->flags = GENPD_FLAG_PM_CLK | GENPD_FLAG_ALWAYS_ON |
1313 		       GENPD_FLAG_ACTIVE_WAKEUP;
1314 	genpd->attach_dev = rzg2l_cpg_attach_dev;
1315 	genpd->detach_dev = rzg2l_cpg_detach_dev;
1316 	ret = pm_genpd_init(genpd, &pm_domain_always_on_gov, false);
1317 	if (ret)
1318 		return ret;
1319 
1320 	ret = devm_add_action_or_reset(dev, rzg2l_cpg_genpd_remove, genpd);
1321 	if (ret)
1322 		return ret;
1323 
1324 	return of_genpd_add_provider_simple(np, genpd);
1325 }
1326 
1327 static int __init rzg2l_cpg_probe(struct platform_device *pdev)
1328 {
1329 	struct device *dev = &pdev->dev;
1330 	struct device_node *np = dev->of_node;
1331 	const struct rzg2l_cpg_info *info;
1332 	struct rzg2l_cpg_priv *priv;
1333 	unsigned int nclks, i;
1334 	struct clk **clks;
1335 	int error;
1336 
1337 	info = of_device_get_match_data(dev);
1338 
1339 	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
1340 	if (!priv)
1341 		return -ENOMEM;
1342 
1343 	priv->dev = dev;
1344 	priv->info = info;
1345 	spin_lock_init(&priv->rmw_lock);
1346 
1347 	priv->base = devm_platform_ioremap_resource(pdev, 0);
1348 	if (IS_ERR(priv->base))
1349 		return PTR_ERR(priv->base);
1350 
1351 	nclks = info->num_total_core_clks + info->num_hw_mod_clks;
1352 	clks = devm_kmalloc_array(dev, nclks, sizeof(*clks), GFP_KERNEL);
1353 	if (!clks)
1354 		return -ENOMEM;
1355 
1356 	dev_set_drvdata(dev, priv);
1357 	priv->clks = clks;
1358 	priv->num_core_clks = info->num_total_core_clks;
1359 	priv->num_mod_clks = info->num_hw_mod_clks;
1360 	priv->num_resets = info->num_resets;
1361 	priv->last_dt_core_clk = info->last_dt_core_clk;
1362 
1363 	for (i = 0; i < nclks; i++)
1364 		clks[i] = ERR_PTR(-ENOENT);
1365 
1366 	for (i = 0; i < info->num_core_clks; i++)
1367 		rzg2l_cpg_register_core_clk(&info->core_clks[i], info, priv);
1368 
1369 	for (i = 0; i < info->num_mod_clks; i++)
1370 		rzg2l_cpg_register_mod_clk(&info->mod_clks[i], info, priv);
1371 
1372 	error = of_clk_add_provider(np, rzg2l_cpg_clk_src_twocell_get, priv);
1373 	if (error)
1374 		return error;
1375 
1376 	error = devm_add_action_or_reset(dev, rzg2l_cpg_del_clk_provider, np);
1377 	if (error)
1378 		return error;
1379 
1380 	error = rzg2l_cpg_add_clk_domain(dev);
1381 	if (error)
1382 		return error;
1383 
1384 	error = rzg2l_cpg_reset_controller_register(priv);
1385 	if (error)
1386 		return error;
1387 
1388 	return 0;
1389 }
1390 
/*
 * DT match table; each supported SoC contributes its CPG description,
 * compiled in only when the corresponding CONFIG_CLK_* option is set.
 */
static const struct of_device_id rzg2l_cpg_match[] = {
#ifdef CONFIG_CLK_R9A07G043
	{
		.compatible = "renesas,r9a07g043-cpg",
		.data = &r9a07g043_cpg_info,
	},
#endif
#ifdef CONFIG_CLK_R9A07G044
	{
		.compatible = "renesas,r9a07g044-cpg",
		.data = &r9a07g044_cpg_info,
	},
#endif
#ifdef CONFIG_CLK_R9A07G054
	{
		.compatible = "renesas,r9a07g054-cpg",
		.data = &r9a07g054_cpg_info,
	},
#endif
#ifdef CONFIG_CLK_R9A09G011
	{
		.compatible = "renesas,r9a09g011-cpg",
		.data = &r9a09g011_cpg_info,
	},
#endif
	{ /* sentinel */ }
};
1418 
/*
 * NOTE(review): no .remove callback is provided — presumably because the
 * CPG supplies clocks the whole SoC depends on and must never be unbound;
 * confirm against the related renesas-cpg-mssr driver's convention.
 */
static struct platform_driver rzg2l_cpg_driver = {
	.driver		= {
		.name	= "rzg2l-cpg",
		.of_match_table = rzg2l_cpg_match,
	},
};
1425 
/*
 * platform_driver_probe() is used (rather than platform_driver_register())
 * because the probe routine is __init and is discarded after boot.
 */
static int __init rzg2l_cpg_init(void)
{
	return platform_driver_probe(&rzg2l_cpg_driver, rzg2l_cpg_probe);
}

/* Clocks must be available early for the rest of the system: subsys level */
subsys_initcall(rzg2l_cpg_init);

MODULE_DESCRIPTION("Renesas RZ/G2L CPG Driver");
MODULE_LICENSE("GPL v2");
1435