// SPDX-License-Identifier: GPL-2.0+
/*
 * Marvell Armada 37xx SoC Peripheral clocks
 *
 * Copyright (C) 2016 Marvell
 *
 * Gregory CLEMENT <gregory.clement@free-electrons.com>
 *
 * Most of the peripheral clocks can be modelled like this:
 *             _____    _______    _______
 * TBG-A-P  --|     |  |       |  |       |   ______
 * TBG-B-P  --| Mux |--| /div1 |--| /div2 |--| Gate |--> perip_clk
 * TBG-A-S  --|     |  |       |  |       |  |______|
 * TBG-B-S  --|_____|  |_______|  |_______|
 *
 * However, some clocks may use only one or two of these blocks, or
 * use the xtal clock as parent.
 */
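/*
 * For instance (illustrative numbers only): with a TBG parent running
 * at 1000 MHz, div1 = 2 and div2 = 5, the resulting peripheral clock
 * is 1000 / (2 * 5) = 100 MHz when the gate is enabled.
 */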

#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>

#define TBG_SEL		0x0
#define DIV_SEL0	0x4
#define DIV_SEL1	0x8
#define DIV_SEL2	0xC
#define CLK_SEL		0x10
#define CLK_DIS		0x14

#define  ARMADA_37XX_DVFS_LOAD_1 1
#define LOAD_LEVEL_NR	4

#define ARMADA_37XX_NB_L0L1	0x18
#define ARMADA_37XX_NB_L2L3	0x1C
#define		ARMADA_37XX_NB_TBG_DIV_OFF	13
#define		ARMADA_37XX_NB_TBG_DIV_MASK	0x7
#define		ARMADA_37XX_NB_CLK_SEL_OFF	11
#define		ARMADA_37XX_NB_CLK_SEL_MASK	0x1
#define		ARMADA_37XX_NB_TBG_SEL_OFF	9
#define		ARMADA_37XX_NB_TBG_SEL_MASK	0x3
#define		ARMADA_37XX_NB_CONFIG_SHIFT	16
#define ARMADA_37XX_NB_DYN_MOD	0x24
#define		ARMADA_37XX_NB_DFS_EN	31
#define ARMADA_37XX_NB_CPU_LOAD	0x30
#define		ARMADA_37XX_NB_CPU_LOAD_MASK	0x3
#define		ARMADA_37XX_DVFS_LOAD_0		0
#define		ARMADA_37XX_DVFS_LOAD_1		1
#define		ARMADA_37XX_DVFS_LOAD_2		2
#define		ARMADA_37XX_DVFS_LOAD_3		3

struct clk_periph_driver_data {
	struct clk_hw_onecell_data *hw_data;
	spinlock_t lock;
	void __iomem *reg;

	/* Storage registers for suspend/resume operations */
	u32 tbg_sel;
	u32 div_sel0;
	u32 div_sel1;
	u32 div_sel2;
	u32 clk_sel;
	u32 clk_dis;
};

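/*
 * struct clk_double_div - a rate clock made of two cascaded 3-bit
 * dividers, possibly located in two different registers.
 * @hw: handle between common and hardware-specific interfaces
 * @reg1: register holding the first divider field
 * @shift1: bit offset of the first divider field in @reg1
 * @reg2: register holding the second divider field
 * @shift2: bit offset of the second divider field in @reg2
 */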
struct clk_double_div {
	struct clk_hw hw;
	void __iomem *reg1;
	u8 shift1;
	void __iomem *reg2;
	u8 shift2;
};

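/*
 * struct clk_pm_cpu - CPU clock, driven either directly through the
 * mux/divider registers or, when DVFS is enabled, through the North
 * Bridge PM load-level registers.
 * @hw: handle between common and hardware-specific interfaces
 * @reg_mux: register holding the TBG selection field
 * @shift_mux: bit offset of the TBG selection field
 * @mask_mux: mask of the TBG selection field
 * @reg_div: register holding the divider field
 * @shift_div: bit offset of the divider field
 * @nb_pm_base: regmap of the North Bridge PM registers
 */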
struct clk_pm_cpu {
	struct clk_hw hw;
	void __iomem *reg_mux;
	u8 shift_mux;
	u32 mask_mux;
	void __iomem *reg_div;
	u8 shift_div;
	struct regmap *nb_pm_base;
};

#define to_clk_double_div(_hw) container_of(_hw, struct clk_double_div, hw)
#define to_clk_pm_cpu(_hw) container_of(_hw, struct clk_pm_cpu, hw)

struct clk_periph_data {
	const char *name;
	const char * const *parent_names;
	int num_parents;
	struct clk_hw *mux_hw;
	struct clk_hw *rate_hw;
	struct clk_hw *gate_hw;
	struct clk_hw *muxrate_hw;
	bool is_double_div;
};

static const struct clk_div_table clk_table6[] = {
	{ .val = 1, .div = 1, },
	{ .val = 2, .div = 2, },
	{ .val = 3, .div = 3, },
	{ .val = 4, .div = 4, },
	{ .val = 5, .div = 5, },
	{ .val = 6, .div = 6, },
	{ .val = 0, .div = 0, }, /* last entry */
};

static const struct clk_div_table clk_table1[] = {
	{ .val = 0, .div = 1, },
	{ .val = 1, .div = 2, },
	{ .val = 0, .div = 0, }, /* last entry */
};

static const struct clk_div_table clk_table2[] = {
	{ .val = 0, .div = 2, },
	{ .val = 1, .div = 4, },
	{ .val = 0, .div = 0, }, /* last entry */
};

static const struct clk_ops clk_double_div_ops;
static const struct clk_ops clk_pm_cpu_ops;

#define PERIPH_GATE(_name, _bit)		\
struct clk_gate gate_##_name = {		\
	.reg = (void *)CLK_DIS,			\
	.bit_idx = _bit,			\
	.hw.init = &(struct clk_init_data){	\
		.ops = &clk_gate_ops,		\
	}					\
};

#define PERIPH_MUX(_name, _shift)		\
struct clk_mux mux_##_name = {			\
	.reg = (void *)TBG_SEL,			\
	.shift = _shift,			\
	.mask = 3,				\
	.hw.init = &(struct clk_init_data){	\
		.ops = &clk_mux_ro_ops,		\
	}					\
};

#define PERIPH_DOUBLEDIV(_name, _reg1, _reg2, _shift1, _shift2)	\
struct clk_double_div rate_##_name = {		\
	.reg1 = (void *)_reg1,			\
	.reg2 = (void *)_reg2,			\
	.shift1 = _shift1,			\
	.shift2 = _shift2,			\
	.hw.init = &(struct clk_init_data){	\
		.ops = &clk_double_div_ops,	\
	}					\
};

#define PERIPH_DIV(_name, _reg, _shift, _table)	\
struct clk_divider rate_##_name = {		\
	.reg = (void *)_reg,			\
	.table = _table,			\
	.shift = _shift,			\
	.hw.init = &(struct clk_init_data){	\
		.ops = &clk_divider_ro_ops,	\
	}					\
};

#define PERIPH_PM_CPU(_name, _shift1, _reg, _shift2)	\
struct clk_pm_cpu muxrate_##_name = {		\
	.reg_mux = (void *)TBG_SEL,		\
	.mask_mux = 3,				\
	.shift_mux = _shift1,			\
	.reg_div = (void *)_reg,		\
	.shift_div = _shift2,			\
	.hw.init = &(struct clk_init_data){	\
		.ops = &clk_pm_cpu_ops,		\
	}					\
};

#define PERIPH_CLK_FULL_DD(_name, _bit, _shift, _reg1, _reg2, _shift1, _shift2)\
static PERIPH_GATE(_name, _bit);			    \
static PERIPH_MUX(_name, _shift);			    \
static PERIPH_DOUBLEDIV(_name, _reg1, _reg2, _shift1, _shift2);

#define PERIPH_CLK_FULL(_name, _bit, _shift, _reg, _shift1, _table)	\
static PERIPH_GATE(_name, _bit);			    \
static PERIPH_MUX(_name, _shift);			    \
static PERIPH_DIV(_name, _reg, _shift1, _table);

#define PERIPH_CLK_GATE_DIV(_name, _bit, _reg, _shift, _table)	\
static PERIPH_GATE(_name, _bit);			\
static PERIPH_DIV(_name, _reg, _shift, _table);

#define PERIPH_CLK_MUX_DD(_name, _shift, _reg1, _reg2, _shift1, _shift2)\
static PERIPH_MUX(_name, _shift);			    \
static PERIPH_DOUBLEDIV(_name, _reg1, _reg2, _shift1, _shift2);

#define REF_CLK_FULL(_name)				\
	{ .name = #_name,				\
	  .parent_names = (const char *[]){ "TBG-A-P",	\
	      "TBG-B-P", "TBG-A-S", "TBG-B-S"},		\
	  .num_parents = 4,				\
	  .mux_hw = &mux_##_name.hw,			\
	  .gate_hw = &gate_##_name.hw,			\
	  .rate_hw = &rate_##_name.hw,			\
	}

#define REF_CLK_FULL_DD(_name)				\
	{ .name = #_name,				\
	  .parent_names = (const char *[]){ "TBG-A-P",	\
	      "TBG-B-P", "TBG-A-S", "TBG-B-S"},		\
	  .num_parents = 4,				\
	  .mux_hw = &mux_##_name.hw,			\
	  .gate_hw = &gate_##_name.hw,			\
	  .rate_hw = &rate_##_name.hw,			\
	  .is_double_div = true,			\
	}

#define REF_CLK_GATE(_name, _parent_name)			\
	{ .name = #_name,					\
	  .parent_names = (const char *[]){ _parent_name},	\
	  .num_parents = 1,					\
	  .gate_hw = &gate_##_name.hw,				\
	}

#define REF_CLK_GATE_DIV(_name, _parent_name)			\
	{ .name = #_name,					\
	  .parent_names = (const char *[]){ _parent_name},	\
	  .num_parents = 1,					\
	  .gate_hw = &gate_##_name.hw,				\
	  .rate_hw = &rate_##_name.hw,				\
	}

#define REF_CLK_PM_CPU(_name)				\
	{ .name = #_name,				\
	  .parent_names = (const char *[]){ "TBG-A-P",	\
	      "TBG-B-P", "TBG-A-S", "TBG-B-S"},		\
	  .num_parents = 4,				\
	  .muxrate_hw = &muxrate_##_name.hw,		\
	}

#define REF_CLK_MUX_DD(_name)				\
	{ .name = #_name,				\
	  .parent_names = (const char *[]){ "TBG-A-P",	\
	      "TBG-B-P", "TBG-A-S", "TBG-B-S"},		\
	  .num_parents = 4,				\
	  .mux_hw = &mux_##_name.hw,			\
	  .rate_hw = &rate_##_name.hw,			\
	  .is_double_div = true,			\
	}
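/*
 * Illustration of how the building blocks above combine (using the mmc
 * clock defined below as an example): PERIPH_CLK_FULL_DD(mmc, 2, 0,
 * DIV_SEL2, DIV_SEL2, 16, 13) instantiates static gate_mmc, mux_mmc
 * and rate_mmc objects, and REF_CLK_FULL_DD(mmc) then groups their
 * clk_hw pointers into one clk_periph_data entry that the probe code
 * turns into a composite clock.
 */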

/* NB periph clocks */
PERIPH_CLK_FULL_DD(mmc, 2, 0, DIV_SEL2, DIV_SEL2, 16, 13);
PERIPH_CLK_FULL_DD(sata_host, 3, 2, DIV_SEL2, DIV_SEL2, 10, 7);
PERIPH_CLK_FULL_DD(sec_at, 6, 4, DIV_SEL1, DIV_SEL1, 3, 0);
PERIPH_CLK_FULL_DD(sec_dap, 7, 6, DIV_SEL1, DIV_SEL1, 9, 6);
PERIPH_CLK_FULL_DD(tscem, 8, 8, DIV_SEL1, DIV_SEL1, 15, 12);
PERIPH_CLK_FULL(tscem_tmx, 10, 10, DIV_SEL1, 18, clk_table6);
static PERIPH_GATE(avs, 11);
PERIPH_CLK_FULL_DD(pwm, 13, 14, DIV_SEL0, DIV_SEL0, 3, 0);
PERIPH_CLK_FULL_DD(sqf, 12, 12, DIV_SEL1, DIV_SEL1, 27, 24);
static PERIPH_GATE(i2c_2, 16);
static PERIPH_GATE(i2c_1, 17);
PERIPH_CLK_GATE_DIV(ddr_phy, 19, DIV_SEL0, 18, clk_table2);
PERIPH_CLK_FULL_DD(ddr_fclk, 21, 16, DIV_SEL0, DIV_SEL0, 15, 12);
PERIPH_CLK_FULL(trace, 22, 18, DIV_SEL0, 20, clk_table6);
PERIPH_CLK_FULL(counter, 23, 20, DIV_SEL0, 23, clk_table6);
PERIPH_CLK_FULL_DD(eip97, 24, 24, DIV_SEL2, DIV_SEL2, 22, 19);
static PERIPH_PM_CPU(cpu, 22, DIV_SEL0, 28);

static struct clk_periph_data data_nb[] = {
	REF_CLK_FULL_DD(mmc),
	REF_CLK_FULL_DD(sata_host),
	REF_CLK_FULL_DD(sec_at),
	REF_CLK_FULL_DD(sec_dap),
	REF_CLK_FULL_DD(tscem),
	REF_CLK_FULL(tscem_tmx),
	REF_CLK_GATE(avs, "xtal"),
	REF_CLK_FULL_DD(sqf),
	REF_CLK_FULL_DD(pwm),
	REF_CLK_GATE(i2c_2, "xtal"),
	REF_CLK_GATE(i2c_1, "xtal"),
	REF_CLK_GATE_DIV(ddr_phy, "TBG-A-S"),
	REF_CLK_FULL_DD(ddr_fclk),
	REF_CLK_FULL(trace),
	REF_CLK_FULL(counter),
	REF_CLK_FULL_DD(eip97),
	REF_CLK_PM_CPU(cpu),
	{ },
};

/* SB periph clocks */
PERIPH_CLK_MUX_DD(gbe_50, 6, DIV_SEL2, DIV_SEL2, 6, 9);
PERIPH_CLK_MUX_DD(gbe_core, 8, DIV_SEL1, DIV_SEL1, 18, 21);
PERIPH_CLK_MUX_DD(gbe_125, 10, DIV_SEL1, DIV_SEL1, 6, 9);
static PERIPH_GATE(gbe1_50, 0);
static PERIPH_GATE(gbe0_50, 1);
static PERIPH_GATE(gbe1_125, 2);
static PERIPH_GATE(gbe0_125, 3);
PERIPH_CLK_GATE_DIV(gbe1_core, 4, DIV_SEL1, 13, clk_table1);
PERIPH_CLK_GATE_DIV(gbe0_core, 5, DIV_SEL1, 14, clk_table1);
PERIPH_CLK_GATE_DIV(gbe_bm, 12, DIV_SEL1, 0, clk_table1);
PERIPH_CLK_FULL_DD(sdio, 11, 14, DIV_SEL0, DIV_SEL0, 3, 6);
PERIPH_CLK_FULL_DD(usb32_usb2_sys, 16, 16, DIV_SEL0, DIV_SEL0, 9, 12);
PERIPH_CLK_FULL_DD(usb32_ss_sys, 17, 18, DIV_SEL0, DIV_SEL0, 15, 18);
static PERIPH_GATE(pcie, 14);

static struct clk_periph_data data_sb[] = {
	REF_CLK_MUX_DD(gbe_50),
	REF_CLK_MUX_DD(gbe_core),
	REF_CLK_MUX_DD(gbe_125),
	REF_CLK_GATE(gbe1_50, "gbe_50"),
	REF_CLK_GATE(gbe0_50, "gbe_50"),
	REF_CLK_GATE(gbe1_125, "gbe_125"),
	REF_CLK_GATE(gbe0_125, "gbe_125"),
	REF_CLK_GATE_DIV(gbe1_core, "gbe_core"),
	REF_CLK_GATE_DIV(gbe0_core, "gbe_core"),
	REF_CLK_GATE_DIV(gbe_bm, "gbe_core"),
	REF_CLK_FULL_DD(sdio),
	REF_CLK_FULL_DD(usb32_usb2_sys),
	REF_CLK_FULL_DD(usb32_ss_sys),
	REF_CLK_GATE(pcie, "gbe_core"),
	{ },
};
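/*
 * Note: the position of each entry in data_nb[] / data_sb[] is the
 * clock index exposed to device-tree consumers, since the provider is
 * registered with of_clk_hw_onecell_get() in the probe function below.
 */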

static unsigned int get_div(void __iomem *reg, int shift)
{
	u32 val;

	val = (readl(reg) >> shift) & 0x7;
	if (val > 6)
		return 0;
	return val;
}

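/*
 * The divider fields handled here are 3 bits wide and encode the
 * divide ratio directly (1..6); the remaining encodings are not valid
 * ratios. As an illustration (numbers only): with an 800 MHz parent, a
 * first divider of 2 and a second divider of 4, the reported rate is
 * DIV_ROUND_UP(800 MHz, 2 * 4) = 100 MHz.
 */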
static unsigned long clk_double_div_recalc_rate(struct clk_hw *hw,
						unsigned long parent_rate)
{
	struct clk_double_div *double_div = to_clk_double_div(hw);
	unsigned int div;

	div = get_div(double_div->reg1, double_div->shift1);
	div *= get_div(double_div->reg2, double_div->shift2);

	return DIV_ROUND_UP_ULL((u64)parent_rate, div);
}

static const struct clk_ops clk_double_div_ops = {
	.recalc_rate = clk_double_div_recalc_rate,
};

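/*
 * The DVFS configuration of the four load levels is packed into two
 * 32-bit registers: L0 and L1 share ARMADA_37XX_NB_L0L1, L2 and L3
 * share ARMADA_37XX_NB_L2L3, with L0/L2 in the upper 16 bits
 * (ARMADA_37XX_NB_CONFIG_SHIFT) and L1/L3 in the lower 16 bits. For
 * example, the L0 TBG divider is read from NB_L0L1 at bit offset
 * 13 + 16 = 29.
 */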
static void armada_3700_pm_dvfs_update_regs(unsigned int load_level,
					    unsigned int *reg,
					    unsigned int *offset)
{
	if (load_level <= ARMADA_37XX_DVFS_LOAD_1)
		*reg = ARMADA_37XX_NB_L0L1;
	else
		*reg = ARMADA_37XX_NB_L2L3;

	if (load_level == ARMADA_37XX_DVFS_LOAD_0 ||
	    load_level == ARMADA_37XX_DVFS_LOAD_2)
		*offset += ARMADA_37XX_NB_CONFIG_SHIFT;
}

static bool armada_3700_pm_dvfs_is_enabled(struct regmap *base)
{
	unsigned int val, reg = ARMADA_37XX_NB_DYN_MOD;

	if (IS_ERR(base))
		return false;

	regmap_read(base, reg, &val);

	return !!(val & BIT(ARMADA_37XX_NB_DFS_EN));
}

static unsigned int armada_3700_pm_dvfs_get_cpu_div(struct regmap *base)
{
	unsigned int reg = ARMADA_37XX_NB_CPU_LOAD;
	unsigned int offset = ARMADA_37XX_NB_TBG_DIV_OFF;
	unsigned int load_level, div;

	/*
	 * This function is always called after
	 * armada_3700_pm_dvfs_is_enabled(), so there is no need to
	 * check again that the base regmap is valid.
	 */
	regmap_read(base, reg, &load_level);

	/*
	 * The register, and the offset inside that register, used to
	 * read the current divider depend on the load level.
	 */
	load_level &= ARMADA_37XX_NB_CPU_LOAD_MASK;
	armada_3700_pm_dvfs_update_regs(load_level, &reg, &offset);

	regmap_read(base, reg, &div);

	return (div >> offset) & ARMADA_37XX_NB_TBG_DIV_MASK;
}

static unsigned int armada_3700_pm_dvfs_get_cpu_parent(struct regmap *base)
{
	unsigned int reg = ARMADA_37XX_NB_CPU_LOAD;
	unsigned int offset = ARMADA_37XX_NB_TBG_SEL_OFF;
	unsigned int load_level, sel;

	/*
	 * This function is always called after
	 * armada_3700_pm_dvfs_is_enabled(), so there is no need to
	 * check again that the base regmap is valid.
	 */
	regmap_read(base, reg, &load_level);

	/*
	 * The register, and the offset inside that register, used to
	 * read the current parent selection depend on the load level.
	 */
	load_level &= ARMADA_37XX_NB_CPU_LOAD_MASK;
	armada_3700_pm_dvfs_update_regs(load_level, &reg, &offset);

	regmap_read(base, reg, &sel);

	return (sel >> offset) & ARMADA_37XX_NB_TBG_SEL_MASK;
}

static u8 clk_pm_cpu_get_parent(struct clk_hw *hw)
{
	struct clk_pm_cpu *pm_cpu = to_clk_pm_cpu(hw);
	u32 val;

	if (armada_3700_pm_dvfs_is_enabled(pm_cpu->nb_pm_base)) {
		val = armada_3700_pm_dvfs_get_cpu_parent(pm_cpu->nb_pm_base);
	} else {
		val = readl(pm_cpu->reg_mux) >> pm_cpu->shift_mux;
		val &= pm_cpu->mask_mux;
	}

	return val;
}

static int clk_pm_cpu_set_parent(struct clk_hw *hw, u8 index)
{
	struct clk_pm_cpu *pm_cpu = to_clk_pm_cpu(hw);
	struct regmap *base = pm_cpu->nb_pm_base;
	int load_level;

	/*
	 * The clock parent is only changed when DVFS is available but
	 * not enabled.
	 */
	if (IS_ERR(base) || armada_3700_pm_dvfs_is_enabled(base))
		return -EINVAL;

	/* Set the parent clock for all the load levels */
	for (load_level = 0; load_level < LOAD_LEVEL_NR; load_level++) {
		unsigned int reg, mask, val,
			offset = ARMADA_37XX_NB_TBG_SEL_OFF;

		armada_3700_pm_dvfs_update_regs(load_level, &reg, &offset);

		val = index << offset;
		mask = ARMADA_37XX_NB_TBG_SEL_MASK << offset;
		regmap_update_bits(base, reg, mask, val);
	}
	return 0;
}

static unsigned long clk_pm_cpu_recalc_rate(struct clk_hw *hw,
					    unsigned long parent_rate)
{
	struct clk_pm_cpu *pm_cpu = to_clk_pm_cpu(hw);
	unsigned int div;

	if (armada_3700_pm_dvfs_is_enabled(pm_cpu->nb_pm_base))
		div = armada_3700_pm_dvfs_get_cpu_div(pm_cpu->nb_pm_base);
	else
		div = get_div(pm_cpu->reg_div, pm_cpu->shift_div);
	return DIV_ROUND_UP_ULL((u64)parent_rate, div);
}

static long clk_pm_cpu_round_rate(struct clk_hw *hw, unsigned long rate,
				  unsigned long *parent_rate)
{
	struct clk_pm_cpu *pm_cpu = to_clk_pm_cpu(hw);
	struct regmap *base = pm_cpu->nb_pm_base;
	unsigned int div = *parent_rate / rate;
	unsigned int load_level;

	/* only available when DVFS is enabled */
	if (!armada_3700_pm_dvfs_is_enabled(base))
		return -EINVAL;

	for (load_level = 0; load_level < LOAD_LEVEL_NR; load_level++) {
		unsigned int reg, val, offset = ARMADA_37XX_NB_TBG_DIV_OFF;

		armada_3700_pm_dvfs_update_regs(load_level, &reg, &offset);

		regmap_read(base, reg, &val);

		val >>= offset;
		val &= ARMADA_37XX_NB_TBG_DIV_MASK;
		if (val == div)
			/*
			 * We found a load level matching the target
			 * divider, so this is the rate we can
			 * actually provide.
			 */
			return *parent_rate / div;
	}

	/* We didn't find any valid divider */
	return -EINVAL;
}

/*
 * Switching the CPU from the L2 or L3 frequencies (300 and 200 MHz
 * respectively) to the L0 frequency (1.2 GHz) requires a significant
 * amount of time to let VDD stabilize to the appropriate
 * voltage. This amount of time is large enough that it cannot be
 * covered by the hardware countdown register. Due to this, the CPU
 * might start operating at L0 before the voltage is stabilized,
 * leading to CPU stalls.
 *
 * To work around this problem, we prevent switching directly from the
 * L2/L3 frequencies to the L0 frequency, and instead switch to the L1
 * frequency in between. The sequence therefore becomes:
 * 1. First switch from L2/L3 (200/300 MHz) to L1 (600 MHz)
 * 2. Sleep 20 ms to let the VDD voltage stabilize
 * 3. Then switch from L1 (600 MHz) to L0 (1.2 GHz).
 */
static void clk_pm_cpu_set_rate_wa(unsigned long rate, struct regmap *base)
{
	unsigned int cur_level;

	if (rate != 1200 * 1000 * 1000)
		return;

	regmap_read(base, ARMADA_37XX_NB_CPU_LOAD, &cur_level);
	cur_level &= ARMADA_37XX_NB_CPU_LOAD_MASK;
	if (cur_level <= ARMADA_37XX_DVFS_LOAD_1)
		return;

	regmap_update_bits(base, ARMADA_37XX_NB_CPU_LOAD,
			   ARMADA_37XX_NB_CPU_LOAD_MASK,
			   ARMADA_37XX_DVFS_LOAD_1);
	msleep(20);
}

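/*
 * Note that this does not program an arbitrary divider: the rate is
 * changed by switching to the pre-configured load level whose divider
 * matches the requested rate, after applying the intermediate L1 step
 * described above when going back to 1.2 GHz.
 */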
static int clk_pm_cpu_set_rate(struct clk_hw *hw, unsigned long rate,
			       unsigned long parent_rate)
{
	struct clk_pm_cpu *pm_cpu = to_clk_pm_cpu(hw);
	struct regmap *base = pm_cpu->nb_pm_base;
	unsigned int div = parent_rate / rate;
	unsigned int load_level;

	/* only available when DVFS is enabled */
	if (!armada_3700_pm_dvfs_is_enabled(base))
		return -EINVAL;

	for (load_level = 0; load_level < LOAD_LEVEL_NR; load_level++) {
		unsigned int reg, mask, val,
			offset = ARMADA_37XX_NB_TBG_DIV_OFF;

		armada_3700_pm_dvfs_update_regs(load_level, &reg, &offset);

		regmap_read(base, reg, &val);
		val >>= offset;
		val &= ARMADA_37XX_NB_TBG_DIV_MASK;

		if (val == div) {
			/*
			 * We found a load level matching the target
			 * divider, switch to this load level and
			 * return.
			 */
			reg = ARMADA_37XX_NB_CPU_LOAD;
			mask = ARMADA_37XX_NB_CPU_LOAD_MASK;

			clk_pm_cpu_set_rate_wa(rate, base);

			regmap_update_bits(base, reg, mask, load_level);

			return rate;
		}
	}

	/* We didn't find any valid divider */
	return -EINVAL;
}

static const struct clk_ops clk_pm_cpu_ops = {
	.get_parent = clk_pm_cpu_get_parent,
	.set_parent = clk_pm_cpu_set_parent,
	.round_rate = clk_pm_cpu_round_rate,
	.set_rate = clk_pm_cpu_set_rate,
	.recalc_rate = clk_pm_cpu_recalc_rate,
};

static const struct of_device_id armada_3700_periph_clock_of_match[] = {
	{ .compatible = "marvell,armada-3700-periph-clock-nb",
	  .data = data_nb, },
	{ .compatible = "marvell,armada-3700-periph-clock-sb",
	  .data = data_sb, },
	{ }
};

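/*
 * The static PERIPH_* definitions above store register *offsets* in
 * their reg pointers (e.g. TBG_SEL, DIV_SEL0). This helper patches
 * them into absolute addresses by adding the ioremapped base of the
 * clock block before registering the composite clock.
 */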
static int armada_3700_add_composite_clk(const struct clk_periph_data *data,
					 void __iomem *reg, spinlock_t *lock,
					 struct device *dev, struct clk_hw **hw)
{
	const struct clk_ops *mux_ops = NULL, *gate_ops = NULL,
		*rate_ops = NULL;
	struct clk_hw *mux_hw = NULL, *gate_hw = NULL, *rate_hw = NULL;

	if (data->mux_hw) {
		struct clk_mux *mux;

		mux_hw = data->mux_hw;
		mux = to_clk_mux(mux_hw);
		mux->lock = lock;
		mux_ops = mux_hw->init->ops;
		mux->reg = reg + (u64)mux->reg;
	}

	if (data->gate_hw) {
		struct clk_gate *gate;

		gate_hw = data->gate_hw;
		gate = to_clk_gate(gate_hw);
		gate->lock = lock;
		gate_ops = gate_hw->init->ops;
		gate->reg = reg + (u64)gate->reg;
		gate->flags = CLK_GATE_SET_TO_DISABLE;
	}

	if (data->rate_hw) {
		rate_hw = data->rate_hw;
		rate_ops = rate_hw->init->ops;
		if (data->is_double_div) {
			struct clk_double_div *rate;

			rate = to_clk_double_div(rate_hw);
			rate->reg1 = reg + (u64)rate->reg1;
			rate->reg2 = reg + (u64)rate->reg2;
		} else {
			struct clk_divider *rate = to_clk_divider(rate_hw);
			const struct clk_div_table *clkt;
			int table_size = 0;

			rate->reg = reg + (u64)rate->reg;
			for (clkt = rate->table; clkt->div; clkt++)
				table_size++;
			rate->width = order_base_2(table_size);
			rate->lock = lock;
		}
	}

	if (data->muxrate_hw) {
		struct clk_pm_cpu *pmcpu_clk;
		struct clk_hw *muxrate_hw = data->muxrate_hw;
		struct regmap *map;

		pmcpu_clk = to_clk_pm_cpu(muxrate_hw);
		pmcpu_clk->reg_mux = reg + (u64)pmcpu_clk->reg_mux;
		pmcpu_clk->reg_div = reg + (u64)pmcpu_clk->reg_div;

		mux_hw = muxrate_hw;
		rate_hw = muxrate_hw;
		mux_ops = muxrate_hw->init->ops;
		rate_ops = muxrate_hw->init->ops;

		map = syscon_regmap_lookup_by_compatible(
				"marvell,armada-3700-nb-pm");
		pmcpu_clk->nb_pm_base = map;
	}

	*hw = clk_hw_register_composite(dev, data->name, data->parent_names,
					data->num_parents, mux_hw,
					mux_ops, rate_hw, rate_ops,
					gate_hw, gate_ops, CLK_IGNORE_UNUSED);

	return PTR_ERR_OR_ZERO(*hw);
}

static int __maybe_unused armada_3700_periph_clock_suspend(struct device *dev)
{
	struct clk_periph_driver_data *data = dev_get_drvdata(dev);

	data->tbg_sel = readl(data->reg + TBG_SEL);
	data->div_sel0 = readl(data->reg + DIV_SEL0);
	data->div_sel1 = readl(data->reg + DIV_SEL1);
	data->div_sel2 = readl(data->reg + DIV_SEL2);
	data->clk_sel = readl(data->reg + CLK_SEL);
	data->clk_dis = readl(data->reg + CLK_DIS);

	return 0;
}

static int __maybe_unused armada_3700_periph_clock_resume(struct device *dev)
{
	struct clk_periph_driver_data *data = dev_get_drvdata(dev);

	/* Follow the same order as the Cortex-M3 (ATF code) does */
	writel(data->clk_dis, data->reg + CLK_DIS);
	writel(data->div_sel0, data->reg + DIV_SEL0);
	writel(data->div_sel1, data->reg + DIV_SEL1);
	writel(data->div_sel2, data->reg + DIV_SEL2);
	writel(data->tbg_sel, data->reg + TBG_SEL);
	writel(data->clk_sel, data->reg + CLK_SEL);

	return 0;
}

static const struct dev_pm_ops armada_3700_periph_clock_pm_ops = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(armada_3700_periph_clock_suspend,
				      armada_3700_periph_clock_resume)
};

static int armada_3700_periph_clock_probe(struct platform_device *pdev)
{
	struct clk_periph_driver_data *driver_data;
	struct device_node *np = pdev->dev.of_node;
	const struct clk_periph_data *data;
	struct device *dev = &pdev->dev;
	int num_periph = 0, i, ret;
	struct resource *res;

	data = of_device_get_match_data(dev);
	if (!data)
		return -ENODEV;

	while (data[num_periph].name)
		num_periph++;

	driver_data = devm_kzalloc(dev, sizeof(*driver_data), GFP_KERNEL);
	if (!driver_data)
		return -ENOMEM;

	driver_data->hw_data = devm_kzalloc(dev,
					    struct_size(driver_data->hw_data,
							hws, num_periph),
					    GFP_KERNEL);
	if (!driver_data->hw_data)
		return -ENOMEM;
	driver_data->hw_data->num = num_periph;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	driver_data->reg = devm_ioremap_resource(dev, res);
	if (IS_ERR(driver_data->reg))
		return PTR_ERR(driver_data->reg);

	spin_lock_init(&driver_data->lock);

	for (i = 0; i < num_periph; i++) {
		struct clk_hw **hw = &driver_data->hw_data->hws[i];

		if (armada_3700_add_composite_clk(&data[i], driver_data->reg,
						  &driver_data->lock, dev, hw))
			dev_err(dev, "Can't register periph clock %s\n",
				data[i].name);
	}

	ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get,
				     driver_data->hw_data);
	if (ret) {
		for (i = 0; i < num_periph; i++)
			clk_hw_unregister(driver_data->hw_data->hws[i]);
		return ret;
	}

	platform_set_drvdata(pdev, driver_data);

	return 0;
}

static int armada_3700_periph_clock_remove(struct platform_device *pdev)
{
	struct clk_periph_driver_data *data = platform_get_drvdata(pdev);
	struct clk_hw_onecell_data *hw_data = data->hw_data;
	int i;

	of_clk_del_provider(pdev->dev.of_node);

	for (i = 0; i < hw_data->num; i++)
		clk_hw_unregister(hw_data->hws[i]);

	return 0;
}

static struct platform_driver armada_3700_periph_clock_driver = {
	.probe = armada_3700_periph_clock_probe,
	.remove = armada_3700_periph_clock_remove,
	.driver		= {
		.name	= "marvell-armada-3700-periph-clock",
		.of_match_table = armada_3700_periph_clock_of_match,
		.pm	= &armada_3700_periph_clock_pm_ops,
	},
};

builtin_platform_driver(armada_3700_periph_clock_driver);