// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright 2021 NXP
 *
 * Peng Fan <peng.fan@nxp.com>
 */

#include <linux/clk-provider.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/slab.h>

#include "clk.h"

#define TIMEOUT_US	500U

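/*
 * Layout of an i.MX93 CCM clock root slice: the divider sits in bits 7:0
 * of the control register, the mux in bits 9:8 and the OFF (gate) bit at
 * bit 24.  The BUSY flag lives in the STAT register at offset 0x4; access
 * control (TrustZone non-secure bit plus a per-domain white list) is read
 * from the AUTHEN register at offset 0x30.
 */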
#define CCM_DIV_SHIFT	0
#define CCM_DIV_WIDTH	8
#define CCM_MUX_SHIFT	8
#define CCM_MUX_MASK	3
#define CCM_OFF_SHIFT	24
#define CCM_BUSY_SHIFT	28

#define STAT_OFFSET	0x4
#define AUTHEN_OFFSET	0x30
#define TZ_NS_SHIFT	9
#define TZ_NS_MASK	BIT(9)

#define WHITE_LIST_SHIFT	16

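/*
 * Poll the slice STAT register until the BUSY flag clears, giving up after
 * TIMEOUT_US.  Callers hold the CCM spinlock, hence the atomic poll variant.
 */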
static int imx93_clk_composite_wait_ready(struct clk_hw *hw, void __iomem *reg)
{
	int ret;
	u32 val;

	ret = readl_poll_timeout_atomic(reg + STAT_OFFSET, val, !(val & BIT(CCM_BUSY_SHIFT)),
					0, TIMEOUT_US);
	if (ret)
		pr_err("Slice[%s] busy timeout\n", clk_hw_get_name(hw));

	return ret;
}

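/*
 * The gate bit is an OFF bit (see CLK_GATE_SET_TO_DISABLE at registration
 * time): clearing it enables the clock, setting it disables it.  Every
 * toggle is followed by a wait for the slice to leave the busy state.
 */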
static void imx93_clk_composite_gate_endisable(struct clk_hw *hw, int enable)
{
	struct clk_gate *gate = to_clk_gate(hw);
	unsigned long flags;
	u32 reg;

	if (gate->lock)
		spin_lock_irqsave(gate->lock, flags);

	reg = readl(gate->reg);

	if (enable)
		reg &= ~BIT(gate->bit_idx);
	else
		reg |= BIT(gate->bit_idx);

	writel(reg, gate->reg);

	imx93_clk_composite_wait_ready(hw, gate->reg);

	if (gate->lock)
		spin_unlock_irqrestore(gate->lock, flags);
}

static int imx93_clk_composite_gate_enable(struct clk_hw *hw)
{
	imx93_clk_composite_gate_endisable(hw, 1);

	return 0;
}

static void imx93_clk_composite_gate_disable(struct clk_hw *hw)
{
	/*
	 * Skip disabling the root clock gate if the mcore is booted:
	 * the root clock may still be in use by the mcore.
	 */
	if (mcore_booted)
		return;

	imx93_clk_composite_gate_endisable(hw, 0);
}

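/*
 * .is_enabled can reuse the generic gate helper: reading the OFF bit needs
 * no busy-wait, and CLK_GATE_SET_TO_DISABLE gives the helper the correct
 * (inverted) polarity.
 */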
static const struct clk_ops imx93_clk_composite_gate_ops = {
	.enable = imx93_clk_composite_gate_enable,
	.disable = imx93_clk_composite_gate_disable,
	.is_enabled = clk_gate_is_enabled,
};

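/*
 * The divider read-side ops simply delegate to the generic clk_divider_ops;
 * only set_rate needs custom code, to poll the BUSY flag after writing the
 * new divider value.
 */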
static unsigned long
imx93_clk_composite_divider_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
	return clk_divider_ops.recalc_rate(hw, parent_rate);
}

static long
imx93_clk_composite_divider_round_rate(struct clk_hw *hw, unsigned long rate, unsigned long *prate)
{
	return clk_divider_ops.round_rate(hw, rate, prate);
}

static int
imx93_clk_composite_divider_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
{
	return clk_divider_ops.determine_rate(hw, req);
}

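/* Write the new divider value under the lock, then wait for the slice to settle. */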
static int imx93_clk_composite_divider_set_rate(struct clk_hw *hw, unsigned long rate,
						unsigned long parent_rate)
{
	struct clk_divider *divider = to_clk_divider(hw);
	int value;
	unsigned long flags = 0;
	u32 val;
	int ret;

	value = divider_get_val(rate, parent_rate, divider->table, divider->width, divider->flags);
	if (value < 0)
		return value;

	if (divider->lock)
		spin_lock_irqsave(divider->lock, flags);

	val = readl(divider->reg);
	val &= ~(clk_div_mask(divider->width) << divider->shift);
	val |= (u32)value << divider->shift;
	writel(val, divider->reg);

	ret = imx93_clk_composite_wait_ready(hw, divider->reg);

	if (divider->lock)
		spin_unlock_irqrestore(divider->lock, flags);

	return ret;
}

static const struct clk_ops imx93_clk_composite_divider_ops = {
	.recalc_rate = imx93_clk_composite_divider_recalc_rate,
	.round_rate = imx93_clk_composite_divider_round_rate,
	.determine_rate = imx93_clk_composite_divider_determine_rate,
	.set_rate = imx93_clk_composite_divider_set_rate,
};

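/* Reading the mux selector needs no special handling; reuse the generic op. */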
static u8 imx93_clk_composite_mux_get_parent(struct clk_hw *hw)
{
	return clk_mux_ops.get_parent(hw);
}

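/*
 * As with the divider, a parent switch must not return while the slice is
 * still busy, so poll the BUSY flag after updating the mux field.
 */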
static int imx93_clk_composite_mux_set_parent(struct clk_hw *hw, u8 index)
{
	struct clk_mux *mux = to_clk_mux(hw);
	u32 val = clk_mux_index_to_val(mux->table, mux->flags, index);
	unsigned long flags = 0;
	u32 reg;
	int ret;

	if (mux->lock)
		spin_lock_irqsave(mux->lock, flags);

	reg = readl(mux->reg);
	reg &= ~(mux->mask << mux->shift);
	val = val << mux->shift;
	reg |= val;
	writel(reg, mux->reg);

	ret = imx93_clk_composite_wait_ready(hw, mux->reg);

	if (mux->lock)
		spin_unlock_irqrestore(mux->lock, flags);

	return ret;
}

static int
imx93_clk_composite_mux_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
{
	return clk_mux_ops.determine_rate(hw, req);
}

static const struct clk_ops imx93_clk_composite_mux_ops = {
	.get_parent = imx93_clk_composite_mux_get_parent,
	.set_parent = imx93_clk_composite_mux_set_parent,
	.determine_rate = imx93_clk_composite_mux_determine_rate,
};

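/*
 * Register a composite clock for one CCM root slice: a mux, a divider and
 * (when writable) a gate that all share the same control register.  Slices
 * the current TrustZone/domain configuration may not write to are
 * registered read-only, with the generic ro ops and no gate.
 */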
struct clk_hw *imx93_clk_composite_flags(const char *name, const char * const *parent_names,
					 int num_parents, void __iomem *reg, u32 domain_id,
					 unsigned long flags)
{
	struct clk_hw *hw = ERR_PTR(-ENOMEM), *mux_hw;
	struct clk_hw *div_hw, *gate_hw;
	struct clk_divider *div = NULL;
	struct clk_gate *gate = NULL;
	struct clk_mux *mux = NULL;
	bool clk_ro = false;
	u32 authen;

	mux = kzalloc(sizeof(*mux), GFP_KERNEL);
	if (!mux)
		goto fail;

	mux_hw = &mux->hw;
	mux->reg = reg;
	mux->shift = CCM_MUX_SHIFT;
	mux->mask = CCM_MUX_MASK;
	mux->lock = &imx_ccm_lock;

	div = kzalloc(sizeof(*div), GFP_KERNEL);
	if (!div)
		goto fail;

	div_hw = &div->hw;
	div->reg = reg;
	div->shift = CCM_DIV_SHIFT;
	div->width = CCM_DIV_WIDTH;
	div->lock = &imx_ccm_lock;
	div->flags = CLK_DIVIDER_ROUND_CLOSEST;

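	/*
	 * The slice is writable only if it is non-secure accessible and the
	 * current domain is on its white list; otherwise fall back to a
	 * read-only clock.
	 */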
	authen = readl(reg + AUTHEN_OFFSET);
	if (!(authen & TZ_NS_MASK) || !(authen & BIT(WHITE_LIST_SHIFT + domain_id)))
		clk_ro = true;

	if (clk_ro) {
		hw = clk_hw_register_composite(NULL, name, parent_names, num_parents,
					       mux_hw, &clk_mux_ro_ops, div_hw,
					       &clk_divider_ro_ops, NULL, NULL, flags);
	} else {
		gate = kzalloc(sizeof(*gate), GFP_KERNEL);
		if (!gate)
			goto fail;

		gate_hw = &gate->hw;
		gate->reg = reg;
		gate->bit_idx = CCM_OFF_SHIFT;
		gate->lock = &imx_ccm_lock;
		gate->flags = CLK_GATE_SET_TO_DISABLE;

		hw = clk_hw_register_composite(NULL, name, parent_names, num_parents,
					       mux_hw, &imx93_clk_composite_mux_ops, div_hw,
					       &imx93_clk_composite_divider_ops, gate_hw,
					       &imx93_clk_composite_gate_ops,
					       flags | CLK_SET_RATE_NO_REPARENT);
	}

	if (IS_ERR(hw))
		goto fail;

	return hw;

fail:
	kfree(gate);
	kfree(div);
	kfree(mux);
	return ERR_CAST(hw);
}
EXPORT_SYMBOL_GPL(imx93_clk_composite_flags);
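
/*
 * Usage sketch, illustrative only: a platform clock driver would register
 * each clock root roughly as below, where ccm_base, root_offset, domain_id
 * and the selector table are placeholders for the caller's actual CCM
 * mapping, not names taken from this file.
 *
 *	hws[clk_id] = imx93_clk_composite_flags("root_name", root_sels,
 *						ARRAY_SIZE(root_sels),
 *						ccm_base + root_offset,
 *						domain_id, CLK_SET_RATE_PARENT);
 */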