// SPDX-License-Identifier: GPL-2.0
/*
 * System Control and Power Interface (SCMI) Protocol based clock driver
 *
 * Copyright (C) 2018-2022 ARM Ltd.
 */

#include <linux/clk-provider.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/module.h>
#include <linux/scmi_protocol.h>
#include <asm/div64.h>

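/*
 * Readability aliases for the 'atomic' flag passed to the SCMI clock
 * protocol enable/disable/state_get operations.
 */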
#define NOT_ATOMIC	false
#define ATOMIC		true

static const struct scmi_clk_proto_ops *scmi_proto_clk_ops;

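/**
 * struct scmi_clk - Driver-side representation of an SCMI clock
 * @id: SCMI clock identifier used in protocol requests
 * @dev: Device used for diagnostics
 * @hw: Handle registered with the common clock framework
 * @info: Clock description retrieved from the SCMI platform
 * @ph: SCMI protocol handle used to issue clock operations
 * @parent_data: Possible parents of this clock, if any
 */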
struct scmi_clk {
	u32 id;
	struct device *dev;
	struct clk_hw hw;
	const struct scmi_clock_info *info;
	const struct scmi_protocol_handle *ph;
	struct clk_parent_data *parent_data;
};

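/* Retrieve the struct scmi_clk wrapping a given clk_hw. */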
#define to_scmi_clk(clk) container_of(clk, struct scmi_clk, hw)

static unsigned long scmi_clk_recalc_rate(struct clk_hw *hw,
					  unsigned long parent_rate)
{
	int ret;
	u64 rate;
	struct scmi_clk *clk = to_scmi_clk(hw);

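	/* Ask the SCMI platform for the current rate; report 0 on failure. */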
	ret = scmi_proto_clk_ops->rate_get(clk->ph, clk->id, &rate);
	if (ret)
		return 0;
	return rate;
}

static long scmi_clk_round_rate(struct clk_hw *hw, unsigned long rate,
				unsigned long *parent_rate)
{
	u64 fmin, fmax, ftmp;
	struct scmi_clk *clk = to_scmi_clk(hw);

	/*
	 * We can't figure out what rate it will be, so just return the
	 * rate back to the caller. scmi_clk_recalc_rate() will be called
	 * after the rate is set and we'll know what rate the clock is
	 * running at then.
	 */
	if (clk->info->rate_discrete)
		return rate;

	fmin = clk->info->range.min_rate;
	fmax = clk->info->range.max_rate;
	if (rate <= fmin)
		return fmin;
	else if (rate >= fmax)
		return fmax;

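	/*
	 * For ranged clocks, round the requested rate up to the nearest
	 * step boundary above fmin:
	 * fmin + DIV_ROUND_UP(rate - fmin, step_size) * step_size.
	 * do_div() is used since ftmp is 64-bit.
	 */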
	ftmp = rate - fmin;
	ftmp += clk->info->range.step_size - 1; /* to round up */
	do_div(ftmp, clk->info->range.step_size);

	return ftmp * clk->info->range.step_size + fmin;
}

static int scmi_clk_set_rate(struct clk_hw *hw, unsigned long rate,
			     unsigned long parent_rate)
{
	struct scmi_clk *clk = to_scmi_clk(hw);

	return scmi_proto_clk_ops->rate_set(clk->ph, clk->id, rate);
}

static int scmi_clk_set_parent(struct clk_hw *hw, u8 parent_index)
{
	struct scmi_clk *clk = to_scmi_clk(hw);

	return scmi_proto_clk_ops->parent_set(clk->ph, clk->id, parent_index);
}

static u8 scmi_clk_get_parent(struct clk_hw *hw)
{
	struct scmi_clk *clk = to_scmi_clk(hw);
	u32 parent_id, p_idx;
	int ret;

	ret = scmi_proto_clk_ops->parent_get(clk->ph, clk->id, &parent_id);
	if (ret)
		return 0;

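	/*
	 * Map the firmware-reported parent clock ID to its index in
	 * parent_data[]; fall back to index 0 if it is not found.
	 */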
	for (p_idx = 0; p_idx < clk->info->num_parents; p_idx++) {
		if (clk->parent_data[p_idx].index == parent_id)
			break;
	}

	if (p_idx == clk->info->num_parents)
		return 0;

	return p_idx;
}

static int scmi_clk_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
{
	/*
	 * Accept any requested rate as-is and let the firmware handle
	 * the rest: returning 0 without modifying the request leaves
	 * req->rate unchanged.
	 */
	return 0;
}

static int scmi_clk_enable(struct clk_hw *hw)
{
	struct scmi_clk *clk = to_scmi_clk(hw);

	return scmi_proto_clk_ops->enable(clk->ph, clk->id, NOT_ATOMIC);
}

static void scmi_clk_disable(struct clk_hw *hw)
{
	struct scmi_clk *clk = to_scmi_clk(hw);

	scmi_proto_clk_ops->disable(clk->ph, clk->id, NOT_ATOMIC);
}

static int scmi_clk_atomic_enable(struct clk_hw *hw)
{
	struct scmi_clk *clk = to_scmi_clk(hw);

	return scmi_proto_clk_ops->enable(clk->ph, clk->id, ATOMIC);
}

static void scmi_clk_atomic_disable(struct clk_hw *hw)
{
	struct scmi_clk *clk = to_scmi_clk(hw);

	scmi_proto_clk_ops->disable(clk->ph, clk->id, ATOMIC);
}

static int scmi_clk_atomic_is_enabled(struct clk_hw *hw)
{
	int ret;
	bool enabled = false;
	struct scmi_clk *clk = to_scmi_clk(hw);

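	/*
	 * On query failure 'enabled' is left false, so the clock is
	 * reported as disabled after the warning below.
	 */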
	ret = scmi_proto_clk_ops->state_get(clk->ph, clk->id, &enabled, ATOMIC);
	if (ret)
		dev_warn(clk->dev,
			 "Failed to get state for clock ID %d\n", clk->id);

	return !!enabled;
}

/*
 * We can provide enable/disable/is_enabled atomic callbacks only if the
 * underlying SCMI transport for an SCMI instance is configured to handle
 * SCMI commands in an atomic manner.
 *
 * When no SCMI atomic transport support is available we instead provide only
 * the prepare/unprepare API, as allowed by the clock framework when atomic
 * calls are not available.
 *
 * Two distinct sets of clk_ops are provided since we could have multiple SCMI
 * instances with different underlying transport quality, so they cannot be
 * shared.
 */
static const struct clk_ops scmi_clk_ops = {
	.recalc_rate = scmi_clk_recalc_rate,
	.round_rate = scmi_clk_round_rate,
	.set_rate = scmi_clk_set_rate,
	.prepare = scmi_clk_enable,
	.unprepare = scmi_clk_disable,
	.set_parent = scmi_clk_set_parent,
	.get_parent = scmi_clk_get_parent,
	.determine_rate = scmi_clk_determine_rate,
};

static const struct clk_ops scmi_atomic_clk_ops = {
	.recalc_rate = scmi_clk_recalc_rate,
	.round_rate = scmi_clk_round_rate,
	.set_rate = scmi_clk_set_rate,
	.enable = scmi_clk_atomic_enable,
	.disable = scmi_clk_atomic_disable,
	.is_enabled = scmi_clk_atomic_is_enabled,
	.set_parent = scmi_clk_set_parent,
	.get_parent = scmi_clk_get_parent,
	.determine_rate = scmi_clk_determine_rate,
};

static int scmi_clk_ops_init(struct device *dev, struct scmi_clk *sclk,
			     const struct clk_ops *scmi_ops)
{
	int ret;
	unsigned long min_rate, max_rate;

	struct clk_init_data init = {
		.flags = CLK_GET_RATE_NOCACHE,
		.num_parents = sclk->info->num_parents,
		.ops = scmi_ops,
		.name = sclk->info->name,
		.parent_data = sclk->parent_data,
	};

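	/*
	 * 'init' lives on the stack: the clk core copies what it needs
	 * out of it during registration, so it must not be referenced
	 * once devm_clk_hw_register() returns.
	 */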
	sclk->hw.init = &init;
	ret = devm_clk_hw_register(dev, &sclk->hw);
	if (ret)
		return ret;

	if (sclk->info->rate_discrete) {
		int num_rates = sclk->info->list.num_rates;

		if (num_rates <= 0)
			return -EINVAL;

		min_rate = sclk->info->list.rates[0];
		max_rate = sclk->info->list.rates[num_rates - 1];
	} else {
		min_rate = sclk->info->range.min_rate;
		max_rate = sclk->info->range.max_rate;
	}

	clk_hw_set_rate_range(&sclk->hw, min_rate, max_rate);
	return ret;
}

static int scmi_clocks_probe(struct scmi_device *sdev)
{
	int idx, count, err;
	unsigned int atomic_threshold;
	bool is_atomic;
	struct clk_hw **hws;
	struct clk_hw_onecell_data *clk_data;
	struct device *dev = &sdev->dev;
	struct device_node *np = dev->of_node;
	const struct scmi_handle *handle = sdev->handle;
	struct scmi_protocol_handle *ph;

	if (!handle)
		return -ENODEV;

	scmi_proto_clk_ops =
		handle->devm_protocol_get(sdev, SCMI_PROTOCOL_CLOCK, &ph);
	if (IS_ERR(scmi_proto_clk_ops))
		return PTR_ERR(scmi_proto_clk_ops);

	count = scmi_proto_clk_ops->count_get(ph);
	if (count < 0) {
		dev_err(dev, "%pOFn: invalid clock output count\n", np);
		return -EINVAL;
	}

	clk_data = devm_kzalloc(dev, struct_size(clk_data, hws, count),
				GFP_KERNEL);
	if (!clk_data)
		return -ENOMEM;

	clk_data->num = count;
	hws = clk_data->hws;

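	/*
	 * Query whether the transport can complete SCMI commands
	 * atomically, and retrieve the enable latency threshold below
	 * which a clock is deemed suitable for atomic operation.
	 */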
	is_atomic = handle->is_transport_atomic(handle, &atomic_threshold);

	for (idx = 0; idx < count; idx++) {
		struct scmi_clk *sclk;
		const struct clk_ops *scmi_ops;

		sclk = devm_kzalloc(dev, sizeof(*sclk), GFP_KERNEL);
		if (!sclk)
			return -ENOMEM;

		sclk->info = scmi_proto_clk_ops->info_get(ph, idx);
		if (!sclk->info) {
			dev_dbg(dev, "invalid clock info for idx %d\n", idx);
			devm_kfree(dev, sclk);
			continue;
		}

		sclk->id = idx;
		sclk->ph = ph;
		sclk->dev = dev;

		/*
		 * Note that when the transport is atomic but the SCMI
		 * protocol did not specify (or does not support) an
		 * enable_latency for a clock, we default to using the
		 * atomic operations mode.
		 */
		if (is_atomic &&
		    sclk->info->enable_latency <= atomic_threshold)
			scmi_ops = &scmi_atomic_clk_ops;
		else
			scmi_ops = &scmi_clk_ops;

		/* Initialize clock parent data. */
		if (sclk->info->num_parents > 0) {
			sclk->parent_data = devm_kcalloc(dev, sclk->info->num_parents,
							 sizeof(*sclk->parent_data), GFP_KERNEL);
			if (!sclk->parent_data)
				return -ENOMEM;

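			/*
			 * Note: a parent with a higher SCMI clock ID has
			 * not been registered yet at this point, so its
			 * hws[] entry is still NULL here.
			 */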
			for (int i = 0; i < sclk->info->num_parents; i++) {
				sclk->parent_data[i].index = sclk->info->parents[i];
				sclk->parent_data[i].hw = hws[sclk->info->parents[i]];
			}
		}

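		/*
		 * On registration failure, free this clock's devm
		 * allocations right away and leave hws[idx] NULL so the
		 * remaining clocks can still be registered.
		 */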
		err = scmi_clk_ops_init(dev, sclk, scmi_ops);
		if (err) {
			dev_err(dev, "failed to register clock %d\n", idx);
			devm_kfree(dev, sclk->parent_data);
			devm_kfree(dev, sclk);
			hws[idx] = NULL;
		} else {
			dev_dbg(dev, "Registered clock:%s%s\n",
				sclk->info->name,
				scmi_ops == &scmi_atomic_clk_ops ?
				" (atomic ops)" : "");
			hws[idx] = &sclk->hw;
		}
	}

	return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
					   clk_data);
}

static const struct scmi_device_id scmi_id_table[] = {
	{ SCMI_PROTOCOL_CLOCK, "clocks" },
	{ },
};
MODULE_DEVICE_TABLE(scmi, scmi_id_table);

static struct scmi_driver scmi_clocks_driver = {
	.name = "scmi-clocks",
	.probe = scmi_clocks_probe,
	.id_table = scmi_id_table,
};
module_scmi_driver(scmi_clocks_driver);

MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
MODULE_DESCRIPTION("ARM SCMI clock driver");
MODULE_LICENSE("GPL v2");