xref: /freebsd/sys/dev/clk/allwinner/aw_clk_nkmp.c (revision 78ae60b4)
1 /*-
2  * Copyright (c) 2017 Emmanuel Vadot <manu@freebsd.org>
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
14  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
15  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
16  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
17  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
18  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
19  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
20  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
21  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23  * SUCH DAMAGE.
24  */
25 
26 #include <sys/param.h>
27 #include <sys/systm.h>
28 #include <sys/bus.h>
29 
30 #include <dev/clk/clk.h>
31 
32 #include <dev/clk/allwinner/aw_clk.h>
33 #include <dev/clk/allwinner/aw_clk_nkmp.h>
34 
35 #include "clkdev_if.h"
36 
37 /*
38  * clknode for clocks matching the formula :
39  *
40  * clk = (clkin * n * k) / (m * p)
41  *
42  */
43 
/*
 * Per-instance software context for an NKMP clock node.  All factor and
 * control fields live in the single 32-bit register at `offset'.
 */
struct aw_clk_nkmp_sc {
	uint32_t	offset;		/* control register offset */

	struct aw_clk_factor	n;	/* multiplier N */
	struct aw_clk_factor	k;	/* multiplier K */
	struct aw_clk_factor	m;	/* divider M */
	struct aw_clk_factor	p;	/* divider P */

	uint32_t	mux_shift;	/* parent mux field shift */
	uint32_t	mux_mask;	/* parent mux field mask (already shifted) */
	uint32_t	gate_shift;	/* gate bit position */
	uint32_t	lock_shift;	/* lock status bit position */
	uint32_t	lock_retries;	/* max polls waiting for lock */
	uint32_t	update_shift;	/* "apply settings" bit position */

	uint32_t	flags;		/* AW_CLK_HAS_* feature flags */
};
61 
/*
 * Shorthand wrappers dispatching register access and bus-level locking
 * to the clkdev interface of the clock's backing device.
 */
#define	WRITE4(_clk, off, val)						\
	CLKDEV_WRITE_4(clknode_get_device(_clk), off, val)
#define	READ4(_clk, off, val)						\
	CLKDEV_READ_4(clknode_get_device(_clk), off, val)
#define	MODIFY4(_clk, off, clr, set )					\
	CLKDEV_MODIFY_4(clknode_get_device(_clk), off, clr, set)
#define	DEVICE_LOCK(_clk)							\
	CLKDEV_DEVICE_LOCK(clknode_get_device(_clk))
#define	DEVICE_UNLOCK(_clk)						\
	CLKDEV_DEVICE_UNLOCK(clknode_get_device(_clk))
72 
73 static int
74 aw_clk_nkmp_init(struct clknode *clk, device_t dev)
75 {
76 	struct aw_clk_nkmp_sc *sc;
77 	uint32_t val, idx;
78 
79 	sc = clknode_get_softc(clk);
80 
81 	idx = 0;
82 	if ((sc->flags & AW_CLK_HAS_MUX) != 0) {
83 		DEVICE_LOCK(clk);
84 		READ4(clk, sc->offset, &val);
85 		DEVICE_UNLOCK(clk);
86 
87 		idx = (val & sc->mux_mask) >> sc->mux_shift;
88 	}
89 
90 	clknode_init_parent_idx(clk, idx);
91 	return (0);
92 }
93 
94 static int
95 aw_clk_nkmp_set_gate(struct clknode *clk, bool enable)
96 {
97 	struct aw_clk_nkmp_sc *sc;
98 	uint32_t val;
99 
100 	sc = clknode_get_softc(clk);
101 
102 	if ((sc->flags & AW_CLK_HAS_GATE) == 0)
103 		return (0);
104 
105 	DEVICE_LOCK(clk);
106 	READ4(clk, sc->offset, &val);
107 	if (enable)
108 		val |= (1 << sc->gate_shift);
109 	else
110 		val &= ~(1 << sc->gate_shift);
111 	WRITE4(clk, sc->offset, val);
112 	DEVICE_UNLOCK(clk);
113 
114 	return (0);
115 }
116 
117 static int
118 aw_clk_nkmp_set_mux(struct clknode *clk, int index)
119 {
120 	struct aw_clk_nkmp_sc *sc;
121 	uint32_t val;
122 
123 	sc = clknode_get_softc(clk);
124 
125 	if ((sc->flags & AW_CLK_HAS_MUX) == 0)
126 		return (0);
127 
128 	DEVICE_LOCK(clk);
129 	READ4(clk, sc->offset, &val);
130 	val &= ~sc->mux_mask;
131 	val |= index << sc->mux_shift;
132 	WRITE4(clk, sc->offset, val);
133 	DEVICE_UNLOCK(clk);
134 
135 	return (0);
136 }
137 
/*
 * Exhaustively search the (n, k, m, p) factor space for the output
 * frequency closest to *fout given parent frequency fparent, using
 * clk = (fparent * n * k) / (m * p).  The winning factors are returned
 * through factor_n/k/m/p and the achieved frequency is the return value.
 *
 * NOTE(review): the closeness test below is done in unsigned arithmetic,
 * so when cur > *fout the difference wraps to a huge value and that
 * candidate effectively never wins — the search appears to pick the
 * closest frequency at or below the target.  Confirm this round-down
 * bias is intended before restructuring.
 */
static uint64_t
aw_clk_nkmp_find_best(struct aw_clk_nkmp_sc *sc, uint64_t fparent, uint64_t *fout,
    uint32_t *factor_n, uint32_t *factor_k, uint32_t *factor_m, uint32_t *factor_p)
{
	uint64_t cur, best;
	uint32_t n, k, m, p;

	best = 0;
	*factor_n = 0;
	*factor_k = 0;
	*factor_m = 0;
	*factor_p = 0;

	/*
	 * Each factor steps either by doubling (POWER_OF_TWO factors) or
	 * by one, between its min and max as given by the aw_clk helpers.
	 */
	for (n = aw_clk_factor_get_min(&sc->n); n <= aw_clk_factor_get_max(&sc->n); ) {
		for (k = aw_clk_factor_get_min(&sc->k); k <= aw_clk_factor_get_max(&sc->k); ) {
			for (m = aw_clk_factor_get_min(&sc->m); m <= aw_clk_factor_get_max(&sc->m); ) {
				for (p = aw_clk_factor_get_min(&sc->p); p <= aw_clk_factor_get_max(&sc->p); ) {
					cur = (fparent * n * k) / (m * p);
					if ((*fout - cur) < (*fout - best)) {
						best = cur;
						*factor_n = n;
						*factor_k = k;
						*factor_m = m;
						*factor_p = p;
					}
					/* Exact match: no better answer exists. */
					if (best == *fout)
						return (best);
					if ((sc->p.flags & AW_CLK_FACTOR_POWER_OF_TWO) != 0)
						p <<= 1;
					else
						p++;
				}
				if ((sc->m.flags & AW_CLK_FACTOR_POWER_OF_TWO) != 0)
					m <<= 1;
				else
					m++;
			}
			if ((sc->k.flags & AW_CLK_FACTOR_POWER_OF_TWO) != 0)
				k <<= 1;
			else
				k++;
		}
		if ((sc->n.flags & AW_CLK_FACTOR_POWER_OF_TWO) != 0)
			n <<= 1;
		else
			n++;
	}

	return best;
}
188 
/*
 * Program new NKMP factors for clocks flagged AW_CLK_SCALE_CHANGE, using
 * a staged sequence: dividers (p, then m) that INCREASE are written
 * before the n/k multipliers, and dividers that DECREASE are written
 * after them — presumably so the intermediate output frequency never
 * overshoots both the old and new rates during the transition (TODO
 * confirm against the SoC manual).  Each register write is followed by a
 * 2 ms settle delay; if the clock has a lock bit it is then polled up to
 * lock_retries times.  The device lock is held for the whole sequence.
 */
static void
aw_clk_nkmp_set_freq_scale(struct clknode *clk, struct aw_clk_nkmp_sc *sc,
    uint32_t factor_n, uint32_t factor_k, uint32_t factor_m, uint32_t factor_p)
{
	uint32_t val, m, p;
	int retry;

	DEVICE_LOCK(clk);
	READ4(clk, sc->offset, &val);

	/* Current divider values, for ordering the writes below. */
	m = aw_clk_get_factor(val, &sc->m);
	p = aw_clk_get_factor(val, &sc->p);

	/* Growing dividers first: lowers the rate before n/k change. */
	if (p < factor_p) {
		val &= ~sc->p.mask;
		val |= aw_clk_factor_get_value(&sc->p, factor_p) << sc->p.shift;
		WRITE4(clk, sc->offset, val);
		DELAY(2000);
	}

	if (m < factor_m) {
		val &= ~sc->m.mask;
		val |= aw_clk_factor_get_value(&sc->m, factor_m) << sc->m.shift;
		WRITE4(clk, sc->offset, val);
		DELAY(2000);
	}

	/* Multipliers n and k are updated together in one write. */
	val &= ~sc->n.mask;
	val &= ~sc->k.mask;
	val |= aw_clk_factor_get_value(&sc->n, factor_n) << sc->n.shift;
	val |= aw_clk_factor_get_value(&sc->k, factor_k) << sc->k.shift;
	WRITE4(clk, sc->offset, val);
	DELAY(2000);

	/* Shrinking dividers last: raises the rate after n/k settle. */
	if (m > factor_m) {
		val &= ~sc->m.mask;
		val |= aw_clk_factor_get_value(&sc->m, factor_m) << sc->m.shift;
		WRITE4(clk, sc->offset, val);
		DELAY(2000);
	}

	if (p > factor_p) {
		val &= ~sc->p.mask;
		val |= aw_clk_factor_get_value(&sc->p, factor_p) << sc->p.shift;
		WRITE4(clk, sc->offset, val);
		DELAY(2000);
	}

	/* Wait for the PLL to report lock, bounded by lock_retries. */
	if ((sc->flags & AW_CLK_HAS_LOCK) != 0) {
		for (retry = 0; retry < sc->lock_retries; retry++) {
			READ4(clk, sc->offset, &val);
			if ((val & (1 << sc->lock_shift)) != 0)
				break;
			DELAY(1000);
		}
	}

	DEVICE_UNLOCK(clk);
}
248 
/*
 * clknode set_frequency method.  Finds the best reachable frequency for
 * *fout, honors CLK_SET_DRYRUN (report only, no hardware writes) and the
 * CLK_SET_ROUND_DOWN/UP constraints (ERANGE when the best match violates
 * them), then programs the factors — via the staged scale sequence for
 * AW_CLK_SCALE_CHANGE clocks, or a single combined write otherwise.
 * On success *fout is updated to the achieved frequency; *stop is always
 * set so the clock framework does not propagate further.
 */
static int
aw_clk_nkmp_set_freq(struct clknode *clk, uint64_t fparent, uint64_t *fout,
    int flags, int *stop)
{
	struct aw_clk_nkmp_sc *sc;
	uint64_t best;
	uint32_t val, best_n, best_k, best_m, best_p;
	int retry;

	sc = clknode_get_softc(clk);

	best = aw_clk_nkmp_find_best(sc, fparent, fout,
	    &best_n, &best_k, &best_m, &best_p);
	/* Dry run: report what would be achieved, touch no hardware. */
	if ((flags & CLK_SET_DRYRUN) != 0) {
		*fout = best;
		*stop = 1;
		return (0);
	}

	/* Caller required rounding we cannot satisfy. */
	if ((best < *fout) &&
	  ((flags & CLK_SET_ROUND_DOWN) != 0)) {
		*stop = 1;
		return (ERANGE);
	}
	if ((best > *fout) &&
	  ((flags & CLK_SET_ROUND_UP) != 0)) {
		*stop = 1;
		return (ERANGE);
	}

	if ((sc->flags & AW_CLK_SCALE_CHANGE) != 0)
		aw_clk_nkmp_set_freq_scale(clk, sc,
		    best_n, best_k, best_m, best_p);
	else {
		/* Program all four factors in one read-modify-write. */
		DEVICE_LOCK(clk);
		READ4(clk, sc->offset, &val);
		val &= ~sc->n.mask;
		val &= ~sc->k.mask;
		val &= ~sc->m.mask;
		val &= ~sc->p.mask;
		val |= aw_clk_factor_get_value(&sc->n, best_n) << sc->n.shift;
		val |= aw_clk_factor_get_value(&sc->k, best_k) << sc->k.shift;
		val |= aw_clk_factor_get_value(&sc->m, best_m) << sc->m.shift;
		val |= aw_clk_factor_get_value(&sc->p, best_p) << sc->p.shift;
		WRITE4(clk, sc->offset, val);
		DELAY(2000);
		DEVICE_UNLOCK(clk);

		/* Some clocks latch settings only after an update bit. */
		if ((sc->flags & AW_CLK_HAS_UPDATE) != 0) {
			DEVICE_LOCK(clk);
			READ4(clk, sc->offset, &val);
			val |= 1 << sc->update_shift;
			WRITE4(clk, sc->offset, val);
			DELAY(2000);
			DEVICE_UNLOCK(clk);
		}

		/*
		 * Poll the lock bit, bounded by lock_retries.
		 * NOTE(review): unlike aw_clk_nkmp_set_freq_scale(), this
		 * loop reads the register without DEVICE_LOCK held —
		 * confirm whether that is intentional.
		 */
		if ((sc->flags & AW_CLK_HAS_LOCK) != 0) {
			for (retry = 0; retry < sc->lock_retries; retry++) {
				READ4(clk, sc->offset, &val);
				if ((val & (1 << sc->lock_shift)) != 0)
					break;
				DELAY(1000);
			}
		}
	}

	*fout = best;
	*stop = 1;

	return (0);
}
321 
322 static int
323 aw_clk_nkmp_recalc(struct clknode *clk, uint64_t *freq)
324 {
325 	struct aw_clk_nkmp_sc *sc;
326 	uint32_t val, m, n, k, p;
327 
328 	sc = clknode_get_softc(clk);
329 
330 	DEVICE_LOCK(clk);
331 	READ4(clk, sc->offset, &val);
332 	DEVICE_UNLOCK(clk);
333 
334 	n = aw_clk_get_factor(val, &sc->n);
335 	k = aw_clk_get_factor(val, &sc->k);
336 	m = aw_clk_get_factor(val, &sc->m);
337 	p = aw_clk_get_factor(val, &sc->p);
338 
339 	*freq = (*freq * n * k) / (m * p);
340 
341 	return (0);
342 }
343 
/* clknode method table and class definition for NKMP clocks. */
static clknode_method_t aw_nkmp_clknode_methods[] = {
	/* Device interface */
	CLKNODEMETHOD(clknode_init,		aw_clk_nkmp_init),
	CLKNODEMETHOD(clknode_set_gate,		aw_clk_nkmp_set_gate),
	CLKNODEMETHOD(clknode_set_mux,		aw_clk_nkmp_set_mux),
	CLKNODEMETHOD(clknode_recalc_freq,	aw_clk_nkmp_recalc),
	CLKNODEMETHOD(clknode_set_freq,		aw_clk_nkmp_set_freq),
	CLKNODEMETHOD_END
};

DEFINE_CLASS_1(aw_nkmp_clknode, aw_nkmp_clknode_class, aw_nkmp_clknode_methods,
    sizeof(struct aw_clk_nkmp_sc), clknode_class);
356 
357 int
358 aw_clk_nkmp_register(struct clkdom *clkdom, struct aw_clk_nkmp_def *clkdef)
359 {
360 	struct clknode *clk;
361 	struct aw_clk_nkmp_sc *sc;
362 
363 	clk = clknode_create(clkdom, &aw_nkmp_clknode_class, &clkdef->clkdef);
364 	if (clk == NULL)
365 		return (1);
366 
367 	sc = clknode_get_softc(clk);
368 
369 	sc->offset = clkdef->offset;
370 
371 	sc->n.shift = clkdef->n.shift;
372 	sc->n.width = clkdef->n.width;
373 	sc->n.mask = ((1 << clkdef->n.width) - 1) << sc->n.shift;
374 	sc->n.value = clkdef->n.value;
375 	sc->n.flags = clkdef->n.flags;
376 
377 	sc->k.shift = clkdef->k.shift;
378 	sc->k.width = clkdef->k.width;
379 	sc->k.mask = ((1 << clkdef->k.width) - 1) << sc->k.shift;
380 	sc->k.value = clkdef->k.value;
381 	sc->k.flags = clkdef->k.flags;
382 
383 	sc->m.shift = clkdef->m.shift;
384 	sc->m.width = clkdef->m.width;
385 	sc->m.mask = ((1 << clkdef->m.width) - 1) << sc->m.shift;
386 	sc->m.value = clkdef->m.value;
387 	sc->m.flags = clkdef->m.flags;
388 
389 	sc->p.shift = clkdef->p.shift;
390 	sc->p.width = clkdef->p.width;
391 	sc->p.mask = ((1 << clkdef->p.width) - 1) << sc->p.shift;
392 	sc->p.value = clkdef->p.value;
393 	sc->p.flags = clkdef->p.flags;
394 
395 	sc->mux_shift = clkdef->mux_shift;
396 	sc->mux_mask = ((1 << clkdef->mux_width) - 1) << sc->mux_shift;
397 
398 	sc->gate_shift = clkdef->gate_shift;
399 	sc->lock_shift = clkdef->lock_shift;
400 	sc->lock_retries = clkdef->lock_retries;
401 	sc->update_shift = clkdef->update_shift;
402 	sc->flags = clkdef->flags;
403 
404 	clknode_register(clkdom, clk);
405 
406 	return (0);
407 }
408