xref: /linux/drivers/clk/clk_test.c (revision d642ef71)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Kunit test for clk rate management
4  */
5 #include <linux/clk.h>
6 #include <linux/clk-provider.h>
7 
8 /* Needed for clk_hw_get_clk() */
9 #include "clk.h"
10 
11 #include <kunit/test.h>
12 
/* Ops table with no callbacks, for clocks that need no operations. */
static const struct clk_ops empty_clk_ops = { };
14 
/* Rates (in Hz) used by the dummy clocks throughout these tests. */
#define DUMMY_CLOCK_INIT_RATE	(42 * 1000 * 1000)
#define DUMMY_CLOCK_RATE_1	(142 * 1000 * 1000)
#define DUMMY_CLOCK_RATE_2	(242 * 1000 * 1000)
18 
/*
 * A dummy clock backed by a single rate field, so tests can inspect and
 * modify the simulated "hardware" state directly.
 */
struct clk_dummy_context {
	struct clk_hw hw;
	/* current backing rate, in Hz */
	unsigned long rate;
};
23 
24 static unsigned long clk_dummy_recalc_rate(struct clk_hw *hw,
25 					   unsigned long parent_rate)
26 {
27 	struct clk_dummy_context *ctx =
28 		container_of(hw, struct clk_dummy_context, hw);
29 
30 	return ctx->rate;
31 }
32 
/*
 * Accept any requested rate as-is: leave req->rate untouched and report
 * success, so round/set operations echo back what was asked for.
 */
static int clk_dummy_determine_rate(struct clk_hw *hw,
				    struct clk_rate_request *req)
{
	/* Just return the same rate without modifying it */
	return 0;
}
39 
40 static int clk_dummy_maximize_rate(struct clk_hw *hw,
41 				   struct clk_rate_request *req)
42 {
43 	/*
44 	 * If there's a maximum set, always run the clock at the maximum
45 	 * allowed.
46 	 */
47 	if (req->max_rate < ULONG_MAX)
48 		req->rate = req->max_rate;
49 
50 	return 0;
51 }
52 
53 static int clk_dummy_minimize_rate(struct clk_hw *hw,
54 				   struct clk_rate_request *req)
55 {
56 	/*
57 	 * If there's a minimum set, always run the clock at the minimum
58 	 * allowed.
59 	 */
60 	if (req->min_rate > 0)
61 		req->rate = req->min_rate;
62 
63 	return 0;
64 }
65 
66 static int clk_dummy_set_rate(struct clk_hw *hw,
67 			      unsigned long rate,
68 			      unsigned long parent_rate)
69 {
70 	struct clk_dummy_context *ctx =
71 		container_of(hw, struct clk_dummy_context, hw);
72 
73 	ctx->rate = rate;
74 	return 0;
75 }
76 
77 static int clk_dummy_single_set_parent(struct clk_hw *hw, u8 index)
78 {
79 	if (index >= clk_hw_get_num_parents(hw))
80 		return -EINVAL;
81 
82 	return 0;
83 }
84 
/* Single-parent mux: the current parent index is always 0. */
static u8 clk_dummy_single_get_parent(struct clk_hw *hw)
{
	return 0;
}
89 
/* Pass-through rate ops: rate requests are accepted unmodified. */
static const struct clk_ops clk_dummy_rate_ops = {
	.recalc_rate = clk_dummy_recalc_rate,
	.determine_rate = clk_dummy_determine_rate,
	.set_rate = clk_dummy_set_rate,
};
95 
/* Rate ops that always round up to the maximum of the allowed range. */
static const struct clk_ops clk_dummy_maximize_rate_ops = {
	.recalc_rate = clk_dummy_recalc_rate,
	.determine_rate = clk_dummy_maximize_rate,
	.set_rate = clk_dummy_set_rate,
};
101 
/* Rate ops that always round down to the minimum of the allowed range. */
static const struct clk_ops clk_dummy_minimize_rate_ops = {
	.recalc_rate = clk_dummy_recalc_rate,
	.determine_rate = clk_dummy_minimize_rate,
	.set_rate = clk_dummy_set_rate,
};
107 
/* Ops for a mux with exactly one parent, rounding to the closest rate. */
static const struct clk_ops clk_dummy_single_parent_ops = {
	/*
	 * FIXME: Even though we should probably be able to use
	 * __clk_mux_determine_rate() here, if we use it and call
	 * clk_round_rate() or clk_set_rate() with a rate lower than
	 * what all the parents can provide, it will return -EINVAL.
	 *
	 * This is due to the fact that it has the undocumented
	 * behaviour to always pick up the closest rate higher than the
	 * requested rate. If we get something lower, it thus considers
	 * that it's not acceptable and will return an error.
	 *
	 * It's somewhat inconsistent and creates a weird threshold
	 * between rates above the parent rate which would be rounded to
	 * what the parent can provide, but rates below will simply
	 * return an error.
	 */
	.determine_rate = __clk_mux_determine_rate_closest,
	.set_parent = clk_dummy_single_set_parent,
	.get_parent = clk_dummy_single_get_parent,
};
129 
/*
 * A mux with two dummy parents; current_parent mirrors what the
 * simulated hardware believes its parent selection to be.
 */
struct clk_multiple_parent_ctx {
	struct clk_dummy_context parents_ctx[2];
	struct clk_hw hw;
	/* index into parents_ctx of the currently selected parent */
	u8 current_parent;
};
135 
136 static int clk_multiple_parents_mux_set_parent(struct clk_hw *hw, u8 index)
137 {
138 	struct clk_multiple_parent_ctx *ctx =
139 		container_of(hw, struct clk_multiple_parent_ctx, hw);
140 
141 	if (index >= clk_hw_get_num_parents(hw))
142 		return -EINVAL;
143 
144 	ctx->current_parent = index;
145 
146 	return 0;
147 }
148 
149 static u8 clk_multiple_parents_mux_get_parent(struct clk_hw *hw)
150 {
151 	struct clk_multiple_parent_ctx *ctx =
152 		container_of(hw, struct clk_multiple_parent_ctx, hw);
153 
154 	return ctx->current_parent;
155 }
156 
/* Mux ops that may reparent when rounding, picking the closest rate. */
static const struct clk_ops clk_multiple_parents_mux_ops = {
	.get_parent = clk_multiple_parents_mux_get_parent,
	.set_parent = clk_multiple_parents_mux_set_parent,
	.determine_rate = __clk_mux_determine_rate_closest,
};
162 
/* Mux ops that never reparent as part of a rate change. */
static const struct clk_ops clk_multiple_parents_no_reparent_mux_ops = {
	.determine_rate = clk_hw_determine_rate_no_reparent,
	.get_parent = clk_multiple_parents_mux_get_parent,
	.set_parent = clk_multiple_parents_mux_set_parent,
};
168 
169 static int clk_test_init_with_ops(struct kunit *test, const struct clk_ops *ops)
170 {
171 	struct clk_dummy_context *ctx;
172 	struct clk_init_data init = { };
173 	int ret;
174 
175 	ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
176 	if (!ctx)
177 		return -ENOMEM;
178 	ctx->rate = DUMMY_CLOCK_INIT_RATE;
179 	test->priv = ctx;
180 
181 	init.name = "test_dummy_rate";
182 	init.ops = ops;
183 	ctx->hw.init = &init;
184 
185 	ret = clk_hw_register(NULL, &ctx->hw);
186 	if (ret)
187 		return ret;
188 
189 	return 0;
190 }
191 
/* Suite init: register a pass-through dummy rate clock. */
static int clk_test_init(struct kunit *test)
{
	return clk_test_init_with_ops(test, &clk_dummy_rate_ops);
}
196 
/* Suite init: register a dummy clock that rounds up to the range maximum. */
static int clk_maximize_test_init(struct kunit *test)
{
	return clk_test_init_with_ops(test, &clk_dummy_maximize_rate_ops);
}
201 
/* Suite init: register a dummy clock that rounds down to the range minimum. */
static int clk_minimize_test_init(struct kunit *test)
{
	return clk_test_init_with_ops(test, &clk_dummy_minimize_rate_ops);
}
206 
/* Common exit: unregister the clock registered by the suite's init. */
static void clk_test_exit(struct kunit *test)
{
	struct clk_dummy_context *ctx = test->priv;

	clk_hw_unregister(&ctx->hw);
}
213 
214 /*
215  * Test that the actual rate matches what is returned by clk_get_rate()
216  */
217 static void clk_test_get_rate(struct kunit *test)
218 {
219 	struct clk_dummy_context *ctx = test->priv;
220 	struct clk_hw *hw = &ctx->hw;
221 	struct clk *clk = clk_hw_get_clk(hw, NULL);
222 	unsigned long rate;
223 
224 	rate = clk_get_rate(clk);
225 	KUNIT_ASSERT_GT(test, rate, 0);
226 	KUNIT_EXPECT_EQ(test, rate, ctx->rate);
227 
228 	clk_put(clk);
229 }
230 
231 /*
232  * Test that, after a call to clk_set_rate(), the rate returned by
233  * clk_get_rate() matches.
234  *
235  * This assumes that clk_ops.determine_rate or clk_ops.round_rate won't
236  * modify the requested rate, which is our case in clk_dummy_rate_ops.
237  */
238 static void clk_test_set_get_rate(struct kunit *test)
239 {
240 	struct clk_dummy_context *ctx = test->priv;
241 	struct clk_hw *hw = &ctx->hw;
242 	struct clk *clk = clk_hw_get_clk(hw, NULL);
243 	unsigned long rate;
244 
245 	KUNIT_ASSERT_EQ(test,
246 			clk_set_rate(clk, DUMMY_CLOCK_RATE_1),
247 			0);
248 
249 	rate = clk_get_rate(clk);
250 	KUNIT_ASSERT_GT(test, rate, 0);
251 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
252 
253 	clk_put(clk);
254 }
255 
256 /*
257  * Test that, after several calls to clk_set_rate(), the rate returned
258  * by clk_get_rate() matches the last one.
259  *
260  * This assumes that clk_ops.determine_rate or clk_ops.round_rate won't
261  * modify the requested rate, which is our case in clk_dummy_rate_ops.
262  */
263 static void clk_test_set_set_get_rate(struct kunit *test)
264 {
265 	struct clk_dummy_context *ctx = test->priv;
266 	struct clk_hw *hw = &ctx->hw;
267 	struct clk *clk = clk_hw_get_clk(hw, NULL);
268 	unsigned long rate;
269 
270 	KUNIT_ASSERT_EQ(test,
271 			clk_set_rate(clk, DUMMY_CLOCK_RATE_1),
272 			0);
273 
274 	KUNIT_ASSERT_EQ(test,
275 			clk_set_rate(clk, DUMMY_CLOCK_RATE_2),
276 			0);
277 
278 	rate = clk_get_rate(clk);
279 	KUNIT_ASSERT_GT(test, rate, 0);
280 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
281 
282 	clk_put(clk);
283 }
284 
/*
 * Test that clk_round_rate and clk_set_rate are consistent and will
 * return the same frequency.
 */
static void clk_test_round_set_get_rate(struct kunit *test)
{
	struct clk_dummy_context *ctx = test->priv;
	struct clk_hw *hw = &ctx->hw;
	struct clk *clk = clk_hw_get_clk(hw, NULL);
	unsigned long set_rate;
	long rounded_rate;

	rounded_rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_1);
	KUNIT_ASSERT_GT(test, rounded_rate, 0);
	KUNIT_EXPECT_EQ(test, rounded_rate, DUMMY_CLOCK_RATE_1);

	KUNIT_ASSERT_EQ(test,
			clk_set_rate(clk, DUMMY_CLOCK_RATE_1),
			0);

	/* The committed rate must equal what clk_round_rate() promised. */
	set_rate = clk_get_rate(clk);
	KUNIT_ASSERT_GT(test, set_rate, 0);
	KUNIT_EXPECT_EQ(test, rounded_rate, set_rate);

	clk_put(clk);
}
311 
/* Test cases for the basic, parentless rate clock. */
static struct kunit_case clk_test_cases[] = {
	KUNIT_CASE(clk_test_get_rate),
	KUNIT_CASE(clk_test_set_get_rate),
	KUNIT_CASE(clk_test_set_set_get_rate),
	KUNIT_CASE(clk_test_round_set_get_rate),
	{}
};
319 
/*
 * Test suite for a basic rate clock, without any parent.
 *
 * These tests exercise the rate API with simple scenarios
 */
static struct kunit_suite clk_test_suite = {
	.name = "clk-test",
	.init = clk_test_init,
	.exit = clk_test_exit,
	.test_cases = clk_test_cases,
};
331 
332 static int clk_uncached_test_init(struct kunit *test)
333 {
334 	struct clk_dummy_context *ctx;
335 	int ret;
336 
337 	ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
338 	if (!ctx)
339 		return -ENOMEM;
340 	test->priv = ctx;
341 
342 	ctx->rate = DUMMY_CLOCK_INIT_RATE;
343 	ctx->hw.init = CLK_HW_INIT_NO_PARENT("test-clk",
344 					     &clk_dummy_rate_ops,
345 					     CLK_GET_RATE_NOCACHE);
346 
347 	ret = clk_hw_register(NULL, &ctx->hw);
348 	if (ret)
349 		return ret;
350 
351 	return 0;
352 }
353 
/*
 * Test that for an uncached clock, the clock framework doesn't cache
 * the rate and clk_get_rate() will return the underlying clock rate
 * even if it changed.
 */
static void clk_test_uncached_get_rate(struct kunit *test)
{
	struct clk_dummy_context *ctx = test->priv;
	struct clk_hw *hw = &ctx->hw;
	struct clk *clk = clk_hw_get_clk(hw, NULL);
	unsigned long rate;

	rate = clk_get_rate(clk);
	KUNIT_ASSERT_GT(test, rate, 0);
	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_INIT_RATE);

	/* We change the rate behind the clock framework's back */
	ctx->rate = DUMMY_CLOCK_RATE_1;
	rate = clk_get_rate(clk);
	KUNIT_ASSERT_GT(test, rate, 0);
	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);

	clk_put(clk);
}
378 
/*
 * Test that for an uncached clock, clk_set_rate_range() will work
 * properly if the rate hasn't changed.
 */
static void clk_test_uncached_set_range(struct kunit *test)
{
	struct clk_dummy_context *ctx = test->priv;
	struct clk_hw *hw = &ctx->hw;
	struct clk *clk = clk_hw_get_clk(hw, NULL);
	unsigned long rate;

	KUNIT_ASSERT_EQ(test,
			clk_set_rate_range(clk,
					   DUMMY_CLOCK_RATE_1,
					   DUMMY_CLOCK_RATE_2),
			0);

	/* The resulting rate must land within the requested range. */
	rate = clk_get_rate(clk);
	KUNIT_ASSERT_GT(test, rate, 0);
	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);

	clk_put(clk);
}
403 
/*
 * Test that for an uncached clock, clk_set_rate_range() will work
 * properly if the rate has changed in hardware.
 *
 * In this case, it means that if the rate wasn't initially in the range
 * we're trying to set, but got changed at some point into the range
 * without the kernel knowing about it, its rate shouldn't be affected.
 */
static void clk_test_uncached_updated_rate_set_range(struct kunit *test)
{
	struct clk_dummy_context *ctx = test->priv;
	struct clk_hw *hw = &ctx->hw;
	struct clk *clk = clk_hw_get_clk(hw, NULL);
	unsigned long rate;

	/* We change the rate behind the clock framework's back */
	ctx->rate = DUMMY_CLOCK_RATE_1 + 1000;
	KUNIT_ASSERT_EQ(test,
			clk_set_rate_range(clk,
					   DUMMY_CLOCK_RATE_1,
					   DUMMY_CLOCK_RATE_2),
			0);

	/* The out-of-band rate is already in range: it must be untouched. */
	rate = clk_get_rate(clk);
	KUNIT_ASSERT_GT(test, rate, 0);
	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1 + 1000);

	clk_put(clk);
}
433 
/* Test cases for the uncached, parentless rate clock. */
static struct kunit_case clk_uncached_test_cases[] = {
	KUNIT_CASE(clk_test_uncached_get_rate),
	KUNIT_CASE(clk_test_uncached_set_range),
	KUNIT_CASE(clk_test_uncached_updated_rate_set_range),
	{}
};
440 
/*
 * Test suite for a basic, uncached, rate clock, without any parent.
 *
 * These tests exercise the rate API with simple scenarios
 */
static struct kunit_suite clk_uncached_test_suite = {
	.name = "clk-uncached-test",
	.init = clk_uncached_test_init,
	.exit = clk_test_exit,
	.test_cases = clk_uncached_test_cases,
};
452 
453 static int
454 clk_multiple_parents_mux_test_init(struct kunit *test)
455 {
456 	struct clk_multiple_parent_ctx *ctx;
457 	const char *parents[2] = { "parent-0", "parent-1"};
458 	int ret;
459 
460 	ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
461 	if (!ctx)
462 		return -ENOMEM;
463 	test->priv = ctx;
464 
465 	ctx->parents_ctx[0].hw.init = CLK_HW_INIT_NO_PARENT("parent-0",
466 							    &clk_dummy_rate_ops,
467 							    0);
468 	ctx->parents_ctx[0].rate = DUMMY_CLOCK_RATE_1;
469 	ret = clk_hw_register(NULL, &ctx->parents_ctx[0].hw);
470 	if (ret)
471 		return ret;
472 
473 	ctx->parents_ctx[1].hw.init = CLK_HW_INIT_NO_PARENT("parent-1",
474 							    &clk_dummy_rate_ops,
475 							    0);
476 	ctx->parents_ctx[1].rate = DUMMY_CLOCK_RATE_2;
477 	ret = clk_hw_register(NULL, &ctx->parents_ctx[1].hw);
478 	if (ret)
479 		return ret;
480 
481 	ctx->current_parent = 0;
482 	ctx->hw.init = CLK_HW_INIT_PARENTS("test-mux", parents,
483 					   &clk_multiple_parents_mux_ops,
484 					   CLK_SET_RATE_PARENT);
485 	ret = clk_hw_register(NULL, &ctx->hw);
486 	if (ret)
487 		return ret;
488 
489 	return 0;
490 }
491 
/* Unregister the mux first, then its parents, to respect the topology. */
static void
clk_multiple_parents_mux_test_exit(struct kunit *test)
{
	struct clk_multiple_parent_ctx *ctx = test->priv;

	clk_hw_unregister(&ctx->hw);
	clk_hw_unregister(&ctx->parents_ctx[0].hw);
	clk_hw_unregister(&ctx->parents_ctx[1].hw);
}
501 
502 /*
503  * Test that for a clock with multiple parents, clk_get_parent()
504  * actually returns the current one.
505  */
506 static void
507 clk_test_multiple_parents_mux_get_parent(struct kunit *test)
508 {
509 	struct clk_multiple_parent_ctx *ctx = test->priv;
510 	struct clk_hw *hw = &ctx->hw;
511 	struct clk *clk = clk_hw_get_clk(hw, NULL);
512 	struct clk *parent = clk_hw_get_clk(&ctx->parents_ctx[0].hw, NULL);
513 
514 	KUNIT_EXPECT_TRUE(test, clk_is_match(clk_get_parent(clk), parent));
515 
516 	clk_put(parent);
517 	clk_put(clk);
518 }
519 
520 /*
521  * Test that for a clock with a multiple parents, clk_has_parent()
522  * actually reports all of them as parents.
523  */
524 static void
525 clk_test_multiple_parents_mux_has_parent(struct kunit *test)
526 {
527 	struct clk_multiple_parent_ctx *ctx = test->priv;
528 	struct clk_hw *hw = &ctx->hw;
529 	struct clk *clk = clk_hw_get_clk(hw, NULL);
530 	struct clk *parent;
531 
532 	parent = clk_hw_get_clk(&ctx->parents_ctx[0].hw, NULL);
533 	KUNIT_EXPECT_TRUE(test, clk_has_parent(clk, parent));
534 	clk_put(parent);
535 
536 	parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
537 	KUNIT_EXPECT_TRUE(test, clk_has_parent(clk, parent));
538 	clk_put(parent);
539 
540 	clk_put(clk);
541 }
542 
/*
 * Test that for a clock with a multiple parents, if we set a range on
 * that clock and the parent is changed, its rate after the reparenting
 * is still within the range we asked for.
 *
 * FIXME: clk_set_parent() only does the reparenting but doesn't
 * reevaluate whether the new clock rate is within its boundaries or
 * not.
 */
static void
clk_test_multiple_parents_mux_set_range_set_parent_get_rate(struct kunit *test)
{
	struct clk_multiple_parent_ctx *ctx = test->priv;
	struct clk_hw *hw = &ctx->hw;
	struct clk *clk = clk_hw_get_clk(hw, NULL);
	struct clk *parent1, *parent2;
	unsigned long rate;
	int ret;

	/* Skipped until the core FIXME above is addressed. */
	kunit_skip(test, "This needs to be fixed in the core.");

	parent1 = clk_hw_get_clk(&ctx->parents_ctx[0].hw, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent1);
	KUNIT_ASSERT_TRUE(test, clk_is_match(clk_get_parent(clk), parent1));

	parent2 = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent2);

	ret = clk_set_rate(parent1, DUMMY_CLOCK_RATE_1);
	KUNIT_ASSERT_EQ(test, ret, 0);

	ret = clk_set_rate(parent2, DUMMY_CLOCK_RATE_2);
	KUNIT_ASSERT_EQ(test, ret, 0);

	ret = clk_set_rate_range(clk,
				 DUMMY_CLOCK_RATE_1 - 1000,
				 DUMMY_CLOCK_RATE_1 + 1000);
	KUNIT_ASSERT_EQ(test, ret, 0);

	/* Reparent to parent2, whose rate lies outside the range. */
	ret = clk_set_parent(clk, parent2);
	KUNIT_ASSERT_EQ(test, ret, 0);

	rate = clk_get_rate(clk);
	KUNIT_ASSERT_GT(test, rate, 0);
	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1 - 1000);
	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_1 + 1000);

	clk_put(parent2);
	clk_put(parent1);
	clk_put(clk);
}
594 
/* Test cases for the two-parent mux. */
static struct kunit_case clk_multiple_parents_mux_test_cases[] = {
	KUNIT_CASE(clk_test_multiple_parents_mux_get_parent),
	KUNIT_CASE(clk_test_multiple_parents_mux_has_parent),
	KUNIT_CASE(clk_test_multiple_parents_mux_set_range_set_parent_get_rate),
	{}
};
601 
/*
 * Test suite for a basic mux clock with two parents, with
 * CLK_SET_RATE_PARENT on the child.
 *
 * These tests exercise the consumer API and check that the state of the
 * child and parents are sane and consistent.
 */
static struct kunit_suite
clk_multiple_parents_mux_test_suite = {
	.name = "clk-multiple-parents-mux-test",
	.init = clk_multiple_parents_mux_test_init,
	.exit = clk_multiple_parents_mux_test_exit,
	.test_cases = clk_multiple_parents_mux_test_cases,
};
616 
617 static int
618 clk_orphan_transparent_multiple_parent_mux_test_init(struct kunit *test)
619 {
620 	struct clk_multiple_parent_ctx *ctx;
621 	const char *parents[2] = { "missing-parent", "proper-parent"};
622 	int ret;
623 
624 	ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
625 	if (!ctx)
626 		return -ENOMEM;
627 	test->priv = ctx;
628 
629 	ctx->parents_ctx[1].hw.init = CLK_HW_INIT_NO_PARENT("proper-parent",
630 							    &clk_dummy_rate_ops,
631 							    0);
632 	ctx->parents_ctx[1].rate = DUMMY_CLOCK_INIT_RATE;
633 	ret = clk_hw_register(NULL, &ctx->parents_ctx[1].hw);
634 	if (ret)
635 		return ret;
636 
637 	ctx->hw.init = CLK_HW_INIT_PARENTS("test-orphan-mux", parents,
638 					   &clk_multiple_parents_mux_ops,
639 					   CLK_SET_RATE_PARENT);
640 	ret = clk_hw_register(NULL, &ctx->hw);
641 	if (ret)
642 		return ret;
643 
644 	return 0;
645 }
646 
/* Unregister the orphan mux, then its only registered parent. */
static void
clk_orphan_transparent_multiple_parent_mux_test_exit(struct kunit *test)
{
	struct clk_multiple_parent_ctx *ctx = test->priv;

	clk_hw_unregister(&ctx->hw);
	clk_hw_unregister(&ctx->parents_ctx[1].hw);
}
655 
656 /*
657  * Test that, for a mux whose current parent hasn't been registered yet and is
658  * thus orphan, clk_get_parent() will return NULL.
659  */
660 static void
661 clk_test_orphan_transparent_multiple_parent_mux_get_parent(struct kunit *test)
662 {
663 	struct clk_multiple_parent_ctx *ctx = test->priv;
664 	struct clk_hw *hw = &ctx->hw;
665 	struct clk *clk = clk_hw_get_clk(hw, NULL);
666 
667 	KUNIT_EXPECT_PTR_EQ(test, clk_get_parent(clk), NULL);
668 
669 	clk_put(clk);
670 }
671 
672 /*
673  * Test that, for a mux whose current parent hasn't been registered yet,
674  * calling clk_set_parent() to a valid parent will properly update the
675  * mux parent and its orphan status.
676  */
677 static void
678 clk_test_orphan_transparent_multiple_parent_mux_set_parent(struct kunit *test)
679 {
680 	struct clk_multiple_parent_ctx *ctx = test->priv;
681 	struct clk_hw *hw = &ctx->hw;
682 	struct clk *clk = clk_hw_get_clk(hw, NULL);
683 	struct clk *parent, *new_parent;
684 	int ret;
685 
686 	parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
687 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
688 
689 	ret = clk_set_parent(clk, parent);
690 	KUNIT_ASSERT_EQ(test, ret, 0);
691 
692 	new_parent = clk_get_parent(clk);
693 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
694 	KUNIT_EXPECT_TRUE(test, clk_is_match(parent, new_parent));
695 
696 	clk_put(parent);
697 	clk_put(clk);
698 }
699 
/*
 * Test that, for a mux that started orphan but got switched to a valid
 * parent, calling clk_drop_range() on the mux won't affect the parent
 * rate.
 */
static void
clk_test_orphan_transparent_multiple_parent_mux_set_parent_drop_range(struct kunit *test)
{
	struct clk_multiple_parent_ctx *ctx = test->priv;
	struct clk_hw *hw = &ctx->hw;
	struct clk *clk = clk_hw_get_clk(hw, NULL);
	struct clk *parent;
	unsigned long parent_rate, new_parent_rate;
	int ret;

	parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);

	/* Snapshot the parent rate before touching the mux. */
	parent_rate = clk_get_rate(parent);
	KUNIT_ASSERT_GT(test, parent_rate, 0);

	ret = clk_set_parent(clk, parent);
	KUNIT_ASSERT_EQ(test, ret, 0);

	ret = clk_drop_range(clk);
	KUNIT_ASSERT_EQ(test, ret, 0);

	new_parent_rate = clk_get_rate(clk);
	KUNIT_ASSERT_GT(test, new_parent_rate, 0);
	KUNIT_EXPECT_EQ(test, parent_rate, new_parent_rate);

	clk_put(parent);
	clk_put(clk);
}
734 
/*
 * Test that, for a mux that started orphan but got switched to a valid
 * parent, the rate of the mux and its new parent are consistent.
 */
static void
clk_test_orphan_transparent_multiple_parent_mux_set_parent_get_rate(struct kunit *test)
{
	struct clk_multiple_parent_ctx *ctx = test->priv;
	struct clk_hw *hw = &ctx->hw;
	struct clk *clk = clk_hw_get_clk(hw, NULL);
	struct clk *parent;
	unsigned long parent_rate, rate;
	int ret;

	parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);

	parent_rate = clk_get_rate(parent);
	KUNIT_ASSERT_GT(test, parent_rate, 0);

	ret = clk_set_parent(clk, parent);
	KUNIT_ASSERT_EQ(test, ret, 0);

	/* The transparent mux must now report its parent's rate. */
	rate = clk_get_rate(clk);
	KUNIT_ASSERT_GT(test, rate, 0);
	KUNIT_EXPECT_EQ(test, parent_rate, rate);

	clk_put(parent);
	clk_put(clk);
}
765 
/*
 * Test that, for a mux that started orphan but got switched to a valid
 * parent, calling clk_put() on the mux won't affect the parent rate.
 */
static void
clk_test_orphan_transparent_multiple_parent_mux_set_parent_put(struct kunit *test)
{
	struct clk_multiple_parent_ctx *ctx = test->priv;
	struct clk *clk, *parent;
	unsigned long parent_rate, new_parent_rate;
	int ret;

	parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);

	clk = clk_hw_get_clk(&ctx->hw, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, clk);

	parent_rate = clk_get_rate(parent);
	KUNIT_ASSERT_GT(test, parent_rate, 0);

	ret = clk_set_parent(clk, parent);
	KUNIT_ASSERT_EQ(test, ret, 0);

	/* Dropping the child's reference must not disturb the parent. */
	clk_put(clk);

	new_parent_rate = clk_get_rate(parent);
	KUNIT_ASSERT_GT(test, new_parent_rate, 0);
	KUNIT_EXPECT_EQ(test, parent_rate, new_parent_rate);

	clk_put(parent);
}
798 
/*
 * Test that, for a mux that started orphan but got switched to a valid
 * parent, calling clk_set_rate_range() will affect the parent state if
 * its rate is out of range.
 */
static void
clk_test_orphan_transparent_multiple_parent_mux_set_parent_set_range_modified(struct kunit *test)
{
	struct clk_multiple_parent_ctx *ctx = test->priv;
	struct clk_hw *hw = &ctx->hw;
	struct clk *clk = clk_hw_get_clk(hw, NULL);
	struct clk *parent;
	unsigned long rate;
	int ret;

	parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);

	ret = clk_set_parent(clk, parent);
	KUNIT_ASSERT_EQ(test, ret, 0);

	/* The parent's initial rate is outside this range. */
	ret = clk_set_rate_range(clk, DUMMY_CLOCK_RATE_1, DUMMY_CLOCK_RATE_2);
	KUNIT_ASSERT_EQ(test, ret, 0);

	rate = clk_get_rate(clk);
	KUNIT_ASSERT_GT(test, rate, 0);
	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);

	clk_put(parent);
	clk_put(clk);
}
831 
/*
 * Test that, for a mux that started orphan but got switched to a valid
 * parent, calling clk_set_rate_range() won't affect the parent state if
 * its rate is within range.
 */
static void
clk_test_orphan_transparent_multiple_parent_mux_set_parent_set_range_untouched(struct kunit *test)
{
	struct clk_multiple_parent_ctx *ctx = test->priv;
	struct clk_hw *hw = &ctx->hw;
	struct clk *clk = clk_hw_get_clk(hw, NULL);
	struct clk *parent;
	unsigned long parent_rate, new_parent_rate;
	int ret;

	parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);

	parent_rate = clk_get_rate(parent);
	KUNIT_ASSERT_GT(test, parent_rate, 0);

	ret = clk_set_parent(clk, parent);
	KUNIT_ASSERT_EQ(test, ret, 0);

	/* The range brackets the parent's current rate: no change expected. */
	ret = clk_set_rate_range(clk,
				 DUMMY_CLOCK_INIT_RATE - 1000,
				 DUMMY_CLOCK_INIT_RATE + 1000);
	KUNIT_ASSERT_EQ(test, ret, 0);

	new_parent_rate = clk_get_rate(parent);
	KUNIT_ASSERT_GT(test, new_parent_rate, 0);
	KUNIT_EXPECT_EQ(test, parent_rate, new_parent_rate);

	clk_put(parent);
	clk_put(clk);
}
868 
/*
 * Test that, for a mux whose current parent hasn't been registered yet,
 * calling clk_set_rate_range() will succeed, and will be taken into
 * account when rounding a rate.
 */
static void
clk_test_orphan_transparent_multiple_parent_mux_set_range_round_rate(struct kunit *test)
{
	struct clk_multiple_parent_ctx *ctx = test->priv;
	struct clk_hw *hw = &ctx->hw;
	struct clk *clk = clk_hw_get_clk(hw, NULL);
	long rate;
	int ret;

	ret = clk_set_rate_range(clk, DUMMY_CLOCK_RATE_1, DUMMY_CLOCK_RATE_2);
	KUNIT_ASSERT_EQ(test, ret, 0);

	/* A below-range request must be rounded up into the range. */
	rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_1 - 1000);
	KUNIT_ASSERT_GT(test, rate, 0);
	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);

	clk_put(clk);
}
893 
/*
 * Test that, for a mux that started orphan, was assigned a rate and
 * then got switched to a valid parent, its rate is eventually within
 * range.
 *
 * FIXME: Even though we update the rate as part of clk_set_parent(), we
 * don't evaluate whether that new rate is within range and needs to be
 * adjusted.
 */
static void
clk_test_orphan_transparent_multiple_parent_mux_set_range_set_parent_get_rate(struct kunit *test)
{
	struct clk_multiple_parent_ctx *ctx = test->priv;
	struct clk_hw *hw = &ctx->hw;
	struct clk *clk = clk_hw_get_clk(hw, NULL);
	struct clk *parent;
	unsigned long rate;
	int ret;

	/* Skipped until the core FIXME above is addressed. */
	kunit_skip(test, "This needs to be fixed in the core.");

	clk_hw_set_rate_range(hw, DUMMY_CLOCK_RATE_1, DUMMY_CLOCK_RATE_2);

	parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);

	ret = clk_set_parent(clk, parent);
	KUNIT_ASSERT_EQ(test, ret, 0);

	rate = clk_get_rate(clk);
	KUNIT_ASSERT_GT(test, rate, 0);
	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);

	clk_put(parent);
	clk_put(clk);
}
931 
/* Test cases for the initially-orphan transparent mux. */
static struct kunit_case clk_orphan_transparent_multiple_parent_mux_test_cases[] = {
	KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_get_parent),
	KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_parent),
	KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_parent_drop_range),
	KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_parent_get_rate),
	KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_parent_put),
	KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_parent_set_range_modified),
	KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_parent_set_range_untouched),
	KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_range_round_rate),
	KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_range_set_parent_get_rate),
	{}
};
944 
/*
 * Test suite for a basic mux clock with two parents. The default parent
 * isn't registered, only the second parent is. By default, the clock
 * will thus be orphan.
 *
 * These tests exercise the behaviour of the consumer API when dealing
 * with an orphan clock, and how we deal with the transition to a valid
 * parent.
 */
static struct kunit_suite clk_orphan_transparent_multiple_parent_mux_test_suite = {
	.name = "clk-orphan-transparent-multiple-parent-mux-test",
	.init = clk_orphan_transparent_multiple_parent_mux_test_init,
	.exit = clk_orphan_transparent_multiple_parent_mux_test_exit,
	.test_cases = clk_orphan_transparent_multiple_parent_mux_test_cases,
};
960 
/* A transparent child clock with exactly one dummy parent. */
struct clk_single_parent_ctx {
	struct clk_dummy_context parent_ctx;
	struct clk_hw hw;
};
965 
966 static int clk_single_parent_mux_test_init(struct kunit *test)
967 {
968 	struct clk_single_parent_ctx *ctx;
969 	int ret;
970 
971 	ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
972 	if (!ctx)
973 		return -ENOMEM;
974 	test->priv = ctx;
975 
976 	ctx->parent_ctx.rate = DUMMY_CLOCK_INIT_RATE;
977 	ctx->parent_ctx.hw.init =
978 		CLK_HW_INIT_NO_PARENT("parent-clk",
979 				      &clk_dummy_rate_ops,
980 				      0);
981 
982 	ret = clk_hw_register(NULL, &ctx->parent_ctx.hw);
983 	if (ret)
984 		return ret;
985 
986 	ctx->hw.init = CLK_HW_INIT("test-clk", "parent-clk",
987 				   &clk_dummy_single_parent_ops,
988 				   CLK_SET_RATE_PARENT);
989 
990 	ret = clk_hw_register(NULL, &ctx->hw);
991 	if (ret)
992 		return ret;
993 
994 	return 0;
995 }
996 
/* Unregister the child first, then its parent. */
static void
clk_single_parent_mux_test_exit(struct kunit *test)
{
	struct clk_single_parent_ctx *ctx = test->priv;

	clk_hw_unregister(&ctx->hw);
	clk_hw_unregister(&ctx->parent_ctx.hw);
}
1005 
/*
 * Test that for a clock with a single parent, clk_get_parent() actually
 * returns the parent.
 */
static void
clk_test_single_parent_mux_get_parent(struct kunit *test)
{
	struct clk_single_parent_ctx *ctx = test->priv;
	struct clk_hw *hw = &ctx->hw;
	struct clk *clk = clk_hw_get_clk(hw, NULL);
	struct clk *parent = clk_hw_get_clk(&ctx->parent_ctx.hw, NULL);

	KUNIT_EXPECT_TRUE(test, clk_is_match(clk_get_parent(clk), parent));

	clk_put(parent);
	clk_put(clk);
}
1023 
/*
 * Test that for a clock with a single parent, clk_has_parent() actually
 * reports it as a parent.
 */
static void
clk_test_single_parent_mux_has_parent(struct kunit *test)
{
	struct clk_single_parent_ctx *ctx = test->priv;
	struct clk_hw *hw = &ctx->hw;
	struct clk *clk = clk_hw_get_clk(hw, NULL);
	struct clk *parent = clk_hw_get_clk(&ctx->parent_ctx.hw, NULL);

	KUNIT_EXPECT_TRUE(test, clk_has_parent(clk, parent));

	clk_put(parent);
	clk_put(clk);
}
1041 
1042 /*
1043  * Test that for a clock that can't modify its rate and with a single
1044  * parent, if we set disjoints range on the parent and then the child,
1045  * the second will return an error.
1046  *
1047  * FIXME: clk_set_rate_range() only considers the current clock when
1048  * evaluating whether ranges are disjoints and not the upstream clocks
1049  * ranges.
1050  */
1051 static void
1052 clk_test_single_parent_mux_set_range_disjoint_child_last(struct kunit *test)
1053 {
1054 	struct clk_single_parent_ctx *ctx = test->priv;
1055 	struct clk_hw *hw = &ctx->hw;
1056 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1057 	struct clk *parent;
1058 	int ret;
1059 
1060 	kunit_skip(test, "This needs to be fixed in the core.");
1061 
1062 	parent = clk_get_parent(clk);
1063 	KUNIT_ASSERT_PTR_NE(test, parent, NULL);
1064 
1065 	ret = clk_set_rate_range(parent, 1000, 2000);
1066 	KUNIT_ASSERT_EQ(test, ret, 0);
1067 
1068 	ret = clk_set_rate_range(clk, 3000, 4000);
1069 	KUNIT_EXPECT_LT(test, ret, 0);
1070 
1071 	clk_put(clk);
1072 }
1073 
1074 /*
1075  * Test that for a clock that can't modify its rate and with a single
1076  * parent, if we set disjoints range on the child and then the parent,
1077  * the second will return an error.
1078  *
1079  * FIXME: clk_set_rate_range() only considers the current clock when
1080  * evaluating whether ranges are disjoints and not the downstream clocks
1081  * ranges.
1082  */
1083 static void
1084 clk_test_single_parent_mux_set_range_disjoint_parent_last(struct kunit *test)
1085 {
1086 	struct clk_single_parent_ctx *ctx = test->priv;
1087 	struct clk_hw *hw = &ctx->hw;
1088 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1089 	struct clk *parent;
1090 	int ret;
1091 
1092 	kunit_skip(test, "This needs to be fixed in the core.");
1093 
1094 	parent = clk_get_parent(clk);
1095 	KUNIT_ASSERT_PTR_NE(test, parent, NULL);
1096 
1097 	ret = clk_set_rate_range(clk, 1000, 2000);
1098 	KUNIT_ASSERT_EQ(test, ret, 0);
1099 
1100 	ret = clk_set_rate_range(parent, 3000, 4000);
1101 	KUNIT_EXPECT_LT(test, ret, 0);
1102 
1103 	clk_put(clk);
1104 }
1105 
1106 /*
1107  * Test that for a clock that can't modify its rate and with a single
1108  * parent, if we set a range on the parent and then call
1109  * clk_round_rate(), the boundaries of the parent are taken into
1110  * account.
1111  */
1112 static void
1113 clk_test_single_parent_mux_set_range_round_rate_parent_only(struct kunit *test)
1114 {
1115 	struct clk_single_parent_ctx *ctx = test->priv;
1116 	struct clk_hw *hw = &ctx->hw;
1117 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1118 	struct clk *parent;
1119 	long rate;
1120 	int ret;
1121 
1122 	parent = clk_get_parent(clk);
1123 	KUNIT_ASSERT_PTR_NE(test, parent, NULL);
1124 
1125 	ret = clk_set_rate_range(parent, DUMMY_CLOCK_RATE_1, DUMMY_CLOCK_RATE_2);
1126 	KUNIT_ASSERT_EQ(test, ret, 0);
1127 
1128 	rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_1 - 1000);
1129 	KUNIT_ASSERT_GT(test, rate, 0);
1130 	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
1131 	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
1132 
1133 	clk_put(clk);
1134 }
1135 
1136 /*
1137  * Test that for a clock that can't modify its rate and with a single
1138  * parent, if we set a range on the parent and a more restrictive one on
1139  * the child, and then call clk_round_rate(), the boundaries of the
1140  * two clocks are taken into account.
1141  */
1142 static void
1143 clk_test_single_parent_mux_set_range_round_rate_child_smaller(struct kunit *test)
1144 {
1145 	struct clk_single_parent_ctx *ctx = test->priv;
1146 	struct clk_hw *hw = &ctx->hw;
1147 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1148 	struct clk *parent;
1149 	long rate;
1150 	int ret;
1151 
1152 	parent = clk_get_parent(clk);
1153 	KUNIT_ASSERT_PTR_NE(test, parent, NULL);
1154 
1155 	ret = clk_set_rate_range(parent, DUMMY_CLOCK_RATE_1, DUMMY_CLOCK_RATE_2);
1156 	KUNIT_ASSERT_EQ(test, ret, 0);
1157 
1158 	ret = clk_set_rate_range(clk, DUMMY_CLOCK_RATE_1 + 1000, DUMMY_CLOCK_RATE_2 - 1000);
1159 	KUNIT_ASSERT_EQ(test, ret, 0);
1160 
1161 	rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_1 - 1000);
1162 	KUNIT_ASSERT_GT(test, rate, 0);
1163 	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1 + 1000);
1164 	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2 - 1000);
1165 
1166 	rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_2 + 1000);
1167 	KUNIT_ASSERT_GT(test, rate, 0);
1168 	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1 + 1000);
1169 	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2 - 1000);
1170 
1171 	clk_put(clk);
1172 }
1173 
1174 /*
1175  * Test that for a clock that can't modify its rate and with a single
1176  * parent, if we set a range on the child and a more restrictive one on
1177  * the parent, and then call clk_round_rate(), the boundaries of the
1178  * two clocks are taken into account.
1179  */
1180 static void
1181 clk_test_single_parent_mux_set_range_round_rate_parent_smaller(struct kunit *test)
1182 {
1183 	struct clk_single_parent_ctx *ctx = test->priv;
1184 	struct clk_hw *hw = &ctx->hw;
1185 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1186 	struct clk *parent;
1187 	long rate;
1188 	int ret;
1189 
1190 	parent = clk_get_parent(clk);
1191 	KUNIT_ASSERT_PTR_NE(test, parent, NULL);
1192 
1193 	ret = clk_set_rate_range(parent, DUMMY_CLOCK_RATE_1 + 1000, DUMMY_CLOCK_RATE_2 - 1000);
1194 	KUNIT_ASSERT_EQ(test, ret, 0);
1195 
1196 	ret = clk_set_rate_range(clk, DUMMY_CLOCK_RATE_1, DUMMY_CLOCK_RATE_2);
1197 	KUNIT_ASSERT_EQ(test, ret, 0);
1198 
1199 	rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_1 - 1000);
1200 	KUNIT_ASSERT_GT(test, rate, 0);
1201 	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1 + 1000);
1202 	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2 - 1000);
1203 
1204 	rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_2 + 1000);
1205 	KUNIT_ASSERT_GT(test, rate, 0);
1206 	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1 + 1000);
1207 	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2 - 1000);
1208 
1209 	clk_put(clk);
1210 }
1211 
/* Consumer API checks for a mux clock with a single possible parent. */
static struct kunit_case clk_single_parent_mux_test_cases[] = {
	KUNIT_CASE(clk_test_single_parent_mux_get_parent),
	KUNIT_CASE(clk_test_single_parent_mux_has_parent),
	KUNIT_CASE(clk_test_single_parent_mux_set_range_disjoint_child_last),
	KUNIT_CASE(clk_test_single_parent_mux_set_range_disjoint_parent_last),
	KUNIT_CASE(clk_test_single_parent_mux_set_range_round_rate_child_smaller),
	KUNIT_CASE(clk_test_single_parent_mux_set_range_round_rate_parent_only),
	KUNIT_CASE(clk_test_single_parent_mux_set_range_round_rate_parent_smaller),
	{}
};
1222 
1223 /*
1224  * Test suite for a basic mux clock with one parent, with
1225  * CLK_SET_RATE_PARENT on the child.
1226  *
1227  * These tests exercise the consumer API and check that the state of the
1228  * child and parent are sane and consistent.
1229  */
static struct kunit_suite
clk_single_parent_mux_test_suite = {
	.name = "clk-single-parent-mux-test",
	.init = clk_single_parent_mux_test_init,	/* registers parent then child */
	.exit = clk_single_parent_mux_test_exit,	/* unregisters child then parent */
	.test_cases = clk_single_parent_mux_test_cases,
};
1237 
1238 static int clk_orphan_transparent_single_parent_mux_test_init(struct kunit *test)
1239 {
1240 	struct clk_single_parent_ctx *ctx;
1241 	struct clk_init_data init = { };
1242 	const char * const parents[] = { "orphan_parent" };
1243 	int ret;
1244 
1245 	ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
1246 	if (!ctx)
1247 		return -ENOMEM;
1248 	test->priv = ctx;
1249 
1250 	init.name = "test_orphan_dummy_parent";
1251 	init.ops = &clk_dummy_single_parent_ops;
1252 	init.parent_names = parents;
1253 	init.num_parents = ARRAY_SIZE(parents);
1254 	init.flags = CLK_SET_RATE_PARENT;
1255 	ctx->hw.init = &init;
1256 
1257 	ret = clk_hw_register(NULL, &ctx->hw);
1258 	if (ret)
1259 		return ret;
1260 
1261 	memset(&init, 0, sizeof(init));
1262 	init.name = "orphan_parent";
1263 	init.ops = &clk_dummy_rate_ops;
1264 	ctx->parent_ctx.hw.init = &init;
1265 	ctx->parent_ctx.rate = DUMMY_CLOCK_INIT_RATE;
1266 
1267 	ret = clk_hw_register(NULL, &ctx->parent_ctx.hw);
1268 	if (ret)
1269 		return ret;
1270 
1271 	return 0;
1272 }
1273 
1274 /*
1275  * Test that a mux-only clock, with an initial rate within a range,
1276  * will still have the same rate after the range has been enforced.
1277  *
1278  * See:
1279  * https://lore.kernel.org/linux-clk/7720158d-10a7-a17b-73a4-a8615c9c6d5c@collabora.com/
1280  */
1281 static void clk_test_orphan_transparent_parent_mux_set_range(struct kunit *test)
1282 {
1283 	struct clk_single_parent_ctx *ctx = test->priv;
1284 	struct clk_hw *hw = &ctx->hw;
1285 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1286 	unsigned long rate, new_rate;
1287 
1288 	rate = clk_get_rate(clk);
1289 	KUNIT_ASSERT_GT(test, rate, 0);
1290 
1291 	KUNIT_ASSERT_EQ(test,
1292 			clk_set_rate_range(clk,
1293 					   ctx->parent_ctx.rate - 1000,
1294 					   ctx->parent_ctx.rate + 1000),
1295 			0);
1296 
1297 	new_rate = clk_get_rate(clk);
1298 	KUNIT_ASSERT_GT(test, new_rate, 0);
1299 	KUNIT_EXPECT_EQ(test, rate, new_rate);
1300 
1301 	clk_put(clk);
1302 }
1303 
/* Checks for a formerly-orphan transparent mux with a single parent. */
static struct kunit_case clk_orphan_transparent_single_parent_mux_test_cases[] = {
	KUNIT_CASE(clk_test_orphan_transparent_parent_mux_set_range),
	{}
};
1308 
1309 /*
1310  * Test suite for a basic mux clock with one parent. The parent is
1311  * registered after its child. The clock will thus be an orphan when
1312  * registered, but will no longer be when the tests run.
1313  *
1314  * These tests make sure a clock that used to be orphan has a sane,
1315  * consistent, behaviour.
1316  */
static struct kunit_suite clk_orphan_transparent_single_parent_test_suite = {
	.name = "clk-orphan-transparent-single-parent-test",
	.init = clk_orphan_transparent_single_parent_mux_test_init,
	/* same layout as clk_single_parent_ctx, so the exit can be shared */
	.exit = clk_single_parent_mux_test_exit,
	.test_cases = clk_orphan_transparent_single_parent_mux_test_cases,
};
1323 
/*
 * Test context for a three-clock chain:
 * root (parent_parent_ctx) -> intermediate (parent_ctx) -> leaf (hw).
 */
struct clk_single_parent_two_lvl_ctx {
	struct clk_dummy_context parent_parent_ctx;	/* "root-parent" */
	struct clk_dummy_context parent_ctx;		/* "intermediate-parent" */
	struct clk_hw hw;				/* "test-clk", the leaf */
};
1329 
1330 static int
1331 clk_orphan_two_level_root_last_test_init(struct kunit *test)
1332 {
1333 	struct clk_single_parent_two_lvl_ctx *ctx;
1334 	int ret;
1335 
1336 	ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
1337 	if (!ctx)
1338 		return -ENOMEM;
1339 	test->priv = ctx;
1340 
1341 	ctx->parent_ctx.hw.init =
1342 		CLK_HW_INIT("intermediate-parent",
1343 			    "root-parent",
1344 			    &clk_dummy_single_parent_ops,
1345 			    CLK_SET_RATE_PARENT);
1346 	ret = clk_hw_register(NULL, &ctx->parent_ctx.hw);
1347 	if (ret)
1348 		return ret;
1349 
1350 	ctx->hw.init =
1351 		CLK_HW_INIT("test-clk", "intermediate-parent",
1352 			    &clk_dummy_single_parent_ops,
1353 			    CLK_SET_RATE_PARENT);
1354 	ret = clk_hw_register(NULL, &ctx->hw);
1355 	if (ret)
1356 		return ret;
1357 
1358 	ctx->parent_parent_ctx.rate = DUMMY_CLOCK_INIT_RATE;
1359 	ctx->parent_parent_ctx.hw.init =
1360 		CLK_HW_INIT_NO_PARENT("root-parent",
1361 				      &clk_dummy_rate_ops,
1362 				      0);
1363 	ret = clk_hw_register(NULL, &ctx->parent_parent_ctx.hw);
1364 	if (ret)
1365 		return ret;
1366 
1367 	return 0;
1368 }
1369 
/*
 * Undo clk_orphan_two_level_root_last_test_init(): unregister leaf,
 * then intermediate, then root, so no clock is removed while a child
 * still references it.
 */
static void
clk_orphan_two_level_root_last_test_exit(struct kunit *test)
{
	struct clk_single_parent_two_lvl_ctx *ctx = test->priv;

	clk_hw_unregister(&ctx->hw);
	clk_hw_unregister(&ctx->parent_ctx.hw);
	clk_hw_unregister(&ctx->parent_parent_ctx.hw);
}
1379 
1380 /*
1381  * Test that, for a clock whose parent used to be orphan, clk_get_rate()
1382  * will return the proper rate.
1383  */
1384 static void
1385 clk_orphan_two_level_root_last_test_get_rate(struct kunit *test)
1386 {
1387 	struct clk_single_parent_two_lvl_ctx *ctx = test->priv;
1388 	struct clk_hw *hw = &ctx->hw;
1389 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1390 	unsigned long rate;
1391 
1392 	rate = clk_get_rate(clk);
1393 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_INIT_RATE);
1394 
1395 	clk_put(clk);
1396 }
1397 
1398 /*
1399  * Test that, for a clock whose parent used to be orphan,
1400  * clk_set_rate_range() won't affect its rate if it is already within
1401  * range.
1402  *
1403  * See (for Exynos 4210):
1404  * https://lore.kernel.org/linux-clk/366a0232-bb4a-c357-6aa8-636e398e05eb@samsung.com/
1405  */
1406 static void
1407 clk_orphan_two_level_root_last_test_set_range(struct kunit *test)
1408 {
1409 	struct clk_single_parent_two_lvl_ctx *ctx = test->priv;
1410 	struct clk_hw *hw = &ctx->hw;
1411 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1412 	unsigned long rate;
1413 	int ret;
1414 
1415 	ret = clk_set_rate_range(clk,
1416 				 DUMMY_CLOCK_INIT_RATE - 1000,
1417 				 DUMMY_CLOCK_INIT_RATE + 1000);
1418 	KUNIT_ASSERT_EQ(test, ret, 0);
1419 
1420 	rate = clk_get_rate(clk);
1421 	KUNIT_ASSERT_GT(test, rate, 0);
1422 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_INIT_RATE);
1423 
1424 	clk_put(clk);
1425 }
1426 
/* Checks for a leaf clock whose whole ancestry used to be orphan. */
static struct kunit_case
clk_orphan_two_level_root_last_test_cases[] = {
	KUNIT_CASE(clk_orphan_two_level_root_last_test_get_rate),
	KUNIT_CASE(clk_orphan_two_level_root_last_test_set_range),
	{}
};
1433 
1434 /*
1435  * Test suite for a basic, transparent, clock with a parent that is also
1436  * such a clock. The parent's parent is registered last, while the
1437  * parent and its child are registered in that order. The intermediate
1438  * and leaf clocks will thus be orphan when registered, but the leaf
1439  * clock itself will always have its parent and will never be
1440  * reparented. Indeed, it's only orphan because its parent is.
1441  *
1442  * These tests exercise the behaviour of the consumer API when dealing
1443  * with an orphan clock, and how we deal with the transition to a valid
1444  * parent.
1445  */
static struct kunit_suite
clk_orphan_two_level_root_last_test_suite = {
	.name = "clk-orphan-two-level-root-last-test",
	.init = clk_orphan_two_level_root_last_test_init,	/* root registered last */
	.exit = clk_orphan_two_level_root_last_test_exit,	/* leaf unregistered first */
	.test_cases = clk_orphan_two_level_root_last_test_cases,
};
1453 
1454 /*
1455  * Test that clk_set_rate_range won't return an error for a valid range
1456  * and that it will make sure the rate of the clock is within the
1457  * boundaries.
1458  */
1459 static void clk_range_test_set_range(struct kunit *test)
1460 {
1461 	struct clk_dummy_context *ctx = test->priv;
1462 	struct clk_hw *hw = &ctx->hw;
1463 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1464 	unsigned long rate;
1465 
1466 	KUNIT_ASSERT_EQ(test,
1467 			clk_set_rate_range(clk,
1468 					   DUMMY_CLOCK_RATE_1,
1469 					   DUMMY_CLOCK_RATE_2),
1470 			0);
1471 
1472 	rate = clk_get_rate(clk);
1473 	KUNIT_ASSERT_GT(test, rate, 0);
1474 	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
1475 	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
1476 
1477 	clk_put(clk);
1478 }
1479 
1480 /*
1481  * Test that calling clk_set_rate_range with a minimum rate higher than
1482  * the maximum rate returns an error.
1483  */
1484 static void clk_range_test_set_range_invalid(struct kunit *test)
1485 {
1486 	struct clk_dummy_context *ctx = test->priv;
1487 	struct clk_hw *hw = &ctx->hw;
1488 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1489 
1490 	KUNIT_EXPECT_LT(test,
1491 			clk_set_rate_range(clk,
1492 					   DUMMY_CLOCK_RATE_1 + 1000,
1493 					   DUMMY_CLOCK_RATE_1),
1494 			0);
1495 
1496 	clk_put(clk);
1497 }
1498 
1499 /*
1500  * Test that users can't set multiple, disjoints, range that would be
1501  * impossible to meet.
1502  */
1503 static void clk_range_test_multiple_disjoints_range(struct kunit *test)
1504 {
1505 	struct clk_dummy_context *ctx = test->priv;
1506 	struct clk_hw *hw = &ctx->hw;
1507 	struct clk *user1, *user2;
1508 
1509 	user1 = clk_hw_get_clk(hw, NULL);
1510 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user1);
1511 
1512 	user2 = clk_hw_get_clk(hw, NULL);
1513 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user2);
1514 
1515 	KUNIT_ASSERT_EQ(test,
1516 			clk_set_rate_range(user1, 1000, 2000),
1517 			0);
1518 
1519 	KUNIT_EXPECT_LT(test,
1520 			clk_set_rate_range(user2, 3000, 4000),
1521 			0);
1522 
1523 	clk_put(user2);
1524 	clk_put(user1);
1525 }
1526 
1527 /*
1528  * Test that if our clock has some boundaries and we try to round a rate
1529  * lower than the minimum, the returned rate will be within range.
1530  */
1531 static void clk_range_test_set_range_round_rate_lower(struct kunit *test)
1532 {
1533 	struct clk_dummy_context *ctx = test->priv;
1534 	struct clk_hw *hw = &ctx->hw;
1535 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1536 	long rate;
1537 
1538 	KUNIT_ASSERT_EQ(test,
1539 			clk_set_rate_range(clk,
1540 					   DUMMY_CLOCK_RATE_1,
1541 					   DUMMY_CLOCK_RATE_2),
1542 			0);
1543 
1544 	rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_1 - 1000);
1545 	KUNIT_ASSERT_GT(test, rate, 0);
1546 	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
1547 	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
1548 
1549 	clk_put(clk);
1550 }
1551 
1552 /*
1553  * Test that if our clock has some boundaries and we try to set a rate
1554  * higher than the maximum, the new rate will be within range.
1555  */
1556 static void clk_range_test_set_range_set_rate_lower(struct kunit *test)
1557 {
1558 	struct clk_dummy_context *ctx = test->priv;
1559 	struct clk_hw *hw = &ctx->hw;
1560 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1561 	unsigned long rate;
1562 
1563 	KUNIT_ASSERT_EQ(test,
1564 			clk_set_rate_range(clk,
1565 					   DUMMY_CLOCK_RATE_1,
1566 					   DUMMY_CLOCK_RATE_2),
1567 			0);
1568 
1569 	KUNIT_ASSERT_EQ(test,
1570 			clk_set_rate(clk, DUMMY_CLOCK_RATE_1 - 1000),
1571 			0);
1572 
1573 	rate = clk_get_rate(clk);
1574 	KUNIT_ASSERT_GT(test, rate, 0);
1575 	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
1576 	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
1577 
1578 	clk_put(clk);
1579 }
1580 
1581 /*
1582  * Test that if our clock has some boundaries and we try to round and
1583  * set a rate lower than the minimum, the rate returned by
1584  * clk_round_rate() will be consistent with the new rate set by
1585  * clk_set_rate().
1586  */
1587 static void clk_range_test_set_range_set_round_rate_consistent_lower(struct kunit *test)
1588 {
1589 	struct clk_dummy_context *ctx = test->priv;
1590 	struct clk_hw *hw = &ctx->hw;
1591 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1592 	long rounded;
1593 
1594 	KUNIT_ASSERT_EQ(test,
1595 			clk_set_rate_range(clk,
1596 					   DUMMY_CLOCK_RATE_1,
1597 					   DUMMY_CLOCK_RATE_2),
1598 			0);
1599 
1600 	rounded = clk_round_rate(clk, DUMMY_CLOCK_RATE_1 - 1000);
1601 	KUNIT_ASSERT_GT(test, rounded, 0);
1602 
1603 	KUNIT_ASSERT_EQ(test,
1604 			clk_set_rate(clk, DUMMY_CLOCK_RATE_1 - 1000),
1605 			0);
1606 
1607 	KUNIT_EXPECT_EQ(test, rounded, clk_get_rate(clk));
1608 
1609 	clk_put(clk);
1610 }
1611 
1612 /*
1613  * Test that if our clock has some boundaries and we try to round a rate
1614  * higher than the maximum, the returned rate will be within range.
1615  */
1616 static void clk_range_test_set_range_round_rate_higher(struct kunit *test)
1617 {
1618 	struct clk_dummy_context *ctx = test->priv;
1619 	struct clk_hw *hw = &ctx->hw;
1620 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1621 	long rate;
1622 
1623 	KUNIT_ASSERT_EQ(test,
1624 			clk_set_rate_range(clk,
1625 					   DUMMY_CLOCK_RATE_1,
1626 					   DUMMY_CLOCK_RATE_2),
1627 			0);
1628 
1629 	rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_2 + 1000);
1630 	KUNIT_ASSERT_GT(test, rate, 0);
1631 	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
1632 	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
1633 
1634 	clk_put(clk);
1635 }
1636 
1637 /*
1638  * Test that if our clock has some boundaries and we try to set a rate
1639  * higher than the maximum, the new rate will be within range.
1640  */
1641 static void clk_range_test_set_range_set_rate_higher(struct kunit *test)
1642 {
1643 	struct clk_dummy_context *ctx = test->priv;
1644 	struct clk_hw *hw = &ctx->hw;
1645 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1646 	unsigned long rate;
1647 
1648 	KUNIT_ASSERT_EQ(test,
1649 			clk_set_rate_range(clk,
1650 					   DUMMY_CLOCK_RATE_1,
1651 					   DUMMY_CLOCK_RATE_2),
1652 			0);
1653 
1654 	KUNIT_ASSERT_EQ(test,
1655 			clk_set_rate(clk, DUMMY_CLOCK_RATE_2 + 1000),
1656 			0);
1657 
1658 	rate = clk_get_rate(clk);
1659 	KUNIT_ASSERT_GT(test, rate, 0);
1660 	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
1661 	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
1662 
1663 	clk_put(clk);
1664 }
1665 
1666 /*
1667  * Test that if our clock has some boundaries and we try to round and
1668  * set a rate higher than the maximum, the rate returned by
1669  * clk_round_rate() will be consistent with the new rate set by
1670  * clk_set_rate().
1671  */
1672 static void clk_range_test_set_range_set_round_rate_consistent_higher(struct kunit *test)
1673 {
1674 	struct clk_dummy_context *ctx = test->priv;
1675 	struct clk_hw *hw = &ctx->hw;
1676 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1677 	long rounded;
1678 
1679 	KUNIT_ASSERT_EQ(test,
1680 			clk_set_rate_range(clk,
1681 					   DUMMY_CLOCK_RATE_1,
1682 					   DUMMY_CLOCK_RATE_2),
1683 			0);
1684 
1685 	rounded = clk_round_rate(clk, DUMMY_CLOCK_RATE_2 + 1000);
1686 	KUNIT_ASSERT_GT(test, rounded, 0);
1687 
1688 	KUNIT_ASSERT_EQ(test,
1689 			clk_set_rate(clk, DUMMY_CLOCK_RATE_2 + 1000),
1690 			0);
1691 
1692 	KUNIT_EXPECT_EQ(test, rounded, clk_get_rate(clk));
1693 
1694 	clk_put(clk);
1695 }
1696 
1697 /*
1698  * Test that if our clock has a rate lower than the minimum set by a
1699  * call to clk_set_rate_range(), the rate will be raised to match the
1700  * new minimum.
1701  *
1702  * This assumes that clk_ops.determine_rate or clk_ops.round_rate won't
1703  * modify the requested rate, which is our case in clk_dummy_rate_ops.
1704  */
1705 static void clk_range_test_set_range_get_rate_raised(struct kunit *test)
1706 {
1707 	struct clk_dummy_context *ctx = test->priv;
1708 	struct clk_hw *hw = &ctx->hw;
1709 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1710 	unsigned long rate;
1711 
1712 	KUNIT_ASSERT_EQ(test,
1713 			clk_set_rate(clk, DUMMY_CLOCK_RATE_1 - 1000),
1714 			0);
1715 
1716 	KUNIT_ASSERT_EQ(test,
1717 			clk_set_rate_range(clk,
1718 					   DUMMY_CLOCK_RATE_1,
1719 					   DUMMY_CLOCK_RATE_2),
1720 			0);
1721 
1722 	rate = clk_get_rate(clk);
1723 	KUNIT_ASSERT_GT(test, rate, 0);
1724 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
1725 
1726 	clk_put(clk);
1727 }
1728 
1729 /*
1730  * Test that if our clock has a rate higher than the maximum set by a
1731  * call to clk_set_rate_range(), the rate will be lowered to match the
1732  * new maximum.
1733  *
1734  * This assumes that clk_ops.determine_rate or clk_ops.round_rate won't
1735  * modify the requested rate, which is our case in clk_dummy_rate_ops.
1736  */
1737 static void clk_range_test_set_range_get_rate_lowered(struct kunit *test)
1738 {
1739 	struct clk_dummy_context *ctx = test->priv;
1740 	struct clk_hw *hw = &ctx->hw;
1741 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1742 	unsigned long rate;
1743 
1744 	KUNIT_ASSERT_EQ(test,
1745 			clk_set_rate(clk, DUMMY_CLOCK_RATE_2 + 1000),
1746 			0);
1747 
1748 	KUNIT_ASSERT_EQ(test,
1749 			clk_set_rate_range(clk,
1750 					   DUMMY_CLOCK_RATE_1,
1751 					   DUMMY_CLOCK_RATE_2),
1752 			0);
1753 
1754 	rate = clk_get_rate(clk);
1755 	KUNIT_ASSERT_GT(test, rate, 0);
1756 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
1757 
1758 	clk_put(clk);
1759 }
1760 
/* Rate-range checks on a plain rate clock with no parent. */
static struct kunit_case clk_range_test_cases[] = {
	KUNIT_CASE(clk_range_test_set_range),
	KUNIT_CASE(clk_range_test_set_range_invalid),
	KUNIT_CASE(clk_range_test_multiple_disjoints_range),
	KUNIT_CASE(clk_range_test_set_range_round_rate_lower),
	KUNIT_CASE(clk_range_test_set_range_set_rate_lower),
	KUNIT_CASE(clk_range_test_set_range_set_round_rate_consistent_lower),
	KUNIT_CASE(clk_range_test_set_range_round_rate_higher),
	KUNIT_CASE(clk_range_test_set_range_set_rate_higher),
	KUNIT_CASE(clk_range_test_set_range_set_round_rate_consistent_higher),
	KUNIT_CASE(clk_range_test_set_range_get_rate_raised),
	KUNIT_CASE(clk_range_test_set_range_get_rate_lowered),
	{}
};
1775 
1776 /*
1777  * Test suite for a basic rate clock, without any parent.
1778  *
1779  * These tests exercise the rate range API: clk_set_rate_range(),
1780  * clk_set_min_rate(), clk_set_max_rate(), clk_drop_range().
1781  */
static struct kunit_suite clk_range_test_suite = {
	.name = "clk-range-test",
	.init = clk_test_init,		/* plain dummy rate clock fixture */
	.exit = clk_test_exit,
	.test_cases = clk_range_test_cases,
};
1788 
1789 /*
1790  * Test that if we have several subsequent calls to
1791  * clk_set_rate_range(), the core will reevaluate whether a new rate is
1792  * needed each and every time.
1793  *
1794  * With clk_dummy_maximize_rate_ops, this means that the rate will
1795  * trail along the maximum as it evolves.
1796  */
1797 static void clk_range_test_set_range_rate_maximized(struct kunit *test)
1798 {
1799 	struct clk_dummy_context *ctx = test->priv;
1800 	struct clk_hw *hw = &ctx->hw;
1801 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1802 	unsigned long rate;
1803 
1804 	KUNIT_ASSERT_EQ(test,
1805 			clk_set_rate(clk, DUMMY_CLOCK_RATE_2 + 1000),
1806 			0);
1807 
1808 	KUNIT_ASSERT_EQ(test,
1809 			clk_set_rate_range(clk,
1810 					   DUMMY_CLOCK_RATE_1,
1811 					   DUMMY_CLOCK_RATE_2),
1812 			0);
1813 
1814 	rate = clk_get_rate(clk);
1815 	KUNIT_ASSERT_GT(test, rate, 0);
1816 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
1817 
1818 	KUNIT_ASSERT_EQ(test,
1819 			clk_set_rate_range(clk,
1820 					   DUMMY_CLOCK_RATE_1,
1821 					   DUMMY_CLOCK_RATE_2 - 1000),
1822 			0);
1823 
1824 	rate = clk_get_rate(clk);
1825 	KUNIT_ASSERT_GT(test, rate, 0);
1826 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2 - 1000);
1827 
1828 	KUNIT_ASSERT_EQ(test,
1829 			clk_set_rate_range(clk,
1830 					   DUMMY_CLOCK_RATE_1,
1831 					   DUMMY_CLOCK_RATE_2),
1832 			0);
1833 
1834 	rate = clk_get_rate(clk);
1835 	KUNIT_ASSERT_GT(test, rate, 0);
1836 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
1837 
1838 	clk_put(clk);
1839 }
1840 
1841 /*
1842  * Test that if we have several subsequent calls to
1843  * clk_set_rate_range(), across multiple users, the core will reevaluate
1844  * whether a new rate is needed each and every time.
1845  *
1846  * With clk_dummy_maximize_rate_ops, this means that the rate will
1847  * trail along the maximum as it evolves.
1848  */
static void clk_range_test_multiple_set_range_rate_maximized(struct kunit *test)
{
	struct clk_dummy_context *ctx = test->priv;
	struct clk_hw *hw = &ctx->hw;
	struct clk *clk = clk_hw_get_clk(hw, NULL);
	struct clk *user1, *user2;
	unsigned long rate;

	/* Two independent consumer handles on the same clock. */
	user1 = clk_hw_get_clk(hw, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user1);

	user2 = clk_hw_get_clk(hw, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user2);

	KUNIT_ASSERT_EQ(test,
			clk_set_rate(clk, DUMMY_CLOCK_RATE_2 + 1000),
			0);

	/* One user caps the rate: it must snap to that maximum. */
	KUNIT_ASSERT_EQ(test,
			clk_set_rate_range(user1,
					   0,
					   DUMMY_CLOCK_RATE_2),
			0);

	rate = clk_get_rate(clk);
	KUNIT_ASSERT_GT(test, rate, 0);
	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);

	/* A second, tighter cap wins: rate drops to the lower maximum. */
	KUNIT_ASSERT_EQ(test,
			clk_set_rate_range(user2,
					   0,
					   DUMMY_CLOCK_RATE_1),
			0);

	rate = clk_get_rate(clk);
	KUNIT_ASSERT_GT(test, rate, 0);
	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);

	/* Dropping the tighter cap must let the rate climb back up. */
	KUNIT_ASSERT_EQ(test,
			clk_drop_range(user2),
			0);

	rate = clk_get_rate(clk);
	KUNIT_ASSERT_GT(test, rate, 0);
	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);

	clk_put(user2);
	clk_put(user1);
	clk_put(clk);
}
1899 
1900 /*
1901  * Test that if we have several subsequent calls to
1902  * clk_set_rate_range(), across multiple users, the core will reevaluate
1903  * whether a new rate is needed, including when a user drop its clock.
1904  *
1905  * With clk_dummy_maximize_rate_ops, this means that the rate will
1906  * trail along the maximum as it evolves.
1907  */
static void clk_range_test_multiple_set_range_rate_put_maximized(struct kunit *test)
{
	struct clk_dummy_context *ctx = test->priv;
	struct clk_hw *hw = &ctx->hw;
	struct clk *clk = clk_hw_get_clk(hw, NULL);
	struct clk *user1, *user2;
	unsigned long rate;

	/* Two independent consumer handles on the same clock. */
	user1 = clk_hw_get_clk(hw, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user1);

	user2 = clk_hw_get_clk(hw, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user2);

	KUNIT_ASSERT_EQ(test,
			clk_set_rate(clk, DUMMY_CLOCK_RATE_2 + 1000),
			0);

	/* One user caps the rate: it must snap to that maximum. */
	KUNIT_ASSERT_EQ(test,
			clk_set_rate_range(user1,
					   0,
					   DUMMY_CLOCK_RATE_2),
			0);

	rate = clk_get_rate(clk);
	KUNIT_ASSERT_GT(test, rate, 0);
	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);

	/* A second, tighter cap wins: rate drops to the lower maximum. */
	KUNIT_ASSERT_EQ(test,
			clk_set_rate_range(user2,
					   0,
					   DUMMY_CLOCK_RATE_1),
			0);

	rate = clk_get_rate(clk);
	KUNIT_ASSERT_GT(test, rate, 0);
	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);

	/* Putting the user (not just dropping its range) must also undo it. */
	clk_put(user2);

	rate = clk_get_rate(clk);
	KUNIT_ASSERT_GT(test, rate, 0);
	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);

	clk_put(user1);
	clk_put(clk);
}
1955 
/* Range checks for a clock driver that always runs at the allowed maximum. */
static struct kunit_case clk_range_maximize_test_cases[] = {
	KUNIT_CASE(clk_range_test_set_range_rate_maximized),
	KUNIT_CASE(clk_range_test_multiple_set_range_rate_maximized),
	KUNIT_CASE(clk_range_test_multiple_set_range_rate_put_maximized),
	{}
};
1962 
1963 /*
1964  * Test suite for a basic rate clock, without any parent.
1965  *
1966  * These tests exercise the rate range API: clk_set_rate_range(),
1967  * clk_set_min_rate(), clk_set_max_rate(), clk_drop_range(), with a
1968  * driver that will always try to run at the highest possible rate.
1969  */
static struct kunit_suite clk_range_maximize_test_suite = {
	.name = "clk-range-maximize-test",
	.init = clk_maximize_test_init,		/* fixture using clk_dummy_maximize_rate_ops */
	.exit = clk_test_exit,
	.test_cases = clk_range_maximize_test_cases,
};
1976 
1977 /*
1978  * Test that if we have several subsequent calls to
1979  * clk_set_rate_range(), the core will reevaluate whether a new rate is
1980  * needed each and every time.
1981  *
1982  * With clk_dummy_minimize_rate_ops, this means that the rate will
1983  * trail along the minimum as it evolves.
1984  */
static void clk_range_test_set_range_rate_minimized(struct kunit *test)
{
	struct clk_dummy_context *ctx = test->priv;
	struct clk_hw *hw = &ctx->hw;
	struct clk *clk = clk_hw_get_clk(hw, NULL);
	unsigned long rate;

	/* Start below the soon-to-be minimum. */
	KUNIT_ASSERT_EQ(test,
			clk_set_rate(clk, DUMMY_CLOCK_RATE_1 - 1000),
			0);

	/* First range: the rate must snap to the minimum. */
	KUNIT_ASSERT_EQ(test,
			clk_set_rate_range(clk,
					   DUMMY_CLOCK_RATE_1,
					   DUMMY_CLOCK_RATE_2),
			0);

	rate = clk_get_rate(clk);
	KUNIT_ASSERT_GT(test, rate, 0);
	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);

	/* Raise the minimum: the rate must follow it up. */
	KUNIT_ASSERT_EQ(test,
			clk_set_rate_range(clk,
					   DUMMY_CLOCK_RATE_1 + 1000,
					   DUMMY_CLOCK_RATE_2),
			0);

	rate = clk_get_rate(clk);
	KUNIT_ASSERT_GT(test, rate, 0);
	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1 + 1000);

	/* Lower it back: the rate must follow it down again. */
	KUNIT_ASSERT_EQ(test,
			clk_set_rate_range(clk,
					   DUMMY_CLOCK_RATE_1,
					   DUMMY_CLOCK_RATE_2),
			0);

	rate = clk_get_rate(clk);
	KUNIT_ASSERT_GT(test, rate, 0);
	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);

	clk_put(clk);
}
2028 
2029 /*
2030  * Test that if we have several subsequent calls to
2031  * clk_set_rate_range(), across multiple users, the core will reevaluate
2032  * whether a new rate is needed each and every time.
2033  *
2034  * With clk_dummy_minimize_rate_ops, this means that the rate will
2035  * trail along the minimum as it evolves.
2036  */
2037 static void clk_range_test_multiple_set_range_rate_minimized(struct kunit *test)
2038 {
2039 	struct clk_dummy_context *ctx = test->priv;
2040 	struct clk_hw *hw = &ctx->hw;
2041 	struct clk *clk = clk_hw_get_clk(hw, NULL);
2042 	struct clk *user1, *user2;
2043 	unsigned long rate;
2044 
2045 	user1 = clk_hw_get_clk(hw, NULL);
2046 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user1);
2047 
2048 	user2 = clk_hw_get_clk(hw, NULL);
2049 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user2);
2050 
2051 	KUNIT_ASSERT_EQ(test,
2052 			clk_set_rate_range(user1,
2053 					   DUMMY_CLOCK_RATE_1,
2054 					   ULONG_MAX),
2055 			0);
2056 
2057 	rate = clk_get_rate(clk);
2058 	KUNIT_ASSERT_GT(test, rate, 0);
2059 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
2060 
2061 	KUNIT_ASSERT_EQ(test,
2062 			clk_set_rate_range(user2,
2063 					   DUMMY_CLOCK_RATE_2,
2064 					   ULONG_MAX),
2065 			0);
2066 
2067 	rate = clk_get_rate(clk);
2068 	KUNIT_ASSERT_GT(test, rate, 0);
2069 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
2070 
2071 	KUNIT_ASSERT_EQ(test,
2072 			clk_drop_range(user2),
2073 			0);
2074 
2075 	rate = clk_get_rate(clk);
2076 	KUNIT_ASSERT_GT(test, rate, 0);
2077 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
2078 
2079 	clk_put(user2);
2080 	clk_put(user1);
2081 	clk_put(clk);
2082 }
2083 
2084 /*
2085  * Test that if we have several subsequent calls to
2086  * clk_set_rate_range(), across multiple users, the core will reevaluate
 * whether a new rate is needed, including when a user drops its clock.
2088  *
2089  * With clk_dummy_minimize_rate_ops, this means that the rate will
2090  * trail along the minimum as it evolves.
2091  */
2092 static void clk_range_test_multiple_set_range_rate_put_minimized(struct kunit *test)
2093 {
2094 	struct clk_dummy_context *ctx = test->priv;
2095 	struct clk_hw *hw = &ctx->hw;
2096 	struct clk *clk = clk_hw_get_clk(hw, NULL);
2097 	struct clk *user1, *user2;
2098 	unsigned long rate;
2099 
2100 	user1 = clk_hw_get_clk(hw, NULL);
2101 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user1);
2102 
2103 	user2 = clk_hw_get_clk(hw, NULL);
2104 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user2);
2105 
2106 	KUNIT_ASSERT_EQ(test,
2107 			clk_set_rate_range(user1,
2108 					   DUMMY_CLOCK_RATE_1,
2109 					   ULONG_MAX),
2110 			0);
2111 
2112 	rate = clk_get_rate(clk);
2113 	KUNIT_ASSERT_GT(test, rate, 0);
2114 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
2115 
2116 	KUNIT_ASSERT_EQ(test,
2117 			clk_set_rate_range(user2,
2118 					   DUMMY_CLOCK_RATE_2,
2119 					   ULONG_MAX),
2120 			0);
2121 
2122 	rate = clk_get_rate(clk);
2123 	KUNIT_ASSERT_GT(test, rate, 0);
2124 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
2125 
2126 	clk_put(user2);
2127 
2128 	rate = clk_get_rate(clk);
2129 	KUNIT_ASSERT_GT(test, rate, 0);
2130 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
2131 
2132 	clk_put(user1);
2133 	clk_put(clk);
2134 }
2135 
/* Rate-range tests run against the "minimize" dummy clock driver. */
static struct kunit_case clk_range_minimize_test_cases[] = {
	KUNIT_CASE(clk_range_test_set_range_rate_minimized),
	KUNIT_CASE(clk_range_test_multiple_set_range_rate_minimized),
	KUNIT_CASE(clk_range_test_multiple_set_range_rate_put_minimized),
	{}
};
2142 
/*
 * Test suite for a basic rate clock, without any parent.
 *
 * These tests exercise the rate range API: clk_set_rate_range(),
 * clk_set_min_rate(), clk_set_max_rate(), clk_drop_range(), with a
 * driver that will always try to run at the lowest possible rate
 * (installed by clk_minimize_test_init).
 */
static struct kunit_suite clk_range_minimize_test_suite = {
	.name = "clk-range-minimize-test",
	.init = clk_minimize_test_init,
	.exit = clk_test_exit,
	.test_cases = clk_range_minimize_test_cases,
};
2156 
/* Context for the leaf -> pass-through -> mux topology tests. */
struct clk_leaf_mux_ctx {
	struct clk_multiple_parent_ctx mux_ctx;	/* the mux and its two parents */
	struct clk_hw hw;		/* leaf clock under test */
	struct clk_hw parent;		/* pass-through clock between leaf and mux */
	struct clk_rate_request *req;	/* filled in by clk_leaf_mux_determine_rate() */
	int (*determine_rate_func)(struct clk_hw *hw, struct clk_rate_request *req);
};
2164 
/*
 * Leaf .determine_rate implementation: forwards the rate request to the
 * parent through the helper selected by the current test case, and stashes
 * the forwarded request in ctx->req so the test can inspect what the
 * helper filled in (notably best_parent_hw).
 */
static int clk_leaf_mux_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
{
	struct clk_leaf_mux_ctx *ctx = container_of(hw, struct clk_leaf_mux_ctx, hw);
	int ret;
	struct clk_rate_request *parent_req = ctx->req;

	/* Build the parent's request from ours, then run the helper on it. */
	clk_hw_forward_rate_request(hw, req, req->best_parent_hw, parent_req, req->rate);
	ret = ctx->determine_rate_func(req->best_parent_hw, parent_req);
	if (ret)
		return ret;

	req->rate = parent_req->rate;

	return 0;
}
2180 
/* Leaf clock ops: rate determination is delegated per test case. */
static const struct clk_ops clk_leaf_mux_set_rate_parent_ops = {
	.determine_rate = clk_leaf_mux_determine_rate,
	.set_parent = clk_dummy_single_set_parent,
	.get_parent = clk_dummy_single_get_parent,
};
2186 
/*
 * Builds the test topology: two rate parents -> "test-mux" ->
 * "test-parent" (pass-through) -> "test-clock" (leaf). Both the
 * pass-through clock and the leaf carry CLK_SET_RATE_PARENT so rate
 * requests propagate all the way down to the mux.
 */
static int
clk_leaf_mux_set_rate_parent_test_init(struct kunit *test)
{
	struct clk_leaf_mux_ctx *ctx;
	const char *top_parents[2] = { "parent-0", "parent-1" };
	int ret;

	ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;
	test->priv = ctx;

	ctx->mux_ctx.parents_ctx[0].hw.init = CLK_HW_INIT_NO_PARENT("parent-0",
								    &clk_dummy_rate_ops,
								    0);
	ctx->mux_ctx.parents_ctx[0].rate = DUMMY_CLOCK_RATE_1;
	ret = clk_hw_register(NULL, &ctx->mux_ctx.parents_ctx[0].hw);
	if (ret)
		return ret;

	ctx->mux_ctx.parents_ctx[1].hw.init = CLK_HW_INIT_NO_PARENT("parent-1",
								    &clk_dummy_rate_ops,
								    0);
	ctx->mux_ctx.parents_ctx[1].rate = DUMMY_CLOCK_RATE_2;
	ret = clk_hw_register(NULL, &ctx->mux_ctx.parents_ctx[1].hw);
	if (ret)
		return ret;

	/* The mux starts out on parent-0 (DUMMY_CLOCK_RATE_1). */
	ctx->mux_ctx.current_parent = 0;
	ctx->mux_ctx.hw.init = CLK_HW_INIT_PARENTS("test-mux", top_parents,
						   &clk_multiple_parents_mux_ops,
						   0);
	ret = clk_hw_register(NULL, &ctx->mux_ctx.hw);
	if (ret)
		return ret;

	ctx->parent.init = CLK_HW_INIT_HW("test-parent", &ctx->mux_ctx.hw,
					  &empty_clk_ops, CLK_SET_RATE_PARENT);
	ret = clk_hw_register(NULL, &ctx->parent);
	if (ret)
		return ret;

	ctx->hw.init = CLK_HW_INIT_HW("test-clock", &ctx->parent,
				      &clk_leaf_mux_set_rate_parent_ops,
				      CLK_SET_RATE_PARENT);
	ret = clk_hw_register(NULL, &ctx->hw);
	if (ret)
		return ret;

	return 0;
}
2238 
/* Tear down the topology in child-to-parent order. */
static void clk_leaf_mux_set_rate_parent_test_exit(struct kunit *test)
{
	struct clk_leaf_mux_ctx *ctx = test->priv;

	clk_hw_unregister(&ctx->hw);
	clk_hw_unregister(&ctx->parent);
	clk_hw_unregister(&ctx->mux_ctx.hw);
	clk_hw_unregister(&ctx->mux_ctx.parents_ctx[0].hw);
	clk_hw_unregister(&ctx->mux_ctx.parents_ctx[1].hw);
}
2249 
/* A test case: a human-readable name plus the determine_rate helper to exercise. */
struct clk_leaf_mux_set_rate_parent_determine_rate_test_case {
	const char *desc;
	int (*determine_rate_func)(struct clk_hw *hw, struct clk_rate_request *req);
};
2254 
2255 static void
2256 clk_leaf_mux_set_rate_parent_determine_rate_test_case_to_desc(
2257 		const struct clk_leaf_mux_set_rate_parent_determine_rate_test_case *t, char *desc)
2258 {
2259 	strcpy(desc, t->desc);
2260 }
2261 
/*
 * One entry per generic determine_rate helper under test; each helper
 * must leave best_parent_hw pointing at the mux itself, never at the
 * clock the request was made on.
 */
static const struct clk_leaf_mux_set_rate_parent_determine_rate_test_case
clk_leaf_mux_set_rate_parent_determine_rate_test_cases[] = {
	{
		/*
		 * Test that __clk_determine_rate() on the parent that can't
		 * change rate doesn't return a clk_rate_request structure with
		 * the best_parent_hw pointer pointing to the parent.
		 */
		.desc = "clk_leaf_mux_set_rate_parent__clk_determine_rate_proper_parent",
		.determine_rate_func = __clk_determine_rate,
	},
	{
		/*
		 * Test that __clk_mux_determine_rate() on the parent that
		 * can't change rate doesn't return a clk_rate_request
		 * structure with the best_parent_hw pointer pointing to
		 * the parent.
		 */
		.desc = "clk_leaf_mux_set_rate_parent__clk_mux_determine_rate_proper_parent",
		.determine_rate_func = __clk_mux_determine_rate,
	},
	{
		/*
		 * Test that __clk_mux_determine_rate_closest() on the parent
		 * that can't change rate doesn't return a clk_rate_request
		 * structure with the best_parent_hw pointer pointing to
		 * the parent.
		 */
		.desc = "clk_leaf_mux_set_rate_parent__clk_mux_determine_rate_closest_proper_parent",
		.determine_rate_func = __clk_mux_determine_rate_closest,
	},
	{
		/*
		 * Test that clk_hw_determine_rate_no_reparent() on the parent
		 * that can't change rate doesn't return a clk_rate_request
		 * structure with the best_parent_hw pointer pointing to
		 * the parent.
		 */
		.desc = "clk_leaf_mux_set_rate_parent_clk_hw_determine_rate_no_reparent_proper_parent",
		.determine_rate_func = clk_hw_determine_rate_no_reparent,
	},
};

/* Generates one KUnit parameter per entry in the array above. */
KUNIT_ARRAY_PARAM(clk_leaf_mux_set_rate_parent_determine_rate_test,
		  clk_leaf_mux_set_rate_parent_determine_rate_test_cases,
		  clk_leaf_mux_set_rate_parent_determine_rate_test_case_to_desc)
2308 
2309 /*
2310  * Test that when a clk that can't change rate itself calls a function like
2311  * __clk_determine_rate() on its parent it doesn't get back a clk_rate_request
2312  * structure that has the best_parent_hw pointer point to the clk_hw passed
2313  * into the determine rate function. See commit 262ca38f4b6e ("clk: Stop
2314  * forwarding clk_rate_requests to the parent") for more background.
2315  */
static void clk_leaf_mux_set_rate_parent_determine_rate_test(struct kunit *test)
{
	struct clk_leaf_mux_ctx *ctx = test->priv;
	struct clk_hw *hw = &ctx->hw;
	struct clk *clk = clk_hw_get_clk(hw, NULL);
	struct clk_rate_request req;	/* filled via ctx->req during clk_round_rate() */
	unsigned long rate;
	const struct clk_leaf_mux_set_rate_parent_determine_rate_test_case *test_param;

	/* Select the determine_rate helper this parameterized case exercises. */
	test_param = test->param_value;
	ctx->determine_rate_func = test_param->determine_rate_func;

	ctx->req = &req;
	rate = clk_get_rate(clk);
	KUNIT_ASSERT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
	KUNIT_ASSERT_EQ(test, DUMMY_CLOCK_RATE_2, clk_round_rate(clk, DUMMY_CLOCK_RATE_2));

	/* best_parent_hw must point at the mux, not at the pass-through parent. */
	KUNIT_EXPECT_EQ(test, req.rate, DUMMY_CLOCK_RATE_2);
	KUNIT_EXPECT_EQ(test, req.best_parent_rate, DUMMY_CLOCK_RATE_2);
	KUNIT_EXPECT_PTR_EQ(test, req.best_parent_hw, &ctx->mux_ctx.hw);

	clk_put(clk);
}
2339 
/* One parameterized case, expanded once per determine_rate helper. */
static struct kunit_case clk_leaf_mux_set_rate_parent_test_cases[] = {
	KUNIT_CASE_PARAM(clk_leaf_mux_set_rate_parent_determine_rate_test,
			 clk_leaf_mux_set_rate_parent_determine_rate_test_gen_params),
	{}
};
2345 
/*
 * Test suite for a clock whose parent is a pass-through clk whose parent is a
 * mux with multiple parents. The leaf and pass-through clocks have the
 * CLK_SET_RATE_PARENT flag, and will forward rate requests to the mux, which
 * will then select which parent is the best fit for a given rate.
 *
 * These tests exercise the behaviour of muxes, and the proper selection
 * of parents. The topology is built by the suite's init and torn down
 * by its exit callback.
 */
static struct kunit_suite clk_leaf_mux_set_rate_parent_test_suite = {
	.name = "clk-leaf-mux-set-rate-parent",
	.init = clk_leaf_mux_set_rate_parent_test_init,
	.exit = clk_leaf_mux_set_rate_parent_test_exit,
	.test_cases = clk_leaf_mux_set_rate_parent_test_cases,
};
2361 
/*
 * Rate-change event captured by the notifier callback; ->wq is woken
 * once ->done is set so the test can wait for each notification.
 */
struct clk_mux_notifier_rate_change {
	bool done;
	unsigned long old_rate;
	unsigned long new_rate;
	wait_queue_head_t wq;
};
2368 
/* Context for the mux-notifier tests: the mux, its clk handle and notifier. */
struct clk_mux_notifier_ctx {
	struct clk_multiple_parent_ctx mux_ctx;
	struct clk *clk;		/* consumer handle the notifier is registered on */
	struct notifier_block clk_nb;
	struct clk_mux_notifier_rate_change pre_rate_change;
	struct clk_mux_notifier_rate_change post_rate_change;
};
2376 
/* How long to wait for a notifier callback before declaring failure. */
#define NOTIFIER_TIMEOUT_MS 100
2378 
2379 static int clk_mux_notifier_callback(struct notifier_block *nb,
2380 				     unsigned long action, void *data)
2381 {
2382 	struct clk_notifier_data *clk_data = data;
2383 	struct clk_mux_notifier_ctx *ctx = container_of(nb,
2384 							struct clk_mux_notifier_ctx,
2385 							clk_nb);
2386 
2387 	if (action & PRE_RATE_CHANGE) {
2388 		ctx->pre_rate_change.old_rate = clk_data->old_rate;
2389 		ctx->pre_rate_change.new_rate = clk_data->new_rate;
2390 		ctx->pre_rate_change.done = true;
2391 		wake_up_interruptible(&ctx->pre_rate_change.wq);
2392 	}
2393 
2394 	if (action & POST_RATE_CHANGE) {
2395 		ctx->post_rate_change.old_rate = clk_data->old_rate;
2396 		ctx->post_rate_change.new_rate = clk_data->new_rate;
2397 		ctx->post_rate_change.done = true;
2398 		wake_up_interruptible(&ctx->post_rate_change.wq);
2399 	}
2400 
2401 	return 0;
2402 }
2403 
/*
 * Registers a two-parent mux, grabs a consumer handle on it and attaches
 * clk_mux_notifier_callback() to that handle.
 */
static int clk_mux_notifier_test_init(struct kunit *test)
{
	struct clk_mux_notifier_ctx *ctx;
	const char *top_parents[2] = { "parent-0", "parent-1" };
	int ret;

	ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;
	test->priv = ctx;
	ctx->clk_nb.notifier_call = clk_mux_notifier_callback;
	init_waitqueue_head(&ctx->pre_rate_change.wq);
	init_waitqueue_head(&ctx->post_rate_change.wq);

	ctx->mux_ctx.parents_ctx[0].hw.init = CLK_HW_INIT_NO_PARENT("parent-0",
								    &clk_dummy_rate_ops,
								    0);
	ctx->mux_ctx.parents_ctx[0].rate = DUMMY_CLOCK_RATE_1;
	ret = clk_hw_register(NULL, &ctx->mux_ctx.parents_ctx[0].hw);
	if (ret)
		return ret;

	ctx->mux_ctx.parents_ctx[1].hw.init = CLK_HW_INIT_NO_PARENT("parent-1",
								    &clk_dummy_rate_ops,
								    0);
	ctx->mux_ctx.parents_ctx[1].rate = DUMMY_CLOCK_RATE_2;
	ret = clk_hw_register(NULL, &ctx->mux_ctx.parents_ctx[1].hw);
	if (ret)
		return ret;

	/* The mux starts out on parent-0 (DUMMY_CLOCK_RATE_1). */
	ctx->mux_ctx.current_parent = 0;
	ctx->mux_ctx.hw.init = CLK_HW_INIT_PARENTS("test-mux", top_parents,
						   &clk_multiple_parents_mux_ops,
						   0);
	ret = clk_hw_register(NULL, &ctx->mux_ctx.hw);
	if (ret)
		return ret;

	ctx->clk = clk_hw_get_clk(&ctx->mux_ctx.hw, NULL);
	ret = clk_notifier_register(ctx->clk, &ctx->clk_nb);
	if (ret)
		return ret;

	return 0;
}
2449 
/* Drop the notifier and clk reference, then unregister children before parents. */
static void clk_mux_notifier_test_exit(struct kunit *test)
{
	struct clk_mux_notifier_ctx *ctx = test->priv;
	struct clk *clk = ctx->clk;

	clk_notifier_unregister(clk, &ctx->clk_nb);
	clk_put(clk);

	clk_hw_unregister(&ctx->mux_ctx.hw);
	clk_hw_unregister(&ctx->mux_ctx.parents_ctx[0].hw);
	clk_hw_unregister(&ctx->mux_ctx.parents_ctx[1].hw);
}
2462 
2463 /*
 * Test that if we have a notifier registered on a mux, the core
2465  * will notify us when we switch to another parent, and with the proper
2466  * old and new rates.
2467  */
2468 static void clk_mux_notifier_set_parent_test(struct kunit *test)
2469 {
2470 	struct clk_mux_notifier_ctx *ctx = test->priv;
2471 	struct clk_hw *hw = &ctx->mux_ctx.hw;
2472 	struct clk *clk = clk_hw_get_clk(hw, NULL);
2473 	struct clk *new_parent = clk_hw_get_clk(&ctx->mux_ctx.parents_ctx[1].hw, NULL);
2474 	int ret;
2475 
2476 	ret = clk_set_parent(clk, new_parent);
2477 	KUNIT_ASSERT_EQ(test, ret, 0);
2478 
2479 	ret = wait_event_interruptible_timeout(ctx->pre_rate_change.wq,
2480 					       ctx->pre_rate_change.done,
2481 					       msecs_to_jiffies(NOTIFIER_TIMEOUT_MS));
2482 	KUNIT_ASSERT_GT(test, ret, 0);
2483 
2484 	KUNIT_EXPECT_EQ(test, ctx->pre_rate_change.old_rate, DUMMY_CLOCK_RATE_1);
2485 	KUNIT_EXPECT_EQ(test, ctx->pre_rate_change.new_rate, DUMMY_CLOCK_RATE_2);
2486 
2487 	ret = wait_event_interruptible_timeout(ctx->post_rate_change.wq,
2488 					       ctx->post_rate_change.done,
2489 					       msecs_to_jiffies(NOTIFIER_TIMEOUT_MS));
2490 	KUNIT_ASSERT_GT(test, ret, 0);
2491 
2492 	KUNIT_EXPECT_EQ(test, ctx->post_rate_change.old_rate, DUMMY_CLOCK_RATE_1);
2493 	KUNIT_EXPECT_EQ(test, ctx->post_rate_change.new_rate, DUMMY_CLOCK_RATE_2);
2494 
2495 	clk_put(new_parent);
2496 	clk_put(clk);
2497 }
2498 
/* Notifier behaviour tests. */
static struct kunit_case clk_mux_notifier_test_cases[] = {
	KUNIT_CASE(clk_mux_notifier_set_parent_test),
	{}
};
2503 
/*
 * Test suite for a mux with multiple parents, and a notifier registered
 * on the mux by the suite's init callback.
 *
 * These tests exercise the behaviour of notifiers.
 */
static struct kunit_suite clk_mux_notifier_test_suite = {
	.name = "clk-mux-notifier",
	.init = clk_mux_notifier_test_init,
	.exit = clk_mux_notifier_test_exit,
	.test_cases = clk_mux_notifier_test_cases,
};
2516 
/*
 * Registers two rate parents and a mux that uses the "no reparent" ops,
 * starting out on parent-0.
 */
static int
clk_mux_no_reparent_test_init(struct kunit *test)
{
	struct clk_multiple_parent_ctx *ctx;
	const char *parents[2] = { "parent-0", "parent-1"};
	int ret;

	ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;
	test->priv = ctx;

	ctx->parents_ctx[0].hw.init = CLK_HW_INIT_NO_PARENT("parent-0",
							    &clk_dummy_rate_ops,
							    0);
	ctx->parents_ctx[0].rate = DUMMY_CLOCK_RATE_1;
	ret = clk_hw_register(NULL, &ctx->parents_ctx[0].hw);
	if (ret)
		return ret;

	ctx->parents_ctx[1].hw.init = CLK_HW_INIT_NO_PARENT("parent-1",
							    &clk_dummy_rate_ops,
							    0);
	ctx->parents_ctx[1].rate = DUMMY_CLOCK_RATE_2;
	ret = clk_hw_register(NULL, &ctx->parents_ctx[1].hw);
	if (ret)
		return ret;

	/* The mux starts out on parent-0 (DUMMY_CLOCK_RATE_1). */
	ctx->current_parent = 0;
	ctx->hw.init = CLK_HW_INIT_PARENTS("test-mux", parents,
					   &clk_multiple_parents_no_reparent_mux_ops,
					   0);
	ret = clk_hw_register(NULL, &ctx->hw);
	if (ret)
		return ret;

	return 0;
}
2555 
/* Unregister the mux before its parents. */
static void
clk_mux_no_reparent_test_exit(struct kunit *test)
{
	struct clk_multiple_parent_ctx *ctx = test->priv;

	clk_hw_unregister(&ctx->hw);
	clk_hw_unregister(&ctx->parents_ctx[0].hw);
	clk_hw_unregister(&ctx->parents_ctx[1].hw);
}
2565 
2566 /*
 * Test that if we have a mux that cannot change parent and we call
2568  * clk_round_rate() on it with a rate that should cause it to change
2569  * parent, it won't.
2570  */
2571 static void clk_mux_no_reparent_round_rate(struct kunit *test)
2572 {
2573 	struct clk_multiple_parent_ctx *ctx = test->priv;
2574 	struct clk_hw *hw = &ctx->hw;
2575 	struct clk *clk = clk_hw_get_clk(hw, NULL);
2576 	struct clk *other_parent, *parent;
2577 	unsigned long other_parent_rate;
2578 	unsigned long parent_rate;
2579 	long rounded_rate;
2580 
2581 	parent = clk_get_parent(clk);
2582 	KUNIT_ASSERT_PTR_NE(test, parent, NULL);
2583 
2584 	parent_rate = clk_get_rate(parent);
2585 	KUNIT_ASSERT_GT(test, parent_rate, 0);
2586 
2587 	other_parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
2588 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, other_parent);
2589 	KUNIT_ASSERT_FALSE(test, clk_is_match(parent, other_parent));
2590 
2591 	other_parent_rate = clk_get_rate(other_parent);
2592 	KUNIT_ASSERT_GT(test, other_parent_rate, 0);
2593 	clk_put(other_parent);
2594 
2595 	rounded_rate = clk_round_rate(clk, other_parent_rate);
2596 	KUNIT_ASSERT_GT(test, rounded_rate, 0);
2597 	KUNIT_EXPECT_EQ(test, rounded_rate, parent_rate);
2598 
2599 	clk_put(clk);
2600 }
2601 
2602 /*
 * Test that if we have a mux that cannot change parent and we call
2604  * clk_set_rate() on it with a rate that should cause it to change
2605  * parent, it won't.
2606  */
2607 static void clk_mux_no_reparent_set_rate(struct kunit *test)
2608 {
2609 	struct clk_multiple_parent_ctx *ctx = test->priv;
2610 	struct clk_hw *hw = &ctx->hw;
2611 	struct clk *clk = clk_hw_get_clk(hw, NULL);
2612 	struct clk *other_parent, *parent;
2613 	unsigned long other_parent_rate;
2614 	unsigned long parent_rate;
2615 	unsigned long rate;
2616 	int ret;
2617 
2618 	parent = clk_get_parent(clk);
2619 	KUNIT_ASSERT_PTR_NE(test, parent, NULL);
2620 
2621 	parent_rate = clk_get_rate(parent);
2622 	KUNIT_ASSERT_GT(test, parent_rate, 0);
2623 
2624 	other_parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
2625 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, other_parent);
2626 	KUNIT_ASSERT_FALSE(test, clk_is_match(parent, other_parent));
2627 
2628 	other_parent_rate = clk_get_rate(other_parent);
2629 	KUNIT_ASSERT_GT(test, other_parent_rate, 0);
2630 	clk_put(other_parent);
2631 
2632 	ret = clk_set_rate(clk, other_parent_rate);
2633 	KUNIT_ASSERT_EQ(test, ret, 0);
2634 
2635 	rate = clk_get_rate(clk);
2636 	KUNIT_ASSERT_GT(test, rate, 0);
2637 	KUNIT_EXPECT_EQ(test, rate, parent_rate);
2638 
2639 	clk_put(clk);
2640 }
2641 
/* No-reparent mux behaviour tests. */
static struct kunit_case clk_mux_no_reparent_test_cases[] = {
	KUNIT_CASE(clk_mux_no_reparent_round_rate),
	KUNIT_CASE(clk_mux_no_reparent_set_rate),
	{}
};
2647 
/*
 * Test suite for a clock mux that isn't allowed to change parent, using
 * the clk_hw_determine_rate_no_reparent() helper.
 *
 * These tests exercise that helper, and the proper selection of
 * rates and parents while the mux stays on its current parent.
 */
static struct kunit_suite clk_mux_no_reparent_test_suite = {
	.name = "clk-mux-no-reparent",
	.init = clk_mux_no_reparent_test_init,
	.exit = clk_mux_no_reparent_test_exit,
	.test_cases = clk_mux_no_reparent_test_cases,
};
2661 
/* Register every suite in this file with the KUnit framework. */
kunit_test_suites(
	&clk_leaf_mux_set_rate_parent_test_suite,
	&clk_test_suite,
	&clk_multiple_parents_mux_test_suite,
	&clk_mux_no_reparent_test_suite,
	&clk_mux_notifier_test_suite,
	&clk_orphan_transparent_multiple_parent_mux_test_suite,
	&clk_orphan_transparent_single_parent_test_suite,
	&clk_orphan_two_level_root_last_test_suite,
	&clk_range_test_suite,
	&clk_range_maximize_test_suite,
	&clk_range_minimize_test_suite,
	&clk_single_parent_mux_test_suite,
	&clk_uncached_test_suite
);
MODULE_LICENSE("GPL v2");
2678