1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3 * Copyright (C) 2015 Google, Inc
4 * Written by Simon Glass <sjg@chromium.org>
5 * Copyright (c) 2016, NVIDIA CORPORATION.
6 * Copyright (c) 2018, Theobroma Systems Design und Consulting GmbH
7 */
8
9 #include <common.h>
10 #include <clk.h>
11 #include <clk-uclass.h>
12 #include <dm.h>
13 #include <dt-structs.h>
14 #include <errno.h>
15 #include <log.h>
16 #include <malloc.h>
17 #include <dm/device_compat.h>
18 #include <dm/device-internal.h>
19 #include <dm/devres.h>
20 #include <dm/read.h>
21 #include <linux/bug.h>
22 #include <linux/clk-provider.h>
23 #include <linux/err.h>
24 #include <asm/global_data.h>
25
clk_dev_ops(struct udevice * dev)26 static inline const struct clk_ops *clk_dev_ops(struct udevice *dev)
27 {
28 return (const struct clk_ops *)dev->driver->ops;
29 }
30
/* Fetch the uclass-private data of @dev, viewed as a struct clk. */
struct clk *dev_get_clk_ptr(struct udevice *dev)
{
	void *priv = dev_get_uclass_priv(dev);

	return (struct clk *)priv;
}
35
36 #if CONFIG_IS_ENABLED(OF_CONTROL)
37 # if CONFIG_IS_ENABLED(OF_PLATDATA)
clk_get_by_driver_info(struct udevice * dev,struct phandle_1_arg * cells,struct clk * clk)38 int clk_get_by_driver_info(struct udevice *dev, struct phandle_1_arg *cells,
39 struct clk *clk)
40 {
41 int ret;
42
43 ret = device_get_by_ofplat_idx(cells->idx, &clk->dev);
44 if (ret)
45 return ret;
46 clk->id = cells->arg[0];
47
48 return 0;
49 }
50 # else
clk_of_xlate_default(struct clk * clk,struct ofnode_phandle_args * args)51 static int clk_of_xlate_default(struct clk *clk,
52 struct ofnode_phandle_args *args)
53 {
54 debug("%s(clk=%p)\n", __func__, clk);
55
56 if (args->args_count > 1) {
57 debug("Invaild args_count: %d\n", args->args_count);
58 return -EINVAL;
59 }
60
61 if (args->args_count)
62 clk->id = args->args[0];
63 else
64 clk->id = 0;
65
66 clk->data = 0;
67
68 return 0;
69 }
70
/*
 * clk_get_by_index_tail() - common tail shared by the clk_get_by_index*()
 * entry points, starting from already-parsed phandle arguments.
 * @ret: result of the caller's phandle parse (non-zero routes to err path)
 * @node: node whose property was parsed (used only in the error message)
 * @args: parsed phandle args naming the provider node and clock cells
 * @list_name: property name (used only in the error message)
 * @index: index within the property (used only in the error message)
 * @clk: clock to fill in
 *
 * Return: 0 on success, or a negative error code (wrapped via log_msg_ret()).
 */
static int clk_get_by_index_tail(int ret, ofnode node,
				 struct ofnode_phandle_args *args,
				 const char *list_name, int index,
				 struct clk *clk)
{
	struct udevice *dev_clk;
	const struct clk_ops *ops;

	assert(clk);
	clk->dev = NULL;
	/* Funnel the caller's parse failure through the common report below */
	if (ret)
		goto err;

	/* Probe the provider device referenced by the phandle */
	ret = uclass_get_device_by_ofnode(UCLASS_CLK, args->node, &dev_clk);
	if (ret) {
		debug("%s: uclass_get_device_by_of_offset failed: err=%d\n",
		      __func__, ret);
		return log_msg_ret("get", ret);
	}

	clk->dev = dev_clk;

	ops = clk_dev_ops(dev_clk);

	/* Let the provider translate its cells, or fall back to cell0 == id */
	if (ops->of_xlate)
		ret = ops->of_xlate(clk, args);
	else
		ret = clk_of_xlate_default(clk, args);
	if (ret) {
		debug("of_xlate() failed: %d\n", ret);
		return log_msg_ret("xlate", ret);
	}

	return clk_request(dev_clk, clk);
err:
	debug("%s: Node '%s', property '%s', failed to request CLK index %d: %d\n",
	      __func__, ofnode_get_name(node), list_name, index, ret);

	return log_msg_ret("prop", ret);
}
111
/*
 * clk_get_by_indexed_prop() - get a clock from an arbitrary phandle-list
 * property such as "assigned-clocks" or "assigned-clock-parents".
 * @dev: consumer device whose node holds the property
 * @prop_name: name of the phandle-list property to parse
 * @index: index of the entry within the property
 * @clk: clock to fill in
 *
 * Return: 0 on success, or a negative error code.
 */
static int clk_get_by_indexed_prop(struct udevice *dev, const char *prop_name,
				   int index, struct clk *clk)
{
	int ret;
	struct ofnode_phandle_args args;

	debug("%s(dev=%p, index=%d, clk=%p)\n", __func__, dev, index, clk);

	assert(clk);
	clk->dev = NULL;

	ret = dev_read_phandle_with_args(dev, prop_name, "#clock-cells", 0,
					 index, &args);
	if (ret) {
		debug("%s: fdtdec_parse_phandle_with_args failed: err=%d\n",
		      __func__, ret);
		return log_ret(ret);
	}


	/*
	 * NOTE(review): the tail is told the list is "clocks" even when
	 * prop_name differs; the name is only used in debug output, but the
	 * message can be misleading — consider passing prop_name instead.
	 */
	return clk_get_by_index_tail(ret, dev_ofnode(dev), &args, "clocks",
				     index, clk);
}
135
clk_get_by_index(struct udevice * dev,int index,struct clk * clk)136 int clk_get_by_index(struct udevice *dev, int index, struct clk *clk)
137 {
138 struct ofnode_phandle_args args;
139 int ret;
140
141 ret = dev_read_phandle_with_args(dev, "clocks", "#clock-cells", 0,
142 index, &args);
143
144 return clk_get_by_index_tail(ret, dev_ofnode(dev), &args, "clocks",
145 index, clk);
146 }
147
/*
 * Node-based variant of clk_get_by_index() for callers without a udevice.
 * Return: 0 on success, or a negative error code.
 */
int clk_get_by_index_nodev(ofnode node, int index, struct clk *clk)
{
	struct ofnode_phandle_args args;
	int parse_ret = ofnode_parse_phandle_with_args(node, "clocks",
						       "#clock-cells", 0,
						       index, &args);

	return clk_get_by_index_tail(parse_ret, node, &args, "clocks",
				     index, clk);
}
159
clk_get_bulk(struct udevice * dev,struct clk_bulk * bulk)160 int clk_get_bulk(struct udevice *dev, struct clk_bulk *bulk)
161 {
162 int i, ret, err, count;
163
164 bulk->count = 0;
165
166 count = dev_count_phandle_with_args(dev, "clocks", "#clock-cells", 0);
167 if (count < 1)
168 return count;
169
170 bulk->clks = devm_kcalloc(dev, count, sizeof(struct clk), GFP_KERNEL);
171 if (!bulk->clks)
172 return -ENOMEM;
173
174 for (i = 0; i < count; i++) {
175 ret = clk_get_by_index(dev, i, &bulk->clks[i]);
176 if (ret < 0)
177 goto bulk_get_err;
178
179 ++bulk->count;
180 }
181
182 return 0;
183
184 bulk_get_err:
185 err = clk_release_all(bulk->clks, bulk->count);
186 if (err)
187 debug("%s: could release all clocks for %p\n",
188 __func__, dev);
189
190 return ret;
191 }
192
clk_set_default_get_by_id(struct clk * clk)193 static struct clk *clk_set_default_get_by_id(struct clk *clk)
194 {
195 struct clk *c = clk;
196
197 if (CONFIG_IS_ENABLED(CLK_CCF)) {
198 int ret = clk_get_by_id(clk->id, &c);
199
200 if (ret) {
201 debug("%s(): could not get parent clock pointer, id %lu\n",
202 __func__, clk->id);
203 ERR_PTR(ret);
204 }
205 }
206
207 return c;
208 }
209
/*
 * clk_set_default_parents() - apply "assigned-clock-parents" from DT
 * @dev: device whose node carries the assigned-clocks properties
 * @stage: 0 = pre-probe pass (skip clocks provided by @dev itself),
 *         >0 = post-probe pass (handle only clocks provided by @dev)
 *
 * A missing property is not an error. -ENOSYS from clk_set_parent() is
 * tolerated since not all drivers support reparenting.
 *
 * Return: 0 on success, or a negative error code.
 */
static int clk_set_default_parents(struct udevice *dev, int stage)
{
	struct clk clk, parent_clk, *c, *p;
	int index;
	int num_parents;
	int ret;

	num_parents = dev_count_phandle_with_args(dev, "assigned-clock-parents",
						  "#clock-cells", 0);
	if (num_parents < 0) {
		debug("%s: could not read assigned-clock-parents for %p\n",
		      __func__, dev);
		return 0;
	}

	for (index = 0; index < num_parents; index++) {
		ret = clk_get_by_indexed_prop(dev, "assigned-clock-parents",
					      index, &parent_clk);
		/* If -ENOENT, this is a no-op entry */
		if (ret == -ENOENT)
			continue;

		if (ret) {
			debug("%s: could not get parent clock %d for %s\n",
			      __func__, index, dev_read_name(dev));
			return ret;
		}

		p = clk_set_default_get_by_id(&parent_clk);
		if (IS_ERR(p))
			return PTR_ERR(p);

		ret = clk_get_by_indexed_prop(dev, "assigned-clocks",
					      index, &clk);
		if (ret) {
			debug("%s: could not get assigned clock %d for %s\n",
			      __func__, index, dev_read_name(dev));
			return ret;
		}

		/* This is clk provider device trying to reparent itself
		 * It cannot be done right now but need to wait after the
		 * device is probed
		 */
		if (stage == 0 && clk.dev == dev)
			continue;

		if (stage > 0 && clk.dev != dev)
			/* do not setup twice the parent clocks */
			continue;

		c = clk_set_default_get_by_id(&clk);
		if (IS_ERR(c))
			return PTR_ERR(c);

		ret = clk_set_parent(c, p);
		/*
		 * Not all drivers may support clock-reparenting (as of now).
		 * Ignore errors due to this.
		 */
		if (ret == -ENOSYS)
			continue;

		if (ret < 0) {
			debug("%s: failed to reparent clock %d for %s\n",
			      __func__, index, dev_read_name(dev));
			return ret;
		}
	}

	return 0;
}
282
clk_set_default_rates(struct udevice * dev,int stage)283 static int clk_set_default_rates(struct udevice *dev, int stage)
284 {
285 struct clk clk, *c;
286 int index;
287 int num_rates;
288 int size;
289 int ret = 0;
290 u32 *rates = NULL;
291
292 size = dev_read_size(dev, "assigned-clock-rates");
293 if (size < 0)
294 return 0;
295
296 num_rates = size / sizeof(u32);
297 rates = calloc(num_rates, sizeof(u32));
298 if (!rates)
299 return -ENOMEM;
300
301 ret = dev_read_u32_array(dev, "assigned-clock-rates", rates, num_rates);
302 if (ret)
303 goto fail;
304
305 for (index = 0; index < num_rates; index++) {
306 /* If 0 is passed, this is a no-op */
307 if (!rates[index])
308 continue;
309
310 ret = clk_get_by_indexed_prop(dev, "assigned-clocks",
311 index, &clk);
312 if (ret) {
313 dev_dbg(dev,
314 "could not get assigned clock %d (err = %d)\n",
315 index, ret);
316 continue;
317 }
318
319 /* This is clk provider device trying to program itself
320 * It cannot be done right now but need to wait after the
321 * device is probed
322 */
323 if (stage == 0 && clk.dev == dev)
324 continue;
325
326 if (stage > 0 && clk.dev != dev)
327 /* do not setup twice the parent clocks */
328 continue;
329
330 c = clk_set_default_get_by_id(&clk);
331 if (IS_ERR(c))
332 return PTR_ERR(c);
333
334 ret = clk_set_rate(c, rates[index]);
335
336 if (ret < 0) {
337 dev_warn(dev,
338 "failed to set rate on clock index %d (%ld) (error = %d)\n",
339 index, clk.id, ret);
340 break;
341 }
342 }
343
344 fail:
345 free(rates);
346 return ret;
347 }
348
clk_set_defaults(struct udevice * dev,int stage)349 int clk_set_defaults(struct udevice *dev, int stage)
350 {
351 int ret;
352
353 if (!dev_has_ofnode(dev))
354 return 0;
355
356 /* If this not in SPL and pre-reloc state, don't take any action. */
357 if (!(IS_ENABLED(CONFIG_SPL_BUILD) || (gd->flags & GD_FLG_RELOC)))
358 return 0;
359
360 debug("%s(%s)\n", __func__, dev_read_name(dev));
361
362 ret = clk_set_default_parents(dev, stage);
363 if (ret)
364 return ret;
365
366 ret = clk_set_default_rates(dev, stage);
367 if (ret < 0)
368 return ret;
369
370 return 0;
371 }
372
clk_get_by_name(struct udevice * dev,const char * name,struct clk * clk)373 int clk_get_by_name(struct udevice *dev, const char *name, struct clk *clk)
374 {
375 int index;
376
377 debug("%s(dev=%p, name=%s, clk=%p)\n", __func__, dev, name, clk);
378 clk->dev = NULL;
379
380 index = dev_read_stringlist_search(dev, "clock-names", name);
381 if (index < 0) {
382 debug("fdt_stringlist_search() failed: %d\n", index);
383 return index;
384 }
385
386 return clk_get_by_index(dev, index, clk);
387 }
388 # endif /* OF_PLATDATA */
389
/*
 * clk_get_by_name_nodev() - node-based variant of clk_get_by_name()
 * @node: node carrying the "clocks"/"clock-names" properties
 * @name: clock name to look up in "clock-names"
 * @clk: clock to fill in
 *
 * Return: 0 on success, or a negative error code.
 */
int clk_get_by_name_nodev(ofnode node, const char *name, struct clk *clk)
{
	int index;

	/*
	 * Fix: ofnode_get_name() returns a string, but it was printed with
	 * %p (format/argument mismatch); use %s so the node name is shown.
	 */
	debug("%s(node=%s, name=%s, clk=%p)\n", __func__,
	      ofnode_get_name(node), name, clk);
	clk->dev = NULL;

	index = ofnode_stringlist_search(node, "clock-names", name);
	if (index < 0) {
		debug("fdt_stringlist_search() failed: %d\n", index);
		return index;
	}

	return clk_get_by_index_nodev(node, index, clk);
}
406
/*
 * Optional variant of clk_get_by_name_nodev(): a missing "clock-names"
 * entry (-ENODATA) is reported as success with @clk left unrequested.
 */
int clk_get_optional_nodev(ofnode node, const char *name, struct clk *clk)
{
	int ret = clk_get_by_name_nodev(node, name, clk);

	return ret == -ENODATA ? 0 : ret;
}
417
clk_release_all(struct clk * clk,int count)418 int clk_release_all(struct clk *clk, int count)
419 {
420 int i, ret;
421
422 for (i = 0; i < count; i++) {
423 debug("%s(clk[%d]=%p)\n", __func__, i, &clk[i]);
424
425 /* check if clock has been previously requested */
426 if (!clk[i].dev)
427 continue;
428
429 ret = clk_disable(&clk[i]);
430 if (ret && ret != -ENOSYS)
431 return ret;
432
433 ret = clk_free(&clk[i]);
434 if (ret && ret != -ENOSYS)
435 return ret;
436 }
437
438 return 0;
439 }
440
441 #endif /* OF_CONTROL */
442
clk_request(struct udevice * dev,struct clk * clk)443 int clk_request(struct udevice *dev, struct clk *clk)
444 {
445 const struct clk_ops *ops;
446
447 debug("%s(dev=%p, clk=%p)\n", __func__, dev, clk);
448 if (!clk)
449 return 0;
450 ops = clk_dev_ops(dev);
451
452 clk->dev = dev;
453
454 if (!ops->request)
455 return 0;
456
457 return ops->request(clk);
458 }
459
clk_free(struct clk * clk)460 int clk_free(struct clk *clk)
461 {
462 const struct clk_ops *ops;
463
464 debug("%s(clk=%p)\n", __func__, clk);
465 if (!clk_valid(clk))
466 return 0;
467 ops = clk_dev_ops(clk->dev);
468
469 if (!ops->rfree)
470 return 0;
471
472 return ops->rfree(clk);
473 }
474
clk_get_rate(struct clk * clk)475 ulong clk_get_rate(struct clk *clk)
476 {
477 const struct clk_ops *ops;
478 int ret;
479
480 debug("%s(clk=%p)\n", __func__, clk);
481 if (!clk_valid(clk))
482 return 0;
483 ops = clk_dev_ops(clk->dev);
484
485 if (!ops->get_rate)
486 return -ENOSYS;
487
488 ret = ops->get_rate(clk);
489 if (ret)
490 return log_ret(ret);
491
492 return 0;
493 }
494
clk_get_parent(struct clk * clk)495 struct clk *clk_get_parent(struct clk *clk)
496 {
497 struct udevice *pdev;
498 struct clk *pclk;
499
500 debug("%s(clk=%p)\n", __func__, clk);
501 if (!clk_valid(clk))
502 return NULL;
503
504 pdev = dev_get_parent(clk->dev);
505 pclk = dev_get_clk_ptr(pdev);
506 if (!pclk)
507 return ERR_PTR(-ENODEV);
508
509 return pclk;
510 }
511
clk_get_parent_rate(struct clk * clk)512 long long clk_get_parent_rate(struct clk *clk)
513 {
514 const struct clk_ops *ops;
515 struct clk *pclk;
516
517 debug("%s(clk=%p)\n", __func__, clk);
518 if (!clk_valid(clk))
519 return 0;
520
521 pclk = clk_get_parent(clk);
522 if (IS_ERR(pclk))
523 return -ENODEV;
524
525 ops = clk_dev_ops(pclk->dev);
526 if (!ops->get_rate)
527 return -ENOSYS;
528
529 /* Read the 'rate' if not already set or if proper flag set*/
530 if (!pclk->rate || pclk->flags & CLK_GET_RATE_NOCACHE)
531 pclk->rate = clk_get_rate(pclk);
532
533 return pclk->rate;
534 }
535
clk_round_rate(struct clk * clk,ulong rate)536 ulong clk_round_rate(struct clk *clk, ulong rate)
537 {
538 const struct clk_ops *ops;
539
540 debug("%s(clk=%p, rate=%lu)\n", __func__, clk, rate);
541 if (!clk_valid(clk))
542 return 0;
543
544 ops = clk_dev_ops(clk->dev);
545 if (!ops->round_rate)
546 return -ENOSYS;
547
548 return ops->round_rate(clk, rate);
549 }
550
clk_set_rate(struct clk * clk,ulong rate)551 ulong clk_set_rate(struct clk *clk, ulong rate)
552 {
553 const struct clk_ops *ops;
554
555 debug("%s(clk=%p, rate=%lu)\n", __func__, clk, rate);
556 if (!clk_valid(clk))
557 return 0;
558 ops = clk_dev_ops(clk->dev);
559
560 if (!ops->set_rate)
561 return -ENOSYS;
562
563 return ops->set_rate(clk, rate);
564 }
565
clk_set_parent(struct clk * clk,struct clk * parent)566 int clk_set_parent(struct clk *clk, struct clk *parent)
567 {
568 const struct clk_ops *ops;
569 int ret;
570
571 debug("%s(clk=%p, parent=%p)\n", __func__, clk, parent);
572 if (!clk_valid(clk))
573 return 0;
574 ops = clk_dev_ops(clk->dev);
575
576 if (!ops->set_parent)
577 return -ENOSYS;
578
579 ret = ops->set_parent(clk, parent);
580 if (ret)
581 return ret;
582
583 if (CONFIG_IS_ENABLED(CLK_CCF))
584 ret = device_reparent(clk->dev, parent->dev);
585
586 return ret;
587 }
588
clk_enable(struct clk * clk)589 int clk_enable(struct clk *clk)
590 {
591 const struct clk_ops *ops;
592 struct clk *clkp = NULL;
593 int ret;
594
595 debug("%s(clk=%p)\n", __func__, clk);
596 if (!clk_valid(clk))
597 return 0;
598 ops = clk_dev_ops(clk->dev);
599
600 if (CONFIG_IS_ENABLED(CLK_CCF)) {
601 /* Take id 0 as a non-valid clk, such as dummy */
602 if (clk->id && !clk_get_by_id(clk->id, &clkp)) {
603 if (clkp->enable_count) {
604 clkp->enable_count++;
605 return 0;
606 }
607 if (clkp->dev->parent &&
608 device_get_uclass_id(clkp->dev) == UCLASS_CLK) {
609 ret = clk_enable(dev_get_clk_ptr(clkp->dev->parent));
610 if (ret) {
611 printf("Enable %s failed\n",
612 clkp->dev->parent->name);
613 return ret;
614 }
615 }
616 }
617
618 if (ops->enable) {
619 ret = ops->enable(clk);
620 if (ret) {
621 printf("Enable %s failed\n", clk->dev->name);
622 return ret;
623 }
624 }
625 if (clkp)
626 clkp->enable_count++;
627 } else {
628 if (!ops->enable)
629 return -ENOSYS;
630 return ops->enable(clk);
631 }
632
633 return 0;
634 }
635
clk_enable_bulk(struct clk_bulk * bulk)636 int clk_enable_bulk(struct clk_bulk *bulk)
637 {
638 int i, ret;
639
640 for (i = 0; i < bulk->count; i++) {
641 ret = clk_enable(&bulk->clks[i]);
642 if (ret < 0 && ret != -ENOSYS)
643 return ret;
644 }
645
646 return 0;
647 }
648
clk_disable(struct clk * clk)649 int clk_disable(struct clk *clk)
650 {
651 const struct clk_ops *ops;
652 struct clk *clkp = NULL;
653 int ret;
654
655 debug("%s(clk=%p)\n", __func__, clk);
656 if (!clk_valid(clk))
657 return 0;
658 ops = clk_dev_ops(clk->dev);
659
660 if (CONFIG_IS_ENABLED(CLK_CCF)) {
661 if (clk->id && !clk_get_by_id(clk->id, &clkp)) {
662 if (clkp->flags & CLK_IS_CRITICAL)
663 return 0;
664
665 if (clkp->enable_count == 0) {
666 printf("clk %s already disabled\n",
667 clkp->dev->name);
668 return 0;
669 }
670
671 if (--clkp->enable_count > 0)
672 return 0;
673 }
674
675 if (ops->disable) {
676 ret = ops->disable(clk);
677 if (ret)
678 return ret;
679 }
680
681 if (clkp && clkp->dev->parent &&
682 device_get_uclass_id(clkp->dev) == UCLASS_CLK) {
683 ret = clk_disable(dev_get_clk_ptr(clkp->dev->parent));
684 if (ret) {
685 printf("Disable %s failed\n",
686 clkp->dev->parent->name);
687 return ret;
688 }
689 }
690 } else {
691 if (!ops->disable)
692 return -ENOSYS;
693
694 return ops->disable(clk);
695 }
696
697 return 0;
698 }
699
clk_disable_bulk(struct clk_bulk * bulk)700 int clk_disable_bulk(struct clk_bulk *bulk)
701 {
702 int i, ret;
703
704 for (i = 0; i < bulk->count; i++) {
705 ret = clk_disable(&bulk->clks[i]);
706 if (ret < 0 && ret != -ENOSYS)
707 return ret;
708 }
709
710 return 0;
711 }
712
/*
 * Find the canonical struct clk with the given @id by scanning every
 * device in the CLK uclass. Return: 0 with *@clkp set on success,
 * -ENOENT if no device matches, or a uclass lookup error.
 */
int clk_get_by_id(ulong id, struct clk **clkp)
{
	struct uclass *uc;
	struct udevice *dev;
	int err;

	err = uclass_get(UCLASS_CLK, &uc);
	if (err)
		return err;

	uclass_foreach_dev(dev, uc) {
		struct clk *clk = dev_get_clk_ptr(dev);

		if (!clk || clk->id != id)
			continue;

		*clkp = clk;
		return 0;
	}

	return -ENOENT;
}
734
clk_is_match(const struct clk * p,const struct clk * q)735 bool clk_is_match(const struct clk *p, const struct clk *q)
736 {
737 /* trivial case: identical struct clk's or both NULL */
738 if (p == q)
739 return true;
740
741 /* trivial case #2: on the clk pointer is NULL */
742 if (!p || !q)
743 return false;
744
745 /* same device, id and data */
746 if (p->dev == q->dev && p->id == q->id && p->data == q->data)
747 return true;
748
749 return false;
750 }
751
devm_clk_release(struct udevice * dev,void * res)752 static void devm_clk_release(struct udevice *dev, void *res)
753 {
754 clk_free(res);
755 }
756
devm_clk_match(struct udevice * dev,void * res,void * data)757 static int devm_clk_match(struct udevice *dev, void *res, void *data)
758 {
759 return res == data;
760 }
761
devm_clk_get(struct udevice * dev,const char * id)762 struct clk *devm_clk_get(struct udevice *dev, const char *id)
763 {
764 int rc;
765 struct clk *clk;
766
767 clk = devres_alloc(devm_clk_release, sizeof(struct clk), __GFP_ZERO);
768 if (unlikely(!clk))
769 return ERR_PTR(-ENOMEM);
770
771 rc = clk_get_by_name(dev, id, clk);
772 if (rc)
773 return ERR_PTR(rc);
774
775 devres_add(dev, clk);
776 return clk;
777 }
778
devm_clk_get_optional(struct udevice * dev,const char * id)779 struct clk *devm_clk_get_optional(struct udevice *dev, const char *id)
780 {
781 struct clk *clk = devm_clk_get(dev, id);
782
783 if (PTR_ERR(clk) == -ENODATA)
784 return NULL;
785
786 return clk;
787 }
788
devm_clk_put(struct udevice * dev,struct clk * clk)789 void devm_clk_put(struct udevice *dev, struct clk *clk)
790 {
791 int rc;
792
793 if (!clk)
794 return;
795
796 rc = devres_release(dev, devm_clk_release, devm_clk_match, clk);
797 WARN_ON(rc);
798 }
799
clk_uclass_post_probe(struct udevice * dev)800 int clk_uclass_post_probe(struct udevice *dev)
801 {
802 /*
803 * when a clock provider is probed. Call clk_set_defaults()
804 * also after the device is probed. This takes care of cases
805 * where the DT is used to setup default parents and rates
806 * using assigned-clocks
807 */
808 clk_set_defaults(dev, 1);
809
810 return 0;
811 }
812
/* Clock uclass registration: hooks clk_uclass_post_probe() into DM. */
UCLASS_DRIVER(clk) = {
	.id = UCLASS_CLK,
	.name = "clk",
	.post_probe = clk_uclass_post_probe,
};
818