// SPDX-License-Identifier: GPL-2.0
/*
 * Interconnect framework core driver
 *
 * Copyright (c) 2017-2019, Linaro Ltd.
 * Author: Georgi Djakov <georgi.djakov@linaro.org>
 */

#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/interconnect.h>
#include <linux/interconnect-provider.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/overflow.h>

static DEFINE_IDR(icc_idr);
static LIST_HEAD(icc_providers);
static DEFINE_MUTEX(icc_lock);
static struct dentry *icc_debugfs_dir;

/**
 * struct icc_req - constraints that are attached to each node
 * @req_node: entry in list of requests for the particular @node
 * @node: the interconnect node to which this constraint applies
 * @dev: reference to the device that sets the constraints
 * @tag: path tag (optional)
 * @avg_bw: an integer describing the average bandwidth in kBps
 * @peak_bw: an integer describing the peak bandwidth in kBps
 */
struct icc_req {
	struct hlist_node req_node;
	struct icc_node *node;
	struct device *dev;
	u32 tag;
	u32 avg_bw;
	u32 peak_bw;
};

/**
 * struct icc_path - interconnect path structure
 * @num_nodes: number of hops (nodes)
 * @reqs: array of the requests applicable to this path of nodes
 */
struct icc_path {
	size_t num_nodes;
	struct icc_req reqs[];
};

static void icc_summary_show_one(struct seq_file *s, struct icc_node *n)
{
	if (!n)
		return;

	seq_printf(s, "%-30s %12u %12u\n",
		   n->name, n->avg_bw, n->peak_bw);
}

static int icc_summary_show(struct seq_file *s, void *data)
{
	struct icc_provider *provider;

	seq_puts(s, " node                                   avg         peak\n");
	seq_puts(s, "--------------------------------------------------------\n");

	mutex_lock(&icc_lock);

	list_for_each_entry(provider, &icc_providers, provider_list) {
		struct icc_node *n;

		list_for_each_entry(n, &provider->nodes, node_list) {
			struct icc_req *r;

			icc_summary_show_one(s, n);
			hlist_for_each_entry(r, &n->req_list, req_node) {
				if (!r->dev)
					continue;

				seq_printf(s, "    %-26s %12u %12u\n",
					   dev_name(r->dev), r->avg_bw,
					   r->peak_bw);
			}
		}
	}

	mutex_unlock(&icc_lock);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(icc_summary);

static struct icc_node *node_find(const int id)
{
	return idr_find(&icc_idr, id);
}

static struct icc_path *path_init(struct device *dev, struct icc_node *dst,
				  ssize_t num_nodes)
{
	struct icc_node *node = dst;
	struct icc_path *path;
	int i;

	path = kzalloc(struct_size(path, reqs, num_nodes), GFP_KERNEL);
	if (!path)
		return ERR_PTR(-ENOMEM);

	path->num_nodes = num_nodes;

	for (i = num_nodes - 1; i >= 0; i--) {
		node->provider->users++;
		hlist_add_head(&path->reqs[i].req_node, &node->req_list);
		path->reqs[i].node = node;
		path->reqs[i].dev = dev;
		/* reference to previous node was saved during path traversal */
		node = node->reverse;
	}

	return path;
}

static struct icc_path *path_find(struct device *dev, struct icc_node *src,
				  struct icc_node *dst)
{
	struct icc_path *path = ERR_PTR(-EPROBE_DEFER);
	struct icc_node *n, *node = NULL;
	struct list_head traverse_list;
	struct list_head edge_list;
	struct list_head visited_list;
	size_t i, depth = 1;
	bool found = false;

	INIT_LIST_HEAD(&traverse_list);
	INIT_LIST_HEAD(&edge_list);
	INIT_LIST_HEAD(&visited_list);

	list_add(&src->search_list, &traverse_list);
	src->reverse = NULL;

	do {
		list_for_each_entry_safe(node, n, &traverse_list, search_list) {
			if (node == dst) {
				found = true;
				list_splice_init(&edge_list, &visited_list);
				list_splice_init(&traverse_list, &visited_list);
				break;
			}
			for (i = 0; i < node->num_links; i++) {
				struct icc_node *tmp = node->links[i];

				if (!tmp) {
					path = ERR_PTR(-ENOENT);
					goto out;
				}

				if (tmp->is_traversed)
					continue;

				tmp->is_traversed = true;
				tmp->reverse = node;
				list_add_tail(&tmp->search_list, &edge_list);
			}
		}

		if (found)
			break;

		list_splice_init(&traverse_list, &visited_list);
		list_splice_init(&edge_list, &traverse_list);

		/* count the hops including the source */
		depth++;

	} while (!list_empty(&traverse_list));

out:

	/* reset the traversed state */
	list_for_each_entry_reverse(n, &visited_list, search_list)
		n->is_traversed = false;

	if (found)
		path = path_init(dev, dst, depth);

	return path;
}

/*
 * We want the path to honor all bandwidth requests, so the average and peak
 * bandwidth requirements from each consumer are aggregated at each node.
 * The aggregation is platform specific, so each platform can customize it by
 * implementing its own aggregate() function.
 */

static int aggregate_requests(struct icc_node *node)
{
	struct icc_provider *p = node->provider;
	struct icc_req *r;

	node->avg_bw = 0;
	node->peak_bw = 0;

	if (p->pre_aggregate)
		p->pre_aggregate(node);

	hlist_for_each_entry(r, &node->req_list, req_node)
		p->aggregate(node, r->tag, r->avg_bw, r->peak_bw,
			     &node->avg_bw, &node->peak_bw);

	return 0;
}

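/*
 * Illustrative sketch only, not part of the core: a typical provider
 * aggregate() callback sums the average bandwidth requests and takes the
 * maximum of the peak requests. The function name below is hypothetical;
 * real providers install their own callback in struct icc_provider and may
 * use @tag to treat some requests differently.
 */
static int __maybe_unused example_aggregate(struct icc_node *node, u32 tag,
					    u32 avg_bw, u32 peak_bw,
					    u32 *agg_avg, u32 *agg_peak)
{
	/* sum the average bandwidth of all requests on this node */
	*agg_avg += avg_bw;

	/* the node must sustain the highest peak any consumer asks for */
	*agg_peak = max(*agg_peak, peak_bw);

	return 0;
}
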
static int apply_constraints(struct icc_path *path)
{
	struct icc_node *next, *prev = NULL;
	int ret = -EINVAL;
	int i;

	for (i = 0; i < path->num_nodes; i++) {
		next = path->reqs[i].node;

		/*
		 * Constraints are only applied to master-slave pairs that
		 * belong to the same interconnect provider; pairs that cross
		 * a provider boundary are skipped.
		 */
		if (!prev || next->provider != prev->provider) {
			prev = next;
			continue;
		}

		/* set the constraints */
		ret = next->provider->set(prev, next);
		if (ret)
			goto out;

		prev = next;
	}
out:
	return ret;
}

/**
 * of_icc_xlate_onecell() - Translate function using a single index.
 * @spec: OF phandle args to map into an interconnect node.
 * @data: private data (pointer to struct icc_onecell_data)
 *
 * This is a generic translate function that can be used to model simple
 * interconnect providers that have one device tree node and provide
 * multiple interconnect nodes. A single cell is used as an index into
 * an array of icc nodes specified in the icc_onecell_data struct when
 * registering the provider.
 *
 * Return: a valid icc_node pointer on success, or ERR_PTR() on error.
 */
struct icc_node *of_icc_xlate_onecell(struct of_phandle_args *spec,
				      void *data)
{
	struct icc_onecell_data *icc_data = data;
	unsigned int idx = spec->args[0];

	if (idx >= icc_data->num_nodes) {
		pr_err("%s: invalid index %u\n", __func__, idx);
		return ERR_PTR(-EINVAL);
	}

	return icc_data->nodes[idx];
}
EXPORT_SYMBOL_GPL(of_icc_xlate_onecell);

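/*
 * Illustrative sketch only, not part of the core: a provider with a single
 * device tree node can describe its nodes with icc_onecell_data and reuse
 * of_icc_xlate_onecell() as its xlate callback. The function name below is
 * hypothetical; a real driver would do this from its probe() routine.
 */
static int __maybe_unused example_setup_xlate(struct device *dev,
					      struct icc_provider *provider,
					      struct icc_node **nodes,
					      unsigned int num_nodes)
{
	struct icc_onecell_data *data;
	unsigned int i;

	/* icc_onecell_data ends in a flexible array, so size it accordingly */
	data = devm_kzalloc(dev, struct_size(data, nodes, num_nodes),
			    GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	for (i = 0; i < num_nodes; i++)
		data->nodes[i] = nodes[i];
	data->num_nodes = num_nodes;

	/* a single cell in "interconnects" is used as an index into @nodes */
	provider->xlate = of_icc_xlate_onecell;
	provider->data = data;

	return 0;
}
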
/**
 * of_icc_get_from_provider() - Look-up interconnect node
 * @spec: OF phandle args to use for look-up
 *
 * Looks for an interconnect provider under the node specified by @spec and,
 * if found, uses the provider's xlate function to map the phandle args to an
 * interconnect node.
 *
 * Returns a valid pointer to struct icc_node on success or ERR_PTR()
 * on failure.
 */
static struct icc_node *of_icc_get_from_provider(struct of_phandle_args *spec)
{
	struct icc_node *node = ERR_PTR(-EPROBE_DEFER);
	struct icc_provider *provider;

	if (!spec || spec->args_count != 1)
		return ERR_PTR(-EINVAL);

	mutex_lock(&icc_lock);
	list_for_each_entry(provider, &icc_providers, provider_list) {
		if (provider->dev->of_node == spec->np)
			node = provider->xlate(spec, provider->data);
		if (!IS_ERR(node))
			break;
	}
	mutex_unlock(&icc_lock);

	return node;
}

/**
 * of_icc_get() - get a path handle from a DT node based on name
 * @dev: device pointer for the consumer device
 * @name: interconnect path name
 *
 * This function will search for a path between two endpoints and return an
 * icc_path handle on success. Use icc_put() to release constraints when they
 * are not needed anymore.
 * If the interconnect API is disabled, NULL is returned and consumer drivers
 * will still build. Drivers are free to handle this case specifically, but
 * they don't have to.
 *
 * Return: icc_path pointer on success or ERR_PTR() on error. NULL is returned
 * when the API is disabled or the "interconnects" DT property is missing.
 */
struct icc_path *of_icc_get(struct device *dev, const char *name)
{
	struct icc_path *path = ERR_PTR(-EPROBE_DEFER);
	struct icc_node *src_node, *dst_node;
	struct device_node *np = NULL;
	struct of_phandle_args src_args, dst_args;
	int idx = 0;
	int ret;

	if (!dev || !dev->of_node)
		return ERR_PTR(-ENODEV);

	np = dev->of_node;

	/*
	 * When the consumer DT node does not have an "interconnects"
	 * property, return a NULL path to skip setting constraints.
	 */
	if (!of_find_property(np, "interconnects", NULL))
		return NULL;

	/*
	 * We use a combination of phandle and specifier for endpoint. For now
	 * let's support only global IDs and extend this in the future if
	 * needed without breaking DT compatibility.
	 */
	if (name) {
		idx = of_property_match_string(np, "interconnect-names", name);
		if (idx < 0)
			return ERR_PTR(idx);
	}

	ret = of_parse_phandle_with_args(np, "interconnects",
					 "#interconnect-cells", idx * 2,
					 &src_args);
	if (ret)
		return ERR_PTR(ret);

	of_node_put(src_args.np);

	ret = of_parse_phandle_with_args(np, "interconnects",
					 "#interconnect-cells", idx * 2 + 1,
					 &dst_args);
	if (ret)
		return ERR_PTR(ret);

	of_node_put(dst_args.np);

	src_node = of_icc_get_from_provider(&src_args);

	if (IS_ERR(src_node)) {
		if (PTR_ERR(src_node) != -EPROBE_DEFER)
			dev_err(dev, "error finding src node: %ld\n",
				PTR_ERR(src_node));
		return ERR_CAST(src_node);
	}

	dst_node = of_icc_get_from_provider(&dst_args);

	if (IS_ERR(dst_node)) {
		if (PTR_ERR(dst_node) != -EPROBE_DEFER)
			dev_err(dev, "error finding dst node: %ld\n",
				PTR_ERR(dst_node));
		return ERR_CAST(dst_node);
	}

	mutex_lock(&icc_lock);
	path = path_find(dev, src_node, dst_node);
	if (IS_ERR(path))
		dev_err(dev, "%s: invalid path=%ld\n", __func__, PTR_ERR(path));
	mutex_unlock(&icc_lock);

	return path;
}
EXPORT_SYMBOL_GPL(of_icc_get);

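/*
 * Illustrative sketch only, not part of the core: a consumer device tree
 * node typically points at provider nodes like this (the node, phandles
 * and endpoint IDs below are hypothetical):
 *
 *	sdhci@7864000 {
 *		...
 *		interconnects = <&pnoc MASTER_SDCC_1 &bimc SLAVE_EBI_CH0>;
 *		interconnect-names = "sdhc-mem";
 *	};
 *
 * and its driver then requests the named path, usually from probe():
 */
static int __maybe_unused example_request_path(struct device *dev,
					       struct icc_path **path)
{
	/*
	 * NULL is returned when CONFIG_INTERCONNECT is disabled or the
	 * "interconnects" property is missing; that is not an error.
	 */
	*path = of_icc_get(dev, "sdhc-mem");
	if (IS_ERR(*path))
		return PTR_ERR(*path);

	return 0;
}
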
/**
 * icc_set_tag() - set an optional tag on a path
 * @path: the path we want to tag
 * @tag: the tag value
 *
 * This function allows consumers to append a tag to the requests associated
 * with a path, so that a different aggregation could be done based on this tag.
 */
void icc_set_tag(struct icc_path *path, u32 tag)
{
	int i;

	if (!path)
		return;

	for (i = 0; i < path->num_nodes; i++)
		path->reqs[i].tag = tag;
}
EXPORT_SYMBOL_GPL(icc_set_tag);

/**
 * icc_set_bw() - set bandwidth constraints on an interconnect path
 * @path: reference to the path returned by icc_get()
 * @avg_bw: average bandwidth in kilobytes per second
 * @peak_bw: peak bandwidth in kilobytes per second
 *
 * This function is used by an interconnect consumer to express its own needs
 * in terms of bandwidth for a previously requested path between two endpoints.
 * The requests are aggregated and each node is updated accordingly. The entire
 * path is locked by a mutex to ensure that the set() is completed.
 * The @path can be NULL when the "interconnects" DT property is missing,
 * in which case no constraints will be set.
 *
 * Returns 0 on success, or an appropriate error code otherwise.
 */
int icc_set_bw(struct icc_path *path, u32 avg_bw, u32 peak_bw)
{
	struct icc_node *node;
	u32 old_avg, old_peak;
	size_t i;
	int ret;

	if (!path || !path->num_nodes)
		return 0;

	mutex_lock(&icc_lock);

	old_avg = path->reqs[0].avg_bw;
	old_peak = path->reqs[0].peak_bw;

	for (i = 0; i < path->num_nodes; i++) {
		node = path->reqs[i].node;

		/* update the consumer request for this path */
		path->reqs[i].avg_bw = avg_bw;
		path->reqs[i].peak_bw = peak_bw;

		/* aggregate requests for this node */
		aggregate_requests(node);
	}

	ret = apply_constraints(path);
	if (ret) {
		pr_debug("interconnect: error applying constraints (%d)\n",
			 ret);

		for (i = 0; i < path->num_nodes; i++) {
			node = path->reqs[i].node;
			path->reqs[i].avg_bw = old_avg;
			path->reqs[i].peak_bw = old_peak;
			aggregate_requests(node);
		}
		apply_constraints(path);
	}

	mutex_unlock(&icc_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(icc_set_bw);

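/*
 * Illustrative sketch only, not part of the core: a consumer that already
 * holds a path (from icc_get() or of_icc_get()) votes for bandwidth like
 * this. The tag value and the bandwidths (in kBps) are hypothetical and may
 * be updated as often as needed, e.g. on DVFS transitions.
 */
static int __maybe_unused example_vote_bandwidth(struct icc_path *path)
{
	int ret;

	/* optionally tag the requests so the provider can aggregate them differently */
	icc_set_tag(path, 0x1);

	/* request 100 MB/s average and 200 MB/s peak on every node of the path */
	ret = icc_set_bw(path, 100000, 200000);
	if (ret)
		return ret;

	/* ...and drop the constraints again when the device goes idle */
	return icc_set_bw(path, 0, 0);
}
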
/**
 * icc_get() - return a handle for a path between two endpoints
 * @dev: the device requesting the path
 * @src_id: source device port id
 * @dst_id: destination device port id
 *
 * This function will search for a path between two endpoints and return an
 * icc_path handle on success. Use icc_put() to release
 * constraints when they are not needed anymore.
 * If the interconnect API is disabled, NULL is returned and consumer drivers
 * will still build. Drivers are free to handle this case specifically, but
 * they don't have to.
 *
 * Return: icc_path pointer on success, ERR_PTR() on error or NULL if the
 * interconnect API is disabled.
 */
struct icc_path *icc_get(struct device *dev, const int src_id, const int dst_id)
{
	struct icc_node *src, *dst;
	struct icc_path *path = ERR_PTR(-EPROBE_DEFER);

	mutex_lock(&icc_lock);

	src = node_find(src_id);
	if (!src)
		goto out;

	dst = node_find(dst_id);
	if (!dst)
		goto out;

	path = path_find(dev, src, dst);
	if (IS_ERR(path))
		dev_err(dev, "%s: invalid path=%ld\n", __func__, PTR_ERR(path));

out:
	mutex_unlock(&icc_lock);
	return path;
}
EXPORT_SYMBOL_GPL(icc_get);

/**
 * icc_put() - release the reference to the icc_path
 * @path: interconnect path
 *
 * Use this function to release the constraints on a path when the path is
 * no longer needed. The constraints will be re-aggregated.
 */
void icc_put(struct icc_path *path)
{
	struct icc_node *node;
	size_t i;
	int ret;

	if (!path || WARN_ON(IS_ERR(path)))
		return;

	ret = icc_set_bw(path, 0, 0);
	if (ret)
		pr_err("%s: error (%d)\n", __func__, ret);

	mutex_lock(&icc_lock);
	for (i = 0; i < path->num_nodes; i++) {
		node = path->reqs[i].node;
		hlist_del(&path->reqs[i].req_node);
		if (!WARN_ON(!node->provider->users))
			node->provider->users--;
	}
	mutex_unlock(&icc_lock);

	kfree(path);
}
EXPORT_SYMBOL_GPL(icc_put);

static struct icc_node *icc_node_create_nolock(int id)
{
	struct icc_node *node;

	/* check if node already exists */
	node = node_find(id);
	if (node)
		return node;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return ERR_PTR(-ENOMEM);

	id = idr_alloc(&icc_idr, node, id, id + 1, GFP_KERNEL);
	if (id < 0) {
		WARN(1, "%s: couldn't get idr\n", __func__);
		kfree(node);
		return ERR_PTR(id);
	}

	node->id = id;

	return node;
}

/**
 * icc_node_create() - create a node
 * @id: node id
 *
 * Return: icc_node pointer on success, or ERR_PTR() on error
 */
struct icc_node *icc_node_create(int id)
{
	struct icc_node *node;

	mutex_lock(&icc_lock);

	node = icc_node_create_nolock(id);

	mutex_unlock(&icc_lock);

	return node;
}
EXPORT_SYMBOL_GPL(icc_node_create);

/**
 * icc_node_destroy() - destroy a node
 * @id: node id
 */
void icc_node_destroy(int id)
{
	struct icc_node *node;

	mutex_lock(&icc_lock);

	node = node_find(id);
	if (node) {
		idr_remove(&icc_idr, node->id);
		WARN_ON(!hlist_empty(&node->req_list));
	}

	mutex_unlock(&icc_lock);

	kfree(node);
}
EXPORT_SYMBOL_GPL(icc_node_destroy);

/**
 * icc_link_create() - create a link between two nodes
 * @node: pointer to the source node
 * @dst_id: destination node id
 *
 * Create a link between two nodes. The nodes might belong to different
 * interconnect providers and the @dst_id node might not exist yet (if the
 * provider driver has not probed). In that case, just create the @dst_id
 * node now; the rest of its data will be filled in when the actual provider
 * driver is probed.
 *
 * Return: 0 on success, or an error code otherwise
 */
int icc_link_create(struct icc_node *node, const int dst_id)
{
	struct icc_node *dst;
	struct icc_node **new;
	int ret = 0;

	if (!node->provider)
		return -EINVAL;

	mutex_lock(&icc_lock);

	dst = node_find(dst_id);
	if (!dst) {
		dst = icc_node_create_nolock(dst_id);

		if (IS_ERR(dst)) {
			ret = PTR_ERR(dst);
			goto out;
		}
	}

	new = krealloc(node->links,
		       (node->num_links + 1) * sizeof(*node->links),
		       GFP_KERNEL);
	if (!new) {
		ret = -ENOMEM;
		goto out;
	}

	node->links = new;
	node->links[node->num_links++] = dst;

out:
	mutex_unlock(&icc_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(icc_link_create);

/**
 * icc_link_destroy() - destroy a link between two nodes
 * @src: pointer to source node
 * @dst: pointer to destination node
 *
 * Return: 0 on success, or an error code otherwise
 */
int icc_link_destroy(struct icc_node *src, struct icc_node *dst)
{
	struct icc_node **new;
	size_t slot;
	int ret = 0;

	if (IS_ERR_OR_NULL(src))
		return -EINVAL;

	if (IS_ERR_OR_NULL(dst))
		return -EINVAL;

	mutex_lock(&icc_lock);

	for (slot = 0; slot < src->num_links; slot++)
		if (src->links[slot] == dst)
			break;

	if (WARN_ON(slot == src->num_links)) {
		ret = -ENXIO;
		goto out;
	}

	src->links[slot] = src->links[--src->num_links];

	new = krealloc(src->links, src->num_links * sizeof(*src->links),
		       GFP_KERNEL);
	if (new)
		src->links = new;

out:
	mutex_unlock(&icc_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(icc_link_destroy);

/**
 * icc_node_add() - add interconnect node to interconnect provider
 * @node: pointer to the interconnect node
 * @provider: pointer to the interconnect provider
 */
void icc_node_add(struct icc_node *node, struct icc_provider *provider)
{
	mutex_lock(&icc_lock);

	node->provider = provider;
	list_add_tail(&node->node_list, &provider->nodes);

	mutex_unlock(&icc_lock);
}
EXPORT_SYMBOL_GPL(icc_node_add);

/**
 * icc_node_del() - delete interconnect node from interconnect provider
 * @node: pointer to the interconnect node
 */
void icc_node_del(struct icc_node *node)
{
	mutex_lock(&icc_lock);

	list_del(&node->node_list);

	mutex_unlock(&icc_lock);
}
EXPORT_SYMBOL_GPL(icc_node_del);

/**
 * icc_provider_add() - add a new interconnect provider
 * @provider: the interconnect provider that will be added into topology
 *
 * Return: 0 on success, or an error code otherwise
 */
int icc_provider_add(struct icc_provider *provider)
{
	if (WARN_ON(!provider->set))
		return -EINVAL;
	if (WARN_ON(!provider->xlate))
		return -EINVAL;

	mutex_lock(&icc_lock);

	INIT_LIST_HEAD(&provider->nodes);
	list_add_tail(&provider->provider_list, &icc_providers);

	mutex_unlock(&icc_lock);

	dev_dbg(provider->dev, "interconnect provider added to topology\n");

	return 0;
}
EXPORT_SYMBOL_GPL(icc_provider_add);

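/*
 * Illustrative sketch only, not part of the core: a provider driver first
 * registers itself and then builds its topology with icc_node_create(),
 * icc_node_add() and icc_link_create(). The function name, node name and
 * the EXAMPLE_* node IDs below are hypothetical; icc_link_create() will
 * transparently create a placeholder for a destination node whose provider
 * has not probed yet.
 */
static int __maybe_unused example_register_topology(struct icc_provider *provider)
{
	struct icc_node *node;
	int ret;

	/* ->set, ->aggregate, ->xlate, ->dev etc. were filled in by the driver */
	ret = icc_provider_add(provider);
	if (ret)
		return ret;

	node = icc_node_create(EXAMPLE_MASTER_ID);	/* hypothetical id */
	if (IS_ERR(node)) {
		icc_provider_del(provider);
		return PTR_ERR(node);
	}

	node->name = "example_master";
	icc_node_add(node, provider);

	/* link the master towards a slave, possibly owned by another provider */
	return icc_link_create(node, EXAMPLE_SLAVE_ID);	/* hypothetical id */
}
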
/**
 * icc_provider_del() - delete previously added interconnect provider
 * @provider: the interconnect provider that will be removed from topology
 *
 * Return: 0 on success, or an error code otherwise
 */
int icc_provider_del(struct icc_provider *provider)
{
	mutex_lock(&icc_lock);
	if (provider->users) {
		pr_warn("interconnect provider still has %d users\n",
			provider->users);
		mutex_unlock(&icc_lock);
		return -EBUSY;
	}

	if (!list_empty(&provider->nodes)) {
		pr_warn("interconnect provider still has nodes\n");
		mutex_unlock(&icc_lock);
		return -EBUSY;
	}

	list_del(&provider->provider_list);
	mutex_unlock(&icc_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(icc_provider_del);

static int __init icc_init(void)
{
	icc_debugfs_dir = debugfs_create_dir("interconnect", NULL);
	debugfs_create_file("interconnect_summary", 0444,
			    icc_debugfs_dir, NULL, &icc_summary_fops);
	return 0;
}

static void __exit icc_exit(void)
{
	debugfs_remove_recursive(icc_debugfs_dir);
}
module_init(icc_init);
module_exit(icc_exit);

MODULE_AUTHOR("Georgi Djakov <georgi.djakov@linaro.org>");
MODULE_DESCRIPTION("Interconnect Driver Core");
MODULE_LICENSE("GPL v2");