// SPDX-License-Identifier: GPL-2.0
/*
 * Interconnect framework core driver
 *
 * Copyright (c) 2017-2019, Linaro Ltd.
 * Author: Georgi Djakov <georgi.djakov@linaro.org>
 */

#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/interconnect.h>
#include <linux/interconnect-provider.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/overflow.h>

#include "internal.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

static DEFINE_IDR(icc_idr);
static LIST_HEAD(icc_providers);
static int providers_count;
static bool synced_state;
static DEFINE_MUTEX(icc_lock);
static struct dentry *icc_debugfs_dir;

static void icc_summary_show_one(struct seq_file *s, struct icc_node *n)
{
	if (!n)
		return;

	seq_printf(s, "%-42s %12u %12u\n",
		   n->name, n->avg_bw, n->peak_bw);
}

static int icc_summary_show(struct seq_file *s, void *data)
{
	struct icc_provider *provider;

	seq_puts(s, " node                                 tag          avg         peak\n");
	seq_puts(s, "--------------------------------------------------------------------\n");

	mutex_lock(&icc_lock);

	list_for_each_entry(provider, &icc_providers, provider_list) {
		struct icc_node *n;

		list_for_each_entry(n, &provider->nodes, node_list) {
			struct icc_req *r;

			icc_summary_show_one(s, n);
			hlist_for_each_entry(r, &n->req_list, req_node) {
				u32 avg_bw = 0, peak_bw = 0;

				if (!r->dev)
					continue;

				if (r->enabled) {
					avg_bw = r->avg_bw;
					peak_bw = r->peak_bw;
				}

				seq_printf(s, " %-27s %12u %12u %12u\n",
					   dev_name(r->dev), r->tag, avg_bw, peak_bw);
			}
		}
	}

	mutex_unlock(&icc_lock);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(icc_summary);

static void icc_graph_show_link(struct seq_file *s, int level,
				struct icc_node *n, struct icc_node *m)
{
	seq_printf(s, "%s\"%d:%s\" -> \"%d:%s\"\n",
		   level == 2 ? "\t\t" : "\t",
		   n->id, n->name, m->id, m->name);
}

static void icc_graph_show_node(struct seq_file *s, struct icc_node *n)
{
	seq_printf(s, "\t\t\"%d:%s\" [label=\"%d:%s",
		   n->id, n->name, n->id, n->name);
	seq_printf(s, "\n\t\t\t|avg_bw=%ukBps", n->avg_bw);
	seq_printf(s, "\n\t\t\t|peak_bw=%ukBps", n->peak_bw);
	seq_puts(s, "\"]\n");
}

static int icc_graph_show(struct seq_file *s, void *data)
{
	struct icc_provider *provider;
	struct icc_node *n;
	int cluster_index = 0;
	int i;

	seq_puts(s, "digraph {\n\trankdir = LR\n\tnode [shape = record]\n");
	mutex_lock(&icc_lock);

	/* draw providers as cluster subgraphs */
	cluster_index = 0;
	list_for_each_entry(provider, &icc_providers, provider_list) {
		seq_printf(s, "\tsubgraph cluster_%d {\n", ++cluster_index);
		if (provider->dev)
			seq_printf(s, "\t\tlabel = \"%s\"\n",
				   dev_name(provider->dev));

		/* draw nodes */
		list_for_each_entry(n, &provider->nodes, node_list)
			icc_graph_show_node(s, n);

		/* draw internal links */
		list_for_each_entry(n, &provider->nodes, node_list)
			for (i = 0; i < n->num_links; ++i)
				if (n->provider == n->links[i]->provider)
					icc_graph_show_link(s, 2, n,
							    n->links[i]);

		seq_puts(s, "\t}\n");
	}

	/* draw external links */
	list_for_each_entry(provider, &icc_providers, provider_list)
		list_for_each_entry(n, &provider->nodes, node_list)
			for (i = 0; i < n->num_links; ++i)
				if (n->provider != n->links[i]->provider)
					icc_graph_show_link(s, 1, n,
							    n->links[i]);

	mutex_unlock(&icc_lock);
	seq_puts(s, "}");

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(icc_graph);

static struct icc_node *node_find(const int id)
{
	return idr_find(&icc_idr, id);
}

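/*
 * Allocate an icc_path with one request slot per hop, ending at @dst, and
 * attach each request to its node's req_list. The path is filled backwards
 * by following the ->reverse pointers recorded during the search, and each
 * node's provider->users count is bumped.
 */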
static struct icc_path *path_init(struct device *dev, struct icc_node *dst,
				  ssize_t num_nodes)
{
	struct icc_node *node = dst;
	struct icc_path *path;
	int i;

	path = kzalloc(struct_size(path, reqs, num_nodes), GFP_KERNEL);
	if (!path)
		return ERR_PTR(-ENOMEM);

	path->num_nodes = num_nodes;

	for (i = num_nodes - 1; i >= 0; i--) {
		node->provider->users++;
		hlist_add_head(&path->reqs[i].req_node, &node->req_list);
		path->reqs[i].node = node;
		path->reqs[i].dev = dev;
		path->reqs[i].enabled = true;
		/* reference to previous node was saved during path traversal */
		node = node->reverse;
	}

	return path;
}

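/*
 * Breadth-first search from @src to @dst across provider boundaries. Nodes
 * are moved between the traverse, edge and visited lists so their
 * ->is_traversed flags can be cleared afterwards. On success the hop count
 * (including the source) is passed to path_init(). Returns the new path,
 * ERR_PTR(-EPROBE_DEFER) if no path is found (yet), or ERR_PTR(-ENOENT) if
 * a link points to a non-existent node.
 */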
static struct icc_path *path_find(struct device *dev, struct icc_node *src,
				  struct icc_node *dst)
{
	struct icc_path *path = ERR_PTR(-EPROBE_DEFER);
	struct icc_node *n, *node = NULL;
	struct list_head traverse_list;
	struct list_head edge_list;
	struct list_head visited_list;
	size_t i, depth = 1;
	bool found = false;

	INIT_LIST_HEAD(&traverse_list);
	INIT_LIST_HEAD(&edge_list);
	INIT_LIST_HEAD(&visited_list);

	list_add(&src->search_list, &traverse_list);
	src->reverse = NULL;

	do {
		list_for_each_entry_safe(node, n, &traverse_list, search_list) {
			if (node == dst) {
				found = true;
				list_splice_init(&edge_list, &visited_list);
				list_splice_init(&traverse_list, &visited_list);
				break;
			}
			for (i = 0; i < node->num_links; i++) {
				struct icc_node *tmp = node->links[i];

				if (!tmp) {
					path = ERR_PTR(-ENOENT);
					goto out;
				}

				if (tmp->is_traversed)
					continue;

				tmp->is_traversed = true;
				tmp->reverse = node;
				list_add_tail(&tmp->search_list, &edge_list);
			}
		}

		if (found)
			break;

		list_splice_init(&traverse_list, &visited_list);
		list_splice_init(&edge_list, &traverse_list);

		/* count the hops including the source */
		depth++;

	} while (!list_empty(&traverse_list));

out:

	/* reset the traversed state */
	list_for_each_entry_reverse(n, &visited_list, search_list)
		n->is_traversed = false;

	if (found)
		path = path_init(dev, dst, depth);

	return path;
}

/*
 * We want the path to honor all bandwidth requests, so the average and peak
 * bandwidth requirements from each consumer are aggregated at each node.
 * The aggregation is platform specific, so each platform can customize it by
 * implementing its own aggregate() function.
 */

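/*
 * Recompute the node's aggregated bandwidth from all attached requests via
 * the provider's aggregate() callback. Disabled requests contribute zero
 * bandwidth. Until the system reaches synced state, the initial bandwidth
 * read at node-add time acts as a floor.
 */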
static int aggregate_requests(struct icc_node *node)
{
	struct icc_provider *p = node->provider;
	struct icc_req *r;
	u32 avg_bw, peak_bw;

	node->avg_bw = 0;
	node->peak_bw = 0;

	if (p->pre_aggregate)
		p->pre_aggregate(node);

	hlist_for_each_entry(r, &node->req_list, req_node) {
		if (r->enabled) {
			avg_bw = r->avg_bw;
			peak_bw = r->peak_bw;
		} else {
			avg_bw = 0;
			peak_bw = 0;
		}
		p->aggregate(node, r->tag, avg_bw, peak_bw,
			     &node->avg_bw, &node->peak_bw);

		/* during boot use the initial bandwidth as a floor value */
		if (!synced_state) {
			node->avg_bw = max(node->avg_bw, node->init_avg);
			node->peak_bw = max(node->peak_bw, node->init_peak);
		}
	}

	return 0;
}

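/*
 * Walk the path and program the hardware through each provider's set()
 * callback, passing consecutive (prev, next) node pairs. Pairs that span
 * two providers are skipped unless the provider supports cross-provider
 * pairs (inter_set).
 */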
static int apply_constraints(struct icc_path *path)
{
	struct icc_node *next, *prev = NULL;
	struct icc_provider *p;
	int ret = -EINVAL;
	int i;

	for (i = 0; i < path->num_nodes; i++) {
		next = path->reqs[i].node;
		p = next->provider;

		/* both endpoints should be valid master-slave pairs */
		if (!prev || (p != prev->provider && !p->inter_set)) {
			prev = next;
			continue;
		}

		/* set the constraints */
		ret = p->set(prev, next);
		if (ret)
			goto out;

		prev = next;
	}
out:
	return ret;
}

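/**
 * icc_std_aggregate() - standard bandwidth aggregation
 * @node: node to aggregate for
 * @tag: tag of the request (unused here)
 * @avg_bw: average bandwidth of the request
 * @peak_bw: peak bandwidth of the request
 * @agg_avg: pointer to the aggregated average bandwidth
 * @agg_peak: pointer to the aggregated peak bandwidth
 *
 * Sums the average bandwidths and takes the maximum of the peak bandwidths.
 * Providers without special aggregation needs can use this as their
 * aggregate() callback.
 */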
int icc_std_aggregate(struct icc_node *node, u32 tag, u32 avg_bw,
		      u32 peak_bw, u32 *agg_avg, u32 *agg_peak)
{
	*agg_avg += avg_bw;
	*agg_peak = max(*agg_peak, peak_bw);

	return 0;
}
EXPORT_SYMBOL_GPL(icc_std_aggregate);

/**
 * of_icc_xlate_onecell() - Translate function using a single index.
 * @spec: OF phandle args to map into an interconnect node.
 * @data: private data (pointer to struct icc_onecell_data)
 *
 * This is a generic translate function that can be used to model simple
 * interconnect providers that have one device tree node and provide
 * multiple interconnect nodes. A single cell is used as an index into
 * an array of icc nodes specified in the icc_onecell_data struct when
 * registering the provider.
 */
struct icc_node *of_icc_xlate_onecell(struct of_phandle_args *spec,
				      void *data)
{
	struct icc_onecell_data *icc_data = data;
	unsigned int idx = spec->args[0];

	if (idx >= icc_data->num_nodes) {
		pr_err("%s: invalid index %u\n", __func__, idx);
		return ERR_PTR(-EINVAL);
	}

	return icc_data->nodes[idx];
}
EXPORT_SYMBOL_GPL(of_icc_xlate_onecell);
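
/*
 * Illustrative sketch (assumed usage, not taken from this file): a provider
 * whose nodes form a flat array can reuse of_icc_xlate_onecell() by pointing
 * its ->data at a struct icc_onecell_data and its ->xlate at this function,
 * so that the single "#interconnect-cells" specifier value indexes nodes[]
 * directly:
 *
 *	provider->xlate = of_icc_xlate_onecell;
 *	provider->data = onecell_data;
 */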

/**
 * of_icc_get_from_provider() - Look-up interconnect node
 * @spec: OF phandle args to use for look-up
 *
 * Looks for an interconnect provider under the node specified by @spec and,
 * if found, uses the provider's xlate function to map the phandle args to an
 * interconnect node.
 *
 * Returns a valid pointer to struct icc_node_data on success or ERR_PTR()
 * on failure.
 */
struct icc_node_data *of_icc_get_from_provider(struct of_phandle_args *spec)
{
	struct icc_node *node = ERR_PTR(-EPROBE_DEFER);
	struct icc_node_data *data = NULL;
	struct icc_provider *provider;

	if (!spec)
		return ERR_PTR(-EINVAL);

	mutex_lock(&icc_lock);
	list_for_each_entry(provider, &icc_providers, provider_list) {
		if (provider->dev->of_node == spec->np) {
			if (provider->xlate_extended) {
				data = provider->xlate_extended(spec, provider->data);
				if (!IS_ERR(data)) {
					node = data->node;
					break;
				}
			} else {
				node = provider->xlate(spec, provider->data);
				if (!IS_ERR(node))
					break;
			}
		}
	}
	mutex_unlock(&icc_lock);

	if (IS_ERR(node))
		return ERR_CAST(node);

	if (!data) {
		data = kzalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return ERR_PTR(-ENOMEM);
		data->node = node;
	}

	return data;
}
EXPORT_SYMBOL_GPL(of_icc_get_from_provider);

static void devm_icc_release(struct device *dev, void *res)
{
	icc_put(*(struct icc_path **)res);
}

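/**
 * devm_of_icc_get() - get a path handle from a DT node based on name
 * @dev: device pointer for the consumer device
 * @name: interconnect path name
 *
 * Resource-managed variant of of_icc_get(): the returned path is
 * automatically released with icc_put() when @dev is unbound.
 *
 * Return: icc_path pointer on success or ERR_PTR() on error. NULL is
 * returned when the API is disabled or the "interconnects" DT property is
 * missing.
 */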
struct icc_path *devm_of_icc_get(struct device *dev, const char *name)
{
	struct icc_path **ptr, *path;

	ptr = devres_alloc(devm_icc_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	path = of_icc_get(dev, name);
	if (!IS_ERR(path)) {
		*ptr = path;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return path;
}
EXPORT_SYMBOL_GPL(devm_of_icc_get);

/**
 * of_icc_get_by_index() - get a path handle from a DT node based on index
 * @dev: device pointer for the consumer device
 * @idx: interconnect path index
 *
 * This function will search for a path between two endpoints and return an
 * icc_path handle on success. Use icc_put() to release constraints when they
 * are not needed anymore.
 * If the interconnect API is disabled, NULL is returned and the consumer
 * drivers will still build. Drivers are free to handle this specifically,
 * but they don't have to.
 *
 * Return: icc_path pointer on success or ERR_PTR() on error. NULL is returned
 * when the API is disabled or the "interconnects" DT property is missing.
 */
struct icc_path *of_icc_get_by_index(struct device *dev, int idx)
{
	struct icc_path *path;
	struct icc_node_data *src_data, *dst_data;
	struct device_node *np;
	struct of_phandle_args src_args, dst_args;
	int ret;

	if (!dev || !dev->of_node)
		return ERR_PTR(-ENODEV);

	np = dev->of_node;

	/*
	 * When the consumer DT node does not have an "interconnects" property,
	 * return a NULL path to skip setting constraints.
	 */
	if (!of_property_present(np, "interconnects"))
		return NULL;

	/*
	 * We use a combination of phandle and specifier for endpoint. For now
	 * let's support only global IDs and extend this in the future if needed
	 * without breaking DT compatibility.
	 */
	ret = of_parse_phandle_with_args(np, "interconnects",
					 "#interconnect-cells", idx * 2,
					 &src_args);
	if (ret)
		return ERR_PTR(ret);

	of_node_put(src_args.np);

	ret = of_parse_phandle_with_args(np, "interconnects",
					 "#interconnect-cells", idx * 2 + 1,
					 &dst_args);
	if (ret)
		return ERR_PTR(ret);

	of_node_put(dst_args.np);

	src_data = of_icc_get_from_provider(&src_args);

	if (IS_ERR(src_data)) {
		dev_err_probe(dev, PTR_ERR(src_data), "error finding src node\n");
		return ERR_CAST(src_data);
	}

	dst_data = of_icc_get_from_provider(&dst_args);

	if (IS_ERR(dst_data)) {
		dev_err_probe(dev, PTR_ERR(dst_data), "error finding dst node\n");
		kfree(src_data);
		return ERR_CAST(dst_data);
	}

	mutex_lock(&icc_lock);
	path = path_find(dev, src_data->node, dst_data->node);
	mutex_unlock(&icc_lock);
	if (IS_ERR(path)) {
		dev_err(dev, "%s: invalid path=%ld\n", __func__, PTR_ERR(path));
		goto free_icc_data;
	}

	if (src_data->tag && src_data->tag == dst_data->tag)
		icc_set_tag(path, src_data->tag);

	path->name = kasprintf(GFP_KERNEL, "%s-%s",
			       src_data->node->name, dst_data->node->name);
	if (!path->name) {
		kfree(path);
		path = ERR_PTR(-ENOMEM);
	}

free_icc_data:
	kfree(src_data);
	kfree(dst_data);
	return path;
}
EXPORT_SYMBOL_GPL(of_icc_get_by_index);

/**
 * of_icc_get() - get a path handle from a DT node based on name
 * @dev: device pointer for the consumer device
 * @name: interconnect path name
 *
 * This function will search for a path between two endpoints and return an
 * icc_path handle on success. Use icc_put() to release constraints when they
 * are not needed anymore.
 * If the interconnect API is disabled, NULL is returned and the consumer
 * drivers will still build. Drivers are free to handle this specifically,
 * but they don't have to.
 *
 * Return: icc_path pointer on success or ERR_PTR() on error. NULL is returned
 * when the API is disabled or the "interconnects" DT property is missing.
 */
struct icc_path *of_icc_get(struct device *dev, const char *name)
{
	struct device_node *np;
	int idx = 0;

	if (!dev || !dev->of_node)
		return ERR_PTR(-ENODEV);

	np = dev->of_node;

	/*
	 * When the consumer DT node does not have an "interconnects" property,
	 * return a NULL path to skip setting constraints.
	 */
	if (!of_property_present(np, "interconnects"))
		return NULL;

	/*
	 * We use a combination of phandle and specifier for endpoint. For now
	 * let's support only global IDs and extend this in the future if needed
	 * without breaking DT compatibility.
	 */
	if (name) {
		idx = of_property_match_string(np, "interconnect-names", name);
		if (idx < 0)
			return ERR_PTR(idx);
	}

	return of_icc_get_by_index(dev, idx);
}
EXPORT_SYMBOL_GPL(of_icc_get);
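
/*
 * Illustrative consumer usage (a sketch only; the "memory" path name and the
 * kBps bandwidth values below are hypothetical):
 *
 *	path = of_icc_get(dev, "memory");
 *	if (IS_ERR(path))
 *		return PTR_ERR(path);
 *
 *	ret = icc_set_bw(path, 1000, 2000);
 *	...
 *	icc_put(path);
 */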

/**
 * icc_set_tag() - set an optional tag on a path
 * @path: the path we want to tag
 * @tag: the tag value
 *
 * This function allows consumers to append a tag to the requests associated
 * with a path, so that a different aggregation could be done based on this tag.
 */
void icc_set_tag(struct icc_path *path, u32 tag)
{
	int i;

	if (!path)
		return;

	mutex_lock(&icc_lock);

	for (i = 0; i < path->num_nodes; i++)
		path->reqs[i].tag = tag;

	mutex_unlock(&icc_lock);
}
EXPORT_SYMBOL_GPL(icc_set_tag);

/**
 * icc_get_name() - Get name of the icc path
 * @path: reference to the path returned by icc_get()
 *
 * This function is used by an interconnect consumer to get the name of the icc
 * path.
 *
 * Returns a valid pointer on success, or NULL otherwise.
 */
const char *icc_get_name(struct icc_path *path)
{
	if (!path)
		return NULL;

	return path->name;
}
EXPORT_SYMBOL_GPL(icc_get_name);

/**
 * icc_set_bw() - set bandwidth constraints on an interconnect path
 * @path: reference to the path returned by icc_get()
 * @avg_bw: average bandwidth in kilobytes per second
 * @peak_bw: peak bandwidth in kilobytes per second
 *
 * This function is used by an interconnect consumer to express its own needs
 * in terms of bandwidth for a previously requested path between two endpoints.
 * The requests are aggregated and each node is updated accordingly. The entire
 * path is locked by a mutex to ensure that the set() is completed.
 * The @path can be NULL when the "interconnects" DT property is missing,
 * which will mean that no constraints will be set.
 *
 * Returns 0 on success, or an appropriate error code otherwise.
 */
int icc_set_bw(struct icc_path *path, u32 avg_bw, u32 peak_bw)
{
	struct icc_node *node;
	u32 old_avg, old_peak;
	size_t i;
	int ret;

	if (!path)
		return 0;

	if (WARN_ON(IS_ERR(path) || !path->num_nodes))
		return -EINVAL;

	mutex_lock(&icc_lock);

	old_avg = path->reqs[0].avg_bw;
	old_peak = path->reqs[0].peak_bw;

	for (i = 0; i < path->num_nodes; i++) {
		node = path->reqs[i].node;

		/* update the consumer request for this path */
		path->reqs[i].avg_bw = avg_bw;
		path->reqs[i].peak_bw = peak_bw;

		/* aggregate requests for this node */
		aggregate_requests(node);

		trace_icc_set_bw(path, node, i, avg_bw, peak_bw);
	}

	ret = apply_constraints(path);
	if (ret) {
		pr_debug("interconnect: error applying constraints (%d)\n",
			 ret);

		for (i = 0; i < path->num_nodes; i++) {
			node = path->reqs[i].node;
			path->reqs[i].avg_bw = old_avg;
			path->reqs[i].peak_bw = old_peak;
			aggregate_requests(node);
		}
		apply_constraints(path);
	}

	mutex_unlock(&icc_lock);

	trace_icc_set_bw_end(path, ret);

	return ret;
}
EXPORT_SYMBOL_GPL(icc_set_bw);

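/*
 * Mark every request on the path as enabled or disabled and re-apply the
 * stored bandwidth values so the aggregation reflects the new state.
 */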
static int __icc_enable(struct icc_path *path, bool enable)
{
	int i;

	if (!path)
		return 0;

	if (WARN_ON(IS_ERR(path) || !path->num_nodes))
		return -EINVAL;

	mutex_lock(&icc_lock);

	for (i = 0; i < path->num_nodes; i++)
		path->reqs[i].enabled = enable;

	mutex_unlock(&icc_lock);

	return icc_set_bw(path, path->reqs[0].avg_bw,
			  path->reqs[0].peak_bw);
}

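/*
 * icc_enable() and icc_disable() let consumers temporarily drop or restore
 * the constraints of a path without losing the requested bandwidth values.
 */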
int icc_enable(struct icc_path *path)
{
	return __icc_enable(path, true);
}
EXPORT_SYMBOL_GPL(icc_enable);

int icc_disable(struct icc_path *path)
{
	return __icc_enable(path, false);
}
EXPORT_SYMBOL_GPL(icc_disable);

/**
 * icc_get() - return a handle for path between two endpoints
 * @dev: the device requesting the path
 * @src_id: source device port id
 * @dst_id: destination device port id
 *
 * This function will search for a path between two endpoints and return an
 * icc_path handle on success. Use icc_put() to release
 * constraints when they are not needed anymore.
 * If the interconnect API is disabled, NULL is returned and the consumer
 * drivers will still build. Drivers are free to handle this specifically,
 * but they don't have to.
 *
 * Return: icc_path pointer on success, ERR_PTR() on error or NULL if the
 * interconnect API is disabled.
 */
struct icc_path *icc_get(struct device *dev, const int src_id, const int dst_id)
{
	struct icc_node *src, *dst;
	struct icc_path *path = ERR_PTR(-EPROBE_DEFER);

	mutex_lock(&icc_lock);

	src = node_find(src_id);
	if (!src)
		goto out;

	dst = node_find(dst_id);
	if (!dst)
		goto out;

	path = path_find(dev, src, dst);
	if (IS_ERR(path)) {
		dev_err(dev, "%s: invalid path=%ld\n", __func__, PTR_ERR(path));
		goto out;
	}

	path->name = kasprintf(GFP_KERNEL, "%s-%s", src->name, dst->name);
	if (!path->name) {
		kfree(path);
		path = ERR_PTR(-ENOMEM);
	}
out:
	mutex_unlock(&icc_lock);
	return path;
}
EXPORT_SYMBOL_GPL(icc_get);

/**
 * icc_put() - release the reference to the icc_path
 * @path: interconnect path
 *
 * Use this function to release the constraints on a path when the path is
 * no longer needed. The constraints will be re-aggregated.
 */
void icc_put(struct icc_path *path)
{
	struct icc_node *node;
	size_t i;
	int ret;

	if (!path || WARN_ON(IS_ERR(path)))
		return;

	ret = icc_set_bw(path, 0, 0);
	if (ret)
		pr_err("%s: error (%d)\n", __func__, ret);

	mutex_lock(&icc_lock);
	for (i = 0; i < path->num_nodes; i++) {
		node = path->reqs[i].node;
		hlist_del(&path->reqs[i].req_node);
		if (!WARN_ON(!node->provider->users))
			node->provider->users--;
	}
	mutex_unlock(&icc_lock);

	kfree_const(path->name);
	kfree(path);
}
EXPORT_SYMBOL_GPL(icc_put);

static struct icc_node *icc_node_create_nolock(int id)
{
	struct icc_node *node;

	/* check if node already exists */
	node = node_find(id);
	if (node)
		return node;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return ERR_PTR(-ENOMEM);

	id = idr_alloc(&icc_idr, node, id, id + 1, GFP_KERNEL);
	if (id < 0) {
		WARN(1, "%s: couldn't get idr\n", __func__);
		kfree(node);
		return ERR_PTR(id);
	}

	node->id = id;

	return node;
}

/**
 * icc_node_create() - create a node
 * @id: node id
 *
 * Return: icc_node pointer on success, or ERR_PTR() on error
 */
struct icc_node *icc_node_create(int id)
{
	struct icc_node *node;

	mutex_lock(&icc_lock);

	node = icc_node_create_nolock(id);

	mutex_unlock(&icc_lock);

	return node;
}
EXPORT_SYMBOL_GPL(icc_node_create);

/**
 * icc_node_destroy() - destroy a node
 * @id: node id
 */
void icc_node_destroy(int id)
{
	struct icc_node *node;

	mutex_lock(&icc_lock);

	node = node_find(id);
	if (node) {
		idr_remove(&icc_idr, node->id);
		WARN_ON(!hlist_empty(&node->req_list));
	}

	mutex_unlock(&icc_lock);

	if (!node)
		return;

	kfree(node->links);
	kfree(node);
}
EXPORT_SYMBOL_GPL(icc_node_destroy);

/**
 * icc_link_create() - create a link between two nodes
 * @node: source node
 * @dst_id: destination node id
 *
 * Create a link between two nodes. The nodes might belong to different
 * interconnect providers and the @dst_id node might not exist (if the
 * provider driver has not probed yet). So just create the @dst_id node
 * and fill in the rest of the node data when the actual provider driver
 * is probed.
 *
 * Return: 0 on success, or an error code otherwise
 */
int icc_link_create(struct icc_node *node, const int dst_id)
{
	struct icc_node *dst;
	struct icc_node **new;
	int ret = 0;

	if (!node->provider)
		return -EINVAL;

	mutex_lock(&icc_lock);

	dst = node_find(dst_id);
	if (!dst) {
		dst = icc_node_create_nolock(dst_id);

		if (IS_ERR(dst)) {
			ret = PTR_ERR(dst);
			goto out;
		}
	}

	new = krealloc(node->links,
		       (node->num_links + 1) * sizeof(*node->links),
		       GFP_KERNEL);
	if (!new) {
		ret = -ENOMEM;
		goto out;
	}

	node->links = new;
	node->links[node->num_links++] = dst;

out:
	mutex_unlock(&icc_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(icc_link_create);

/**
 * icc_node_add() - add interconnect node to interconnect provider
 * @node: pointer to the interconnect node
 * @provider: pointer to the interconnect provider
 */
void icc_node_add(struct icc_node *node, struct icc_provider *provider)
{
	if (WARN_ON(node->provider))
		return;

	mutex_lock(&icc_lock);

	node->provider = provider;
	list_add_tail(&node->node_list, &provider->nodes);

	/* get the initial bandwidth values and sync them with hardware */
	if (provider->get_bw) {
		provider->get_bw(node, &node->init_avg, &node->init_peak);
	} else {
		node->init_avg = INT_MAX;
		node->init_peak = INT_MAX;
	}
	node->avg_bw = node->init_avg;
	node->peak_bw = node->init_peak;

	if (node->avg_bw || node->peak_bw) {
		if (provider->pre_aggregate)
			provider->pre_aggregate(node);

		if (provider->aggregate)
			provider->aggregate(node, 0, node->init_avg, node->init_peak,
					    &node->avg_bw, &node->peak_bw);
		if (provider->set)
			provider->set(node, node);
	}

	node->avg_bw = 0;
	node->peak_bw = 0;

	mutex_unlock(&icc_lock);
}
EXPORT_SYMBOL_GPL(icc_node_add);

/**
 * icc_node_del() - delete interconnect node from interconnect provider
 * @node: pointer to the interconnect node
 */
void icc_node_del(struct icc_node *node)
{
	mutex_lock(&icc_lock);

	list_del(&node->node_list);

	mutex_unlock(&icc_lock);
}
EXPORT_SYMBOL_GPL(icc_node_del);

/**
 * icc_nodes_remove() - remove all previously added nodes from provider
 * @provider: the interconnect provider we are removing nodes from
 *
 * Return: 0 on success, or an error code otherwise
 */
int icc_nodes_remove(struct icc_provider *provider)
{
	struct icc_node *n, *tmp;

	if (WARN_ON(IS_ERR_OR_NULL(provider)))
		return -EINVAL;

	list_for_each_entry_safe_reverse(n, tmp, &provider->nodes, node_list) {
		icc_node_del(n);
		icc_node_destroy(n->id);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(icc_nodes_remove);

/**
 * icc_provider_init() - initialize a new interconnect provider
 * @provider: the interconnect provider to initialize
 *
 * Must be called before adding nodes to the provider.
 */
void icc_provider_init(struct icc_provider *provider)
{
	WARN_ON(!provider->set);

	INIT_LIST_HEAD(&provider->nodes);
}
EXPORT_SYMBOL_GPL(icc_provider_init);

/**
 * icc_provider_register() - register a new interconnect provider
 * @provider: the interconnect provider to register
 *
 * Return: 0 on success, or an error code otherwise
 */
int icc_provider_register(struct icc_provider *provider)
{
	if (WARN_ON(!provider->xlate && !provider->xlate_extended))
		return -EINVAL;

	mutex_lock(&icc_lock);
	list_add_tail(&provider->provider_list, &icc_providers);
	mutex_unlock(&icc_lock);

	dev_dbg(provider->dev, "interconnect provider registered\n");

	return 0;
}
EXPORT_SYMBOL_GPL(icc_provider_register);
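
/*
 * Illustrative provider bring-up order (a sketch only; the node IDs, names
 * and topology are hypothetical):
 *
 *	icc_provider_init(provider);
 *
 *	node = icc_node_create(MASTER_ID);
 *	node->name = "master";
 *	icc_node_add(node, provider);
 *	icc_link_create(node, SLAVE_ID);
 *
 *	ret = icc_provider_register(provider);
 */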

/**
 * icc_provider_deregister() - deregister an interconnect provider
 * @provider: the interconnect provider to deregister
 */
void icc_provider_deregister(struct icc_provider *provider)
{
	mutex_lock(&icc_lock);
	WARN_ON(provider->users);

	list_del(&provider->provider_list);
	mutex_unlock(&icc_lock);
}
EXPORT_SYMBOL_GPL(icc_provider_deregister);

static const struct of_device_id __maybe_unused ignore_list[] = {
	{ .compatible = "qcom,sc7180-ipa-virt" },
	{ .compatible = "qcom,sc8180x-ipa-virt" },
	{ .compatible = "qcom,sdx55-ipa-virt" },
	{ .compatible = "qcom,sm8150-ipa-virt" },
	{ .compatible = "qcom,sm8250-ipa-virt" },
	{}
};

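/*
 * Recursively count the device-tree nodes that declare "#interconnect-cells"
 * and are therefore expected to register an interconnect provider, skipping
 * the compatibles on the ignore_list above.
 */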
static int of_count_icc_providers(struct device_node *np)
{
	struct device_node *child;
	int count = 0;

	for_each_available_child_of_node(np, child) {
		if (of_property_read_bool(child, "#interconnect-cells") &&
		    likely(!of_match_node(ignore_list, child)))
			count++;
		count += of_count_icc_providers(child);
	}

	return count;
}

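/**
 * icc_sync_state() - interconnect sync_state callback for providers
 * @dev: the provider device that reached sync_state
 *
 * Once all counted providers have reached sync_state, drop the initial
 * bandwidth floor on every node and re-aggregate, so that only the actual
 * consumer requests remain applied.
 */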
void icc_sync_state(struct device *dev)
{
	struct icc_provider *p;
	struct icc_node *n;
	static int count;

	count++;

	if (count < providers_count)
		return;

	mutex_lock(&icc_lock);
	synced_state = true;
	list_for_each_entry(p, &icc_providers, provider_list) {
		dev_dbg(p->dev, "interconnect provider is in synced state\n");
		list_for_each_entry(n, &p->nodes, node_list) {
			if (n->init_avg || n->init_peak) {
				n->init_avg = 0;
				n->init_peak = 0;
				aggregate_requests(n);
				p->set(n, n);
			}
		}
	}
	mutex_unlock(&icc_lock);
}
EXPORT_SYMBOL_GPL(icc_sync_state);

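/*
 * Count the provider nodes in the device tree (needed by icc_sync_state())
 * and create the debugfs entries.
 */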
static int __init icc_init(void)
{
	struct device_node *root = of_find_node_by_path("/");

	providers_count = of_count_icc_providers(root);
	of_node_put(root);

	icc_debugfs_dir = debugfs_create_dir("interconnect", NULL);
	debugfs_create_file("interconnect_summary", 0444,
			    icc_debugfs_dir, NULL, &icc_summary_fops);
	debugfs_create_file("interconnect_graph", 0444,
			    icc_debugfs_dir, NULL, &icc_graph_fops);
	return 0;
}

device_initcall(icc_init);