]>
Commit | Line | Data |
---|---|---|
11f1ceca GD |
1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* | |
3 | * Interconnect framework core driver | |
4 | * | |
5 | * Copyright (c) 2017-2019, Linaro Ltd. | |
6 | * Author: Georgi Djakov <georgi.djakov@linaro.org> | |
7 | */ | |
8 | ||
3697ff43 | 9 | #include <linux/debugfs.h> |
11f1ceca GD |
10 | #include <linux/device.h> |
11 | #include <linux/idr.h> | |
12 | #include <linux/init.h> | |
13 | #include <linux/interconnect.h> | |
14 | #include <linux/interconnect-provider.h> | |
15 | #include <linux/list.h> | |
16 | #include <linux/module.h> | |
17 | #include <linux/mutex.h> | |
18 | #include <linux/slab.h> | |
87e3031b | 19 | #include <linux/of.h> |
11f1ceca GD |
20 | #include <linux/overflow.h> |
21 | ||
dd018a9c GD |
22 | #include "internal.h" |
23 | ||
c46ab9db GD |
24 | #define CREATE_TRACE_POINTS |
25 | #include "trace.h" | |
26 | ||
11f1ceca GD |
27 | static DEFINE_IDR(icc_idr); |
28 | static LIST_HEAD(icc_providers); | |
29 | static DEFINE_MUTEX(icc_lock); | |
3697ff43 | 30 | static struct dentry *icc_debugfs_dir; |
11f1ceca | 31 | |
3697ff43 GD |
32 | static void icc_summary_show_one(struct seq_file *s, struct icc_node *n) |
33 | { | |
34 | if (!n) | |
35 | return; | |
36 | ||
2c5127a7 | 37 | seq_printf(s, "%-42s %12u %12u\n", |
3697ff43 GD |
38 | n->name, n->avg_bw, n->peak_bw); |
39 | } | |
40 | ||
41 | static int icc_summary_show(struct seq_file *s, void *data) | |
42 | { | |
43 | struct icc_provider *provider; | |
44 | ||
2c5127a7 GD |
45 | seq_puts(s, " node tag avg peak\n"); |
46 | seq_puts(s, "--------------------------------------------------------------------\n"); | |
3697ff43 GD |
47 | |
48 | mutex_lock(&icc_lock); | |
49 | ||
50 | list_for_each_entry(provider, &icc_providers, provider_list) { | |
51 | struct icc_node *n; | |
52 | ||
53 | list_for_each_entry(n, &provider->nodes, node_list) { | |
54 | struct icc_req *r; | |
55 | ||
56 | icc_summary_show_one(s, n); | |
57 | hlist_for_each_entry(r, &n->req_list, req_node) { | |
58 | if (!r->dev) | |
59 | continue; | |
60 | ||
2c5127a7 GD |
61 | seq_printf(s, " %-27s %12u %12u %12u\n", |
62 | dev_name(r->dev), r->tag, r->avg_bw, | |
3697ff43 GD |
63 | r->peak_bw); |
64 | } | |
65 | } | |
66 | } | |
67 | ||
68 | mutex_unlock(&icc_lock); | |
69 | ||
70 | return 0; | |
71 | } | |
83fdb2df | 72 | DEFINE_SHOW_ATTRIBUTE(icc_summary); |
3697ff43 | 73 | |
1a0013c6 LC |
74 | static void icc_graph_show_link(struct seq_file *s, int level, |
75 | struct icc_node *n, struct icc_node *m) | |
76 | { | |
77 | seq_printf(s, "%s\"%d:%s\" -> \"%d:%s\"\n", | |
78 | level == 2 ? "\t\t" : "\t", | |
79 | n->id, n->name, m->id, m->name); | |
80 | } | |
81 | ||
/* Emit one node as a graphviz "record" labelled with its id, name and the
 * currently aggregated bandwidth values.
 */
static void icc_graph_show_node(struct seq_file *s, struct icc_node *n)
{
	/* open the record: node key "<id>:<name>", label starts the same way */
	seq_printf(s, "\t\t\"%d:%s\" [label=\"%d:%s",
		   n->id, n->name, n->id, n->name);
	seq_printf(s, "\n\t\t\t|avg_bw=%ukBps", n->avg_bw);
	seq_printf(s, "\n\t\t\t|peak_bw=%ukBps", n->peak_bw);
	seq_puts(s, "\"]\n");	/* close the label and the attribute list */
}
90 | ||
/*
 * Dump the whole topology in graphviz dot format: one cluster subgraph per
 * provider containing its nodes and intra-provider links, followed by the
 * links that cross provider boundaries.
 */
static int icc_graph_show(struct seq_file *s, void *data)
{
	struct icc_provider *provider;
	struct icc_node *n;
	int cluster_index = 0;
	int i;

	seq_puts(s, "digraph {\n\trankdir = LR\n\tnode [shape = record]\n");
	mutex_lock(&icc_lock);

	/* draw providers as cluster subgraphs */
	cluster_index = 0;
	list_for_each_entry(provider, &icc_providers, provider_list) {
		seq_printf(s, "\tsubgraph cluster_%d {\n", ++cluster_index);
		if (provider->dev)
			seq_printf(s, "\t\tlabel = \"%s\"\n",
				   dev_name(provider->dev));

		/* draw nodes */
		list_for_each_entry(n, &provider->nodes, node_list)
			icc_graph_show_node(s, n);

		/* draw internal links */
		list_for_each_entry(n, &provider->nodes, node_list)
			for (i = 0; i < n->num_links; ++i)
				if (n->provider == n->links[i]->provider)
					icc_graph_show_link(s, 2, n,
							    n->links[i]);

		seq_puts(s, "\t}\n");
	}

	/* draw external links (edges whose endpoints span two providers) */
	list_for_each_entry(provider, &icc_providers, provider_list)
		list_for_each_entry(n, &provider->nodes, node_list)
			for (i = 0; i < n->num_links; ++i)
				if (n->provider != n->links[i]->provider)
					icc_graph_show_link(s, 1, n,
							    n->links[i]);

	mutex_unlock(&icc_lock);
	seq_puts(s, "}");

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(icc_graph);
137 | ||
11f1ceca GD |
/* Map a global node id to its icc_node, or NULL when no such node exists.
 * Callers hold icc_lock.
 */
static struct icc_node *node_find(const int id)
{
	return idr_find(&icc_idr, id);
}
142 | ||
/*
 * path_init() - allocate a path and attach one request per node
 * @dev: the device this path is created for (recorded in every request)
 * @dst: last node of the path; earlier nodes are reached via ->reverse
 * @num_nodes: number of nodes on the path, including the source
 *
 * Walks backwards from @dst along the ->reverse pointers that path_find()
 * recorded, so reqs[0] ends up being the source and reqs[num_nodes - 1]
 * the destination. Each visited node gets a request linked into its
 * req_list and its provider's user count bumped.
 *
 * Return: the new path, or ERR_PTR(-ENOMEM) on allocation failure.
 */
static struct icc_path *path_init(struct device *dev, struct icc_node *dst,
				  ssize_t num_nodes)
{
	struct icc_node *node = dst;
	struct icc_path *path;
	int i;

	path = kzalloc(struct_size(path, reqs, num_nodes), GFP_KERNEL);
	if (!path)
		return ERR_PTR(-ENOMEM);

	path->num_nodes = num_nodes;

	/* fill reqs[] back to front so the array is ordered src -> dst */
	for (i = num_nodes - 1; i >= 0; i--) {
		node->provider->users++;
		hlist_add_head(&path->reqs[i].req_node, &node->req_list);
		path->reqs[i].node = node;
		path->reqs[i].dev = dev;
		/* reference to previous node was saved during path traversal */
		node = node->reverse;
	}

	return path;
}
167 | ||
/*
 * path_find() - breadth-first search for a path between two nodes
 * @dev: the device requesting the path
 * @src: first node
 * @dst: last node
 *
 * BFS one depth level at a time: traverse_list is the current frontier,
 * edge_list collects the next frontier and visited_list accumulates every
 * expanded node so its is_traversed flag can be cleared afterwards. Each
 * newly reached node records its predecessor in ->reverse, which
 * path_init() follows to build the path.
 *
 * Return: a new path on success, ERR_PTR(-EPROBE_DEFER) when no path was
 * found (the topology may still be incomplete), or ERR_PTR(-ENOENT) when
 * a link points at a NULL node.
 */
static struct icc_path *path_find(struct device *dev, struct icc_node *src,
				  struct icc_node *dst)
{
	struct icc_path *path = ERR_PTR(-EPROBE_DEFER);
	struct icc_node *n, *node = NULL;
	struct list_head traverse_list;
	struct list_head edge_list;
	struct list_head visited_list;
	size_t i, depth = 1;
	bool found = false;

	INIT_LIST_HEAD(&traverse_list);
	INIT_LIST_HEAD(&edge_list);
	INIT_LIST_HEAD(&visited_list);

	list_add(&src->search_list, &traverse_list);
	src->reverse = NULL;

	do {
		list_for_each_entry_safe(node, n, &traverse_list, search_list) {
			if (node == dst) {
				found = true;
				/* park everything for the cleanup pass below */
				list_splice_init(&edge_list, &visited_list);
				list_splice_init(&traverse_list, &visited_list);
				break;
			}
			for (i = 0; i < node->num_links; i++) {
				struct icc_node *tmp = node->links[i];

				if (!tmp) {
					path = ERR_PTR(-ENOENT);
					goto out;
				}

				if (tmp->is_traversed)
					continue;

				tmp->is_traversed = true;
				tmp->reverse = node;
				list_add_tail(&tmp->search_list, &edge_list);
			}
		}

		if (found)
			break;

		/* frontier exhausted: retire it and advance to the next one */
		list_splice_init(&traverse_list, &visited_list);
		list_splice_init(&edge_list, &traverse_list);

		/* count the hops including the source */
		depth++;

	} while (!list_empty(&traverse_list));

out:

	/* reset the traversed state */
	list_for_each_entry_reverse(n, &visited_list, search_list)
		n->is_traversed = false;

	if (found)
		path = path_init(dev, dst, depth);

	return path;
}
233 | ||
/*
 * We want the path to honor all bandwidth requests, so the average and peak
 * bandwidth requirements from each consumer are aggregated at each node.
 * The aggregation is platform specific, so each platform can customize it by
 * implementing its own aggregate() function.
 */

/* Recompute node->avg_bw/peak_bw from every request attached to the node. */
static int aggregate_requests(struct icc_node *node)
{
	struct icc_provider *p = node->provider;
	struct icc_req *r;

	/* start from zero so removed or lowered requests take effect */
	node->avg_bw = 0;
	node->peak_bw = 0;

	/* give the provider a chance to reset its private aggregation state */
	if (p->pre_aggregate)
		p->pre_aggregate(node);

	hlist_for_each_entry(r, &node->req_list, req_node)
		p->aggregate(node, r->tag, r->avg_bw, r->peak_bw,
			     &node->avg_bw, &node->peak_bw);

	return 0;
}
258 | ||
/*
 * apply_constraints() - program the aggregated bandwidth along a path
 * @path: the path whose node pairs should be configured
 *
 * Walks consecutive nodes and calls the provider's set() callback for each
 * master-slave pair that belongs to the same provider; pairs spanning two
 * providers are skipped, as each provider only configures its own links.
 *
 * Return: 0 on success or the error from the first failing set() call.
 * Note that ret starts as -EINVAL, so a path containing no same-provider
 * pair returns -EINVAL.
 */
static int apply_constraints(struct icc_path *path)
{
	struct icc_node *next, *prev = NULL;
	int ret = -EINVAL;
	int i;

	for (i = 0; i < path->num_nodes; i++) {
		next = path->reqs[i].node;

		/*
		 * Both endpoints should be valid master-slave pairs of the
		 * same interconnect provider that will be configured.
		 */
		if (!prev || next->provider != prev->provider) {
			prev = next;
			continue;
		}

		/* set the constraints */
		ret = next->provider->set(prev, next);
		if (ret)
			goto out;

		prev = next;
	}
out:
	return ret;
}
287 | ||
3172e4d2 GD |
288 | int icc_std_aggregate(struct icc_node *node, u32 tag, u32 avg_bw, |
289 | u32 peak_bw, u32 *agg_avg, u32 *agg_peak) | |
290 | { | |
291 | *agg_avg += avg_bw; | |
292 | *agg_peak = max(*agg_peak, peak_bw); | |
293 | ||
294 | return 0; | |
295 | } | |
296 | EXPORT_SYMBOL_GPL(icc_std_aggregate); | |
297 | ||
87e3031b GD |
/**
 * of_icc_xlate_onecell() - Translate function using a single index.
 * @spec: OF phandle args to map into an interconnect node.
 * @data: private data (pointer to struct icc_onecell_data)
 *
 * This is a generic translate function that can be used to model simple
 * interconnect providers that have one device tree node and provide
 * multiple interconnect nodes. A single cell is used as an index into
 * an array of icc nodes specified in the icc_onecell_data struct when
 * registering the provider.
 *
 * Return: a valid icc_node pointer on success, or ERR_PTR(-EINVAL) when
 * the index is out of range.
 */
struct icc_node *of_icc_xlate_onecell(struct of_phandle_args *spec,
				      void *data)
{
	struct icc_onecell_data *icc_data = data;
	unsigned int idx = spec->args[0];

	if (idx >= icc_data->num_nodes) {
		pr_err("%s: invalid index %u\n", __func__, idx);
		return ERR_PTR(-EINVAL);
	}

	return icc_data->nodes[idx];
}
EXPORT_SYMBOL_GPL(of_icc_xlate_onecell);
322 | ||
323 | /** | |
324 | * of_icc_get_from_provider() - Look-up interconnect node | |
325 | * @spec: OF phandle args to use for look-up | |
326 | * | |
327 | * Looks for interconnect provider under the node specified by @spec and if | |
328 | * found, uses xlate function of the provider to map phandle args to node. | |
329 | * | |
330 | * Returns a valid pointer to struct icc_node on success or ERR_PTR() | |
331 | * on failure. | |
332 | */ | |
333 | static struct icc_node *of_icc_get_from_provider(struct of_phandle_args *spec) | |
334 | { | |
335 | struct icc_node *node = ERR_PTR(-EPROBE_DEFER); | |
336 | struct icc_provider *provider; | |
337 | ||
338 | if (!spec || spec->args_count != 1) | |
339 | return ERR_PTR(-EINVAL); | |
340 | ||
341 | mutex_lock(&icc_lock); | |
342 | list_for_each_entry(provider, &icc_providers, provider_list) { | |
343 | if (provider->dev->of_node == spec->np) | |
344 | node = provider->xlate(spec, provider->data); | |
345 | if (!IS_ERR(node)) | |
346 | break; | |
347 | } | |
348 | mutex_unlock(&icc_lock); | |
349 | ||
350 | return node; | |
351 | } | |
352 | ||
/**
 * of_icc_get_by_index() - get a path handle from a DT node based on index
 * @dev: device pointer for the consumer device
 * @idx: interconnect path index
 *
 * This function will search for a path between two endpoints and return an
 * icc_path handle on success. Use icc_put() to release constraints when they
 * are not needed anymore.
 * If the interconnect API is disabled, NULL is returned and the consumer
 * drivers will still build. Drivers are free to handle this specifically,
 * but they don't have to.
 *
 * Return: icc_path pointer on success or ERR_PTR() on error. NULL is returned
 * when the API is disabled or the "interconnects" DT property is missing.
 */
struct icc_path *of_icc_get_by_index(struct device *dev, int idx)
{
	struct icc_path *path;
	struct icc_node *src_node, *dst_node;
	struct device_node *np;
	struct of_phandle_args src_args, dst_args;
	int ret;

	if (!dev || !dev->of_node)
		return ERR_PTR(-ENODEV);

	np = dev->of_node;

	/*
	 * When the consumer DT node do not have "interconnects" property
	 * return a NULL path to skip setting constraints.
	 */
	if (!of_find_property(np, "interconnects", NULL))
		return NULL;

	/*
	 * We use a combination of phandle and specifier for endpoint. For now
	 * lets support only global ids and extend this in the future if needed
	 * without breaking DT compatibility.
	 */
	/* endpoints come in (src, dst) pairs: entries idx*2 and idx*2+1 */
	ret = of_parse_phandle_with_args(np, "interconnects",
					 "#interconnect-cells", idx * 2,
					 &src_args);
	if (ret)
		return ERR_PTR(ret);

	of_node_put(src_args.np);

	ret = of_parse_phandle_with_args(np, "interconnects",
					 "#interconnect-cells", idx * 2 + 1,
					 &dst_args);
	if (ret)
		return ERR_PTR(ret);

	of_node_put(dst_args.np);

	src_node = of_icc_get_from_provider(&src_args);

	if (IS_ERR(src_node)) {
		/* -EPROBE_DEFER is expected while providers register; no log */
		if (PTR_ERR(src_node) != -EPROBE_DEFER)
			dev_err(dev, "error finding src node: %ld\n",
				PTR_ERR(src_node));
		return ERR_CAST(src_node);
	}

	dst_node = of_icc_get_from_provider(&dst_args);

	if (IS_ERR(dst_node)) {
		if (PTR_ERR(dst_node) != -EPROBE_DEFER)
			dev_err(dev, "error finding dst node: %ld\n",
				PTR_ERR(dst_node));
		return ERR_CAST(dst_node);
	}

	mutex_lock(&icc_lock);
	path = path_find(dev, src_node, dst_node);
	mutex_unlock(&icc_lock);
	if (IS_ERR(path)) {
		dev_err(dev, "%s: invalid path=%ld\n", __func__, PTR_ERR(path));
		return path;
	}

	path->name = kasprintf(GFP_KERNEL, "%s-%s",
			       src_node->name, dst_node->name);
	if (!path->name) {
		kfree(path);
		return ERR_PTR(-ENOMEM);
	}

	return path;
}
EXPORT_SYMBOL_GPL(of_icc_get_by_index);
445 | ||
446 | /** | |
447 | * of_icc_get() - get a path handle from a DT node based on name | |
448 | * @dev: device pointer for the consumer device | |
449 | * @name: interconnect path name | |
450 | * | |
451 | * This function will search for a path between two endpoints and return an | |
452 | * icc_path handle on success. Use icc_put() to release constraints when they | |
453 | * are not needed anymore. | |
454 | * If the interconnect API is disabled, NULL is returned and the consumer | |
455 | * drivers will still build. Drivers are free to handle this specifically, | |
456 | * but they don't have to. | |
457 | * | |
458 | * Return: icc_path pointer on success or ERR_PTR() on error. NULL is returned | |
459 | * when the API is disabled or the "interconnects" DT property is missing. | |
460 | */ | |
461 | struct icc_path *of_icc_get(struct device *dev, const char *name) | |
462 | { | |
463 | struct device_node *np; | |
464 | int idx = 0; | |
465 | ||
466 | if (!dev || !dev->of_node) | |
467 | return ERR_PTR(-ENODEV); | |
468 | ||
469 | np = dev->of_node; | |
470 | ||
471 | /* | |
472 | * When the consumer DT node do not have "interconnects" property | |
473 | * return a NULL path to skip setting constraints. | |
474 | */ | |
475 | if (!of_find_property(np, "interconnects", NULL)) | |
476 | return NULL; | |
477 | ||
478 | /* | |
479 | * We use a combination of phandle and specifier for endpoint. For now | |
480 | * lets support only global ids and extend this in the future if needed | |
481 | * without breaking DT compatibility. | |
482 | */ | |
483 | if (name) { | |
484 | idx = of_property_match_string(np, "interconnect-names", name); | |
485 | if (idx < 0) | |
486 | return ERR_PTR(idx); | |
487 | } | |
488 | ||
489 | return of_icc_get_by_index(dev, idx); | |
490 | } | |
87e3031b GD |
491 | EXPORT_SYMBOL_GPL(of_icc_get); |
492 | ||
127ab2cc GD |
493 | /** |
494 | * icc_set_tag() - set an optional tag on a path | |
495 | * @path: the path we want to tag | |
496 | * @tag: the tag value | |
497 | * | |
498 | * This function allows consumers to append a tag to the requests associated | |
499 | * with a path, so that a different aggregation could be done based on this tag. | |
500 | */ | |
501 | void icc_set_tag(struct icc_path *path, u32 tag) | |
502 | { | |
503 | int i; | |
504 | ||
505 | if (!path) | |
506 | return; | |
507 | ||
a8dfe193 GD |
508 | mutex_lock(&icc_lock); |
509 | ||
127ab2cc GD |
510 | for (i = 0; i < path->num_nodes; i++) |
511 | path->reqs[i].tag = tag; | |
a8dfe193 GD |
512 | |
513 | mutex_unlock(&icc_lock); | |
127ab2cc GD |
514 | } |
515 | EXPORT_SYMBOL_GPL(icc_set_tag); | |
516 | ||
0430b1d5 VK |
517 | /** |
518 | * icc_get_name() - Get name of the icc path | |
519 | * @path: reference to the path returned by icc_get() | |
520 | * | |
521 | * This function is used by an interconnect consumer to get the name of the icc | |
522 | * path. | |
523 | * | |
524 | * Returns a valid pointer on success, or NULL otherwise. | |
525 | */ | |
526 | const char *icc_get_name(struct icc_path *path) | |
527 | { | |
528 | if (!path) | |
529 | return NULL; | |
530 | ||
531 | return path->name; | |
532 | } | |
533 | EXPORT_SYMBOL_GPL(icc_get_name); | |
534 | ||
11f1ceca GD |
/**
 * icc_set_bw() - set bandwidth constraints on an interconnect path
 * @path: reference to the path returned by icc_get()
 * @avg_bw: average bandwidth in kilobytes per second
 * @peak_bw: peak bandwidth in kilobytes per second
 *
 * This function is used by an interconnect consumer to express its own needs
 * in terms of bandwidth for a previously requested path between two endpoints.
 * The requests are aggregated and each node is updated accordingly. The entire
 * path is locked by a mutex to ensure that the set() is completed.
 * The @path can be NULL when the "interconnects" DT properties is missing,
 * which will mean that no constraints will be set.
 *
 * Returns 0 on success, or an appropriate error code otherwise.
 */
int icc_set_bw(struct icc_path *path, u32 avg_bw, u32 peak_bw)
{
	struct icc_node *node;
	u32 old_avg, old_peak;
	size_t i;
	int ret;

	if (!path)
		return 0;

	if (WARN_ON(IS_ERR(path) || !path->num_nodes))
		return -EINVAL;

	mutex_lock(&icc_lock);

	/*
	 * Remember the previous request so it can be restored on failure.
	 * Every req of a path carries the same bandwidth (all are written
	 * together below), so reading index 0 is sufficient.
	 */
	old_avg = path->reqs[0].avg_bw;
	old_peak = path->reqs[0].peak_bw;

	for (i = 0; i < path->num_nodes; i++) {
		node = path->reqs[i].node;

		/* update the consumer request for this path */
		path->reqs[i].avg_bw = avg_bw;
		path->reqs[i].peak_bw = peak_bw;

		/* aggregate requests for this node */
		aggregate_requests(node);

		trace_icc_set_bw(path, node, i, avg_bw, peak_bw);
	}

	ret = apply_constraints(path);
	if (ret) {
		pr_debug("interconnect: error applying constraints (%d)\n",
			 ret);

		/* roll back to the previous values and re-apply them */
		for (i = 0; i < path->num_nodes; i++) {
			node = path->reqs[i].node;
			path->reqs[i].avg_bw = old_avg;
			path->reqs[i].peak_bw = old_peak;
			aggregate_requests(node);
		}
		apply_constraints(path);
	}

	mutex_unlock(&icc_lock);

	trace_icc_set_bw_end(path, ret);

	return ret;
}
EXPORT_SYMBOL_GPL(icc_set_bw);
602 | ||
/**
 * icc_get() - return a handle for path between two endpoints
 * @dev: the device requesting the path
 * @src_id: source device port id
 * @dst_id: destination device port id
 *
 * This function will search for a path between two endpoints and return an
 * icc_path handle on success. Use icc_put() to release
 * constraints when they are not needed anymore.
 * If the interconnect API is disabled, NULL is returned and the consumer
 * drivers will still build. Drivers are free to handle this specifically,
 * but they don't have to.
 *
 * Return: icc_path pointer on success, ERR_PTR() on error or NULL if the
 * interconnect API is disabled.
 */
struct icc_path *icc_get(struct device *dev, const int src_id, const int dst_id)
{
	struct icc_node *src, *dst;
	struct icc_path *path = ERR_PTR(-EPROBE_DEFER);

	mutex_lock(&icc_lock);

	/* unknown id: the owning provider may simply not have probed yet */
	src = node_find(src_id);
	if (!src)
		goto out;

	dst = node_find(dst_id);
	if (!dst)
		goto out;

	path = path_find(dev, src, dst);
	if (IS_ERR(path)) {
		dev_err(dev, "%s: invalid path=%ld\n", __func__, PTR_ERR(path));
		goto out;
	}

	path->name = kasprintf(GFP_KERNEL, "%s-%s", src->name, dst->name);
	if (!path->name) {
		kfree(path);
		path = ERR_PTR(-ENOMEM);
	}
out:
	mutex_unlock(&icc_lock);
	return path;
}
EXPORT_SYMBOL_GPL(icc_get);
650 | ||
/**
 * icc_put() - release the reference to the icc_path
 * @path: interconnect path
 *
 * Use this function to release the constraints on a path when the path is
 * no longer needed. The constraints will be re-aggregated.
 */
void icc_put(struct icc_path *path)
{
	struct icc_node *node;
	size_t i;
	int ret;

	if (!path || WARN_ON(IS_ERR(path)))
		return;

	/* drop this consumer's bandwidth vote before tearing the path down */
	ret = icc_set_bw(path, 0, 0);
	if (ret)
		pr_err("%s: error (%d)\n", __func__, ret);

	mutex_lock(&icc_lock);
	for (i = 0; i < path->num_nodes; i++) {
		node = path->reqs[i].node;
		hlist_del(&path->reqs[i].req_node);
		/* users should never underflow; warn instead of wrapping */
		if (!WARN_ON(!node->provider->users))
			node->provider->users--;
	}
	mutex_unlock(&icc_lock);

	kfree_const(path->name);
	kfree(path);
}
EXPORT_SYMBOL_GPL(icc_put);
684 | ||
685 | static struct icc_node *icc_node_create_nolock(int id) | |
686 | { | |
687 | struct icc_node *node; | |
688 | ||
689 | /* check if node already exists */ | |
690 | node = node_find(id); | |
691 | if (node) | |
692 | return node; | |
693 | ||
694 | node = kzalloc(sizeof(*node), GFP_KERNEL); | |
695 | if (!node) | |
696 | return ERR_PTR(-ENOMEM); | |
697 | ||
698 | id = idr_alloc(&icc_idr, node, id, id + 1, GFP_KERNEL); | |
699 | if (id < 0) { | |
700 | WARN(1, "%s: couldn't get idr\n", __func__); | |
701 | kfree(node); | |
702 | return ERR_PTR(id); | |
703 | } | |
704 | ||
705 | node->id = id; | |
706 | ||
707 | return node; | |
708 | } | |
709 | ||
/**
 * icc_node_create() - create a node
 * @id: node id
 *
 * Locked wrapper around icc_node_create_nolock().
 *
 * Return: icc_node pointer on success, or ERR_PTR() on error
 */
struct icc_node *icc_node_create(int id)
{
	struct icc_node *node;

	mutex_lock(&icc_lock);

	node = icc_node_create_nolock(id);

	mutex_unlock(&icc_lock);

	return node;
}
EXPORT_SYMBOL_GPL(icc_node_create);
729 | ||
/**
 * icc_node_destroy() - destroy a node
 * @id: node id
 */
void icc_node_destroy(int id)
{
	struct icc_node *node;

	mutex_lock(&icc_lock);

	node = node_find(id);
	if (node) {
		idr_remove(&icc_idr, node->id);
		/* destroying a node that still has requests is a bug */
		WARN_ON(!hlist_empty(&node->req_list));
	}

	mutex_unlock(&icc_lock);

	/* node is NULL when the id was not found; kfree(NULL) is a no-op */
	kfree(node);
}
EXPORT_SYMBOL_GPL(icc_node_destroy);
751 | ||
/**
 * icc_link_create() - create a link between two nodes
 * @node: source node id
 * @dst_id: destination node id
 *
 * Create a link between two nodes. The nodes might belong to different
 * interconnect providers and the @dst_id node might not exist (if the
 * provider driver has not probed yet). So just create the @dst_id node
 * and when the actual provider driver is probed, the rest of the node
 * data is filled.
 *
 * Return: 0 on success, or an error code otherwise
 */
int icc_link_create(struct icc_node *node, const int dst_id)
{
	struct icc_node *dst;
	struct icc_node **new;
	int ret = 0;

	if (!node->provider)
		return -EINVAL;

	mutex_lock(&icc_lock);

	dst = node_find(dst_id);
	if (!dst) {
		/* create a placeholder; its provider fills it in at probe */
		dst = icc_node_create_nolock(dst_id);

		if (IS_ERR(dst)) {
			ret = PTR_ERR(dst);
			goto out;
		}
	}

	/* grow the links array by one slot for the new destination */
	new = krealloc(node->links,
		       (node->num_links + 1) * sizeof(*node->links),
		       GFP_KERNEL);
	if (!new) {
		ret = -ENOMEM;
		goto out;
	}

	node->links = new;
	node->links[node->num_links++] = dst;

out:
	mutex_unlock(&icc_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(icc_link_create);
803 | ||
/**
 * icc_link_destroy() - destroy a link between two nodes
 * @src: pointer to source node
 * @dst: pointer to destination node
 *
 * Return: 0 on success, or an error code otherwise
 */
int icc_link_destroy(struct icc_node *src, struct icc_node *dst)
{
	struct icc_node **new;
	size_t slot;
	int ret = 0;

	if (IS_ERR_OR_NULL(src))
		return -EINVAL;

	if (IS_ERR_OR_NULL(dst))
		return -EINVAL;

	mutex_lock(&icc_lock);

	/* locate the slot that points at @dst */
	for (slot = 0; slot < src->num_links; slot++)
		if (src->links[slot] == dst)
			break;

	if (WARN_ON(slot == src->num_links)) {
		ret = -ENXIO;
		goto out;
	}

	/* swap-remove: overwrite the slot with the last entry (order lost) */
	src->links[slot] = src->links[--src->num_links];

	/* try to shrink the array; on failure keep the old, larger one */
	new = krealloc(src->links, src->num_links * sizeof(*src->links),
		       GFP_KERNEL);
	if (new)
		src->links = new;

out:
	mutex_unlock(&icc_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(icc_link_destroy);
847 | ||
/**
 * icc_node_add() - add interconnect node to interconnect provider
 * @node: pointer to the interconnect node
 * @provider: pointer to the interconnect provider
 */
void icc_node_add(struct icc_node *node, struct icc_provider *provider)
{
	mutex_lock(&icc_lock);

	node->provider = provider;
	list_add_tail(&node->node_list, &provider->nodes);

	mutex_unlock(&icc_lock);
}
EXPORT_SYMBOL_GPL(icc_node_add);
863 | ||
/**
 * icc_node_del() - delete interconnect node from interconnect provider
 * @node: pointer to the interconnect node
 *
 * Only unlinks the node from its provider's list; the node itself is
 * freed separately via icc_node_destroy().
 */
void icc_node_del(struct icc_node *node)
{
	mutex_lock(&icc_lock);

	list_del(&node->node_list);

	mutex_unlock(&icc_lock);
}
EXPORT_SYMBOL_GPL(icc_node_del);
877 | ||
3cce2c6f GD |
878 | /** |
879 | * icc_nodes_remove() - remove all previously added nodes from provider | |
880 | * @provider: the interconnect provider we are removing nodes from | |
881 | * | |
882 | * Return: 0 on success, or an error code otherwise | |
883 | */ | |
884 | int icc_nodes_remove(struct icc_provider *provider) | |
885 | { | |
886 | struct icc_node *n, *tmp; | |
887 | ||
888 | if (WARN_ON(IS_ERR_OR_NULL(provider))) | |
889 | return -EINVAL; | |
890 | ||
891 | list_for_each_entry_safe_reverse(n, tmp, &provider->nodes, node_list) { | |
892 | icc_node_del(n); | |
893 | icc_node_destroy(n->id); | |
894 | } | |
895 | ||
896 | return 0; | |
897 | } | |
898 | EXPORT_SYMBOL_GPL(icc_nodes_remove); | |
899 | ||
11f1ceca GD |
900 | /** |
901 | * icc_provider_add() - add a new interconnect provider | |
902 | * @provider: the interconnect provider that will be added into topology | |
903 | * | |
904 | * Return: 0 on success, or an error code otherwise | |
905 | */ | |
906 | int icc_provider_add(struct icc_provider *provider) | |
907 | { | |
908 | if (WARN_ON(!provider->set)) | |
909 | return -EINVAL; | |
87e3031b GD |
910 | if (WARN_ON(!provider->xlate)) |
911 | return -EINVAL; | |
11f1ceca GD |
912 | |
913 | mutex_lock(&icc_lock); | |
914 | ||
915 | INIT_LIST_HEAD(&provider->nodes); | |
916 | list_add_tail(&provider->provider_list, &icc_providers); | |
917 | ||
918 | mutex_unlock(&icc_lock); | |
919 | ||
920 | dev_dbg(provider->dev, "interconnect provider added to topology\n"); | |
921 | ||
922 | return 0; | |
923 | } | |
924 | EXPORT_SYMBOL_GPL(icc_provider_add); | |
925 | ||
926 | /** | |
927 | * icc_provider_del() - delete previously added interconnect provider | |
928 | * @provider: the interconnect provider that will be removed from topology | |
929 | * | |
930 | * Return: 0 on success, or an error code otherwise | |
931 | */ | |
932 | int icc_provider_del(struct icc_provider *provider) | |
933 | { | |
934 | mutex_lock(&icc_lock); | |
935 | if (provider->users) { | |
936 | pr_warn("interconnect provider still has %d users\n", | |
937 | provider->users); | |
938 | mutex_unlock(&icc_lock); | |
939 | return -EBUSY; | |
940 | } | |
941 | ||
942 | if (!list_empty(&provider->nodes)) { | |
943 | pr_warn("interconnect provider still has nodes\n"); | |
944 | mutex_unlock(&icc_lock); | |
945 | return -EBUSY; | |
946 | } | |
947 | ||
948 | list_del(&provider->provider_list); | |
949 | mutex_unlock(&icc_lock); | |
950 | ||
951 | return 0; | |
952 | } | |
953 | EXPORT_SYMBOL_GPL(icc_provider_del); | |
954 | ||
3697ff43 GD |
/* Create the debugfs directory and the summary/graph files at boot. */
static int __init icc_init(void)
{
	icc_debugfs_dir = debugfs_create_dir("interconnect", NULL);
	debugfs_create_file("interconnect_summary", 0444,
			    icc_debugfs_dir, NULL, &icc_summary_fops);
	debugfs_create_file("interconnect_graph", 0444,
			    icc_debugfs_dir, NULL, &icc_graph_fops);
	return 0;
}

device_initcall(icc_init);
11f1ceca GD |
967 | MODULE_AUTHOR("Georgi Djakov <georgi.djakov@linaro.org>"); |
968 | MODULE_DESCRIPTION("Interconnect Driver Core"); | |
969 | MODULE_LICENSE("GPL v2"); |