/*
 * In-Memory Collection (IMC) Performance Monitor counter support.
 *
 * Copyright (C) 2017 Madhavan Srinivasan, IBM Corporation.
 *           (C) 2017 Anju T Sudhakar, IBM Corporation.
 *           (C) 2017 Hemant K Shaw, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or later version.
 */
#include <linux/perf_event.h>
#include <linux/slab.h>
#include <asm/opal.h>
#include <asm/imc-pmu.h>
#include <asm/cputhreads.h>
#include <asm/smp.h>
#include <linux/string.h>

/* Nest IMC data structures and variables */

/*
 * Used to avoid races in counting the nest-pmu units during hotplug
 * register and unregister
 */
static DEFINE_MUTEX(nest_init_lock);
static DEFINE_PER_CPU(struct imc_pmu_ref *, local_nest_imc_refc);
static struct imc_pmu **per_nest_pmu_arr;
static cpumask_t nest_imc_cpumask;
struct imc_pmu_ref *nest_imc_refc;
static int nest_pmus;

/* Core IMC data structures and variables */

static cpumask_t core_imc_cpumask;
struct imc_pmu_ref *core_imc_refc;
static struct imc_pmu *core_imc_pmu;

/* Thread IMC data structures and variables */

static DEFINE_PER_CPU(u64 *, thread_imc_mem);
static struct imc_pmu *thread_imc_pmu;
static int thread_imc_mem_size;

struct imc_pmu *imc_event_to_pmu(struct perf_event *event)
{
	return container_of(event->pmu, struct imc_pmu, pmu);
}

PMU_FORMAT_ATTR(event, "config:0-40");
PMU_FORMAT_ATTR(offset, "config:0-31");
PMU_FORMAT_ATTR(rvalue, "config:32");
PMU_FORMAT_ATTR(mode, "config:33-40");
static struct attribute *imc_format_attrs[] = {
	&format_attr_event.attr,
	&format_attr_offset.attr,
	&format_attr_rvalue.attr,
	&format_attr_mode.attr,
	NULL,
};

static struct attribute_group imc_format_group = {
	.name = "format",
	.attrs = imc_format_attrs,
};
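
/*
 * Illustrative usage (not from this file): with the format strings above,
 * an event whose sysfs entry reads "event=0x180" can be counted with
 * something like:
 *
 *	perf stat -e nest_mcs01_imc/event=0x180/ -a sleep 1
 *
 * where "nest_mcs01_imc" is one possible nest pmu name; the actual pmu
 * and event names depend on the device tree of the running system. The
 * offset/rvalue/mode fields decode the same config word.
 */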

/* Get the cpumask printed to a buffer "buf" */
static ssize_t imc_pmu_cpumask_get_attr(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct pmu *pmu = dev_get_drvdata(dev);
	struct imc_pmu *imc_pmu = container_of(pmu, struct imc_pmu, pmu);
	cpumask_t *active_mask;

	switch (imc_pmu->domain) {
	case IMC_DOMAIN_NEST:
		active_mask = &nest_imc_cpumask;
		break;
	case IMC_DOMAIN_CORE:
		active_mask = &core_imc_cpumask;
		break;
	default:
		return 0;
	}

	return cpumap_print_to_pagebuf(true, buf, active_mask);
}

static DEVICE_ATTR(cpumask, S_IRUGO, imc_pmu_cpumask_get_attr, NULL);

static struct attribute *imc_pmu_cpumask_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static struct attribute_group imc_pmu_cpumask_attr_group = {
	.attrs = imc_pmu_cpumask_attrs,
};

/* device_str_attr_create : Populate event "name" and string "str" in attribute */
static struct attribute *device_str_attr_create(const char *name, const char *str)
{
	struct perf_pmu_events_attr *attr;

	attr = kzalloc(sizeof(*attr), GFP_KERNEL);
	if (!attr)
		return NULL;
	sysfs_attr_init(&attr->attr.attr);

	attr->event_str = str;
	attr->attr.attr.name = name;
	attr->attr.attr.mode = 0444;
	attr->attr.show = perf_event_sysfs_show;

	return &attr->attr.attr;
}

struct imc_events *imc_parse_event(struct device_node *np, const char *scale,
				   const char *unit, const char *prefix, u32 base)
{
	struct imc_events *event;
	const char *s;
	u32 reg;

	event = kzalloc(sizeof(struct imc_events), GFP_KERNEL);
	if (!event)
		return NULL;

	if (of_property_read_u32(np, "reg", &reg))
		goto error;
	/* Add the base_reg value to the "reg" */
	event->value = base + reg;

	if (of_property_read_string(np, "event-name", &s))
		goto error;

	event->name = kasprintf(GFP_KERNEL, "%s%s", prefix, s);
	if (!event->name)
		goto error;

	if (of_property_read_string(np, "scale", &s))
		s = scale;

	if (s) {
		event->scale = kstrdup(s, GFP_KERNEL);
		if (!event->scale)
			goto error;
	}

	if (of_property_read_string(np, "unit", &s))
		s = unit;

	if (s) {
		event->unit = kstrdup(s, GFP_KERNEL);
		if (!event->unit)
			goto error;
	}

	return event;
error:
	kfree(event->unit);
	kfree(event->scale);
	kfree(event->name);
	kfree(event);

	return NULL;
}
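
/*
 * Sketch of the device tree layout the parser above expects (property
 * names follow the code; the concrete values are made up for the
 * example):
 *
 *	event@98 {
 *		reg = <0x98>;
 *		event-name = "PB_CYC";
 *	};
 *
 * With an "events-prefix" of "PM_" and a base of 0x0, the parsed event
 * is named "PM_PB_CYC" with value 0x98; "scale" and "unit" fall back to
 * the parent's values when the event node does not provide its own.
 */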

/*
 * update_events_in_group: Update the "events" information in an attr_group
 *                         and assign the attr_group to the pmu "pmu".
 */
static int update_events_in_group(struct device_node *node, struct imc_pmu *pmu)
{
	struct attribute_group *attr_group;
	struct attribute **attrs, *dev_str;
	struct device_node *np, *pmu_events;
	struct imc_events *ev;
	u32 handle, base_reg;
	int i = 0, j = 0, ct;
	const char *prefix, *g_scale, *g_unit;
	const char *ev_val_str, *ev_scale_str, *ev_unit_str;

	if (!of_property_read_u32(node, "events", &handle))
		pmu_events = of_find_node_by_phandle(handle);
	else
		return 0;

	/* Did not find any node with the given phandle */
	if (!pmu_events)
		return 0;

	/* Get a count of the number of child nodes */
	ct = of_get_child_count(pmu_events);

	/* Get the event prefix */
	if (of_property_read_string(node, "events-prefix", &prefix))
		return 0;

	/* Get the global unit and scale data if available */
	if (of_property_read_string(node, "scale", &g_scale))
		g_scale = NULL;

	if (of_property_read_string(node, "unit", &g_unit))
		g_unit = NULL;

	/* "reg" property gives out the base offset of the counters data */
	of_property_read_u32(node, "reg", &base_reg);

	/* Allocate memory for the events */
	pmu->events = kcalloc(ct, sizeof(struct imc_events), GFP_KERNEL);
	if (!pmu->events)
		return -ENOMEM;

	ct = 0;
	/* Parse the events and update the struct */
	for_each_child_of_node(pmu_events, np) {
		ev = imc_parse_event(np, g_scale, g_unit, prefix, base_reg);
		if (ev)
			pmu->events[ct++] = ev;
	}

	/* Allocate memory for the attribute group */
	attr_group = kzalloc(sizeof(*attr_group), GFP_KERNEL);
	if (!attr_group)
		return -ENOMEM;

	/*
	 * Allocate memory for attributes.
	 * Since we have a count of events for this pmu, we also allocate
	 * memory for the scale and unit attribute for now.
	 * "ct" has the total event structs added from the events-parent node.
	 * So allocate three times the "ct" (this includes event, event_scale and
	 * event_unit).
	 */
	attrs = kcalloc(((ct * 3) + 1), sizeof(struct attribute *), GFP_KERNEL);
	if (!attrs) {
		kfree(attr_group);
		kfree(pmu->events);
		return -ENOMEM;
	}

	attr_group->name = "events";
	attr_group->attrs = attrs;
	do {
		ev_val_str = kasprintf(GFP_KERNEL, "event=0x%x", pmu->events[i]->value);
		dev_str = device_str_attr_create(pmu->events[i]->name, ev_val_str);
		if (!dev_str)
			continue;

		attrs[j++] = dev_str;
		if (pmu->events[i]->scale) {
			ev_scale_str = kasprintf(GFP_KERNEL, "%s.scale", pmu->events[i]->name);
			dev_str = device_str_attr_create(ev_scale_str, pmu->events[i]->scale);
			if (!dev_str)
				continue;

			attrs[j++] = dev_str;
		}

		if (pmu->events[i]->unit) {
			ev_unit_str = kasprintf(GFP_KERNEL, "%s.unit", pmu->events[i]->name);
			dev_str = device_str_attr_create(ev_unit_str, pmu->events[i]->unit);
			if (!dev_str)
				continue;

			attrs[j++] = dev_str;
		}
	} while (++i < ct);

	/* Save the event attribute */
	pmu->attr_groups[IMC_EVENT_ATTR] = attr_group;

	kfree(pmu->events);
	return 0;
}

/* get_nest_pmu_ref: Return the imc_pmu_ref struct for the given node */
static struct imc_pmu_ref *get_nest_pmu_ref(int cpu)
{
	return per_cpu(local_nest_imc_refc, cpu);
}

static void nest_change_cpu_context(int old_cpu, int new_cpu)
{
	struct imc_pmu **pn = per_nest_pmu_arr;

	if (old_cpu < 0 || new_cpu < 0)
		return;

	while (*pn) {
		perf_pmu_migrate_context(&(*pn)->pmu, old_cpu, new_cpu);
		pn++;
	}
}

static int ppc_nest_imc_cpu_offline(unsigned int cpu)
{
	int nid, target = -1;
	const struct cpumask *l_cpumask;
	struct imc_pmu_ref *ref;

	/*
	 * Check in the designated list for this cpu. Don't bother
	 * if it is not one of them.
	 */
	if (!cpumask_test_and_clear_cpu(cpu, &nest_imc_cpumask))
		return 0;

	/*
	 * Now that this cpu is one of the designated ones,
	 * find the next cpu which is a) online and b) in the same chip.
	 */
	nid = cpu_to_node(cpu);
	l_cpumask = cpumask_of_node(nid);
	target = cpumask_any_but(l_cpumask, cpu);

	/*
	 * Update the cpumask with the target cpu and
	 * migrate the context if needed
	 */
	if (target >= 0 && target < nr_cpu_ids) {
		cpumask_set_cpu(target, &nest_imc_cpumask);
		nest_change_cpu_context(cpu, target);
	} else {
		opal_imc_counters_stop(OPAL_IMC_COUNTERS_NEST,
				       get_hard_smp_processor_id(cpu));
		/*
		 * If this is the last cpu in this chip, then skip the reference
		 * count mutex lock and make the reference count on this chip zero.
		 */
		ref = get_nest_pmu_ref(cpu);
		if (!ref)
			return -EINVAL;

		ref->refc = 0;
	}
	return 0;
}

static int ppc_nest_imc_cpu_online(unsigned int cpu)
{
	const struct cpumask *l_cpumask;
	static struct cpumask tmp_mask;
	int res;

	/* Get the cpumask of this node */
	l_cpumask = cpumask_of_node(cpu_to_node(cpu));

	/*
	 * If this is not the first online CPU on this node, then
	 * just return.
	 */
	if (cpumask_and(&tmp_mask, l_cpumask, &nest_imc_cpumask))
		return 0;

	/*
	 * If this is the first online cpu on this node,
	 * disable the nest counters by making an OPAL call.
	 */
	res = opal_imc_counters_stop(OPAL_IMC_COUNTERS_NEST,
				     get_hard_smp_processor_id(cpu));
	if (res)
		return res;

	/* Make this CPU the designated target for counter collection */
	cpumask_set_cpu(cpu, &nest_imc_cpumask);
	return 0;
}

static int nest_pmu_cpumask_init(void)
{
	return cpuhp_setup_state(CPUHP_AP_PERF_POWERPC_NEST_IMC_ONLINE,
				 "perf/powerpc/imc:online",
				 ppc_nest_imc_cpu_online,
				 ppc_nest_imc_cpu_offline);
}

static void nest_imc_counters_release(struct perf_event *event)
{
	int rc, node_id;
	struct imc_pmu_ref *ref;

	if (event->cpu < 0)
		return;

	node_id = cpu_to_node(event->cpu);

	/*
	 * See if we need to disable the nest PMU.
	 * If no events are currently in use, then we have to take a
	 * mutex to ensure that we don't race with another task doing
	 * enable or disable the nest counters.
	 */
	ref = get_nest_pmu_ref(event->cpu);
	if (!ref)
		return;

	/* Take the mutex lock for this node and then decrement the reference count */
	mutex_lock(&ref->lock);
	if (ref->refc == 0) {
		/*
		 * The scenario where this is true is when a perf session is
		 * started and is followed by offlining of all cpus in a given
		 * node.
		 *
		 * In the cpuhotplug offline path, ppc_nest_imc_cpu_offline()
		 * sets ref->refc to zero, if the cpu which is about to go
		 * offline is the last cpu in a given node, and makes an OPAL
		 * call to disable the engine in that node.
		 */
		mutex_unlock(&ref->lock);
		return;
	}
	ref->refc--;
	if (ref->refc == 0) {
		rc = opal_imc_counters_stop(OPAL_IMC_COUNTERS_NEST,
					    get_hard_smp_processor_id(event->cpu));
		if (rc) {
			mutex_unlock(&ref->lock);
			pr_err("nest-imc: Unable to stop the counters for node %d\n", node_id);
			return;
		}
	} else if (ref->refc < 0) {
		WARN(1, "nest-imc: Invalid event reference count\n");
		ref->refc = 0;
	}
	mutex_unlock(&ref->lock);
}
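
/*
 * Summary of the nest refcounting protocol implemented above and in
 * nest_imc_event_init() below (a sketch of the intended flow, derived
 * from this code):
 *
 *	event_init: lock(ref); if (refc++ == 0) OPAL start; unlock(ref);
 *	release:    lock(ref); if (--refc == 0) OPAL stop;  unlock(ref);
 *
 * so the per-node engine is started by the first event on a node and
 * stopped by the last, with the hotplug-offline path allowed to force
 * refc to zero when the whole node goes away.
 */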

static int nest_imc_event_init(struct perf_event *event)
{
	int chip_id, rc, node_id;
	u32 l_config, config = event->attr.config;
	struct imc_mem_info *pcni;
	struct imc_pmu *pmu;
	struct imc_pmu_ref *ref;
	bool flag = false;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/* Sampling not supported */
	if (event->hw.sample_period)
		return -EINVAL;

	/* unsupported modes and filters */
	if (event->attr.exclude_user ||
	    event->attr.exclude_kernel ||
	    event->attr.exclude_hv ||
	    event->attr.exclude_idle ||
	    event->attr.exclude_host ||
	    event->attr.exclude_guest)
		return -EINVAL;

	if (event->cpu < 0)
		return -EINVAL;

	pmu = imc_event_to_pmu(event);

	/* Sanity check for config (event offset) */
	if ((config & IMC_EVENT_OFFSET_MASK) > pmu->counter_mem_size)
		return -EINVAL;

	/*
	 * Nest HW counter memory resides in a per-chip reserved memory
	 * region (HOMER). Get the base memory address for this cpu.
	 */
	chip_id = cpu_to_chip_id(event->cpu);
	pcni = pmu->mem_info;
	do {
		if (pcni->id == chip_id) {
			flag = true;
			break;
		}
		pcni++;
	} while (pcni);

	if (!flag)
		return -ENODEV;

	/*
	 * Add the event offset to the base address.
	 */
	l_config = config & IMC_EVENT_OFFSET_MASK;
	event->hw.event_base = (u64)pcni->vbase + l_config;
	node_id = cpu_to_node(event->cpu);

	/*
	 * Get the imc_pmu_ref struct for this node.
	 * Take the mutex lock and then increment the count of nest pmu events
	 * inited.
	 */
	ref = get_nest_pmu_ref(event->cpu);
	if (!ref)
		return -EINVAL;

	mutex_lock(&ref->lock);
	if (ref->refc == 0) {
		rc = opal_imc_counters_start(OPAL_IMC_COUNTERS_NEST,
					     get_hard_smp_processor_id(event->cpu));
		if (rc) {
			mutex_unlock(&ref->lock);
			pr_err("nest-imc: Unable to start the counters for node %d\n",
			       node_id);
			return rc;
		}
	}
	++ref->refc;
	mutex_unlock(&ref->lock);

	event->destroy = nest_imc_counters_release;
	return 0;
}

/*
 * core_imc_mem_init : Initializes memory for the current core.
 *
 * Uses alloc_pages_node() and uses the returned address as an argument to
 * an opal call to configure the pdbar. The address sent as an argument is
 * converted to a physical address before the opal call is made. This is the
 * base address at which the core imc counters are populated.
 */
static int core_imc_mem_init(int cpu, int size)
{
	int nid, rc = 0, core_id = (cpu / threads_per_core);
	struct imc_mem_info *mem_info;

	/*
	 * alloc_pages_node() will allocate memory for the core in the
	 * local node only.
	 */
	nid = cpu_to_node(cpu);
	mem_info = &core_imc_pmu->mem_info[core_id];
	mem_info->id = core_id;

	/* We need only vbase for core counters */
	mem_info->vbase = page_address(alloc_pages_node(nid,
				  GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE |
				  __GFP_NOWARN, get_order(size)));
	if (!mem_info->vbase)
		return -ENOMEM;

	/* Init the mutex */
	core_imc_refc[core_id].id = core_id;
	mutex_init(&core_imc_refc[core_id].lock);

	rc = opal_imc_counters_init(OPAL_IMC_COUNTERS_CORE,
				    __pa((void *)mem_info->vbase),
				    get_hard_smp_processor_id(cpu));
	if (rc) {
		free_pages((u64)mem_info->vbase, get_order(size));
		mem_info->vbase = NULL;
	}

	return rc;
}

static bool is_core_imc_mem_inited(int cpu)
{
	struct imc_mem_info *mem_info;
	int core_id = (cpu / threads_per_core);

	mem_info = &core_imc_pmu->mem_info[core_id];
	if (!mem_info->vbase)
		return false;

	return true;
}

static int ppc_core_imc_cpu_online(unsigned int cpu)
{
	const struct cpumask *l_cpumask;
	static struct cpumask tmp_mask;
	int ret = 0;

	/* Get the cpumask for this core */
	l_cpumask = cpu_sibling_mask(cpu);

	/* If a cpu for this core is already set, then don't do anything */
	if (cpumask_and(&tmp_mask, l_cpumask, &core_imc_cpumask))
		return 0;

	if (!is_core_imc_mem_inited(cpu)) {
		ret = core_imc_mem_init(cpu, core_imc_pmu->counter_mem_size);
		if (ret) {
			pr_info("core_imc memory allocation for cpu %d failed\n", cpu);
			return ret;
		}
	}

	/* set the cpu in the mask */
	cpumask_set_cpu(cpu, &core_imc_cpumask);
	return 0;
}

static int ppc_core_imc_cpu_offline(unsigned int cpu)
{
	unsigned int ncpu, core_id;
	struct imc_pmu_ref *ref;

	/*
	 * Clear this cpu out of the mask; if it is not present in the mask,
	 * don't bother doing anything.
	 */
	if (!cpumask_test_and_clear_cpu(cpu, &core_imc_cpumask))
		return 0;

	/*
	 * Check whether core_imc is registered. We could end up here
	 * if the cpuhotplug callback registration fails, i.e., the callback
	 * invokes the offline path for all successfully registered cpus.
	 * At this stage, the core_imc pmu will not be registered and we
	 * should return here.
	 *
	 * We return with a zero since this is not an offline failure.
	 * And cpuhp_setup_state() returns the actual failure reason
	 * to the caller, which in turn will call the cleanup routine.
	 */
	if (!core_imc_pmu->pmu.event_init)
		return 0;

	/* Find any online cpu in that core except the current "cpu" */
	ncpu = cpumask_any_but(cpu_sibling_mask(cpu), cpu);

	if (ncpu >= 0 && ncpu < nr_cpu_ids) {
		cpumask_set_cpu(ncpu, &core_imc_cpumask);
		perf_pmu_migrate_context(&core_imc_pmu->pmu, cpu, ncpu);
	} else {
		/*
		 * If this is the last cpu in this core, then skip taking the
		 * reference count mutex lock for this core and directly zero
		 * "refc" for this core.
		 */
		opal_imc_counters_stop(OPAL_IMC_COUNTERS_CORE,
				       get_hard_smp_processor_id(cpu));
		core_id = cpu / threads_per_core;
		ref = &core_imc_refc[core_id];
		if (!ref)
			return -EINVAL;

		ref->refc = 0;
	}
	return 0;
}

static int core_imc_pmu_cpumask_init(void)
{
	return cpuhp_setup_state(CPUHP_AP_PERF_POWERPC_CORE_IMC_ONLINE,
				 "perf/powerpc/imc_core:online",
				 ppc_core_imc_cpu_online,
				 ppc_core_imc_cpu_offline);
}

static void core_imc_counters_release(struct perf_event *event)
{
	int rc, core_id;
	struct imc_pmu_ref *ref;

	if (event->cpu < 0)
		return;
	/*
	 * See if we need to disable the IMC PMU.
	 * If no events are currently in use, then we have to take a
	 * mutex to ensure that we don't race with another task doing
	 * enable or disable the core counters.
	 */
	core_id = event->cpu / threads_per_core;

	/* Take the mutex lock and decrement the reference count for this core */
	ref = &core_imc_refc[core_id];
	if (!ref)
		return;

	mutex_lock(&ref->lock);
	if (ref->refc == 0) {
		/*
		 * The scenario where this is true is when a perf session is
		 * started and is followed by offlining of all cpus in a given
		 * core.
		 *
		 * In the cpuhotplug offline path, ppc_core_imc_cpu_offline()
		 * sets ref->refc to zero, if the cpu which is about to go
		 * offline is the last cpu in a given core, and makes an OPAL
		 * call to disable the engine in that core.
		 */
		mutex_unlock(&ref->lock);
		return;
	}
	ref->refc--;
	if (ref->refc == 0) {
		rc = opal_imc_counters_stop(OPAL_IMC_COUNTERS_CORE,
					    get_hard_smp_processor_id(event->cpu));
		if (rc) {
			mutex_unlock(&ref->lock);
			pr_err("IMC: Unable to stop the counters for core %d\n", core_id);
			return;
		}
	} else if (ref->refc < 0) {
		WARN(1, "core-imc: Invalid event reference count\n");
		ref->refc = 0;
	}
	mutex_unlock(&ref->lock);
}

static int core_imc_event_init(struct perf_event *event)
{
	int core_id, rc;
	u64 config = event->attr.config;
	struct imc_mem_info *pcmi;
	struct imc_pmu *pmu;
	struct imc_pmu_ref *ref;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/* Sampling not supported */
	if (event->hw.sample_period)
		return -EINVAL;

	/* unsupported modes and filters */
	if (event->attr.exclude_user ||
	    event->attr.exclude_kernel ||
	    event->attr.exclude_hv ||
	    event->attr.exclude_idle ||
	    event->attr.exclude_host ||
	    event->attr.exclude_guest)
		return -EINVAL;

	if (event->cpu < 0)
		return -EINVAL;

	event->hw.idx = -1;
	pmu = imc_event_to_pmu(event);

	/* Sanity check for config (event offset) */
	if (((config & IMC_EVENT_OFFSET_MASK) > pmu->counter_mem_size))
		return -EINVAL;

	if (!is_core_imc_mem_inited(event->cpu))
		return -ENODEV;

	core_id = event->cpu / threads_per_core;
	pcmi = &core_imc_pmu->mem_info[core_id];
	if ((!pcmi->vbase))
		return -ENODEV;

	/* Get the core_imc mutex for this core */
	ref = &core_imc_refc[core_id];
	if (!ref)
		return -EINVAL;

	/*
	 * Core pmu units are enabled only when they are used.
	 * See if this is triggered for the first time.
	 * If yes, take the mutex lock and enable the core counters.
	 * If not, just increment the count in the core_imc_refc struct.
	 */
	mutex_lock(&ref->lock);
	if (ref->refc == 0) {
		rc = opal_imc_counters_start(OPAL_IMC_COUNTERS_CORE,
					     get_hard_smp_processor_id(event->cpu));
		if (rc) {
			mutex_unlock(&ref->lock);
			pr_err("core-imc: Unable to start the counters for core %d\n",
			       core_id);
			return rc;
		}
	}
	++ref->refc;
	mutex_unlock(&ref->lock);

	event->hw.event_base = (u64)pcmi->vbase + (config & IMC_EVENT_OFFSET_MASK);
	event->destroy = core_imc_counters_release;
	return 0;
}

/*
 * Allocates a page of memory for each of the online cpus, and writes the
 * physical base address of that page to the LDBAR for that cpu.
 *
 * LDBAR Register Layout:
 *
 *  0          4         8         12        16        20        24        28
 * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
 *   | |         [   ]    [                   Counter Address [8:50]
 *   | * Mode    |
 *   |           * PB Scope
 *   * Enable/Disable
 *
 *  32        36        40        44        48        52        56        60
 * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
 *            Counter Address [8:50]       ]
 *
 */
static int thread_imc_mem_alloc(int cpu_id, int size)
{
	u64 ldbar_value, *local_mem = per_cpu(thread_imc_mem, cpu_id);
	int nid = cpu_to_node(cpu_id);

	if (!local_mem) {
		/*
		 * This case could happen only once at start, since we don't
		 * free the memory in the cpu offline path.
		 */
		local_mem = page_address(alloc_pages_node(nid,
				  GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE |
				  __GFP_NOWARN, get_order(size)));
		if (!local_mem)
			return -ENOMEM;

		per_cpu(thread_imc_mem, cpu_id) = local_mem;
	}

	ldbar_value = ((u64)local_mem & THREAD_IMC_LDBAR_MASK) | THREAD_IMC_ENABLE;

	mtspr(SPRN_LDBAR, ldbar_value);
	return 0;
}
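
/*
 * Worked example for the LDBAR write above (the address is made up for
 * illustration): per the layout comment, THREAD_IMC_LDBAR_MASK keeps the
 * counter-address bits of the per-cpu page and THREAD_IMC_ENABLE sets
 * the enable bit, so for a page at 0xc000000ff1230000:
 *
 *	ldbar_value = (0xc000000ff1230000 & THREAD_IMC_LDBAR_MASK)
 *			| THREAD_IMC_ENABLE;
 *
 * and the resulting value is what mtspr() writes to SPRN_LDBAR.
 */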

static int ppc_thread_imc_cpu_online(unsigned int cpu)
{
	return thread_imc_mem_alloc(cpu, thread_imc_mem_size);
}

static int ppc_thread_imc_cpu_offline(unsigned int cpu)
{
	mtspr(SPRN_LDBAR, 0);
	return 0;
}

static int thread_imc_cpu_init(void)
{
	return cpuhp_setup_state(CPUHP_AP_PERF_POWERPC_THREAD_IMC_ONLINE,
				 "perf/powerpc/imc_thread:online",
				 ppc_thread_imc_cpu_online,
				 ppc_thread_imc_cpu_offline);
}

void thread_imc_pmu_sched_task(struct perf_event_context *ctx,
			       bool sched_in)
{
	int core_id;
	struct imc_pmu_ref *ref;

	if (!is_core_imc_mem_inited(smp_processor_id()))
		return;

	core_id = smp_processor_id() / threads_per_core;
	/*
	 * imc pmus are enabled only when they are used.
	 * See if this is triggered for the first time.
	 * If yes, take the mutex lock and enable the counters.
	 * If not, just increment the count in the ref count struct.
	 */
	ref = &core_imc_refc[core_id];
	if (!ref)
		return;

	if (sched_in) {
		mutex_lock(&ref->lock);
		if (ref->refc == 0) {
			if (opal_imc_counters_start(OPAL_IMC_COUNTERS_CORE,
			    get_hard_smp_processor_id(smp_processor_id()))) {
				mutex_unlock(&ref->lock);
				pr_err("thread-imc: Unable to start the counter for core %d\n",
				       core_id);
				return;
			}
		}
		++ref->refc;
		mutex_unlock(&ref->lock);
	} else {
		mutex_lock(&ref->lock);
		ref->refc--;
		if (ref->refc == 0) {
			if (opal_imc_counters_stop(OPAL_IMC_COUNTERS_CORE,
			    get_hard_smp_processor_id(smp_processor_id()))) {
				mutex_unlock(&ref->lock);
				pr_err("thread-imc: Unable to stop the counters for core %d\n",
				       core_id);
				return;
			}
		} else if (ref->refc < 0) {
			ref->refc = 0;
		}
		mutex_unlock(&ref->lock);
	}

	return;
}

static int thread_imc_event_init(struct perf_event *event)
{
	u32 config = event->attr.config;
	struct task_struct *target;
	struct imc_pmu *pmu;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/* Sampling not supported */
	if (event->hw.sample_period)
		return -EINVAL;

	event->hw.idx = -1;
	pmu = imc_event_to_pmu(event);

	/* Sanity check for config offset */
	if (((config & IMC_EVENT_OFFSET_MASK) > pmu->counter_mem_size))
		return -EINVAL;

	target = event->hw.target;
	if (!target)
		return -EINVAL;

	event->pmu->task_ctx_nr = perf_sw_context;
	return 0;
}

static bool is_thread_imc_pmu(struct perf_event *event)
{
	if (!strncmp(event->pmu->name, "thread_imc", strlen("thread_imc")))
		return true;

	return false;
}

static u64 *get_event_base_addr(struct perf_event *event)
{
	u64 addr;

	if (is_thread_imc_pmu(event)) {
		addr = (u64)per_cpu(thread_imc_mem, smp_processor_id());
		return (u64 *)(addr + (event->attr.config & IMC_EVENT_OFFSET_MASK));
	}

	return (u64 *)event->hw.event_base;
}

static void thread_imc_pmu_start_txn(struct pmu *pmu,
				     unsigned int txn_flags)
{
	if (txn_flags & ~PERF_PMU_TXN_ADD)
		return;
	perf_pmu_disable(pmu);
}

static void thread_imc_pmu_cancel_txn(struct pmu *pmu)
{
	perf_pmu_enable(pmu);
}

static int thread_imc_pmu_commit_txn(struct pmu *pmu)
{
	perf_pmu_enable(pmu);
	return 0;
}
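
/*
 * A note on the transaction hooks above (a sketch of the generic perf
 * core contract, not something specific to this file): for group
 * scheduling, perf core brackets a series of ->add() calls with
 * start_txn(PERF_PMU_TXN_ADD) and either commit_txn() or cancel_txn().
 * Since IMC counters are free running, there is nothing to roll back,
 * so all three hooks reduce to disabling and re-enabling the pmu.
 */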

static u64 imc_read_counter(struct perf_event *event)
{
	u64 *addr, data;

	/*
	 * In-Memory Collection (IMC) counters are free flowing counters.
	 * So we take a snapshot of the counter value on enable and save it
	 * to calculate the delta at later stage to present the event counter
	 * value.
	 */
	addr = get_event_base_addr(event);
	data = be64_to_cpu(READ_ONCE(*addr));
	local64_set(&event->hw.prev_count, data);

	return data;
}

static void imc_event_update(struct perf_event *event)
{
	u64 counter_prev, counter_new, final_count;

	counter_prev = local64_read(&event->hw.prev_count);
	counter_new = imc_read_counter(event);
	final_count = counter_new - counter_prev;

	/* Update the delta to the event count */
	local64_add(final_count, &event->count);
}
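
/*
 * Worked example of the delta logic above (numbers are illustrative):
 * if hw.prev_count holds 1000 from the previous snapshot and the
 * in-memory counter now reads 1500, imc_read_counter() returns 1500 and
 * re-arms hw.prev_count to 1500, so imc_event_update() adds the delta
 * of 500 to event->count. Re-arming prev_count inside the read is what
 * keeps back-to-back updates from double counting.
 */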

static void imc_event_start(struct perf_event *event, int flags)
{
	/*
	 * In Memory Counters are free flowing counters. HW or the microcode
	 * keeps adding to the counter offset in memory. To get the event
	 * counter value, we snapshot the value here and calculate the
	 * delta at a later point.
	 */
	imc_read_counter(event);
}

static void imc_event_stop(struct perf_event *event, int flags)
{
	/*
	 * Take a snapshot, calculate the delta, and update
	 * the event counter values.
	 */
	imc_event_update(event);
}

static int imc_event_add(struct perf_event *event, int flags)
{
	if (flags & PERF_EF_START)
		imc_event_start(event, flags);

	return 0;
}

static int thread_imc_event_add(struct perf_event *event, int flags)
{
	if (flags & PERF_EF_START)
		imc_event_start(event, flags);

	/* Enable the sched_task to start the engine */
	perf_sched_cb_inc(event->ctx->pmu);
	return 0;
}

static void thread_imc_event_del(struct perf_event *event, int flags)
{
	/*
	 * Take a snapshot, calculate the delta, and update
	 * the event counter values.
	 */
	imc_event_update(event);
	perf_sched_cb_dec(event->ctx->pmu);
}

/* update_pmu_ops : Populate the appropriate operations for "pmu" */
static int update_pmu_ops(struct imc_pmu *pmu)
{
	pmu->pmu.task_ctx_nr = perf_invalid_context;
	pmu->pmu.add = imc_event_add;
	pmu->pmu.del = imc_event_stop;
	pmu->pmu.start = imc_event_start;
	pmu->pmu.stop = imc_event_stop;
	pmu->pmu.read = imc_event_update;
	pmu->pmu.attr_groups = pmu->attr_groups;
	pmu->attr_groups[IMC_FORMAT_ATTR] = &imc_format_group;

	switch (pmu->domain) {
	case IMC_DOMAIN_NEST:
		pmu->pmu.event_init = nest_imc_event_init;
		pmu->attr_groups[IMC_CPUMASK_ATTR] = &imc_pmu_cpumask_attr_group;
		break;
	case IMC_DOMAIN_CORE:
		pmu->pmu.event_init = core_imc_event_init;
		pmu->attr_groups[IMC_CPUMASK_ATTR] = &imc_pmu_cpumask_attr_group;
		break;
	case IMC_DOMAIN_THREAD:
		pmu->pmu.event_init = thread_imc_event_init;
		pmu->pmu.sched_task = thread_imc_pmu_sched_task;
		pmu->pmu.add = thread_imc_event_add;
		pmu->pmu.del = thread_imc_event_del;
		pmu->pmu.start_txn = thread_imc_pmu_start_txn;
		pmu->pmu.cancel_txn = thread_imc_pmu_cancel_txn;
		pmu->pmu.commit_txn = thread_imc_pmu_commit_txn;
		break;
	default:
		break;
	}

	return 0;
}

/* init_nest_pmu_ref: Initialize the imc_pmu_ref struct for all the nodes */
static int init_nest_pmu_ref(void)
{
	int nid, i, cpu;

	nest_imc_refc = kcalloc(num_possible_nodes(), sizeof(*nest_imc_refc),
				GFP_KERNEL);

	if (!nest_imc_refc)
		return -ENOMEM;

	i = 0;
	for_each_node(nid) {
		/*
		 * Mutex lock to avoid races while tracking the number of
		 * sessions using the chip's nest pmu units.
		 */
		mutex_init(&nest_imc_refc[i].lock);

		/*
		 * Loop to init the "id" with the node_id. Variable "i" is
		 * initialized to 0 and used as the index into the array. "i"
		 * will not go off the end of the array since "for_each_node"
		 * loops over "N_POSSIBLE" nodes only.
		 */
		nest_imc_refc[i++].id = nid;
	}

	/*
	 * Loop to init the per_cpu "local_nest_imc_refc" with the proper
	 * "nest_imc_refc" index. This makes get_nest_pmu_ref() a lot simpler.
	 */
	for_each_possible_cpu(cpu) {
		nid = cpu_to_node(cpu);
		for (i = 0; i < num_possible_nodes(); i++) {
			if (nest_imc_refc[i].id == nid) {
				per_cpu(local_nest_imc_refc, cpu) = &nest_imc_refc[i];
				break;
			}
		}
	}
	return 0;
}

static void cleanup_all_core_imc_memory(void)
{
	int i, nr_cores = DIV_ROUND_UP(num_present_cpus(), threads_per_core);
	struct imc_mem_info *ptr = core_imc_pmu->mem_info;
	int size = core_imc_pmu->counter_mem_size;

	/* mem_info will never be NULL */
	for (i = 0; i < nr_cores; i++) {
		if (ptr[i].vbase)
			free_pages((u64)ptr[i].vbase, get_order(size));
	}

	kfree(ptr);
	kfree(core_imc_refc);
}

static void thread_imc_ldbar_disable(void *dummy)
{
	/*
	 * By zeroing LDBAR, we disable thread-imc
	 * updates.
	 */
	mtspr(SPRN_LDBAR, 0);
}

void thread_imc_disable(void)
{
	on_each_cpu(thread_imc_ldbar_disable, NULL, 1);
}

static void cleanup_all_thread_imc_memory(void)
{
	int i, order = get_order(thread_imc_mem_size);

	for_each_online_cpu(i) {
		if (per_cpu(thread_imc_mem, i))
			free_pages((u64)per_cpu(thread_imc_mem, i), order);
	}
}

/*
 * Common function to unregister the cpu hotplug callback and
 * free the memory.
 * TODO: Need to handle pmu unregistering, which will be
 * done in a followup series.
 */
static void imc_common_cpuhp_mem_free(struct imc_pmu *pmu_ptr)
{
	if (pmu_ptr->domain == IMC_DOMAIN_NEST) {
		mutex_lock(&nest_init_lock);
		if (nest_pmus == 1) {
			cpuhp_remove_state(CPUHP_AP_PERF_POWERPC_NEST_IMC_ONLINE);
			kfree(nest_imc_refc);
		}

		if (nest_pmus > 0)
			nest_pmus--;
		mutex_unlock(&nest_init_lock);
	}

	/* Free core_imc memory */
	if (pmu_ptr->domain == IMC_DOMAIN_CORE) {
		cpuhp_remove_state(CPUHP_AP_PERF_POWERPC_CORE_IMC_ONLINE);
		cleanup_all_core_imc_memory();
	}

	/* Free thread_imc memory */
	if (pmu_ptr->domain == IMC_DOMAIN_THREAD) {
		cpuhp_remove_state(CPUHP_AP_PERF_POWERPC_THREAD_IMC_ONLINE);
		cleanup_all_thread_imc_memory();
	}

	/* Only free the attr_groups which are dynamically allocated */
	if (pmu_ptr->attr_groups[IMC_EVENT_ATTR])
		kfree(pmu_ptr->attr_groups[IMC_EVENT_ATTR]->attrs);
	kfree(pmu_ptr->attr_groups[IMC_EVENT_ATTR]);
	kfree(pmu_ptr);
	kfree(per_nest_pmu_arr);
	return;
}

/*
 * imc_mem_init : Sets the pmu name and allocates the domain specific memory.
 */
static int imc_mem_init(struct imc_pmu *pmu_ptr, struct device_node *parent,
			int pmu_index)
{
	const char *s;
	int nr_cores, cpu, res;

	if (of_property_read_string(parent, "name", &s))
		return -ENODEV;

	switch (pmu_ptr->domain) {
	case IMC_DOMAIN_NEST:
		/* Update the pmu name */
		pmu_ptr->pmu.name = kasprintf(GFP_KERNEL, "%s%s_imc", "nest_", s);
		if (!pmu_ptr->pmu.name)
			return -ENOMEM;

		/* Needed for hotplug/migration */
		if (!per_nest_pmu_arr) {
			per_nest_pmu_arr = kcalloc(get_max_nest_dev() + 1,
						   sizeof(struct imc_pmu *),
						   GFP_KERNEL);
			if (!per_nest_pmu_arr)
				return -ENOMEM;
		}
		per_nest_pmu_arr[pmu_index] = pmu_ptr;
		break;
	case IMC_DOMAIN_CORE:
		/* Update the pmu name */
		pmu_ptr->pmu.name = kasprintf(GFP_KERNEL, "%s%s", s, "_imc");
		if (!pmu_ptr->pmu.name)
			return -ENOMEM;

		nr_cores = DIV_ROUND_UP(num_present_cpus(), threads_per_core);
		pmu_ptr->mem_info = kcalloc(nr_cores, sizeof(struct imc_mem_info),
					    GFP_KERNEL);

		if (!pmu_ptr->mem_info)
			return -ENOMEM;

		core_imc_refc = kcalloc(nr_cores, sizeof(struct imc_pmu_ref),
					GFP_KERNEL);

		if (!core_imc_refc)
			return -ENOMEM;

		core_imc_pmu = pmu_ptr;
		break;
	case IMC_DOMAIN_THREAD:
		/* Update the pmu name */
		pmu_ptr->pmu.name = kasprintf(GFP_KERNEL, "%s%s", s, "_imc");
		if (!pmu_ptr->pmu.name)
			return -ENOMEM;

		thread_imc_mem_size = pmu_ptr->counter_mem_size;
		for_each_online_cpu(cpu) {
			res = thread_imc_mem_alloc(cpu, pmu_ptr->counter_mem_size);
			if (res)
				return res;
		}

		thread_imc_pmu = pmu_ptr;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
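
/*
 * Resulting pmu names for the three domains (illustrative, since they
 * depend on the device tree "name" property): a nest unit named "mcs0"
 * yields "nest_mcs0_imc", while the core and thread cases simply append
 * "_imc" to the node name, e.g. "core_imc" and "thread_imc" (the latter
 * being the prefix is_thread_imc_pmu() matches on).
 */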

/*
 * init_imc_pmu : Setup and register the IMC pmu device.
 *
 * @parent:	Device tree unit node
 * @pmu_ptr:	memory allocated for this pmu
 * @pmu_idx:	Count of nest pmc registered
 *
 * init_imc_pmu() sets up the pmu cpumask and registers a cpu hotplug
 * callback. Handles failure cases and frees memory accordingly.
 */
int init_imc_pmu(struct device_node *parent, struct imc_pmu *pmu_ptr, int pmu_idx)
{
	int ret;

	ret = imc_mem_init(pmu_ptr, parent, pmu_idx);
	if (ret)
		goto err_free;

	switch (pmu_ptr->domain) {
	case IMC_DOMAIN_NEST:
		/*
		 * Nest imc pmus need only one cpu per chip; we initialize the
		 * cpumask for the first nest imc pmu and use the same for the
		 * rest. To handle the cpuhotplug callback unregister, we track
		 * the number of nest pmus in "nest_pmus".
		 */
		mutex_lock(&nest_init_lock);
		if (nest_pmus == 0) {
			ret = init_nest_pmu_ref();
			if (ret) {
				mutex_unlock(&nest_init_lock);
				goto err_free;
			}
			/* Register for cpu hotplug notification. */
			ret = nest_pmu_cpumask_init();
			if (ret) {
				mutex_unlock(&nest_init_lock);
				goto err_free;
			}
		}
		nest_pmus++;
		mutex_unlock(&nest_init_lock);
		break;
	case IMC_DOMAIN_CORE:
		ret = core_imc_pmu_cpumask_init();
		if (ret) {
			cleanup_all_core_imc_memory();
			return ret;
		}

		break;
	case IMC_DOMAIN_THREAD:
		ret = thread_imc_cpu_init();
		if (ret) {
			cleanup_all_thread_imc_memory();
			return ret;
		}

		break;
	default:
		return -1;	/* Unknown domain */
	}

	ret = update_events_in_group(parent, pmu_ptr);
	if (ret)
		goto err_free;

	ret = update_pmu_ops(pmu_ptr);
	if (ret)
		goto err_free;

	ret = perf_pmu_register(&pmu_ptr->pmu, pmu_ptr->pmu.name, -1);
	if (ret)
		goto err_free;

	pr_info("%s performance monitor hardware support registered\n",
		pmu_ptr->pmu.name);

	return 0;

err_free:
	imc_common_cpuhp_mem_free(pmu_ptr);
	return ret;
}