/*
 * Performance event support for s390x - CPU-measurement Counter Facility
 *
 * Copyright IBM Corp. 2012
 * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 */
#define KMSG_COMPONENT	"cpum_cf"
#define pr_fmt(fmt)	KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/perf_event.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/init.h>
#include <linux/export.h>
#include <asm/ctl_reg.h>
#include <asm/irq.h>
#include <asm/cpu_mf.h>

/* The CPU-measurement counter facility supports these CPU counter sets:
 * Basic counter set:           counters 0-31
 * Problem-state counter set:   counters 32-63
 * Crypto-activity counter set: counters 64-127
 * Extended counter set:        counters 128-159
 */
enum cpumf_ctr_set {
	/* CPU counter sets */
	CPUMF_CTR_SET_BASIC	= 0,
	CPUMF_CTR_SET_USER	= 1,
	CPUMF_CTR_SET_CRYPTO	= 2,
	CPUMF_CTR_SET_EXT	= 3,

	/* Maximum number of counter sets */
	CPUMF_CTR_SET_MAX,
};

#define CPUMF_LCCTL_ENABLE_SHIFT	16
#define CPUMF_LCCTL_ACTCTL_SHIFT	0
static const u64 cpumf_state_ctl[CPUMF_CTR_SET_MAX] = {
	[CPUMF_CTR_SET_BASIC]	= 0x02,
	[CPUMF_CTR_SET_USER]	= 0x04,
	[CPUMF_CTR_SET_CRYPTO]	= 0x08,
	[CPUMF_CTR_SET_EXT]	= 0x01,
};

static void ctr_set_enable(u64 *state, int ctr_set)
{
	*state |= cpumf_state_ctl[ctr_set] << CPUMF_LCCTL_ENABLE_SHIFT;
}
static void ctr_set_disable(u64 *state, int ctr_set)
{
	*state &= ~(cpumf_state_ctl[ctr_set] << CPUMF_LCCTL_ENABLE_SHIFT);
}
static void ctr_set_start(u64 *state, int ctr_set)
{
	*state |= cpumf_state_ctl[ctr_set] << CPUMF_LCCTL_ACTCTL_SHIFT;
}
static void ctr_set_stop(u64 *state, int ctr_set)
{
	*state &= ~(cpumf_state_ctl[ctr_set] << CPUMF_LCCTL_ACTCTL_SHIFT);
}
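
/*
 * Illustration of the lcctl state-word layout, derived from the shift
 * values and cpumf_state_ctl[] above: the low 16 bits carry the
 * activation controls and the next 16 bits the enable controls.
 * For example, enabling and starting the basic counter set yields:
 *
 *	state  = 0x02 << CPUMF_LCCTL_ENABLE_SHIFT;	/- -> 0x20000 -/
 *	state |= 0x02 << CPUMF_LCCTL_ACTCTL_SHIFT;	/- -> 0x20002 -/
 *
 * so passing 0x20002 to lcctl() enables and activates the basic
 * counter set in one operation.
 */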

/* Local CPUMF event structure */
struct cpu_hw_events {
	struct cpumf_ctr_info	info;
	atomic_t		ctr_set[CPUMF_CTR_SET_MAX];
	u64			state, tx_state;
	unsigned int		flags;
};
static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
	.ctr_set = {
		[CPUMF_CTR_SET_BASIC]	= ATOMIC_INIT(0),
		[CPUMF_CTR_SET_USER]	= ATOMIC_INIT(0),
		[CPUMF_CTR_SET_CRYPTO]	= ATOMIC_INIT(0),
		[CPUMF_CTR_SET_EXT]	= ATOMIC_INIT(0),
	},
	.state = 0,
	.flags = 0,
};

static int get_counter_set(u64 event)
{
	int set = -1;

	if (event < 32)
		set = CPUMF_CTR_SET_BASIC;
	else if (event < 64)
		set = CPUMF_CTR_SET_USER;
	else if (event < 128)
		set = CPUMF_CTR_SET_CRYPTO;
	else if (event < 256)
		set = CPUMF_CTR_SET_EXT;

	return set;
}
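
/*
 * For illustration: counter number 33 falls into the 32-63 range and
 * therefore maps to CPUMF_CTR_SET_USER; a number of 256 or higher maps
 * to no set, get_counter_set() returns -1, and validate_event() below
 * rejects the event through its default case.
 */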

static int validate_event(const struct hw_perf_event *hwc)
{
	switch (hwc->config_base) {
	case CPUMF_CTR_SET_BASIC:
	case CPUMF_CTR_SET_USER:
	case CPUMF_CTR_SET_CRYPTO:
	case CPUMF_CTR_SET_EXT:
		/* check for reserved counters */
		if ((hwc->config >=  6 && hwc->config <=  31) ||
		    (hwc->config >= 38 && hwc->config <=  63) ||
		    (hwc->config >= 80 && hwc->config <= 127))
			return -EOPNOTSUPP;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int validate_ctr_version(const struct hw_perf_event *hwc)
{
	struct cpu_hw_events *cpuhw;
	int err = 0;

	cpuhw = &get_cpu_var(cpu_hw_events);

	/* check required version for counter sets */
	switch (hwc->config_base) {
	case CPUMF_CTR_SET_BASIC:
	case CPUMF_CTR_SET_USER:
		if (cpuhw->info.cfvn < 1)
			err = -EOPNOTSUPP;
		break;
	case CPUMF_CTR_SET_CRYPTO:
	case CPUMF_CTR_SET_EXT:
		if (cpuhw->info.csvn < 1)
			err = -EOPNOTSUPP;
		if ((cpuhw->info.csvn == 1 && hwc->config > 159) ||
		    (cpuhw->info.csvn == 2 && hwc->config > 175) ||
		    (cpuhw->info.csvn  > 2 && hwc->config > 255))
			err = -EOPNOTSUPP;
		break;
	}

	put_cpu_var(cpu_hw_events);
	return err;
}
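
/*
 * Summary of the version checks above: the csvn field bounds the
 * highest usable counter number in the crypto and extended sets
 * (159 for csvn == 1, 175 for csvn == 2, 255 for csvn > 2), while
 * cfvn only gates the basic and problem-state sets.
 */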

static int validate_ctr_auth(const struct hw_perf_event *hwc)
{
	struct cpu_hw_events *cpuhw;
	u64 ctrs_state;
	int err = 0;

	cpuhw = &get_cpu_var(cpu_hw_events);

	/* check authorization for cpu counter sets */
	ctrs_state = cpumf_state_ctl[hwc->config_base];
	if (!(ctrs_state & cpuhw->info.auth_ctl))
		err = -EPERM;

	put_cpu_var(cpu_hw_events);
	return err;
}
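
/*
 * For illustration: the authorization control (auth_ctl) uses the same
 * bit assignment as cpumf_state_ctl[].  If auth_ctl contains 0x02, the
 * basic counter set is authorized; an event targeting an unauthorized
 * set is rejected here with -EPERM.
 */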

/*
 * Change the CPUMF state to active.
 * Enable and activate the CPU-counter sets according
 * to the per-cpu control state.
 */
static void cpumf_pmu_enable(struct pmu *pmu)
{
	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
	int err;

	if (cpuhw->flags & PMU_F_ENABLED)
		return;

	err = lcctl(cpuhw->state);
	if (err) {
		pr_err("Enabling the performance measuring unit "
		       "failed with rc=%x\n", err);
		return;
	}

	cpuhw->flags |= PMU_F_ENABLED;
}

/*
 * Change the CPUMF state to inactive.
 * Deactivate the currently active CPU-counter sets while keeping
 * their enable controls in the per-cpu control state.
 */
static void cpumf_pmu_disable(struct pmu *pmu)
{
	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
	int err;
	u64 inactive;

	if (!(cpuhw->flags & PMU_F_ENABLED))
		return;

	inactive = cpuhw->state & ~((1 << CPUMF_LCCTL_ENABLE_SHIFT) - 1);
	err = lcctl(inactive);
	if (err) {
		pr_err("Disabling the performance measuring unit "
		       "failed with rc=%x\n", err);
		return;
	}

	cpuhw->flags &= ~PMU_F_ENABLED;
}
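
/*
 * Worked example for the mask above: with the basic set enabled and
 * active, cpuhw->state is 0x20002.  Clearing the low 16 activation
 * bits yields 0x20000, so lcctl() stops the counters from counting
 * but leaves the set enabled, which preserves the counter contents
 * (disabling a set, by contrast, clears them; see cpumf_pmu_del()).
 */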


/* Number of perf events counting hardware events */
static atomic_t num_events = ATOMIC_INIT(0);
/* Used to avoid races in calling reserve/release_pmc_hardware */
static DEFINE_MUTEX(pmc_reserve_mutex);

/* CPU-measurement alerts for the counter facility */
static void cpumf_measurement_alert(struct ext_code ext_code,
				    unsigned int alert, unsigned long unused)
{
	struct cpu_hw_events *cpuhw;

	if (!(alert & CPU_MF_INT_CF_MASK))
		return;

	inc_irq_stat(IRQEXT_CMC);
	cpuhw = &__get_cpu_var(cpu_hw_events);

	/* Measurement alerts are shared and might happen when the PMU
	 * is not reserved.  Ignore these alerts in this case. */
	if (!(cpuhw->flags & PMU_F_RESERVED))
		return;

	/* counter authorization change alert */
	if (alert & CPU_MF_INT_CF_CACA)
		qctri(&cpuhw->info);

	/* loss of counter data alert */
	if (alert & CPU_MF_INT_CF_LCDA)
		pr_err("CPU[%i] Counter data was lost\n", smp_processor_id());
}

#define PMC_INIT      0
#define PMC_RELEASE   1
static void setup_pmc_cpu(void *flags)
{
	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);

	switch (*((int *) flags)) {
	case PMC_INIT:
		memset(&cpuhw->info, 0, sizeof(cpuhw->info));
		qctri(&cpuhw->info);
		cpuhw->flags |= PMU_F_RESERVED;
		break;

	case PMC_RELEASE:
		cpuhw->flags &= ~PMU_F_RESERVED;
		break;
	}

	/* Disable CPU counter sets */
	lcctl(0);
}

/* Initialize the CPU-measurement facility */
static int reserve_pmc_hardware(void)
{
	int flags = PMC_INIT;

	on_each_cpu(setup_pmc_cpu, &flags, 1);
	irq_subclass_register(IRQ_SUBCLASS_MEASUREMENT_ALERT);

	return 0;
}

/* Release the CPU-measurement facility */
static void release_pmc_hardware(void)
{
	int flags = PMC_RELEASE;

	on_each_cpu(setup_pmc_cpu, &flags, 1);
	irq_subclass_unregister(IRQ_SUBCLASS_MEASUREMENT_ALERT);
}

/* Release the PMU if event is the last perf event */
static void hw_perf_event_destroy(struct perf_event *event)
{
	if (!atomic_add_unless(&num_events, -1, 1)) {
		mutex_lock(&pmc_reserve_mutex);
		if (atomic_dec_return(&num_events) == 0)
			release_pmc_hardware();
		mutex_unlock(&pmc_reserve_mutex);
	}
}
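
/*
 * Note on the refcount pattern above: atomic_add_unless() decrements
 * num_events only while it is greater than 1, keeping the common case
 * lock-free.  Only the potentially last event takes pmc_reserve_mutex
 * and, if the count really drops to zero, releases the hardware.  This
 * mirrors the reserve path in __hw_perf_event_init() below.
 */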

/* CPUMF <-> perf event mappings for kernel+userspace (basic set) */
static const int cpumf_generic_events_basic[] = {
	[PERF_COUNT_HW_CPU_CYCLES]	    = 0,
	[PERF_COUNT_HW_INSTRUCTIONS]	    = 1,
	[PERF_COUNT_HW_CACHE_REFERENCES]    = -1,
	[PERF_COUNT_HW_CACHE_MISSES]	    = -1,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = -1,
	[PERF_COUNT_HW_BRANCH_MISSES]	    = -1,
	[PERF_COUNT_HW_BUS_CYCLES]	    = -1,
};
/* CPUMF <-> perf event mappings for userspace (problem-state set) */
static const int cpumf_generic_events_user[] = {
	[PERF_COUNT_HW_CPU_CYCLES]	    = 32,
	[PERF_COUNT_HW_INSTRUCTIONS]	    = 33,
	[PERF_COUNT_HW_CACHE_REFERENCES]    = -1,
	[PERF_COUNT_HW_CACHE_MISSES]	    = -1,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = -1,
	[PERF_COUNT_HW_BRANCH_MISSES]	    = -1,
	[PERF_COUNT_HW_BUS_CYCLES]	    = -1,
};
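
/*
 * For illustration: a generic PERF_COUNT_HW_CPU_CYCLES event maps to
 * counter 0 (basic set) when counting kernel and user space, or to
 * counter 32 (problem-state set) when only user space is counted.
 * Entries set to -1 mark generic events this PMU cannot provide;
 * __hw_perf_event_init() rejects them with -ENOENT.
 */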

static int __hw_perf_event_init(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	struct hw_perf_event *hwc = &event->hw;
	int err;
	u64 ev;

	switch (attr->type) {
	case PERF_TYPE_RAW:
		/* Raw events are used to access counters directly,
		 * hence do not permit excludes */
		if (attr->exclude_kernel || attr->exclude_user ||
		    attr->exclude_hv)
			return -EOPNOTSUPP;
		ev = attr->config;
		break;

	case PERF_TYPE_HARDWARE:
		ev = attr->config;
		/* Count user space (problem-state) only */
		if (!attr->exclude_user && attr->exclude_kernel) {
			if (ev >= ARRAY_SIZE(cpumf_generic_events_user))
				return -EOPNOTSUPP;
			ev = cpumf_generic_events_user[ev];

		/* No support for kernel space counters only */
		} else if (!attr->exclude_kernel && attr->exclude_user) {
			return -EOPNOTSUPP;

		/* Count user and kernel space */
		} else {
			if (ev >= ARRAY_SIZE(cpumf_generic_events_basic))
				return -EOPNOTSUPP;
			ev = cpumf_generic_events_basic[ev];
		}
		break;

	default:
		return -ENOENT;
	}

	if (ev == -1)
		return -ENOENT;

	if (ev >= PERF_CPUM_CF_MAX_CTR)
		return -EINVAL;

	/* Use the hardware perf event structure to store the counter number
	 * in 'config' member and the counter set to which the counter belongs
	 * in the 'config_base'.  The counter set (config_base) is then used
	 * to enable/disable the counters.
	 */
	hwc->config = ev;
	hwc->config_base = get_counter_set(ev);

	/* Validate the counter that is assigned to this event.
	 * Because the counter facility can use numerous counters at the
	 * same time without constraints, it is not necessary to explicitly
	 * validate event groups (event->group_leader != event).
	 */
	err = validate_event(hwc);
	if (err)
		return err;

	/* Initialize for using the CPU-measurement counter facility */
	if (!atomic_inc_not_zero(&num_events)) {
		mutex_lock(&pmc_reserve_mutex);
		if (atomic_read(&num_events) == 0 && reserve_pmc_hardware())
			err = -EBUSY;
		else
			atomic_inc(&num_events);
		mutex_unlock(&pmc_reserve_mutex);
	}
	event->destroy = hw_perf_event_destroy;

	/* Finally, validate version and authorization of the counter set */
	err = validate_ctr_auth(hwc);
	if (!err)
		err = validate_ctr_version(hwc);

	return err;
}

static int cpumf_pmu_event_init(struct perf_event *event)
{
	int err;

	switch (event->attr.type) {
	case PERF_TYPE_HARDWARE:
	case PERF_TYPE_HW_CACHE:
	case PERF_TYPE_RAW:
		/* The CPU measurement counter facility does not have overflow
		 * interrupts to do sampling.  Sampling must be provided by
		 * external means, for example, by timers.
		 */
		if (is_sampling_event(event))
			return -ENOENT;
		err = __hw_perf_event_init(event);
		break;
	default:
		return -ENOENT;
	}

	if (unlikely(err) && event->destroy)
		event->destroy(event);

	return err;
}

static int hw_perf_event_reset(struct perf_event *event)
{
	u64 prev, new;
	int err;

	do {
		prev = local64_read(&event->hw.prev_count);
		err = ecctr(event->hw.config, &new);
		if (err) {
			if (err != 3)
				break;
			/* The counter is not (yet) available.  This
			 * might happen if the counter set to which
			 * this counter belongs is in the disabled
			 * state.
			 */
			new = 0;
		}
	} while (local64_cmpxchg(&event->hw.prev_count, prev, new) != prev);

	return err;
}
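
/*
 * Note: the cmpxchg loop above synchronizes prev_count with the
 * current hardware value without locking; it retries if another
 * context updated prev_count in between.  A return value of 3 from
 * ecctr() (counter unavailable, per the comment above) makes the loop
 * treat the counter value as 0 instead of failing outright.
 */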

static int hw_perf_event_update(struct perf_event *event)
{
	u64 prev, new, delta;
	int err;

	do {
		prev = local64_read(&event->hw.prev_count);
		err = ecctr(event->hw.config, &new);
		if (err)
			goto out;
	} while (local64_cmpxchg(&event->hw.prev_count, prev, new) != prev);

	delta = (prev <= new) ? new - prev
			      : (-1ULL - prev) + new + 1; /* overflow */
	local64_add(delta, &event->count);
out:
	return err;
}
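
/*
 * Worked example for the overflow branch above: with
 * prev = 0xfffffffffffffffe and new = 0x1, the delta is
 * (-1ULL - prev) + new + 1 = 1 + 1 + 1 = 3, i.e. the three
 * increments ...fe -> ...ff -> 0 -> 1 across the 64-bit wraparound.
 */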

static void cpumf_pmu_read(struct perf_event *event)
{
	if (event->hw.state & PERF_HES_STOPPED)
		return;

	hw_perf_event_update(event);
}

static void cpumf_pmu_start(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;

	if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
		return;

	if (WARN_ON_ONCE(hwc->config == -1))
		return;

	if (flags & PERF_EF_RELOAD)
		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));

	hwc->state = 0;

	/* (Re-)enable and activate the counter set */
	ctr_set_enable(&cpuhw->state, hwc->config_base);
	ctr_set_start(&cpuhw->state, hwc->config_base);

	/* The counter set to which this counter belongs can be already active.
	 * Because all counters in a set are active, the event->hw.prev_count
	 * needs to be synchronized.  At this point, the counter set can be in
	 * the inactive or disabled state.
	 */
	hw_perf_event_reset(event);

	/* increment refcount for this counter set */
	atomic_inc(&cpuhw->ctr_set[hwc->config_base]);
}

static void cpumf_pmu_stop(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;

	if (!(hwc->state & PERF_HES_STOPPED)) {
		/* Decrement reference count for this counter set and if this
		 * is the last used counter in the set, clear activation
		 * control and set the counter set state to inactive.
		 */
		if (!atomic_dec_return(&cpuhw->ctr_set[hwc->config_base]))
			ctr_set_stop(&cpuhw->state, hwc->config_base);
		event->hw.state |= PERF_HES_STOPPED;
	}

	if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
		hw_perf_event_update(event);
		event->hw.state |= PERF_HES_UPTODATE;
	}
}

static int cpumf_pmu_add(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);

	/* Check authorization for the counter set to which this
	 * counter belongs.
	 * For group events transaction, the authorization check is
	 * done in cpumf_pmu_commit_txn().
	 */
	if (!(cpuhw->flags & PERF_EVENT_TXN))
		if (validate_ctr_auth(&event->hw))
			return -EPERM;

	ctr_set_enable(&cpuhw->state, event->hw.config_base);
	event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED;

	if (flags & PERF_EF_START)
		cpumf_pmu_start(event, PERF_EF_RELOAD);

	perf_event_update_userpage(event);

	return 0;
}

static void cpumf_pmu_del(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);

	cpumf_pmu_stop(event, PERF_EF_UPDATE);

	/* Check if any counter in the counter set is still used.  If not used,
	 * change the counter set to the disabled state.  This also clears the
	 * content of all counters in the set.
	 *
	 * When a new perf event has been added but not yet started, this can
	 * clear the enable control and reset all counters in a set.  Therefore,
	 * cpumf_pmu_start() always has to reenable a counter set.
	 */
	if (!atomic_read(&cpuhw->ctr_set[event->hw.config_base]))
		ctr_set_disable(&cpuhw->state, event->hw.config_base);

	perf_event_update_userpage(event);
}

/*
 * Start group events scheduling transaction.
 * Set flags to perform a single test at commit time.
 */
static void cpumf_pmu_start_txn(struct pmu *pmu)
{
	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);

	perf_pmu_disable(pmu);
	cpuhw->flags |= PERF_EVENT_TXN;
	cpuhw->tx_state = cpuhw->state;
}

/*
 * Stop and cancel a group events scheduling transaction.
 * Assumes cpumf_pmu_del() is called for each successful
 * cpumf_pmu_add() during the transaction.
 */
static void cpumf_pmu_cancel_txn(struct pmu *pmu)
{
	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);

	WARN_ON(cpuhw->tx_state != cpuhw->state);

	cpuhw->flags &= ~PERF_EVENT_TXN;
	perf_pmu_enable(pmu);
}

/*
 * Commit the group events scheduling transaction.  On success, the
 * transaction is closed.  On error, the transaction is kept open
 * until cpumf_pmu_cancel_txn() is called.
 */
static int cpumf_pmu_commit_txn(struct pmu *pmu)
{
	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
	u64 state;

	/* check if the updated state can be scheduled */
	state = cpuhw->state & ~((1 << CPUMF_LCCTL_ENABLE_SHIFT) - 1);
	state >>= CPUMF_LCCTL_ENABLE_SHIFT;
	if ((state & cpuhw->info.auth_ctl) != state)
		return -EPERM;

	cpuhw->flags &= ~PERF_EVENT_TXN;
	perf_pmu_enable(pmu);
	return 0;
}
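
/*
 * Worked example for the commit check above: if the transaction
 * enabled the basic and problem-state sets, cpuhw->state holds
 * (0x02 | 0x04) << 16 = 0x60000; masking and shifting yields
 * state = 0x06.  The commit succeeds only if both bits are also set
 * in auth_ctl, i.e. (0x06 & auth_ctl) == 0x06.
 */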

/* Performance monitoring unit for s390x */
static struct pmu cpumf_pmu = {
	.pmu_enable   = cpumf_pmu_enable,
	.pmu_disable  = cpumf_pmu_disable,
	.event_init   = cpumf_pmu_event_init,
	.add	      = cpumf_pmu_add,
	.del	      = cpumf_pmu_del,
	.start	      = cpumf_pmu_start,
	.stop	      = cpumf_pmu_stop,
	.read	      = cpumf_pmu_read,
	.start_txn    = cpumf_pmu_start_txn,
	.commit_txn   = cpumf_pmu_commit_txn,
	.cancel_txn   = cpumf_pmu_cancel_txn,
};

static int cpumf_pmu_notifier(struct notifier_block *self, unsigned long action,
			      void *hcpu)
{
	unsigned int cpu = (long) hcpu;
	int flags;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_ONLINE:
		flags = PMC_INIT;
		smp_call_function_single(cpu, setup_pmc_cpu, &flags, 1);
		break;
	case CPU_DOWN_PREPARE:
		flags = PMC_RELEASE;
		smp_call_function_single(cpu, setup_pmc_cpu, &flags, 1);
		break;
	default:
		break;
	}

	return NOTIFY_OK;
}

static int __init cpumf_pmu_init(void)
{
	int rc;

	if (!cpum_cf_avail())
		return -ENODEV;

	/* clear bit 15 of cr0 to unauthorize problem-state to
	 * extract measurement counters */
	ctl_clear_bit(0, 48);

	/* register handler for measurement-alert interruptions */
	rc = register_external_interrupt(0x1407, cpumf_measurement_alert);
	if (rc) {
		pr_err("Registering for CPU-measurement alerts "
		       "failed with rc=%i\n", rc);
		goto out;
	}

	rc = perf_pmu_register(&cpumf_pmu, "cpum_cf", PERF_TYPE_RAW);
	if (rc) {
		pr_err("Registering the cpum_cf PMU failed with rc=%i\n", rc);
		unregister_external_interrupt(0x1407, cpumf_measurement_alert);
		goto out;
	}
	perf_cpu_notifier(cpumf_pmu_notifier);
out:
	return rc;
}
early_initcall(cpumf_pmu_init);