// SPDX-License-Identifier: GPL-2.0
/*
 * Performance event support for s390x - CPU-measurement Counter Facility
 *
 * Copyright IBM Corp. 2012, 2019
 * Author(s): Hendrik Brueckner <brueckner@linux.ibm.com>
 */
#define KMSG_COMPONENT	"cpum_cf"
#define pr_fmt(fmt)	KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/init.h>
#include <linux/export.h>
#include <asm/cpu_mcf.h>

static enum cpumf_ctr_set get_counter_set(u64 event)
{
	int set = CPUMF_CTR_SET_MAX;

	if (event < 32)
		set = CPUMF_CTR_SET_BASIC;
	else if (event < 64)
		set = CPUMF_CTR_SET_USER;
	else if (event < 128)
		set = CPUMF_CTR_SET_CRYPTO;
	else if (event < 288)
		set = CPUMF_CTR_SET_EXT;
	else if (event >= 448 && event < 496)
		set = CPUMF_CTR_SET_MT_DIAG;

	return set;
}

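/*
 * Quick reference for the counter-number ranges handled above:
 *   0..31     basic counter set
 *   32..63    problem-state (user) counter set
 *   64..127   crypto-activity counter set
 *   128..287  extended counter set
 *   448..495  MT-diagnostic counter set
 * Counter numbers outside these ranges cannot be mapped to a counter set
 * and are rejected in __hw_perf_event_init() with -EINVAL.
 */
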
static int validate_ctr_version(const struct hw_perf_event *hwc)
{
	struct cpu_cf_events *cpuhw;
	int err = 0;
	u16 mtdiag_ctl;

	cpuhw = &get_cpu_var(cpu_cf_events);

	/* check required version for counter sets */
	switch (hwc->config_base) {
	case CPUMF_CTR_SET_BASIC:
	case CPUMF_CTR_SET_USER:
		if (cpuhw->info.cfvn < 1)
			err = -EOPNOTSUPP;
		break;
	case CPUMF_CTR_SET_CRYPTO:
		if ((cpuhw->info.csvn >= 1 && cpuhw->info.csvn <= 5 &&
		     hwc->config > 79) ||
		    (cpuhw->info.csvn >= 6 && hwc->config > 83))
			err = -EOPNOTSUPP;
		break;
	case CPUMF_CTR_SET_EXT:
		if (cpuhw->info.csvn < 1)
			err = -EOPNOTSUPP;
		if ((cpuhw->info.csvn == 1 && hwc->config > 159) ||
		    (cpuhw->info.csvn == 2 && hwc->config > 175) ||
		    (cpuhw->info.csvn >= 3 && cpuhw->info.csvn <= 5
		     && hwc->config > 255) ||
		    (cpuhw->info.csvn >= 6 && hwc->config > 287))
			err = -EOPNOTSUPP;
		break;
	case CPUMF_CTR_SET_MT_DIAG:
		if (cpuhw->info.csvn <= 3)
			err = -EOPNOTSUPP;
		/*
		 * MT-diagnostic counters are read-only. The counter set
		 * is automatically enabled and activated on all CPUs with
		 * multithreading (SMT). Deactivation of multithreading
		 * also disables the counter set. State changes are ignored
		 * by lcctl(). Because Linux controls SMT enablement through
		 * a kernel parameter only, the counter set is either disabled
		 * or enabled and active.
		 *
		 * Thus, the counters can only be used if SMT is on and the
		 * counter set is enabled and active.
		 */
		mtdiag_ctl = cpumf_ctr_ctl[CPUMF_CTR_SET_MT_DIAG];
		if (!((cpuhw->info.auth_ctl & mtdiag_ctl) &&
		      (cpuhw->info.enable_ctl & mtdiag_ctl) &&
		      (cpuhw->info.act_ctl & mtdiag_ctl)))
			err = -EOPNOTSUPP;
		break;
	}

	put_cpu_var(cpu_cf_events);
	return err;
}

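/*
 * Summary of the version checks above, derived from the conditions in
 * validate_ctr_version(): the basic and problem-state sets require counter
 * first version number (cfvn) >= 1; the crypto set allows counters up to
 * 79 for counter second version numbers (csvn) 1-5 and up to 83 for
 * csvn >= 6; the extended set allows counters up to 159 (csvn 1),
 * 175 (csvn 2), 255 (csvn 3-5) or 287 (csvn >= 6); the MT-diagnostic set
 * requires csvn >= 4 and an authorized, enabled and active set (SMT on).
 */
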
static int validate_ctr_auth(const struct hw_perf_event *hwc)
{
	struct cpu_cf_events *cpuhw;
	u64 ctrs_state;
	int err = 0;

	cpuhw = &get_cpu_var(cpu_cf_events);

	/* Check authorization for cpu counter sets.
	 * If the particular CPU counter set is not authorized,
	 * return with -ENOENT in order to fall back to other
	 * PMUs that might satisfy the event request.
	 */
	ctrs_state = cpumf_ctr_ctl[hwc->config_base];
	if (!(ctrs_state & cpuhw->info.auth_ctl))
		err = -ENOENT;

	put_cpu_var(cpu_cf_events);
	return err;
}

/*
 * Change the CPUMF state to active.
 * Enable and activate the CPU-counter sets according
 * to the per-cpu control state.
 */
static void cpumf_pmu_enable(struct pmu *pmu)
{
	struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
	int err;

	if (cpuhw->flags & PMU_F_ENABLED)
		return;

	err = lcctl(cpuhw->state);
	if (err) {
		pr_err("Enabling the performance measuring unit "
		       "failed with rc=%x\n", err);
		return;
	}

	cpuhw->flags |= PMU_F_ENABLED;
}

/*
 * Change the CPUMF state to inactive.
 * Disable and enable (inactive) the CPU-counter sets according
 * to the per-cpu control state.
 */
static void cpumf_pmu_disable(struct pmu *pmu)
{
	struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
	int err;
	u64 inactive;

	if (!(cpuhw->flags & PMU_F_ENABLED))
		return;

	inactive = cpuhw->state & ~((1 << CPUMF_LCCTL_ENABLE_SHIFT) - 1);
	err = lcctl(inactive);
	if (err) {
		pr_err("Disabling the performance measuring unit "
		       "failed with rc=%x\n", err);
		return;
	}

	cpuhw->flags &= ~PMU_F_ENABLED;
}


/* Number of perf events counting hardware events */
static atomic_t num_events = ATOMIC_INIT(0);
/* Used to avoid races in calling reserve/release_cpumf_hardware */
static DEFINE_MUTEX(pmc_reserve_mutex);

/* Release the PMU if event is the last perf event */
static void hw_perf_event_destroy(struct perf_event *event)
{
	/* Decrement num_events unless this is the last event (value 1);
	 * in that case take the mutex so that releasing the facility
	 * cannot race with a concurrent event initialization.
	 */
	if (!atomic_add_unless(&num_events, -1, 1)) {
		mutex_lock(&pmc_reserve_mutex);
		if (atomic_dec_return(&num_events) == 0)
			__kernel_cpumcf_end();
		mutex_unlock(&pmc_reserve_mutex);
	}
}

/* CPUMF <-> perf event mappings for kernel+userspace (basic set) */
static const int cpumf_generic_events_basic[] = {
	[PERF_COUNT_HW_CPU_CYCLES]	    = 0,
	[PERF_COUNT_HW_INSTRUCTIONS]	    = 1,
	[PERF_COUNT_HW_CACHE_REFERENCES]    = -1,
	[PERF_COUNT_HW_CACHE_MISSES]	    = -1,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = -1,
	[PERF_COUNT_HW_BRANCH_MISSES]	    = -1,
	[PERF_COUNT_HW_BUS_CYCLES]	    = -1,
};
/* CPUMF <-> perf event mappings for userspace (problem-state set) */
static const int cpumf_generic_events_user[] = {
	[PERF_COUNT_HW_CPU_CYCLES]	    = 32,
	[PERF_COUNT_HW_INSTRUCTIONS]	    = 33,
	[PERF_COUNT_HW_CACHE_REFERENCES]    = -1,
	[PERF_COUNT_HW_CACHE_MISSES]	    = -1,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = -1,
	[PERF_COUNT_HW_BRANCH_MISSES]	    = -1,
	[PERF_COUNT_HW_BUS_CYCLES]	    = -1,
};

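/*
 * Note on the mappings above: a generic PERF_TYPE_HARDWARE event such as
 * PERF_COUNT_HW_CPU_CYCLES is translated in __hw_perf_event_init() to
 * counter 0 (basic set) when kernel and user space are counted, or to
 * counter 32 (problem-state set) when attr.exclude_kernel is set.
 * Entries of -1 mark generic events this PMU cannot provide; they are
 * rejected with -ENOENT so that another PMU may handle them.
 */
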
static int __hw_perf_event_init(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	struct hw_perf_event *hwc = &event->hw;
	enum cpumf_ctr_set set;
	int err = 0;
	u64 ev;

	switch (attr->type) {
	case PERF_TYPE_RAW:
		/* Raw events are used to access counters directly,
		 * hence do not permit excludes */
		if (attr->exclude_kernel || attr->exclude_user ||
		    attr->exclude_hv)
			return -EOPNOTSUPP;
		ev = attr->config;
		break;

	case PERF_TYPE_HARDWARE:
		if (is_sampling_event(event))	/* No sampling support */
			return -ENOENT;
		ev = attr->config;
		/* Count user space (problem-state) only */
		if (!attr->exclude_user && attr->exclude_kernel) {
			if (ev >= ARRAY_SIZE(cpumf_generic_events_user))
				return -EOPNOTSUPP;
			ev = cpumf_generic_events_user[ev];

		/* No support for kernel space counters only */
		} else if (!attr->exclude_kernel && attr->exclude_user) {
			return -EOPNOTSUPP;

		/* Count user and kernel space */
		} else {
			if (ev >= ARRAY_SIZE(cpumf_generic_events_basic))
				return -EOPNOTSUPP;
			ev = cpumf_generic_events_basic[ev];
		}
		break;

	default:
		return -ENOENT;
	}

	if (ev == -1)
		return -ENOENT;

	if (ev > PERF_CPUM_CF_MAX_CTR)
		return -ENOENT;

	/* Obtain the counter set to which the specified counter belongs */
	set = get_counter_set(ev);
	switch (set) {
	case CPUMF_CTR_SET_BASIC:
	case CPUMF_CTR_SET_USER:
	case CPUMF_CTR_SET_CRYPTO:
	case CPUMF_CTR_SET_EXT:
	case CPUMF_CTR_SET_MT_DIAG:
		/*
		 * Use the hardware perf event structure to store the
		 * counter number in the 'config' member and the counter
		 * set number in the 'config_base'. The counter set number
		 * is then later used to enable/disable the counter(s).
		 */
		hwc->config = ev;
		hwc->config_base = set;
		break;
	case CPUMF_CTR_SET_MAX:
		/* The counter could not be associated to a counter set */
		return -EINVAL;
	}

	/* Initialize for using the CPU-measurement counter facility */
	if (!atomic_inc_not_zero(&num_events)) {
		mutex_lock(&pmc_reserve_mutex);
		if (atomic_read(&num_events) == 0 && __kernel_cpumcf_begin())
			err = -EBUSY;
		else
			atomic_inc(&num_events);
		mutex_unlock(&pmc_reserve_mutex);
	}
	if (err)
		return err;
	event->destroy = hw_perf_event_destroy;

	/* Finally, validate version and authorization of the counter set */
	err = validate_ctr_auth(hwc);
	if (!err)
		err = validate_ctr_version(hwc);

	return err;
}

static int cpumf_pmu_event_init(struct perf_event *event)
{
	int err;

	switch (event->attr.type) {
	case PERF_TYPE_HARDWARE:
	case PERF_TYPE_HW_CACHE:
	case PERF_TYPE_RAW:
		err = __hw_perf_event_init(event);
		break;
	default:
		return -ENOENT;
	}

	if (unlikely(err) && event->destroy)
		event->destroy(event);

	return err;
}

static int hw_perf_event_reset(struct perf_event *event)
{
	u64 prev, new;
	int err;

	do {
		prev = local64_read(&event->hw.prev_count);
		err = ecctr(event->hw.config, &new);
		if (err) {
			if (err != 3)
				break;
			/* The counter is not (yet) available. This
			 * might happen if the counter set to which
			 * this counter belongs is in the disabled
			 * state.
			 */
			new = 0;
		}
	} while (local64_cmpxchg(&event->hw.prev_count, prev, new) != prev);

	return err;
}

static void hw_perf_event_update(struct perf_event *event)
{
	u64 prev, new, delta;
	int err;

	do {
		prev = local64_read(&event->hw.prev_count);
		err = ecctr(event->hw.config, &new);
		if (err)
			return;
	} while (local64_cmpxchg(&event->hw.prev_count, prev, new) != prev);

	delta = (prev <= new) ? new - prev
			      : (-1ULL - prev) + new + 1; /* overflow */
	local64_add(delta, &event->count);
}

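/*
 * Worked example for the overflow branch above (values chosen purely for
 * illustration): with prev = 0xFFFFFFFFFFFFFFFE and new = 0x1 the counter
 * wrapped, and the accumulated delta is
 * (-1ULL - prev) + new + 1 = 1 + 1 + 1 = 3 counted events, i.e. the
 * distance from prev up to 2^64 plus the distance from 0 to new.
 */
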
static void cpumf_pmu_read(struct perf_event *event)
{
	if (event->hw.state & PERF_HES_STOPPED)
		return;

	hw_perf_event_update(event);
}

static void cpumf_pmu_start(struct perf_event *event, int flags)
{
	struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
	struct hw_perf_event *hwc = &event->hw;

	if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
		return;

	if (WARN_ON_ONCE(hwc->config == -1))
		return;

	if (flags & PERF_EF_RELOAD)
		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));

	hwc->state = 0;

	/* (Re-)enable and activate the counter set */
	ctr_set_enable(&cpuhw->state, hwc->config_base);
	ctr_set_start(&cpuhw->state, hwc->config_base);

	/* The counter set to which this counter belongs can be already active.
	 * Because all counters in a set are active, the event->hw.prev_count
	 * needs to be synchronized. At this point, the counter set can be in
	 * the inactive or disabled state.
	 */
	hw_perf_event_reset(event);

	/* increment refcount for this counter set */
	atomic_inc(&cpuhw->ctr_set[hwc->config_base]);
}

static void cpumf_pmu_stop(struct perf_event *event, int flags)
{
	struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
	struct hw_perf_event *hwc = &event->hw;

	if (!(hwc->state & PERF_HES_STOPPED)) {
		/* Decrement reference count for this counter set and if this
		 * is the last used counter in the set, clear activation
		 * control and set the counter set state to inactive.
		 */
		if (!atomic_dec_return(&cpuhw->ctr_set[hwc->config_base]))
			ctr_set_stop(&cpuhw->state, hwc->config_base);
		event->hw.state |= PERF_HES_STOPPED;
	}

	if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
		hw_perf_event_update(event);
		event->hw.state |= PERF_HES_UPTODATE;
	}
}

static int cpumf_pmu_add(struct perf_event *event, int flags)
{
	struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);

	/* Check authorization for the counter set to which this
	 * counter belongs.
	 * For group events transaction, the authorization check is
	 * done in cpumf_pmu_commit_txn().
	 */
	if (!(cpuhw->txn_flags & PERF_PMU_TXN_ADD))
		if (validate_ctr_auth(&event->hw))
			return -ENOENT;

	ctr_set_enable(&cpuhw->state, event->hw.config_base);
	event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED;

	if (flags & PERF_EF_START)
		cpumf_pmu_start(event, PERF_EF_RELOAD);

	perf_event_update_userpage(event);

	return 0;
}

static void cpumf_pmu_del(struct perf_event *event, int flags)
{
	struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);

	cpumf_pmu_stop(event, PERF_EF_UPDATE);

	/* Check if any counter in the counter set is still used. If not used,
	 * change the counter set to the disabled state. This also clears the
	 * content of all counters in the set.
	 *
	 * When a new perf event has been added but not yet started, this can
	 * clear enable control and reset all counters in a set. Therefore,
	 * cpumf_pmu_start() always has to re-enable a counter set.
	 */
	if (!atomic_read(&cpuhw->ctr_set[event->hw.config_base]))
		ctr_set_disable(&cpuhw->state, event->hw.config_base);

	perf_event_update_userpage(event);
}

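/*
 * Per-CPU counter-set reference counting at a glance, as implemented
 * above: cpumf_pmu_start() increments cpuhw->ctr_set[] and enables and
 * activates the set; cpumf_pmu_stop() decrements it and deactivates the
 * set once the last counter of the set is stopped; cpumf_pmu_del()
 * finally disables the set, which also clears the counter contents,
 * when no counter of the set remains in use.
 */
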
/*
 * Start group events scheduling transaction.
 * Set flags to perform a single test at commit time.
 *
 * We only support PERF_PMU_TXN_ADD transactions. Save the
 * transaction flags but otherwise ignore non-PERF_PMU_TXN_ADD
 * transactions.
 */
static void cpumf_pmu_start_txn(struct pmu *pmu, unsigned int txn_flags)
{
	struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);

	WARN_ON_ONCE(cpuhw->txn_flags);		/* txn already in flight */

	cpuhw->txn_flags = txn_flags;
	if (txn_flags & ~PERF_PMU_TXN_ADD)
		return;

	perf_pmu_disable(pmu);
	cpuhw->tx_state = cpuhw->state;
}

/*
 * Stop and cancel a group events scheduling transaction.
 * Assumes cpumf_pmu_del() is called for each successfully added
 * cpumf_pmu_add() during the transaction.
 */
static void cpumf_pmu_cancel_txn(struct pmu *pmu)
{
	unsigned int txn_flags;
	struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);

	WARN_ON_ONCE(!cpuhw->txn_flags);	/* no txn in flight */

	txn_flags = cpuhw->txn_flags;
	cpuhw->txn_flags = 0;
	if (txn_flags & ~PERF_PMU_TXN_ADD)
		return;

	WARN_ON(cpuhw->tx_state != cpuhw->state);

	perf_pmu_enable(pmu);
}

/*
 * Commit the group events scheduling transaction. On success, the
 * transaction is closed. On error, the transaction is kept open
 * until cpumf_pmu_cancel_txn() is called.
 */
static int cpumf_pmu_commit_txn(struct pmu *pmu)
{
	struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
	u64 state;

	WARN_ON_ONCE(!cpuhw->txn_flags);	/* no txn in flight */

	if (cpuhw->txn_flags & ~PERF_PMU_TXN_ADD) {
		cpuhw->txn_flags = 0;
		return 0;
	}

	/* check if the updated state can be scheduled */
	state = cpuhw->state & ~((1 << CPUMF_LCCTL_ENABLE_SHIFT) - 1);
	state >>= CPUMF_LCCTL_ENABLE_SHIFT;
	if ((state & cpuhw->info.auth_ctl) != state)
		return -ENOENT;

	cpuhw->txn_flags = 0;
	perf_pmu_enable(pmu);
	return 0;
}

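/*
 * Reading of the authorization test above (an interpretation of the
 * CPUMF_LCCTL bit layout, not stated explicitly in this file): the mask
 * drops the bits below CPUMF_LCCTL_ENABLE_SHIFT from the lcctl() control
 * word in cpuhw->state, the shift aligns the counter-set enable bits with
 * the authorization mask, and the transaction is accepted only if every
 * enable bit requested by the group is also set in cpuhw->info.auth_ctl
 * as reported by the hardware.
 */
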
/* Performance monitoring unit for s390x */
static struct pmu cpumf_pmu = {
	.task_ctx_nr  = perf_sw_context,
	.capabilities = PERF_PMU_CAP_NO_INTERRUPT,
	.pmu_enable   = cpumf_pmu_enable,
	.pmu_disable  = cpumf_pmu_disable,
	.event_init   = cpumf_pmu_event_init,
	.add	      = cpumf_pmu_add,
	.del	      = cpumf_pmu_del,
	.start	      = cpumf_pmu_start,
	.stop	      = cpumf_pmu_stop,
	.read	      = cpumf_pmu_read,
	.start_txn    = cpumf_pmu_start_txn,
	.commit_txn   = cpumf_pmu_commit_txn,
	.cancel_txn   = cpumf_pmu_cancel_txn,
};

212188a5 HB |
548 | static int __init cpumf_pmu_init(void) |
549 | { | |
550 | int rc; | |
551 | ||
7f5ac1a0 | 552 | if (!kernel_cpumcf_avail()) |
212188a5 HB |
553 | return -ENODEV; |
554 | ||
c7168325 | 555 | cpumf_pmu.attr_groups = cpumf_cf_event_group(); |
212188a5 | 556 | rc = perf_pmu_register(&cpumf_pmu, "cpum_cf", PERF_TYPE_RAW); |
7f5ac1a0 | 557 | if (rc) |
212188a5 | 558 | pr_err("Registering the cpum_cf PMU failed with rc=%i\n", rc); |
869f4f98 | 559 | return rc; |
212188a5 | 560 | } |
7f5ac1a0 | 561 | subsys_initcall(cpumf_pmu_init); |