Commit | Line | Data |
---|---|---|
4afbb24c MF |
1 | /* |
2 | * Intel Cache Quality-of-Service Monitoring (CQM) support. | |
3 | * | |
4 | * Based very, very heavily on work by Peter Zijlstra. | |
5 | */ | |
6 | ||
7 | #include <linux/perf_event.h> | |
8 | #include <linux/slab.h> | |
9 | #include <asm/cpu_device_id.h> | |
27f6d22b | 10 | #include "../perf_event.h" |
4afbb24c MF |
11 | |
12 | #define MSR_IA32_PQR_ASSOC 0x0c8f | |
13 | #define MSR_IA32_QM_CTR 0x0c8e | |
14 | #define MSR_IA32_QM_EVTSEL 0x0c8d | |
15 | ||
adafa999 | 16 | static u32 cqm_max_rmid = -1; |
4afbb24c MF |
17 | static unsigned int cqm_l3_scale; /* supposedly cacheline size */ |
18 | ||
bf926731 TG |
19 | /** |
20 | * struct intel_pqr_state - State cache for the PQR MSR | |
21 | * @rmid: The cached Resource Monitoring ID | |
22 | * @closid: The cached Class Of Service ID | |
23 | * @rmid_usecnt: The usage counter for rmid | |
24 | * | |
25 | * The upper 32 bits of MSR_IA32_PQR_ASSOC contain closid and the | |
26 | * lower 10 bits rmid. The update to MSR_IA32_PQR_ASSOC always | |
27 | * contains both parts, so we need to cache them. | |
28 | * | |
29 | * The cache also helps to avoid pointless updates if the value does | |
30 | * not change. | |
31 | */ | |
32 | struct intel_pqr_state { | |
b3df4ec4 | 33 | u32 rmid; |
bf926731 TG |
34 | u32 closid; |
35 | int rmid_usecnt; | |
4afbb24c MF |
36 | }; |
37 | ||
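To make the MSR layout above concrete, here is a minimal userspace-style sketch (not kernel code); pqr_assoc_value() and PQR_RMID_MASK are hypothetical names used only to illustrate how the cached closid (upper 32 bits) and rmid (lower 10 bits) combine into the value written by wrmsr(MSR_IA32_PQR_ASSOC, rmid, state->closid).

```c
#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative only: pqr_assoc_value() is a hypothetical helper showing
 * the MSR_IA32_PQR_ASSOC layout described above. The kernel's
 * wrmsr(MSR_IA32_PQR_ASSOC, rmid, closid) call passes the RMID as the
 * low 32 bits and the CLOSID as the high 32 bits of the MSR value.
 */
#define PQR_RMID_MASK	0x3ffULL	/* lower 10 bits carry the RMID */

static uint64_t pqr_assoc_value(uint32_t closid, uint32_t rmid)
{
	return ((uint64_t)closid << 32) | ((uint64_t)rmid & PQR_RMID_MASK);
}

int main(void)
{
	/* closid 1, rmid 5 -> 0x0000000100000005 */
	printf("0x%016llx\n", (unsigned long long)pqr_assoc_value(1, 5));
	return 0;
}
```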
9e7eaac9 | 38 | /* |
bf926731 | 39 | * The cached intel_pqr_state is strictly per CPU and can never be |
9e7eaac9 TG |
40 | * updated from a remote CPU. Both functions which modify the state |
41 | * (intel_cqm_event_start and intel_cqm_event_stop) are called with | |
42 | * interrupts disabled, which is sufficient for the protection. | |
43 | */ | |
bf926731 | 44 | static DEFINE_PER_CPU(struct intel_pqr_state, pqr_state); |
4afbb24c MF |
45 | |
46 | /* | |
bff671db MF |
47 | * Protects cache_groups, cqm_rmid_free_lru and cqm_rmid_limbo_lru. |
48 | * Also protects event->hw.cqm_rmid | |
49 | * | |
50 | * Hold either for stability, both for modification of ->hw.cqm_rmid. | |
4afbb24c MF |
51 | */ |
52 | static DEFINE_MUTEX(cache_mutex); | |
bff671db | 53 | static DEFINE_RAW_SPINLOCK(cache_lock); |
4afbb24c MF |
54 | |
55 | /* | |
56 | * Groups of events that have the same target(s), one RMID per group. | |
57 | */ | |
58 | static LIST_HEAD(cache_groups); | |
59 | ||
60 | /* | |
61 | * Mask of CPUs for reading CQM values. We only need one per-socket. | |
62 | */ | |
63 | static cpumask_t cqm_cpumask; | |
64 | ||
65 | #define RMID_VAL_ERROR (1ULL << 63) | |
66 | #define RMID_VAL_UNAVAIL (1ULL << 62) | |
67 | ||
68 | #define QOS_L3_OCCUP_EVENT_ID (1 << 0) | |
69 | ||
70 | #define QOS_EVENT_MASK QOS_L3_OCCUP_EVENT_ID | |
71 | ||
bff671db MF |
72 | /* |
73 | * This is central to the rotation algorithm in __intel_cqm_rmid_rotate(). | |
74 | * | |
75 | * This rmid is always free and is guaranteed to have an associated | |
76 | * near-zero occupancy value, i.e. no cachelines are tagged with this | |
77 | * RMID, once __intel_cqm_rmid_rotate() returns. | |
78 | */ | |
adafa999 | 79 | static u32 intel_cqm_rotation_rmid; |
bff671db MF |
80 | |
81 | #define INVALID_RMID (-1) | |
82 | ||
83 | /* | |
84 | * Is @rmid valid for programming the hardware? | |
85 | * | |
86 | * rmid 0 is reserved by the hardware for all non-monitored tasks, which | |
87 | * means that we should never come across an rmid with that value. | |
88 | * Likewise, an rmid value of -1 is used to indicate "no rmid currently | |
89 | * assigned" and is used as part of the rotation code. | |
90 | */ | |
adafa999 | 91 | static inline bool __rmid_valid(u32 rmid) |
bff671db MF |
92 | { |
93 | if (!rmid || rmid == INVALID_RMID) | |
94 | return false; | |
95 | ||
96 | return true; | |
97 | } | |
98 | ||
adafa999 | 99 | static u64 __rmid_read(u32 rmid) |
4afbb24c MF |
100 | { |
101 | u64 val; | |
102 | ||
103 | /* | |
104 | * Ignore the SDM, this thing is _NOTHING_ like a regular perfcnt, | |
105 | * it just says that to increase confusion. | |
106 | */ | |
107 | wrmsr(MSR_IA32_QM_EVTSEL, QOS_L3_OCCUP_EVENT_ID, rmid); | |
108 | rdmsrl(MSR_IA32_QM_CTR, val); | |
109 | ||
110 | /* | |
111 | * Aside from the ERROR and UNAVAIL bits, assume this thing returns | |
112 | * the number of cachelines tagged with @rmid. | |
113 | */ | |
114 | return val; | |
115 | } | |
116 | ||
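The raw MSR_IA32_QM_CTR value returned by __rmid_read() is a cacheline count plus the two status bits; the driver leaves it in cachelines and advertises cqm_l3_scale through the event's .scale attribute so userspace can convert to bytes. A minimal sketch of that interpretation, with occupancy_bytes() and the scale value 64 as illustrative assumptions:

```c
#include <stdint.h>
#include <stdio.h>

#define RMID_VAL_ERROR		(1ULL << 63)
#define RMID_VAL_UNAVAIL	(1ULL << 62)

/*
 * Sketch: interpret a raw MSR_IA32_QM_CTR reading. The count is in
 * cachelines; multiplying by the advertised upscaling factor
 * (cqm_l3_scale, exposed as the event's .scale) yields bytes.
 * occupancy_bytes() and the scale value 64 are illustrative.
 */
static int occupancy_bytes(uint64_t qm_ctr, unsigned int scale, uint64_t *bytes)
{
	if (qm_ctr & (RMID_VAL_ERROR | RMID_VAL_UNAVAIL))
		return -1;		/* reading is not usable */

	*bytes = qm_ctr * scale;
	return 0;
}

int main(void)
{
	uint64_t bytes;

	if (!occupancy_bytes(1200, 64, &bytes))	/* 1200 tagged lines */
		printf("%llu bytes\n", (unsigned long long)bytes);
	return 0;
}
```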
bff671db MF |
117 | enum rmid_recycle_state { |
118 | RMID_YOUNG = 0, | |
119 | RMID_AVAILABLE, | |
120 | RMID_DIRTY, | |
121 | }; | |
122 | ||
35298e55 | 123 | struct cqm_rmid_entry { |
adafa999 | 124 | u32 rmid; |
bff671db | 125 | enum rmid_recycle_state state; |
35298e55 | 126 | struct list_head list; |
bff671db | 127 | unsigned long queue_time; |
35298e55 MF |
128 | }; |
129 | ||
130 | /* | |
bff671db | 131 | * cqm_rmid_free_lru - A least recently used list of RMIDs. |
35298e55 MF |
132 | * |
133 | * Oldest entry at the head, newest (most recently used) entry at the | |
134 | * tail. This list is never traversed, it's only used to keep track of | |
135 | * the lru order. That is, we only pick entries off the head or insert |
136 | * them on the tail. | |
137 | * | |
138 | * All entries on the list are 'free', and their RMIDs are not currently | |
139 | * in use. To mark an RMID as in use, remove its entry from the lru | |
140 | * list. | |
141 | * | |
bff671db MF |
142 | * |
143 | * cqm_rmid_limbo_lru - list of currently unused but (potentially) dirty RMIDs. | |
144 | * | |
145 | * This list contains RMIDs that no one is currently using but that |
146 | * may have a non-zero occupancy value associated with them. The | |
147 | * rotation worker moves RMIDs from the limbo list to the free list once | |
148 | * the occupancy value drops below __intel_cqm_threshold. | |
149 | * | |
150 | * Both lists are protected by cache_mutex. | |
35298e55 | 151 | */ |
bff671db MF |
152 | static LIST_HEAD(cqm_rmid_free_lru); |
153 | static LIST_HEAD(cqm_rmid_limbo_lru); | |
35298e55 MF |
154 | |
155 | /* | |
156 | * We use a simple array of pointers so that we can look up a struct |
157 | * cqm_rmid_entry in O(1). This saves the callers of __get_rmid() |
158 | * and __put_rmid() from having to worry about dealing with struct | |
159 | * cqm_rmid_entry - they just deal with rmids, i.e. integers. | |
160 | * | |
161 | * Once this array is initialized it is read-only. No locks are required | |
162 | * to access it. | |
163 | * | |
164 | * All entries for all RMIDs can be looked up in this array at all |
165 | * times. | |
166 | */ | |
167 | static struct cqm_rmid_entry **cqm_rmid_ptrs; | |
168 | ||
adafa999 | 169 | static inline struct cqm_rmid_entry *__rmid_entry(u32 rmid) |
35298e55 MF |
170 | { |
171 | struct cqm_rmid_entry *entry; | |
172 | ||
173 | entry = cqm_rmid_ptrs[rmid]; | |
174 | WARN_ON(entry->rmid != rmid); | |
175 | ||
176 | return entry; | |
177 | } | |
4afbb24c MF |
178 | |
179 | /* | |
180 | * Returns INVALID_RMID on failure. |
35298e55 MF |
181 | * |
182 | * We expect to be called with cache_mutex held. | |
4afbb24c | 183 | */ |
adafa999 | 184 | static u32 __get_rmid(void) |
4afbb24c | 185 | { |
35298e55 MF |
186 | struct cqm_rmid_entry *entry; |
187 | ||
188 | lockdep_assert_held(&cache_mutex); | |
189 | ||
bff671db MF |
190 | if (list_empty(&cqm_rmid_free_lru)) |
191 | return INVALID_RMID; | |
35298e55 | 192 | |
bff671db | 193 | entry = list_first_entry(&cqm_rmid_free_lru, struct cqm_rmid_entry, list); |
35298e55 MF |
194 | list_del(&entry->list); |
195 | ||
196 | return entry->rmid; | |
4afbb24c MF |
197 | } |
198 | ||
adafa999 | 199 | static void __put_rmid(u32 rmid) |
4afbb24c | 200 | { |
35298e55 MF |
201 | struct cqm_rmid_entry *entry; |
202 | ||
203 | lockdep_assert_held(&cache_mutex); | |
204 | ||
bff671db | 205 | WARN_ON(!__rmid_valid(rmid)); |
35298e55 MF |
206 | entry = __rmid_entry(rmid); |
207 | ||
bff671db MF |
208 | entry->queue_time = jiffies; |
209 | entry->state = RMID_YOUNG; | |
210 | ||
211 | list_add_tail(&entry->list, &cqm_rmid_limbo_lru); | |
4afbb24c MF |
212 | } |
213 | ||
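The free/limbo handling that __get_rmid() and __put_rmid() implement can be modelled in a few lines of userspace C. This is only a sketch of the lifecycle; the list type and helper names are invented for illustration, and the real code uses the kernel's list_head plus the jiffies-based queue_time shown above.

```c
#include <stdio.h>
#include <time.h>

/*
 * Minimal userspace model of the RMID lifecycle: allocation pops the
 * oldest entry off the head of the free LRU, release timestamps the
 * entry and appends it to the tail of the limbo list, where it waits
 * until its occupancy decays. All names here are invented for the
 * sketch; the driver uses list_head and jiffies instead.
 */
#define NR_RMIDS 8

struct rmid_entry {
	unsigned int rmid;
	time_t queue_time;
	struct rmid_entry *next;
};

struct rmid_list {
	struct rmid_entry *head;
	struct rmid_entry *tail;
};

static void list_push_tail(struct rmid_list *l, struct rmid_entry *e)
{
	e->next = NULL;
	if (l->tail)
		l->tail->next = e;
	else
		l->head = e;
	l->tail = e;
}

static struct rmid_entry *list_pop_head(struct rmid_list *l)
{
	struct rmid_entry *e = l->head;

	if (!e)
		return NULL;
	l->head = e->next;
	if (!l->head)
		l->tail = NULL;
	return e;
}

int main(void)
{
	static struct rmid_entry entries[NR_RMIDS];
	struct rmid_list free_lru = { 0 }, limbo_lru = { 0 };
	struct rmid_entry *e;
	unsigned int r;

	/* RMID 0 is reserved for unmonitored tasks; it never joins the pool. */
	for (r = 1; r < NR_RMIDS; r++) {
		entries[r].rmid = r;
		list_push_tail(&free_lru, &entries[r]);
	}

	/* "__get_rmid": take the least recently used free RMID. */
	e = list_pop_head(&free_lru);
	printf("allocated rmid %u\n", e->rmid);

	/* "__put_rmid": stamp it and park it on the limbo list. */
	e->queue_time = time(NULL);
	list_push_tail(&limbo_lru, e);
	return 0;
}
```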
ada2f634 VS |
214 | static void cqm_cleanup(void) |
215 | { | |
216 | int i; | |
217 | ||
218 | if (!cqm_rmid_ptrs) | |
219 | return; | |
220 | ||
221 | for (i = 0; i <= cqm_max_rmid; i++) |
222 | kfree(cqm_rmid_ptrs[i]); | |
223 | ||
224 | kfree(cqm_rmid_ptrs); | |
225 | cqm_rmid_ptrs = NULL; | |
226 | } | |
227 | ||
4afbb24c MF |
228 | static int intel_cqm_setup_rmid_cache(void) |
229 | { | |
35298e55 | 230 | struct cqm_rmid_entry *entry; |
bff671db MF |
231 | unsigned int nr_rmids; |
232 | int r = 0; | |
35298e55 | 233 | |
bff671db | 234 | nr_rmids = cqm_max_rmid + 1; |
ada2f634 | 235 | cqm_rmid_ptrs = kzalloc(sizeof(struct cqm_rmid_entry *) * |
bff671db | 236 | nr_rmids, GFP_KERNEL); |
35298e55 | 237 | if (!cqm_rmid_ptrs) |
4afbb24c MF |
238 | return -ENOMEM; |
239 | ||
bff671db | 240 | for (; r <= cqm_max_rmid; r++) { |
35298e55 MF |
241 | struct cqm_rmid_entry *entry; |
242 | ||
243 | entry = kmalloc(sizeof(*entry), GFP_KERNEL); | |
244 | if (!entry) | |
245 | goto fail; | |
246 | ||
247 | INIT_LIST_HEAD(&entry->list); | |
248 | entry->rmid = r; | |
249 | cqm_rmid_ptrs[r] = entry; | |
250 | ||
bff671db | 251 | list_add_tail(&entry->list, &cqm_rmid_free_lru); |
35298e55 | 252 | } |
4afbb24c MF |
253 | |
254 | /* | |
255 | * RMID 0 is special and is always allocated. It's used for all | |
256 | * tasks that are not monitored. | |
257 | */ | |
35298e55 MF |
258 | entry = __rmid_entry(0); |
259 | list_del(&entry->list); | |
4afbb24c | 260 | |
bff671db MF |
261 | mutex_lock(&cache_mutex); |
262 | intel_cqm_rotation_rmid = __get_rmid(); | |
263 | mutex_unlock(&cache_mutex); | |
264 | ||
4afbb24c | 265 | return 0; |
35298e55 | 266 | |
ada2f634 VS |
267 | fail: |
268 | cqm_cleanup(); | |
35298e55 | 269 | return -ENOMEM; |
4afbb24c MF |
270 | } |
271 | ||
272 | /* | |
273 | * Determine if @a and @b measure the same set of tasks. | |
bfe1fcd2 MF |
274 | * |
275 | * If @a and @b measure the same set of tasks then we want to share a | |
276 | * single RMID. | |
4afbb24c MF |
277 | */ |
278 | static bool __match_event(struct perf_event *a, struct perf_event *b) | |
279 | { | |
bfe1fcd2 | 280 | /* Per-cpu and task events don't mix */ |
4afbb24c MF |
281 | if ((a->attach_state & PERF_ATTACH_TASK) != |
282 | (b->attach_state & PERF_ATTACH_TASK)) | |
283 | return false; | |
284 | ||
bfe1fcd2 MF |
285 | #ifdef CONFIG_CGROUP_PERF |
286 | if (a->cgrp != b->cgrp) | |
287 | return false; | |
288 | #endif | |
289 | ||
290 | /* If not task event, we're machine wide */ | |
291 | if (!(b->attach_state & PERF_ATTACH_TASK)) | |
292 | return true; | |
293 | ||
294 | /* | |
295 | * Events that target same task are placed into the same cache group. | |
a223c1c7 VS |
296 | * Mark it as a multi event group, so that we update ->count |
297 | * for every event rather than just the group leader later. | |
bfe1fcd2 | 298 | */ |
a223c1c7 VS |
299 | if (a->hw.target == b->hw.target) { |
300 | b->hw.is_group_event = true; | |
bfe1fcd2 | 301 | return true; |
a223c1c7 | 302 | } |
bfe1fcd2 MF |
303 | |
304 | /* | |
305 | * Are we an inherited event? | |
306 | */ | |
307 | if (b->parent == a) | |
308 | return true; | |
309 | ||
310 | return false; | |
311 | } | |
312 | ||
313 | #ifdef CONFIG_CGROUP_PERF | |
314 | static inline struct perf_cgroup *event_to_cgroup(struct perf_event *event) | |
315 | { | |
316 | if (event->attach_state & PERF_ATTACH_TASK) | |
614e4c4e | 317 | return perf_cgroup_from_task(event->hw.target, event->ctx); |
4afbb24c | 318 | |
bfe1fcd2 | 319 | return event->cgrp; |
4afbb24c | 320 | } |
bfe1fcd2 | 321 | #endif |
4afbb24c MF |
322 | |
323 | /* | |
324 | * Determine if @a's tasks intersect with @b's tasks | |
bfe1fcd2 MF |
325 | * |
326 | * There are combinations of events that we explicitly prohibit, | |
327 | * | |
328 | * PROHIBITS | |
329 | * system-wide -> cgroup and task | |
330 | * cgroup -> system-wide | |
331 | * -> task in cgroup | |
332 | * task -> system-wide | |
333 | * -> task in cgroup | |
334 | * | |
335 | * Call this function before allocating an RMID. | |
4afbb24c MF |
336 | */ |
337 | static bool __conflict_event(struct perf_event *a, struct perf_event *b) | |
338 | { | |
bfe1fcd2 MF |
339 | #ifdef CONFIG_CGROUP_PERF |
340 | /* | |
341 | * We can have any number of cgroups but only one system-wide | |
342 | * event at a time. | |
343 | */ | |
344 | if (a->cgrp && b->cgrp) { | |
345 | struct perf_cgroup *ac = a->cgrp; | |
346 | struct perf_cgroup *bc = b->cgrp; | |
347 | ||
348 | /* | |
349 | * This condition should have been caught in | |
350 | * __match_event() and we should be sharing an RMID. | |
351 | */ | |
352 | WARN_ON_ONCE(ac == bc); | |
353 | ||
354 | if (cgroup_is_descendant(ac->css.cgroup, bc->css.cgroup) || | |
355 | cgroup_is_descendant(bc->css.cgroup, ac->css.cgroup)) | |
356 | return true; | |
357 | ||
358 | return false; | |
359 | } | |
360 | ||
361 | if (a->cgrp || b->cgrp) { | |
362 | struct perf_cgroup *ac, *bc; | |
363 | ||
364 | /* | |
365 | * cgroup and system-wide events are mutually exclusive | |
366 | */ | |
367 | if ((a->cgrp && !(b->attach_state & PERF_ATTACH_TASK)) || | |
368 | (b->cgrp && !(a->attach_state & PERF_ATTACH_TASK))) | |
369 | return true; | |
370 | ||
371 | /* | |
372 | * Ensure neither event is part of the other's cgroup | |
373 | */ | |
374 | ac = event_to_cgroup(a); | |
375 | bc = event_to_cgroup(b); | |
376 | if (ac == bc) | |
377 | return true; | |
378 | ||
379 | /* | |
380 | * Must have cgroup and non-intersecting task events. | |
381 | */ | |
382 | if (!ac || !bc) | |
383 | return false; | |
384 | ||
385 | /* | |
386 | * We have cgroup and task events, and the task belongs | |
387 | * to a cgroup. Check for overlap. |
388 | */ | |
389 | if (cgroup_is_descendant(ac->css.cgroup, bc->css.cgroup) || | |
390 | cgroup_is_descendant(bc->css.cgroup, ac->css.cgroup)) | |
391 | return true; | |
392 | ||
393 | return false; | |
394 | } | |
395 | #endif | |
4afbb24c MF |
396 | /* |
397 | * If one of them is not a task, same story as above with cgroups. | |
398 | */ | |
399 | if (!(a->attach_state & PERF_ATTACH_TASK) || | |
400 | !(b->attach_state & PERF_ATTACH_TASK)) | |
401 | return true; | |
402 | ||
403 | /* | |
404 | * Must be non-overlapping. | |
405 | */ | |
406 | return false; | |
407 | } | |
408 | ||
bff671db | 409 | struct rmid_read { |
adafa999 | 410 | u32 rmid; |
bff671db MF |
411 | atomic64_t value; |
412 | }; | |
413 | ||
414 | static void __intel_cqm_event_count(void *info); | |
415 | ||
416 | /* | |
417 | * Exchange the RMID of a group of events. | |
418 | */ | |
adafa999 | 419 | static u32 intel_cqm_xchg_rmid(struct perf_event *group, u32 rmid) |
bff671db MF |
420 | { |
421 | struct perf_event *event; | |
bff671db | 422 | struct list_head *head = &group->hw.cqm_group_entry; |
adafa999 | 423 | u32 old_rmid = group->hw.cqm_rmid; |
bff671db MF |
424 | |
425 | lockdep_assert_held(&cache_mutex); | |
426 | ||
427 | /* | |
428 | * If our RMID is being deallocated, perform a read now. | |
429 | */ | |
430 | if (__rmid_valid(old_rmid) && !__rmid_valid(rmid)) { | |
431 | struct rmid_read rr = { | |
432 | .value = ATOMIC64_INIT(0), | |
433 | .rmid = old_rmid, | |
434 | }; | |
435 | ||
436 | on_each_cpu_mask(&cqm_cpumask, __intel_cqm_event_count, | |
437 | &rr, 1); | |
438 | local64_set(&group->count, atomic64_read(&rr.value)); | |
439 | } | |
440 | ||
441 | raw_spin_lock_irq(&cache_lock); | |
442 | ||
443 | group->hw.cqm_rmid = rmid; | |
444 | list_for_each_entry(event, head, hw.cqm_group_entry) | |
445 | event->hw.cqm_rmid = rmid; | |
446 | ||
447 | raw_spin_unlock_irq(&cache_lock); | |
448 | ||
449 | return old_rmid; | |
450 | } | |
451 | ||
452 | /* | |
453 | * If we fail to assign a new RMID for intel_cqm_rotation_rmid because | |
454 | * cachelines are still tagged with RMIDs in limbo, we progressively | |
455 | * increment the threshold until we find an RMID in limbo with <= | |
456 | * __intel_cqm_threshold lines tagged. This is designed to mitigate the | |
457 | * problem where cachelines tagged with an RMID are not steadily being | |
458 | * evicted. | |
459 | * | |
460 | * On successful rotations we decrease the threshold back towards zero. | |
461 | * | |
462 | * __intel_cqm_max_threshold provides an upper bound on the threshold, | |
463 | * and is measured in bytes because it's exposed to userland. | |
464 | */ | |
465 | static unsigned int __intel_cqm_threshold; | |
466 | static unsigned int __intel_cqm_max_threshold; | |
467 | ||
468 | /* | |
469 | * Test whether an RMID has a zero occupancy value on this cpu. | |
470 | */ | |
471 | static void intel_cqm_stable(void *arg) | |
472 | { | |
473 | struct cqm_rmid_entry *entry; | |
474 | ||
475 | list_for_each_entry(entry, &cqm_rmid_limbo_lru, list) { | |
476 | if (entry->state != RMID_AVAILABLE) | |
477 | break; | |
478 | ||
479 | if (__rmid_read(entry->rmid) > __intel_cqm_threshold) | |
480 | entry->state = RMID_DIRTY; | |
481 | } | |
482 | } | |
483 | ||
484 | /* | |
485 | * If we have group events waiting for an RMID that don't conflict with | |
486 | * events already running, assign @rmid. | |
487 | */ | |
adafa999 | 488 | static bool intel_cqm_sched_in_event(u32 rmid) |
bff671db MF |
489 | { |
490 | struct perf_event *leader, *event; | |
491 | ||
492 | lockdep_assert_held(&cache_mutex); | |
493 | ||
494 | leader = list_first_entry(&cache_groups, struct perf_event, | |
495 | hw.cqm_groups_entry); | |
496 | event = leader; | |
497 | ||
498 | list_for_each_entry_continue(event, &cache_groups, | |
499 | hw.cqm_groups_entry) { | |
500 | if (__rmid_valid(event->hw.cqm_rmid)) | |
501 | continue; | |
502 | ||
503 | if (__conflict_event(event, leader)) | |
504 | continue; | |
505 | ||
506 | intel_cqm_xchg_rmid(event, rmid); | |
507 | return true; | |
508 | } | |
509 | ||
510 | return false; | |
511 | } | |
512 | ||
513 | /* | |
514 | * Initially use this constant for both the limbo queue time and the | |
515 | * rotation timer interval, pmu::hrtimer_interval_ms. | |
516 | * | |
517 | * They don't need to be the same, but the two are related since if you | |
518 | * rotate faster than you recycle RMIDs, you may run out of available | |
519 | * RMIDs. | |
520 | */ | |
521 | #define RMID_DEFAULT_QUEUE_TIME 250 /* ms */ | |
522 | ||
523 | static unsigned int __rmid_queue_time_ms = RMID_DEFAULT_QUEUE_TIME; | |
524 | ||
525 | /* | |
526 | * intel_cqm_rmid_stabilize - move RMIDs from limbo to free list | |
527 | * @nr_available: number of freeable RMIDs on the limbo list | |
528 | * | |
529 | * Quiescent state; wait for all 'freed' RMIDs to become unused, i.e. no | |
530 | * cachelines are tagged with those RMIDs. After this we can reuse them | |
531 | * and know that the current set of active RMIDs is stable. | |
532 | * | |
533 | * Return %true or %false depending on whether stabilization needs to be | |
534 | * reattempted. | |
535 | * | |
536 | * If we return %true then @nr_available is updated to indicate the | |
537 | * number of RMIDs on the limbo list that have been queued for the | |
538 | * minimum queue time (RMID_AVAILABLE), but whose data occupancy values | |
539 | * are above __intel_cqm_threshold. | |
540 | */ | |
541 | static bool intel_cqm_rmid_stabilize(unsigned int *available) | |
542 | { | |
543 | struct cqm_rmid_entry *entry, *tmp; | |
bff671db MF |
544 | |
545 | lockdep_assert_held(&cache_mutex); | |
546 | ||
547 | *available = 0; | |
548 | list_for_each_entry(entry, &cqm_rmid_limbo_lru, list) { | |
549 | unsigned long min_queue_time; | |
550 | unsigned long now = jiffies; | |
551 | ||
552 | /* | |
553 | * We hold RMIDs placed into limbo for a minimum queue | |
554 | * time. Before the minimum queue time has elapsed we do | |
555 | * not recycle RMIDs. | |
556 | * | |
557 | * The reasoning is that until a sufficient time has | |
558 | * passed since we stopped using an RMID, any RMID | |
559 | * placed onto the limbo list will likely still have | |
560 | * data tagged in the cache, which means we'll probably | |
561 | * fail to recycle it anyway. | |
562 | * | |
563 | * We can save ourselves an expensive IPI by skipping | |
564 | * any RMIDs that have not been queued for the minimum | |
565 | * time. | |
566 | */ | |
567 | min_queue_time = entry->queue_time + | |
568 | msecs_to_jiffies(__rmid_queue_time_ms); | |
569 | ||
570 | if (time_after(min_queue_time, now)) | |
571 | break; | |
572 | ||
573 | entry->state = RMID_AVAILABLE; | |
574 | (*available)++; | |
575 | } | |
576 | ||
577 | /* | |
578 | * Fast return if none of the RMIDs on the limbo list have been | |
579 | * sitting on the queue for the minimum queue time. | |
580 | */ | |
581 | if (!*available) | |
582 | return false; | |
583 | ||
584 | /* | |
585 | * Test whether an RMID is free for each package. | |
586 | */ | |
587 | on_each_cpu_mask(&cqm_cpumask, intel_cqm_stable, NULL, true); | |
588 | ||
589 | list_for_each_entry_safe(entry, tmp, &cqm_rmid_limbo_lru, list) { | |
590 | /* | |
591 | * Exhausted all RMIDs that have waited min queue time. | |
592 | */ | |
593 | if (entry->state == RMID_YOUNG) | |
594 | break; | |
595 | ||
596 | if (entry->state == RMID_DIRTY) | |
597 | continue; | |
598 | ||
599 | list_del(&entry->list); /* remove from limbo */ | |
600 | ||
601 | /* | |
602 | * The rotation RMID gets priority if it's | |
603 | * currently invalid, in which case we skip adding |
604 | * the RMID to the free lru. |
605 | */ | |
606 | if (!__rmid_valid(intel_cqm_rotation_rmid)) { | |
607 | intel_cqm_rotation_rmid = entry->rmid; | |
608 | continue; | |
609 | } | |
610 | ||
611 | /* | |
612 | * If we have groups waiting for RMIDs, hand | |
59bf7fd4 | 613 | * them one now provided they don't conflict. |
bff671db | 614 | */ |
59bf7fd4 | 615 | if (intel_cqm_sched_in_event(entry->rmid)) |
bff671db MF |
616 | continue; |
617 | ||
618 | /* | |
619 | * Otherwise place it onto the free list. | |
620 | */ | |
621 | list_add_tail(&entry->list, &cqm_rmid_free_lru); | |
622 | } | |
623 | ||
624 | ||
625 | return __rmid_valid(intel_cqm_rotation_rmid); | |
626 | } | |
627 | ||
628 | /* | |
629 | * Pick a victim group and move it to the tail of the group list. | |
59bf7fd4 | 630 | * @next: The first group without an RMID |
bff671db | 631 | */ |
59bf7fd4 | 632 | static void __intel_cqm_pick_and_rotate(struct perf_event *next) |
bff671db MF |
633 | { |
634 | struct perf_event *rotor; | |
adafa999 | 635 | u32 rmid; |
bff671db MF |
636 | |
637 | lockdep_assert_held(&cache_mutex); | |
bff671db MF |
638 | |
639 | rotor = list_first_entry(&cache_groups, struct perf_event, | |
640 | hw.cqm_groups_entry); | |
59bf7fd4 MF |
641 | |
642 | /* | |
643 | * The group at the front of the list should always have a valid | |
644 | * RMID. If it doesn't then no groups have RMIDs assigned and we | |
645 | * don't need to rotate the list. | |
646 | */ | |
647 | if (next == rotor) | |
648 | return; | |
649 | ||
650 | rmid = intel_cqm_xchg_rmid(rotor, INVALID_RMID); | |
651 | __put_rmid(rmid); | |
652 | ||
bff671db | 653 | list_rotate_left(&cache_groups); |
59bf7fd4 MF |
654 | } |
655 | ||
656 | /* | |
657 | * Deallocate the RMIDs from any events that conflict with @event, and | |
658 | * place them on the back of the group list. | |
659 | */ | |
660 | static void intel_cqm_sched_out_conflicting_events(struct perf_event *event) | |
661 | { | |
662 | struct perf_event *group, *g; | |
adafa999 | 663 | u32 rmid; |
59bf7fd4 MF |
664 | |
665 | lockdep_assert_held(&cache_mutex); | |
666 | ||
667 | list_for_each_entry_safe(group, g, &cache_groups, hw.cqm_groups_entry) { | |
668 | if (group == event) | |
669 | continue; | |
670 | ||
671 | rmid = group->hw.cqm_rmid; | |
672 | ||
673 | /* | |
674 | * Skip events that don't have a valid RMID. | |
675 | */ | |
676 | if (!__rmid_valid(rmid)) | |
677 | continue; | |
678 | ||
679 | /* | |
680 | * No conflict? No problem! Leave the event alone. | |
681 | */ | |
682 | if (!__conflict_event(group, event)) | |
683 | continue; | |
bff671db | 684 | |
59bf7fd4 MF |
685 | intel_cqm_xchg_rmid(group, INVALID_RMID); |
686 | __put_rmid(rmid); | |
687 | } | |
bff671db MF |
688 | } |
689 | ||
690 | /* | |
691 | * Attempt to rotate the groups and assign new RMIDs. | |
692 | * | |
59bf7fd4 MF |
693 | * We rotate for two reasons, |
694 | * 1. To handle the scheduling of conflicting events | |
695 | * 2. To recycle RMIDs | |
696 | * | |
bff671db MF |
697 | * Rotating RMIDs is complicated because the hardware doesn't give us |
698 | * any clues. | |
699 | * | |
700 | * There are problems with the hardware interface; when you change the |
701 | * task:RMID map cachelines retain their 'old' tags, giving a skewed | |
702 | * picture. In order to work around this, we must always keep one free | |
703 | * RMID - intel_cqm_rotation_rmid. | |
704 | * | |
705 | * Rotation works by taking away an RMID from a group (the old RMID), | |
706 | * and assigning the free RMID to another group (the new RMID). We must | |
707 | * then wait for the old RMID to not be used (no cachelines tagged). | |
708 | * This ensures that all cachelines are tagged with 'active' RMIDs. At |
709 | * this point we can start reading values for the new RMID and treat the | |
710 | * old RMID as the free RMID for the next rotation. | |
711 | * | |
712 | * Return %true or %false depending on whether we did any rotating. | |
713 | */ | |
714 | static bool __intel_cqm_rmid_rotate(void) | |
715 | { | |
59bf7fd4 | 716 | struct perf_event *group, *start = NULL; |
bff671db MF |
717 | unsigned int threshold_limit; |
718 | unsigned int nr_needed = 0; | |
719 | unsigned int nr_available; | |
bff671db MF |
720 | bool rotated = false; |
721 | ||
722 | mutex_lock(&cache_mutex); | |
723 | ||
724 | again: | |
725 | /* | |
726 | * Fast path through this function if there are no groups and no | |
727 | * RMIDs that need cleaning. | |
728 | */ | |
729 | if (list_empty(&cache_groups) && list_empty(&cqm_rmid_limbo_lru)) | |
730 | goto out; | |
731 | ||
732 | list_for_each_entry(group, &cache_groups, hw.cqm_groups_entry) { | |
733 | if (!__rmid_valid(group->hw.cqm_rmid)) { | |
734 | if (!start) | |
735 | start = group; | |
736 | nr_needed++; | |
737 | } | |
738 | } | |
739 | ||
740 | /* | |
741 | * We have some event groups, but they all have RMIDs assigned | |
742 | * and no RMIDs need cleaning. | |
743 | */ | |
744 | if (!nr_needed && list_empty(&cqm_rmid_limbo_lru)) | |
745 | goto out; | |
746 | ||
747 | if (!nr_needed) | |
748 | goto stabilize; | |
749 | ||
750 | /* | |
59bf7fd4 MF |
751 | * We have more event groups without RMIDs than available RMIDs, |
752 | * or we have event groups that conflict with the ones currently | |
753 | * scheduled. | |
bff671db MF |
754 | * |
755 | * We force deallocate the rmid of the group at the head of | |
756 | * cache_groups. The first event group without an RMID then gets | |
757 | * assigned intel_cqm_rotation_rmid. This ensures we always make | |
758 | * forward progress. | |
759 | * | |
760 | * Rotate the cache_groups list so the previous head is now the | |
761 | * tail. | |
762 | */ | |
59bf7fd4 | 763 | __intel_cqm_pick_and_rotate(start); |
bff671db MF |
764 | |
765 | /* | |
766 | * If the rotation is going to succeed, reduce the threshold so | |
767 | * that we don't needlessly reuse dirty RMIDs. | |
768 | */ | |
769 | if (__rmid_valid(intel_cqm_rotation_rmid)) { | |
770 | intel_cqm_xchg_rmid(start, intel_cqm_rotation_rmid); | |
59bf7fd4 MF |
771 | intel_cqm_rotation_rmid = __get_rmid(); |
772 | ||
773 | intel_cqm_sched_out_conflicting_events(start); | |
bff671db MF |
774 | |
775 | if (__intel_cqm_threshold) | |
776 | __intel_cqm_threshold--; | |
777 | } | |
778 | ||
bff671db MF |
779 | rotated = true; |
780 | ||
781 | stabilize: | |
782 | /* | |
783 | * We now need to stabilize the RMID we freed above (if any) to |
784 | * ensure that the next time we rotate we have an RMID with zero | |
785 | * occupancy value. | |
786 | * | |
787 | * Alternatively, if we didn't need to perform any rotation, | |
788 | * we'll have a bunch of RMIDs in limbo that need stabilizing. | |
789 | */ | |
790 | threshold_limit = __intel_cqm_max_threshold / cqm_l3_scale; | |
791 | ||
792 | while (intel_cqm_rmid_stabilize(&nr_available) && | |
793 | __intel_cqm_threshold < threshold_limit) { | |
794 | unsigned int steal_limit; | |
795 | ||
796 | /* | |
797 | * Don't spin if nobody is actively waiting for an RMID, | |
798 | * the rotation worker will be kicked as soon as an | |
799 | * event needs an RMID anyway. | |
800 | */ | |
801 | if (!nr_needed) | |
802 | break; | |
803 | ||
804 | /* Allow max 25% of RMIDs to be in limbo. */ | |
805 | steal_limit = (cqm_max_rmid + 1) / 4; | |
806 | ||
807 | /* | |
808 | * We failed to stabilize any RMIDs so our rotation | |
809 | * logic is now stuck. In order to make forward progress | |
810 | * we have a few options: | |
811 | * | |
812 | * 1. rotate ("steal") another RMID | |
813 | * 2. increase the threshold | |
814 | * 3. do nothing | |
815 | * | |
816 | * We do both of 1. and 2. until we hit the steal limit. | |
817 | * | |
818 | * The steal limit prevents all RMIDs ending up on the | |
819 | * limbo list. This can happen if every RMID has a | |
820 | * non-zero occupancy above threshold_limit, and the | |
821 | * occupancy values aren't dropping fast enough. | |
822 | * | |
823 | * Note that there is prioritisation at work here - we'd | |
824 | * rather increase the number of RMIDs on the limbo list | |
825 | * than increase the threshold, because increasing the | |
826 | * threshold skews the event data (because we reuse | |
827 | * dirty RMIDs) - threshold bumps are a last resort. | |
828 | */ | |
829 | if (nr_available < steal_limit) | |
830 | goto again; | |
831 | ||
832 | __intel_cqm_threshold++; | |
833 | } | |
834 | ||
835 | out: | |
836 | mutex_unlock(&cache_mutex); | |
837 | return rotated; | |
838 | } | |
839 | ||
840 | static void intel_cqm_rmid_rotate(struct work_struct *work); | |
841 | ||
842 | static DECLARE_DELAYED_WORK(intel_cqm_rmid_work, intel_cqm_rmid_rotate); | |
843 | ||
844 | static struct pmu intel_cqm_pmu; | |
845 | ||
846 | static void intel_cqm_rmid_rotate(struct work_struct *work) | |
847 | { | |
848 | unsigned long delay; | |
849 | ||
850 | __intel_cqm_rmid_rotate(); | |
851 | ||
852 | delay = msecs_to_jiffies(intel_cqm_pmu.hrtimer_interval_ms); | |
853 | schedule_delayed_work(&intel_cqm_rmid_work, delay); | |
854 | } | |
855 | ||
4afbb24c MF |
856 | /* |
857 | * Find a group and setup RMID. | |
858 | * | |
859 | * If we're part of a group, we use the group's RMID. | |
860 | */ | |
59bf7fd4 MF |
861 | static void intel_cqm_setup_event(struct perf_event *event, |
862 | struct perf_event **group) | |
4afbb24c MF |
863 | { |
864 | struct perf_event *iter; | |
59bf7fd4 | 865 | bool conflict = false; |
adafa999 | 866 | u32 rmid; |
4afbb24c | 867 | |
a223c1c7 | 868 | event->hw.is_group_event = false; |
4afbb24c | 869 | list_for_each_entry(iter, &cache_groups, hw.cqm_groups_entry) { |
59bf7fd4 MF |
870 | rmid = iter->hw.cqm_rmid; |
871 | ||
4afbb24c MF |
872 | if (__match_event(iter, event)) { |
873 | /* All tasks in a group share an RMID */ | |
59bf7fd4 | 874 | event->hw.cqm_rmid = rmid; |
4afbb24c | 875 | *group = iter; |
59bf7fd4 | 876 | return; |
4afbb24c MF |
877 | } |
878 | ||
59bf7fd4 MF |
879 | /* |
880 | * We only care about conflicts for events that are | |
881 | * actually scheduled in (and hence have a valid RMID). | |
882 | */ | |
883 | if (__conflict_event(iter, event) && __rmid_valid(rmid)) | |
884 | conflict = true; | |
4afbb24c MF |
885 | } |
886 | ||
59bf7fd4 MF |
887 | if (conflict) |
888 | rmid = INVALID_RMID; | |
889 | else | |
890 | rmid = __get_rmid(); | |
891 | ||
892 | event->hw.cqm_rmid = rmid; | |
4afbb24c MF |
893 | } |
894 | ||
895 | static void intel_cqm_event_read(struct perf_event *event) | |
896 | { | |
bff671db | 897 | unsigned long flags; |
adafa999 | 898 | u32 rmid; |
4afbb24c MF |
899 | u64 val; |
900 | ||
bfe1fcd2 MF |
901 | /* |
902 | * Task events are handled by intel_cqm_event_count(). | |
903 | */ | |
904 | if (event->cpu == -1) | |
905 | return; | |
906 | ||
bff671db | 907 | raw_spin_lock_irqsave(&cache_lock, flags); |
bfe1fcd2 | 908 | rmid = event->hw.cqm_rmid; |
bff671db MF |
909 | |
910 | if (!__rmid_valid(rmid)) | |
911 | goto out; | |
912 | ||
4afbb24c MF |
913 | val = __rmid_read(rmid); |
914 | ||
915 | /* | |
916 | * Ignore this reading on error states and do not update the value. | |
917 | */ | |
918 | if (val & (RMID_VAL_ERROR | RMID_VAL_UNAVAIL)) | |
bff671db | 919 | goto out; |
4afbb24c MF |
920 | |
921 | local64_set(&event->count, val); | |
bff671db MF |
922 | out: |
923 | raw_spin_unlock_irqrestore(&cache_lock, flags); | |
4afbb24c MF |
924 | } |
925 | ||
bfe1fcd2 MF |
926 | static void __intel_cqm_event_count(void *info) |
927 | { | |
928 | struct rmid_read *rr = info; | |
929 | u64 val; | |
930 | ||
931 | val = __rmid_read(rr->rmid); | |
932 | ||
933 | if (val & (RMID_VAL_ERROR | RMID_VAL_UNAVAIL)) | |
934 | return; | |
935 | ||
936 | atomic64_add(val, &rr->value); | |
937 | } | |
938 | ||
939 | static inline bool cqm_group_leader(struct perf_event *event) | |
940 | { | |
941 | return !list_empty(&event->hw.cqm_groups_entry); | |
942 | } | |
943 | ||
944 | static u64 intel_cqm_event_count(struct perf_event *event) | |
945 | { | |
bff671db | 946 | unsigned long flags; |
bfe1fcd2 | 947 | struct rmid_read rr = { |
bfe1fcd2 MF |
948 | .value = ATOMIC64_INIT(0), |
949 | }; | |
950 | ||
951 | /* | |
952 | * We only need to worry about task events. System-wide events | |
953 | * are handled like usual, i.e. entirely with | |
954 | * intel_cqm_event_read(). | |
955 | */ | |
956 | if (event->cpu != -1) | |
957 | return __perf_event_count(event); | |
958 | ||
959 | /* | |
a223c1c7 VS |
960 | * Only the group leader gets to report values, except when there are |
961 | * multiple events in the same group; then we still need to read the |
962 | * other events. This stops us |
bfe1fcd2 MF |
963 | * reporting duplicate values to userspace, and gives us a clear |
964 | * rule for which task gets to report the values. | |
965 | * | |
966 | * Note that it is impossible to attribute these values to | |
967 | * specific packages - we forfeit that ability when we create | |
968 | * task events. | |
969 | */ | |
a223c1c7 | 970 | if (!cqm_group_leader(event) && !event->hw.is_group_event) |
bfe1fcd2 MF |
971 | return 0; |
972 | ||
2c534c0d MF |
973 | /* |
974 | * Getting up-to-date values requires an SMP IPI which is not | |
975 | * possible if we're being called in interrupt context. Return | |
976 | * the cached values instead. | |
977 | */ | |
978 | if (unlikely(in_interrupt())) | |
979 | goto out; | |
980 | ||
bff671db MF |
981 | /* |
982 | * Notice that we don't perform the reading of an RMID | |
983 | * atomically, because we can't hold a spin lock across the | |
984 | * IPIs. | |
985 | * | |
986 | * Speculatively perform the read, since @event might be | |
987 | * assigned a different (possibly invalid) RMID while we're | |
988 | * busy performing the IPI calls. It's therefore necessary to |
989 | * check @event's RMID afterwards, and if it has changed, | |
990 | * discard the result of the read. | |
991 | */ | |
992 | rr.rmid = ACCESS_ONCE(event->hw.cqm_rmid); | |
bfe1fcd2 | 993 | |
bff671db MF |
994 | if (!__rmid_valid(rr.rmid)) |
995 | goto out; | |
996 | ||
997 | on_each_cpu_mask(&cqm_cpumask, __intel_cqm_event_count, &rr, 1); | |
bfe1fcd2 | 998 | |
bff671db MF |
999 | raw_spin_lock_irqsave(&cache_lock, flags); |
1000 | if (event->hw.cqm_rmid == rr.rmid) | |
1001 | local64_set(&event->count, atomic64_read(&rr.value)); | |
1002 | raw_spin_unlock_irqrestore(&cache_lock, flags); | |
1003 | out: | |
bfe1fcd2 MF |
1004 | return __perf_event_count(event); |
1005 | } | |
1006 | ||
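The speculative-read-then-recheck pattern described in intel_cqm_event_count() above generalizes beyond this driver. The following userspace sketch (all names hypothetical, a pthread mutex standing in for cache_lock) shows its shape: snapshot the RMID without the lock, do the expensive read, then only commit the result if the RMID is unchanged when re-checked under the lock.

```c
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Sketch of the speculative-read pattern: snapshot the identifier
 * without the lock, do the expensive work, then re-check under the
 * lock and discard the result if the identifier changed meanwhile.
 * counted_event, expensive_read() and update_count() are hypothetical;
 * the pthread mutex stands in for cache_lock.
 */
struct counted_event {
	pthread_mutex_t lock;
	uint32_t rmid;
	uint64_t count;
};

static uint64_t expensive_read(uint32_t rmid)
{
	return (uint64_t)rmid * 1000;	/* stand-in for the cross-CPU read */
}

static void update_count(struct counted_event *ev)
{
	uint32_t snap = __atomic_load_n(&ev->rmid, __ATOMIC_RELAXED);
	uint64_t val = expensive_read(snap);

	pthread_mutex_lock(&ev->lock);
	if (ev->rmid == snap)		/* RMID unchanged: commit the value */
		ev->count = val;
	pthread_mutex_unlock(&ev->lock);
}

int main(void)
{
	struct counted_event ev = { PTHREAD_MUTEX_INITIALIZER, 5, 0 };

	update_count(&ev);
	printf("count=%llu\n", (unsigned long long)ev.count);
	return 0;
}
```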
4afbb24c MF |
1007 | static void intel_cqm_event_start(struct perf_event *event, int mode) |
1008 | { | |
bf926731 | 1009 | struct intel_pqr_state *state = this_cpu_ptr(&pqr_state); |
b3df4ec4 | 1010 | u32 rmid = event->hw.cqm_rmid; |
4afbb24c MF |
1011 | |
1012 | if (!(event->hw.cqm_state & PERF_HES_STOPPED)) | |
1013 | return; | |
1014 | ||
1015 | event->hw.cqm_state &= ~PERF_HES_STOPPED; | |
1016 | ||
bf926731 | 1017 | if (state->rmid_usecnt++) { |
0bac2378 TG |
1018 | if (!WARN_ON_ONCE(state->rmid != rmid)) |
1019 | return; | |
1020 | } else { | |
4afbb24c | 1021 | WARN_ON_ONCE(state->rmid); |
0bac2378 | 1022 | } |
4afbb24c MF |
1023 | |
1024 | state->rmid = rmid; | |
bf926731 | 1025 | wrmsr(MSR_IA32_PQR_ASSOC, rmid, state->closid); |
4afbb24c MF |
1026 | } |
1027 | ||
1028 | static void intel_cqm_event_stop(struct perf_event *event, int mode) | |
1029 | { | |
bf926731 | 1030 | struct intel_pqr_state *state = this_cpu_ptr(&pqr_state); |
4afbb24c MF |
1031 | |
1032 | if (event->hw.cqm_state & PERF_HES_STOPPED) | |
1033 | return; | |
1034 | ||
1035 | event->hw.cqm_state |= PERF_HES_STOPPED; | |
1036 | ||
4afbb24c MF |
1037 | intel_cqm_event_read(event); |
1038 | ||
bf926731 | 1039 | if (!--state->rmid_usecnt) { |
4afbb24c | 1040 | state->rmid = 0; |
bf926731 | 1041 | wrmsr(MSR_IA32_PQR_ASSOC, 0, state->closid); |
4afbb24c MF |
1042 | } else { |
1043 | WARN_ON_ONCE(!state->rmid); | |
1044 | } | |
4afbb24c MF |
1045 | } |
1046 | ||
1047 | static int intel_cqm_event_add(struct perf_event *event, int mode) | |
1048 | { | |
bff671db | 1049 | unsigned long flags; |
adafa999 | 1050 | u32 rmid; |
bff671db MF |
1051 | |
1052 | raw_spin_lock_irqsave(&cache_lock, flags); | |
4afbb24c MF |
1053 | |
1054 | event->hw.cqm_state = PERF_HES_STOPPED; | |
1055 | rmid = event->hw.cqm_rmid; | |
4afbb24c | 1056 | |
bff671db | 1057 | if (__rmid_valid(rmid) && (mode & PERF_EF_START)) |
4afbb24c MF |
1058 | intel_cqm_event_start(event, mode); |
1059 | ||
bff671db MF |
1060 | raw_spin_unlock_irqrestore(&cache_lock, flags); |
1061 | ||
4afbb24c MF |
1062 | return 0; |
1063 | } | |
1064 | ||
4afbb24c MF |
1065 | static void intel_cqm_event_destroy(struct perf_event *event) |
1066 | { | |
1067 | struct perf_event *group_other = NULL; | |
1068 | ||
1069 | mutex_lock(&cache_mutex); | |
1070 | ||
1071 | /* | |
1072 | * If there's another event in this group... | |
1073 | */ | |
1074 | if (!list_empty(&event->hw.cqm_group_entry)) { | |
1075 | group_other = list_first_entry(&event->hw.cqm_group_entry, | |
1076 | struct perf_event, | |
1077 | hw.cqm_group_entry); | |
1078 | list_del(&event->hw.cqm_group_entry); | |
1079 | } | |
1080 | ||
1081 | /* | |
1082 | * And we're the group leader.. | |
1083 | */ | |
bfe1fcd2 | 1084 | if (cqm_group_leader(event)) { |
4afbb24c MF |
1085 | /* |
1086 | * If there was a group_other, make that leader, otherwise | |
1087 | * destroy the group and return the RMID. | |
1088 | */ | |
1089 | if (group_other) { | |
1090 | list_replace(&event->hw.cqm_groups_entry, | |
1091 | &group_other->hw.cqm_groups_entry); | |
1092 | } else { | |
adafa999 | 1093 | u32 rmid = event->hw.cqm_rmid; |
4afbb24c | 1094 | |
bff671db MF |
1095 | if (__rmid_valid(rmid)) |
1096 | __put_rmid(rmid); | |
4afbb24c MF |
1097 | list_del(&event->hw.cqm_groups_entry); |
1098 | } | |
1099 | } | |
1100 | ||
1101 | mutex_unlock(&cache_mutex); | |
1102 | } | |
1103 | ||
4afbb24c MF |
1104 | static int intel_cqm_event_init(struct perf_event *event) |
1105 | { | |
1106 | struct perf_event *group = NULL; | |
bff671db | 1107 | bool rotate = false; |
4afbb24c MF |
1108 | |
1109 | if (event->attr.type != intel_cqm_pmu.type) | |
1110 | return -ENOENT; | |
1111 | ||
1112 | if (event->attr.config & ~QOS_EVENT_MASK) | |
1113 | return -EINVAL; | |
1114 | ||
4afbb24c MF |
1115 | /* unsupported modes and filters */ |
1116 | if (event->attr.exclude_user || | |
1117 | event->attr.exclude_kernel || | |
1118 | event->attr.exclude_hv || | |
1119 | event->attr.exclude_idle || | |
1120 | event->attr.exclude_host || | |
1121 | event->attr.exclude_guest || | |
1122 | event->attr.sample_period) /* no sampling */ | |
1123 | return -EINVAL; | |
1124 | ||
1125 | INIT_LIST_HEAD(&event->hw.cqm_group_entry); | |
1126 | INIT_LIST_HEAD(&event->hw.cqm_groups_entry); | |
1127 | ||
1128 | event->destroy = intel_cqm_event_destroy; | |
1129 | ||
1130 | mutex_lock(&cache_mutex); | |
1131 | ||
bfe1fcd2 | 1132 | /* Will also set rmid */ |
59bf7fd4 | 1133 | intel_cqm_setup_event(event, &group); |
4afbb24c MF |
1134 | |
1135 | if (group) { | |
1136 | list_add_tail(&event->hw.cqm_group_entry, | |
1137 | &group->hw.cqm_group_entry); | |
1138 | } else { | |
1139 | list_add_tail(&event->hw.cqm_groups_entry, | |
1140 | &cache_groups); | |
bff671db MF |
1141 | |
1142 | /* | |
1143 | * All RMIDs are either in use or have recently been | |
1144 | * used. Kick the rotation worker to clean/free some. | |
1145 | * | |
1146 | * We only do this for the group leader, rather than for | |
1147 | * every event in a group to save on needless work. | |
1148 | */ | |
1149 | if (!__rmid_valid(event->hw.cqm_rmid)) | |
1150 | rotate = true; | |
4afbb24c MF |
1151 | } |
1152 | ||
4afbb24c | 1153 | mutex_unlock(&cache_mutex); |
bff671db MF |
1154 | |
1155 | if (rotate) | |
1156 | schedule_delayed_work(&intel_cqm_rmid_work, 0); | |
1157 | ||
59bf7fd4 | 1158 | return 0; |
4afbb24c MF |
1159 | } |
1160 | ||
1161 | EVENT_ATTR_STR(llc_occupancy, intel_cqm_llc, "event=0x01"); | |
1162 | EVENT_ATTR_STR(llc_occupancy.per-pkg, intel_cqm_llc_pkg, "1"); | |
1163 | EVENT_ATTR_STR(llc_occupancy.unit, intel_cqm_llc_unit, "Bytes"); | |
1164 | EVENT_ATTR_STR(llc_occupancy.scale, intel_cqm_llc_scale, NULL); | |
1165 | EVENT_ATTR_STR(llc_occupancy.snapshot, intel_cqm_llc_snapshot, "1"); | |
1166 | ||
1167 | static struct attribute *intel_cqm_events_attr[] = { | |
1168 | EVENT_PTR(intel_cqm_llc), | |
1169 | EVENT_PTR(intel_cqm_llc_pkg), | |
1170 | EVENT_PTR(intel_cqm_llc_unit), | |
1171 | EVENT_PTR(intel_cqm_llc_scale), | |
1172 | EVENT_PTR(intel_cqm_llc_snapshot), | |
1173 | NULL, | |
1174 | }; | |
1175 | ||
1176 | static struct attribute_group intel_cqm_events_group = { | |
1177 | .name = "events", | |
1178 | .attrs = intel_cqm_events_attr, | |
1179 | }; | |
1180 | ||
1181 | PMU_FORMAT_ATTR(event, "config:0-7"); | |
1182 | static struct attribute *intel_cqm_formats_attr[] = { | |
1183 | &format_attr_event.attr, | |
1184 | NULL, | |
1185 | }; | |
1186 | ||
1187 | static struct attribute_group intel_cqm_format_group = { | |
1188 | .name = "format", | |
1189 | .attrs = intel_cqm_formats_attr, | |
1190 | }; | |
1191 | ||
bff671db MF |
1192 | static ssize_t |
1193 | max_recycle_threshold_show(struct device *dev, struct device_attribute *attr, | |
1194 | char *page) | |
1195 | { | |
1196 | ssize_t rv; | |
1197 | ||
1198 | mutex_lock(&cache_mutex); | |
1199 | rv = snprintf(page, PAGE_SIZE-1, "%u\n", __intel_cqm_max_threshold); | |
1200 | mutex_unlock(&cache_mutex); | |
1201 | ||
1202 | return rv; | |
1203 | } | |
1204 | ||
1205 | static ssize_t | |
1206 | max_recycle_threshold_store(struct device *dev, | |
1207 | struct device_attribute *attr, | |
1208 | const char *buf, size_t count) | |
1209 | { | |
1210 | unsigned int bytes, cachelines; | |
1211 | int ret; | |
1212 | ||
1213 | ret = kstrtouint(buf, 0, &bytes); | |
1214 | if (ret) | |
1215 | return ret; | |
1216 | ||
1217 | mutex_lock(&cache_mutex); | |
1218 | ||
1219 | __intel_cqm_max_threshold = bytes; | |
1220 | cachelines = bytes / cqm_l3_scale; | |
1221 | ||
1222 | /* | |
1223 | * The new maximum takes effect immediately. | |
1224 | */ | |
1225 | if (__intel_cqm_threshold > cachelines) | |
1226 | __intel_cqm_threshold = cachelines; | |
1227 | ||
1228 | mutex_unlock(&cache_mutex); | |
1229 | ||
1230 | return count; | |
1231 | } | |
1232 | ||
1233 | static DEVICE_ATTR_RW(max_recycle_threshold); | |
1234 | ||
1235 | static struct attribute *intel_cqm_attrs[] = { | |
1236 | &dev_attr_max_recycle_threshold.attr, | |
1237 | NULL, | |
1238 | }; | |
1239 | ||
1240 | static const struct attribute_group intel_cqm_group = { | |
1241 | .attrs = intel_cqm_attrs, | |
1242 | }; | |
1243 | ||
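max_recycle_threshold is an ordinary read/write sysfs attribute; a short sketch of adjusting it from userspace follows. The sysfs path assumes the usual event_source layout for a PMU registered as "intel_cqm", and the value 32768 is purely illustrative.

```c
#include <stdio.h>

/*
 * Sketch: raise the recycling threshold to 32768 bytes. The store
 * handler above parses the value with kstrtouint() and clamps the
 * live threshold (in cachelines) accordingly. Path and value are
 * illustrative assumptions.
 */
int main(void)
{
	const char *path =
		"/sys/bus/event_source/devices/intel_cqm/max_recycle_threshold";
	FILE *f = fopen(path, "w");

	if (!f)
		return 1;

	fprintf(f, "%u\n", 32768u);
	fclose(f);
	return 0;
}
```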
4afbb24c MF |
1244 | static const struct attribute_group *intel_cqm_attr_groups[] = { |
1245 | &intel_cqm_events_group, | |
1246 | &intel_cqm_format_group, | |
bff671db | 1247 | &intel_cqm_group, |
4afbb24c MF |
1248 | NULL, |
1249 | }; | |
1250 | ||
1251 | static struct pmu intel_cqm_pmu = { | |
bff671db MF |
1252 | .hrtimer_interval_ms = RMID_DEFAULT_QUEUE_TIME, |
1253 | .attr_groups = intel_cqm_attr_groups, | |
1254 | .task_ctx_nr = perf_sw_context, | |
1255 | .event_init = intel_cqm_event_init, | |
1256 | .add = intel_cqm_event_add, | |
43d0c2f6 | 1257 | .del = intel_cqm_event_stop, |
bff671db MF |
1258 | .start = intel_cqm_event_start, |
1259 | .stop = intel_cqm_event_stop, | |
1260 | .read = intel_cqm_event_read, | |
1261 | .count = intel_cqm_event_count, | |
4afbb24c MF |
1262 | }; |
1263 | ||
1264 | static inline void cqm_pick_event_reader(int cpu) | |
1265 | { | |
827db839 | 1266 | int reader; |
4afbb24c | 1267 | |
827db839 TG |
1268 | /* First online cpu in package becomes the reader */ |
1269 | reader = cpumask_any_and(&cqm_cpumask, topology_core_cpumask(cpu)); | |
1270 | if (reader >= nr_cpu_ids) | |
1271 | cpumask_set_cpu(cpu, &cqm_cpumask); | |
4afbb24c MF |
1272 | } |
1273 | ||
d7a702f0 | 1274 | static void intel_cqm_cpu_starting(unsigned int cpu) |
4afbb24c | 1275 | { |
bf926731 | 1276 | struct intel_pqr_state *state = &per_cpu(pqr_state, cpu); |
4afbb24c MF |
1277 | struct cpuinfo_x86 *c = &cpu_data(cpu); |
1278 | ||
4afbb24c | 1279 | state->rmid = 0; |
bf926731 TG |
1280 | state->closid = 0; |
1281 | state->rmid_usecnt = 0; | |
4afbb24c MF |
1282 | |
1283 | WARN_ON(c->x86_cache_max_rmid != cqm_max_rmid); | |
1284 | WARN_ON(c->x86_cache_occ_scale != cqm_l3_scale); | |
1285 | } | |
1286 | ||
1287 | static void intel_cqm_cpu_exit(unsigned int cpu) | |
1288 | { | |
827db839 | 1289 | int target; |
4afbb24c | 1290 | |
827db839 | 1291 | /* Is @cpu the current cqm reader for this package ? */ |
4afbb24c MF |
1292 | if (!cpumask_test_and_clear_cpu(cpu, &cqm_cpumask)) |
1293 | return; | |
1294 | ||
827db839 TG |
1295 | /* Find another online reader in this package */ |
1296 | target = cpumask_any_but(topology_core_cpumask(cpu), cpu); | |
4afbb24c | 1297 | |
827db839 TG |
1298 | if (target < nr_cpu_ids) |
1299 | cpumask_set_cpu(target, &cqm_cpumask); | |
4afbb24c MF |
1300 | } |
1301 | ||
1302 | static int intel_cqm_cpu_notifier(struct notifier_block *nb, | |
1303 | unsigned long action, void *hcpu) | |
1304 | { | |
1305 | unsigned int cpu = (unsigned long)hcpu; | |
1306 | ||
1307 | switch (action & ~CPU_TASKS_FROZEN) { | |
4afbb24c MF |
1308 | case CPU_DOWN_PREPARE: |
1309 | intel_cqm_cpu_exit(cpu); | |
1310 | break; | |
1311 | case CPU_STARTING: | |
d7a702f0 | 1312 | intel_cqm_cpu_starting(cpu); |
4afbb24c MF |
1313 | cqm_pick_event_reader(cpu); |
1314 | break; | |
1315 | } | |
1316 | ||
1317 | return NOTIFY_OK; | |
1318 | } | |
1319 | ||
1320 | static const struct x86_cpu_id intel_cqm_match[] = { | |
1321 | { .vendor = X86_VENDOR_INTEL, .feature = X86_FEATURE_CQM_OCCUP_LLC }, | |
1322 | {} | |
1323 | }; | |
1324 | ||
1325 | static int __init intel_cqm_init(void) | |
1326 | { | |
ada2f634 | 1327 | char *str = NULL, scale[20]; |
4afbb24c MF |
1328 | int i, cpu, ret; |
1329 | ||
1330 | if (!x86_match_cpu(intel_cqm_match)) | |
1331 | return -ENODEV; | |
1332 | ||
1333 | cqm_l3_scale = boot_cpu_data.x86_cache_occ_scale; | |
1334 | ||
1335 | /* | |
1336 | * It's possible that not all resources support the same number | |
1337 | * of RMIDs. Instead of making scheduling much more complicated | |
1338 | * (where we have to match a task's RMID to a cpu that supports | |
1339 | * that many RMIDs) just find the minimum RMIDs supported across | |
1340 | * all cpus. | |
1341 | * | |
1342 | * Also, check that the scales match on all cpus. | |
1343 | */ | |
1344 | cpu_notifier_register_begin(); | |
1345 | ||
1346 | for_each_online_cpu(cpu) { | |
1347 | struct cpuinfo_x86 *c = &cpu_data(cpu); | |
1348 | ||
1349 | if (c->x86_cache_max_rmid < cqm_max_rmid) | |
1350 | cqm_max_rmid = c->x86_cache_max_rmid; | |
1351 | ||
1352 | if (c->x86_cache_occ_scale != cqm_l3_scale) { | |
1353 | pr_err("Multiple LLC scale values, disabling\n"); | |
1354 | ret = -EINVAL; | |
1355 | goto out; | |
1356 | } | |
1357 | } | |
1358 | ||
bff671db MF |
1359 | /* |
1360 | * A reasonable upper limit on the max threshold is the number | |
1361 | * of lines tagged per RMID if all RMIDs have the same number of | |
1362 | * lines tagged in the LLC. | |
1363 | * | |
1364 | * For a 35MB LLC and 56 RMIDs, this is ~1.8% of the LLC. | |
1365 | */ | |
1366 | __intel_cqm_max_threshold = | |
1367 | boot_cpu_data.x86_cache_size * 1024 / (cqm_max_rmid + 1); | |
1368 | ||
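	/*
	 * Worked example with illustrative numbers: a 35 MB LLC is reported
	 * as x86_cache_size = 35840 (KB); with cqm_max_rmid = 55 (56 RMIDs)
	 * this gives 35840 * 1024 / 56 = 655360 bytes (640 KB) per RMID,
	 * i.e. roughly 1.8% of the LLC, matching the comment above.
	 */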
4afbb24c MF |
1369 | snprintf(scale, sizeof(scale), "%u", cqm_l3_scale); |
1370 | str = kstrdup(scale, GFP_KERNEL); | |
1371 | if (!str) { | |
1372 | ret = -ENOMEM; | |
1373 | goto out; | |
1374 | } | |
1375 | ||
1376 | event_attr_intel_cqm_llc_scale.event_str = str; | |
1377 | ||
1378 | ret = intel_cqm_setup_rmid_cache(); | |
1379 | if (ret) | |
1380 | goto out; | |
1381 | ||
1382 | for_each_online_cpu(i) { | |
d7a702f0 | 1383 | intel_cqm_cpu_starting(i); |
4afbb24c MF |
1384 | cqm_pick_event_reader(i); |
1385 | } | |
1386 | ||
50f16a8b | 1387 | ret = perf_pmu_register(&intel_cqm_pmu, "intel_cqm", -1); |
ada2f634 | 1388 | if (ret) { |
4afbb24c | 1389 | pr_err("Intel CQM perf registration failed: %d\n", ret); |
ada2f634 VS |
1390 | goto out; |
1391 | } | |
4afbb24c | 1392 | |
ada2f634 VS |
1393 | pr_info("Intel CQM monitoring enabled\n"); |
1394 | ||
1395 | /* | |
1396 | * Register the hot cpu notifier once we are sure cqm | |
1397 | * is enabled to avoid notifier leak. | |
1398 | */ | |
1399 | __perf_cpu_notifier(intel_cqm_cpu_notifier); | |
4afbb24c MF |
1400 | out: |
1401 | cpu_notifier_register_done(); | |
ada2f634 VS |
1402 | if (ret) { |
1403 | kfree(str); | |
1404 | cqm_cleanup(); | |
1405 | } | |
4afbb24c MF |
1406 | |
1407 | return ret; | |
1408 | } | |
1409 | device_initcall(intel_cqm_init); |