/*
 * Intel Cache Quality-of-Service Monitoring (CQM) support.
 *
 * Based very, very heavily on work by Peter Zijlstra.
 */

#include <linux/perf_event.h>
#include <linux/slab.h>
#include <asm/cpu_device_id.h>
#include "../perf_event.h"

#define MSR_IA32_PQR_ASSOC	0x0c8f
#define MSR_IA32_QM_CTR		0x0c8e
#define MSR_IA32_QM_EVTSEL	0x0c8d

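/*
 * MSR roles as used below: PQR_ASSOC associates the currently running
 * task with an RMID (and CLOSID), QM_EVTSEL selects the <event, RMID>
 * pair to query, and QM_CTR returns the corresponding count.
 */
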
static u32 cqm_max_rmid = -1;
static unsigned int cqm_l3_scale; /* supposedly cacheline size */
static bool cqm_enabled, mbm_enabled;

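/*
 * Note that cqm_max_rmid is deliberately initialised to -1 (UINT_MAX):
 * intel_cqm_init() walks the online CPUs and lowers it to the smallest
 * x86_cache_max_rmid value it finds.
 */
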
/**
 * struct intel_pqr_state - State cache for the PQR MSR
 * @rmid:		The cached Resource Monitoring ID
 * @closid:		The cached Class Of Service ID
 * @rmid_usecnt:	The usage counter for rmid
 *
 * The upper 32 bits of MSR_IA32_PQR_ASSOC contain closid and the
 * lower 10 bits rmid. The update to MSR_IA32_PQR_ASSOC always
 * contains both parts, so we need to cache them.
 *
 * The cache also helps to avoid pointless updates if the value does
 * not change.
 */
struct intel_pqr_state {
	u32			rmid;
	u32			closid;
	int			rmid_usecnt;
};

/*
 * The cached intel_pqr_state is strictly per CPU and can never be
 * updated from a remote CPU. Both functions which modify the state
 * (intel_cqm_event_start and intel_cqm_event_stop) are called with
 * interrupts disabled, which is sufficient for the protection.
 */
static DEFINE_PER_CPU(struct intel_pqr_state, pqr_state);

/**
 * struct sample - mbm event's (local or total) data
 * @total_bytes:	#bytes since we began monitoring
 * @prev_msr:		previous value of MSR
 */
struct sample {
	u64	total_bytes;
	u64	prev_msr;
};

/*
 * samples profiled for total memory bandwidth type events
 */
static struct sample *mbm_total;

/*
 * samples profiled for local memory bandwidth type events
 */
static struct sample *mbm_local;

/*
 * Protects cache_cgroups and cqm_rmid_free_lru and cqm_rmid_limbo_lru.
 * Also protects event->hw.cqm_rmid
 *
 * Hold either for stability, both for modification of ->hw.cqm_rmid.
 */
static DEFINE_MUTEX(cache_mutex);
static DEFINE_RAW_SPINLOCK(cache_lock);

/*
 * Groups of events that have the same target(s), one RMID per group.
 */
static LIST_HEAD(cache_groups);

/*
 * Mask of CPUs for reading CQM values. We only need one per-socket.
 */
static cpumask_t cqm_cpumask;

#define RMID_VAL_ERROR		(1ULL << 63)
#define RMID_VAL_UNAVAIL	(1ULL << 62)

#define QOS_L3_OCCUP_EVENT_ID	(1 << 0)

#define QOS_EVENT_MASK	QOS_L3_OCCUP_EVENT_ID

/*
 * This is central to the rotation algorithm in __intel_cqm_rmid_rotate().
 *
 * This rmid is always free and is guaranteed to have an associated
 * near-zero occupancy value, i.e. no cachelines are tagged with this
 * RMID, once __intel_cqm_rmid_rotate() returns.
 */
static u32 intel_cqm_rotation_rmid;

#define INVALID_RMID		(-1)

/*
 * Is @rmid valid for programming the hardware?
 *
 * rmid 0 is reserved by the hardware for all non-monitored tasks, which
 * means that we should never come across an rmid with that value.
 * Likewise, an rmid value of -1 is used to indicate "no rmid currently
 * assigned" and is used as part of the rotation code.
 */
static inline bool __rmid_valid(u32 rmid)
{
	if (!rmid || rmid == INVALID_RMID)
		return false;

	return true;
}

static u64 __rmid_read(u32 rmid)
{
	u64 val;

	/*
	 * Ignore the SDM, this thing is _NOTHING_ like a regular perfcnt,
	 * it just says that to increase confusion.
	 */
	wrmsr(MSR_IA32_QM_EVTSEL, QOS_L3_OCCUP_EVENT_ID, rmid);
	rdmsrl(MSR_IA32_QM_CTR, val);

	/*
	 * Aside from the ERROR and UNAVAIL bits, assume this thing returns
	 * the number of cachelines tagged with @rmid.
	 */
	return val;
}

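/*
 * Recycling state machine for an RMID, as driven by the code below: a
 * freshly retired RMID enters limbo as RMID_YOUNG, becomes
 * RMID_AVAILABLE once it has aged for the minimum queue time, and is
 * marked RMID_DIRTY by intel_cqm_stable() if any package still reports
 * an occupancy above __intel_cqm_threshold.
 */
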
enum rmid_recycle_state {
	RMID_YOUNG = 0,
	RMID_AVAILABLE,
	RMID_DIRTY,
};

struct cqm_rmid_entry {
	u32 rmid;
	enum rmid_recycle_state state;
	struct list_head list;
	unsigned long queue_time;
};

/*
 * cqm_rmid_free_lru - A least recently used list of RMIDs.
 *
 * Oldest entry at the head, newest (most recently used) entry at the
 * tail. This list is never traversed, it's only used to keep track of
 * the lru order. That is, we only pick entries at the head or insert
 * them at the tail.
 *
 * All entries on the list are 'free', and their RMIDs are not currently
 * in use. To mark an RMID as in use, remove its entry from the lru
 * list.
 *
 *
 * cqm_rmid_limbo_lru - list of currently unused but (potentially) dirty RMIDs.
 *
 * This list contains RMIDs that no one is currently using but that
 * may have a non-zero occupancy value associated with them. The
 * rotation worker moves RMIDs from the limbo list to the free list once
 * the occupancy value drops below __intel_cqm_threshold.
 *
 * Both lists are protected by cache_mutex.
 */
static LIST_HEAD(cqm_rmid_free_lru);
static LIST_HEAD(cqm_rmid_limbo_lru);

/*
 * We use a simple array of pointers so that we can lookup a struct
 * cqm_rmid_entry in O(1). This alleviates the callers of __get_rmid()
 * and __put_rmid() from having to worry about dealing with struct
 * cqm_rmid_entry - they just deal with rmids, i.e. integers.
 *
 * Once this array is initialized it is read-only. No locks are required
 * to access it.
 *
 * All entries for all RMIDs can be looked up in this array at all
 * times.
 */
static struct cqm_rmid_entry **cqm_rmid_ptrs;

static inline struct cqm_rmid_entry *__rmid_entry(u32 rmid)
{
	struct cqm_rmid_entry *entry;

	entry = cqm_rmid_ptrs[rmid];
	WARN_ON(entry->rmid != rmid);

	return entry;
}

/*
 * Returns < 0 on fail.
 *
 * We expect to be called with cache_mutex held.
 */
static u32 __get_rmid(void)
{
	struct cqm_rmid_entry *entry;

	lockdep_assert_held(&cache_mutex);

	if (list_empty(&cqm_rmid_free_lru))
		return INVALID_RMID;

	entry = list_first_entry(&cqm_rmid_free_lru, struct cqm_rmid_entry, list);
	list_del(&entry->list);

	return entry->rmid;
}

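/*
 * Return @rmid to the limbo list. It only becomes reusable once the
 * rotation worker has observed its occupancy drop below the current
 * threshold (see intel_cqm_rmid_stabilize()).
 */
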
static void __put_rmid(u32 rmid)
{
	struct cqm_rmid_entry *entry;

	lockdep_assert_held(&cache_mutex);

	WARN_ON(!__rmid_valid(rmid));
	entry = __rmid_entry(rmid);

	entry->queue_time = jiffies;
	entry->state = RMID_YOUNG;

	list_add_tail(&entry->list, &cqm_rmid_limbo_lru);
}

static void cqm_cleanup(void)
{
	int i;

	if (!cqm_rmid_ptrs)
		return;

	/* Free every allocated entry, including the one for cqm_max_rmid. */
	for (i = 0; i <= cqm_max_rmid; i++)
		kfree(cqm_rmid_ptrs[i]);

	kfree(cqm_rmid_ptrs);
	cqm_rmid_ptrs = NULL;
}

static int intel_cqm_setup_rmid_cache(void)
{
	struct cqm_rmid_entry *entry;
	unsigned int nr_rmids;
	int r = 0;

	nr_rmids = cqm_max_rmid + 1;
	cqm_rmid_ptrs = kzalloc(sizeof(struct cqm_rmid_entry *) *
				nr_rmids, GFP_KERNEL);
	if (!cqm_rmid_ptrs)
		return -ENOMEM;

	for (; r <= cqm_max_rmid; r++) {
		struct cqm_rmid_entry *entry;

		entry = kmalloc(sizeof(*entry), GFP_KERNEL);
		if (!entry)
			goto fail;

		INIT_LIST_HEAD(&entry->list);
		entry->rmid = r;
		cqm_rmid_ptrs[r] = entry;

		list_add_tail(&entry->list, &cqm_rmid_free_lru);
	}

	/*
	 * RMID 0 is special and is always allocated. It's used for all
	 * tasks that are not monitored.
	 */
	entry = __rmid_entry(0);
	list_del(&entry->list);

	mutex_lock(&cache_mutex);
	intel_cqm_rotation_rmid = __get_rmid();
	mutex_unlock(&cache_mutex);

	return 0;

fail:
	cqm_cleanup();
	return -ENOMEM;
}

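/*
 * After intel_cqm_setup_rmid_cache() returns, cqm_rmid_ptrs[] holds one
 * entry per RMID in [0, cqm_max_rmid], RMID 0 stays permanently
 * allocated for unmonitored tasks, and intel_cqm_rotation_rmid holds
 * the free RMID reserved for rotation.
 */
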
/*
 * Determine if @a and @b measure the same set of tasks.
 *
 * If @a and @b measure the same set of tasks then we want to share a
 * single RMID.
 */
static bool __match_event(struct perf_event *a, struct perf_event *b)
{
	/* Per-cpu and task events don't mix */
	if ((a->attach_state & PERF_ATTACH_TASK) !=
	    (b->attach_state & PERF_ATTACH_TASK))
		return false;

#ifdef CONFIG_CGROUP_PERF
	if (a->cgrp != b->cgrp)
		return false;
#endif

	/* If not task event, we're machine wide */
	if (!(b->attach_state & PERF_ATTACH_TASK))
		return true;

	/*
	 * Events that target the same task are placed into the same cache
	 * group. Mark it as a multi event group, so that we update ->count
	 * for every event rather than just the group leader later.
	 */
	if (a->hw.target == b->hw.target) {
		b->hw.is_group_event = true;
		return true;
	}

	/*
	 * Are we an inherited event?
	 */
	if (b->parent == a)
		return true;

	return false;
}

#ifdef CONFIG_CGROUP_PERF
static inline struct perf_cgroup *event_to_cgroup(struct perf_event *event)
{
	if (event->attach_state & PERF_ATTACH_TASK)
		return perf_cgroup_from_task(event->hw.target, event->ctx);

	return event->cgrp;
}
#endif

/*
 * Determine if @a's tasks intersect with @b's tasks
 *
 * There are combinations of events that we explicitly prohibit,
 *
 *		   PROHIBITS
 *     system-wide    ->	cgroup and task
 *     cgroup	      ->	system-wide
 *			->	task in cgroup
 *     task	      ->	system-wide
 *			->	task in cgroup
 *
 * Call this function before allocating an RMID.
 */
static bool __conflict_event(struct perf_event *a, struct perf_event *b)
{
#ifdef CONFIG_CGROUP_PERF
	/*
	 * We can have any number of cgroups but only one system-wide
	 * event at a time.
	 */
	if (a->cgrp && b->cgrp) {
		struct perf_cgroup *ac = a->cgrp;
		struct perf_cgroup *bc = b->cgrp;

		/*
		 * This condition should have been caught in
		 * __match_event() and we should be sharing an RMID.
		 */
		WARN_ON_ONCE(ac == bc);

		if (cgroup_is_descendant(ac->css.cgroup, bc->css.cgroup) ||
		    cgroup_is_descendant(bc->css.cgroup, ac->css.cgroup))
			return true;

		return false;
	}

	if (a->cgrp || b->cgrp) {
		struct perf_cgroup *ac, *bc;

		/*
		 * cgroup and system-wide events are mutually exclusive
		 */
		if ((a->cgrp && !(b->attach_state & PERF_ATTACH_TASK)) ||
		    (b->cgrp && !(a->attach_state & PERF_ATTACH_TASK)))
			return true;

		/*
		 * Ensure neither event is part of the other's cgroup
		 */
		ac = event_to_cgroup(a);
		bc = event_to_cgroup(b);
		if (ac == bc)
			return true;

		/*
		 * Must have cgroup and non-intersecting task events.
		 */
		if (!ac || !bc)
			return false;

		/*
		 * We have cgroup and task events, and the task belongs
		 * to a cgroup. Check for overlap.
		 */
		if (cgroup_is_descendant(ac->css.cgroup, bc->css.cgroup) ||
		    cgroup_is_descendant(bc->css.cgroup, ac->css.cgroup))
			return true;

		return false;
	}
#endif
	/*
	 * If one of them is not a task, same story as above with cgroups.
	 */
	if (!(a->attach_state & PERF_ATTACH_TASK) ||
	    !(b->attach_state & PERF_ATTACH_TASK))
		return true;

	/*
	 * Must be non-overlapping.
	 */
	return false;
}

struct rmid_read {
	u32 rmid;
	atomic64_t value;
};

static void __intel_cqm_event_count(void *info);

/*
 * Exchange the RMID of a group of events.
 */
static u32 intel_cqm_xchg_rmid(struct perf_event *group, u32 rmid)
{
	struct perf_event *event;
	struct list_head *head = &group->hw.cqm_group_entry;
	u32 old_rmid = group->hw.cqm_rmid;

	lockdep_assert_held(&cache_mutex);

	/*
	 * If our RMID is being deallocated, perform a read now.
	 */
	if (__rmid_valid(old_rmid) && !__rmid_valid(rmid)) {
		struct rmid_read rr = {
			.value = ATOMIC64_INIT(0),
			.rmid = old_rmid,
		};

		on_each_cpu_mask(&cqm_cpumask, __intel_cqm_event_count,
				 &rr, 1);
		local64_set(&group->count, atomic64_read(&rr.value));
	}

	raw_spin_lock_irq(&cache_lock);

	group->hw.cqm_rmid = rmid;
	list_for_each_entry(event, head, hw.cqm_group_entry)
		event->hw.cqm_rmid = rmid;

	raw_spin_unlock_irq(&cache_lock);

	return old_rmid;
}

/*
 * If we fail to assign a new RMID for intel_cqm_rotation_rmid because
 * cachelines are still tagged with RMIDs in limbo, we progressively
 * increment the threshold until we find an RMID in limbo with <=
 * __intel_cqm_threshold lines tagged. This is designed to mitigate the
 * problem where cachelines tagged with an RMID are not steadily being
 * evicted.
 *
 * On successful rotations we decrease the threshold back towards zero.
 *
 * __intel_cqm_max_threshold provides an upper bound on the threshold,
 * and is measured in bytes because it's exposed to userland.
 */
static unsigned int __intel_cqm_threshold;
static unsigned int __intel_cqm_max_threshold;

/*
 * Test whether an RMID has a zero occupancy value on this cpu.
 */
static void intel_cqm_stable(void *arg)
{
	struct cqm_rmid_entry *entry;

	list_for_each_entry(entry, &cqm_rmid_limbo_lru, list) {
		if (entry->state != RMID_AVAILABLE)
			break;

		if (__rmid_read(entry->rmid) > __intel_cqm_threshold)
			entry->state = RMID_DIRTY;
	}
}

/*
 * If we have group events waiting for an RMID that don't conflict with
 * events already running, assign @rmid.
 */
static bool intel_cqm_sched_in_event(u32 rmid)
{
	struct perf_event *leader, *event;

	lockdep_assert_held(&cache_mutex);

	leader = list_first_entry(&cache_groups, struct perf_event,
				  hw.cqm_groups_entry);
	event = leader;

	list_for_each_entry_continue(event, &cache_groups,
				     hw.cqm_groups_entry) {
		if (__rmid_valid(event->hw.cqm_rmid))
			continue;

		if (__conflict_event(event, leader))
			continue;

		intel_cqm_xchg_rmid(event, rmid);
		return true;
	}

	return false;
}

/*
 * Initially use this constant for both the limbo queue time and the
 * rotation timer interval, pmu::hrtimer_interval_ms.
 *
 * They don't need to be the same, but the two are related since if you
 * rotate faster than you recycle RMIDs, you may run out of available
 * RMIDs.
 */
#define RMID_DEFAULT_QUEUE_TIME 250	/* ms */

static unsigned int __rmid_queue_time_ms = RMID_DEFAULT_QUEUE_TIME;

/*
 * intel_cqm_rmid_stabilize - move RMIDs from limbo to free list
 * @nr_available: number of freeable RMIDs on the limbo list
 *
 * Quiescent state; wait for all 'freed' RMIDs to become unused, i.e. no
 * cachelines are tagged with those RMIDs. After this we can reuse them
 * and know that the current set of active RMIDs is stable.
 *
 * Return %true or %false depending on whether stabilization needs to be
 * reattempted.
 *
 * If we return %true then @nr_available is updated to indicate the
 * number of RMIDs on the limbo list that have been queued for the
 * minimum queue time (RMID_AVAILABLE), but whose data occupancy values
 * are above __intel_cqm_threshold.
 */
static bool intel_cqm_rmid_stabilize(unsigned int *available)
{
	struct cqm_rmid_entry *entry, *tmp;

	lockdep_assert_held(&cache_mutex);

	*available = 0;
	list_for_each_entry(entry, &cqm_rmid_limbo_lru, list) {
		unsigned long min_queue_time;
		unsigned long now = jiffies;

		/*
		 * We hold RMIDs placed into limbo for a minimum queue
		 * time. Before the minimum queue time has elapsed we do
		 * not recycle RMIDs.
		 *
		 * The reasoning is that until a sufficient time has
		 * passed since we stopped using an RMID, any RMID
		 * placed onto the limbo list will likely still have
		 * data tagged in the cache, which means we'll probably
		 * fail to recycle it anyway.
		 *
		 * We can save ourselves an expensive IPI by skipping
		 * any RMIDs that have not been queued for the minimum
		 * time.
		 */
		min_queue_time = entry->queue_time +
			msecs_to_jiffies(__rmid_queue_time_ms);

		if (time_after(min_queue_time, now))
			break;

		entry->state = RMID_AVAILABLE;
		(*available)++;
	}

	/*
	 * Fast return if none of the RMIDs on the limbo list have been
	 * sitting on the queue for the minimum queue time.
	 */
	if (!*available)
		return false;

	/*
	 * Test whether an RMID is free for each package.
	 */
	on_each_cpu_mask(&cqm_cpumask, intel_cqm_stable, NULL, true);

	list_for_each_entry_safe(entry, tmp, &cqm_rmid_limbo_lru, list) {
		/*
		 * Exhausted all RMIDs that have waited min queue time.
		 */
		if (entry->state == RMID_YOUNG)
			break;

		if (entry->state == RMID_DIRTY)
			continue;

		list_del(&entry->list);	/* remove from limbo */

		/*
		 * The rotation RMID gets priority if it's
		 * currently invalid. In which case, skip adding
		 * the RMID to the free lru.
		 */
		if (!__rmid_valid(intel_cqm_rotation_rmid)) {
			intel_cqm_rotation_rmid = entry->rmid;
			continue;
		}

		/*
		 * If we have groups waiting for RMIDs, hand
		 * them one now provided they don't conflict.
		 */
		if (intel_cqm_sched_in_event(entry->rmid))
			continue;

		/*
		 * Otherwise place it onto the free list.
		 */
		list_add_tail(&entry->list, &cqm_rmid_free_lru);
	}

	return __rmid_valid(intel_cqm_rotation_rmid);
}

/*
 * Pick a victim group and move it to the tail of the group list.
 * @next: The first group without an RMID
 */
static void __intel_cqm_pick_and_rotate(struct perf_event *next)
{
	struct perf_event *rotor;
	u32 rmid;

	lockdep_assert_held(&cache_mutex);

	rotor = list_first_entry(&cache_groups, struct perf_event,
				 hw.cqm_groups_entry);

	/*
	 * The group at the front of the list should always have a valid
	 * RMID. If it doesn't then no groups have RMIDs assigned and we
	 * don't need to rotate the list.
	 */
	if (next == rotor)
		return;

	rmid = intel_cqm_xchg_rmid(rotor, INVALID_RMID);
	__put_rmid(rmid);

	list_rotate_left(&cache_groups);
}

/*
 * Deallocate the RMIDs from any events that conflict with @event, and
 * place them on the back of the group list.
 */
static void intel_cqm_sched_out_conflicting_events(struct perf_event *event)
{
	struct perf_event *group, *g;
	u32 rmid;

	lockdep_assert_held(&cache_mutex);

	list_for_each_entry_safe(group, g, &cache_groups, hw.cqm_groups_entry) {
		if (group == event)
			continue;

		rmid = group->hw.cqm_rmid;

		/*
		 * Skip events that don't have a valid RMID.
		 */
		if (!__rmid_valid(rmid))
			continue;

		/*
		 * No conflict? No problem! Leave the event alone.
		 */
		if (!__conflict_event(group, event))
			continue;

		intel_cqm_xchg_rmid(group, INVALID_RMID);
		__put_rmid(rmid);
	}
}

/*
 * Attempt to rotate the groups and assign new RMIDs.
 *
 * We rotate for two reasons,
 *   1. To handle the scheduling of conflicting events
 *   2. To recycle RMIDs
 *
 * Rotating RMIDs is complicated because the hardware doesn't give us
 * any clues.
 *
 * There's problems with the hardware interface; when you change the
 * task:RMID map cachelines retain their 'old' tags, giving a skewed
 * picture. In order to work around this, we must always keep one free
 * RMID - intel_cqm_rotation_rmid.
 *
 * Rotation works by taking away an RMID from a group (the old RMID),
 * and assigning the free RMID to another group (the new RMID). We must
 * then wait for the old RMID to not be used (no cachelines tagged).
 * This ensures that all cachelines are tagged with 'active' RMIDs. At
 * this point we can start reading values for the new RMID and treat the
 * old RMID as the free RMID for the next rotation.
 *
 * Return %true or %false depending on whether we did any rotating.
 */
static bool __intel_cqm_rmid_rotate(void)
{
	struct perf_event *group, *start = NULL;
	unsigned int threshold_limit;
	unsigned int nr_needed = 0;
	unsigned int nr_available;
	bool rotated = false;

	mutex_lock(&cache_mutex);

again:
	/*
	 * Fast path through this function if there are no groups and no
	 * RMIDs that need cleaning.
	 */
	if (list_empty(&cache_groups) && list_empty(&cqm_rmid_limbo_lru))
		goto out;

	list_for_each_entry(group, &cache_groups, hw.cqm_groups_entry) {
		if (!__rmid_valid(group->hw.cqm_rmid)) {
			if (!start)
				start = group;
			nr_needed++;
		}
	}

	/*
	 * We have some event groups, but they all have RMIDs assigned
	 * and no RMIDs need cleaning.
	 */
	if (!nr_needed && list_empty(&cqm_rmid_limbo_lru))
		goto out;

	if (!nr_needed)
		goto stabilize;

	/*
	 * We have more event groups without RMIDs than available RMIDs,
	 * or we have event groups that conflict with the ones currently
	 * scheduled.
	 *
	 * We force deallocate the rmid of the group at the head of
	 * cache_groups. The first event group without an RMID then gets
	 * assigned intel_cqm_rotation_rmid. This ensures we always make
	 * forward progress.
	 *
	 * Rotate the cache_groups list so the previous head is now the
	 * tail.
	 */
	__intel_cqm_pick_and_rotate(start);

	/*
	 * If the rotation is going to succeed, reduce the threshold so
	 * that we don't needlessly reuse dirty RMIDs.
	 */
	if (__rmid_valid(intel_cqm_rotation_rmid)) {
		intel_cqm_xchg_rmid(start, intel_cqm_rotation_rmid);
		intel_cqm_rotation_rmid = __get_rmid();

		intel_cqm_sched_out_conflicting_events(start);

		if (__intel_cqm_threshold)
			__intel_cqm_threshold--;
	}

	rotated = true;

stabilize:
	/*
	 * We now need to stabilize the RMID we freed above (if any) to
	 * ensure that the next time we rotate we have an RMID with zero
	 * occupancy value.
	 *
	 * Alternatively, if we didn't need to perform any rotation,
	 * we'll have a bunch of RMIDs in limbo that need stabilizing.
	 */
	threshold_limit = __intel_cqm_max_threshold / cqm_l3_scale;

	while (intel_cqm_rmid_stabilize(&nr_available) &&
	       __intel_cqm_threshold < threshold_limit) {
		unsigned int steal_limit;

		/*
		 * Don't spin if nobody is actively waiting for an RMID,
		 * the rotation worker will be kicked as soon as an
		 * event needs an RMID anyway.
		 */
		if (!nr_needed)
			break;

		/* Allow max 25% of RMIDs to be in limbo. */
		steal_limit = (cqm_max_rmid + 1) / 4;

		/*
		 * We failed to stabilize any RMIDs so our rotation
		 * logic is now stuck. In order to make forward progress
		 * we have a few options:
		 *
		 * 1. rotate ("steal") another RMID
		 * 2. increase the threshold
		 *
		 * We do both of 1. and 2. until we hit the steal limit.
		 *
		 * The steal limit prevents all RMIDs ending up on the
		 * limbo list. This can happen if every RMID has a
		 * non-zero occupancy above threshold_limit, and the
		 * occupancy values aren't dropping fast enough.
		 *
		 * Note that there is prioritisation at work here - we'd
		 * rather increase the number of RMIDs on the limbo list
		 * than increase the threshold, because increasing the
		 * threshold skews the event data (because we reuse
		 * dirty RMIDs) - threshold bumps are a last resort.
		 */
		if (nr_available < steal_limit)
			goto again;

		__intel_cqm_threshold++;
	}

out:
	mutex_unlock(&cache_mutex);
	return rotated;
}

static void intel_cqm_rmid_rotate(struct work_struct *work);

static DECLARE_DELAYED_WORK(intel_cqm_rmid_work, intel_cqm_rmid_rotate);

static struct pmu intel_cqm_pmu;

static void intel_cqm_rmid_rotate(struct work_struct *work)
{
	unsigned long delay;

	__intel_cqm_rmid_rotate();

	delay = msecs_to_jiffies(intel_cqm_pmu.hrtimer_interval_ms);
	schedule_delayed_work(&intel_cqm_rmid_work, delay);
}

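/*
 * The rotation worker is self-rearming: it is first kicked from
 * intel_cqm_event_init() when a new group fails to get an RMID, and it
 * then re-schedules itself every intel_cqm_pmu.hrtimer_interval_ms.
 */
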
/*
 * Find a group and setup RMID.
 *
 * If we're part of a group, we use the group's RMID.
 */
static void intel_cqm_setup_event(struct perf_event *event,
				  struct perf_event **group)
{
	struct perf_event *iter;
	bool conflict = false;
	u32 rmid;

	event->hw.is_group_event = false;
	list_for_each_entry(iter, &cache_groups, hw.cqm_groups_entry) {
		rmid = iter->hw.cqm_rmid;

		if (__match_event(iter, event)) {
			/* All tasks in a group share an RMID */
			event->hw.cqm_rmid = rmid;
			*group = iter;
			return;
		}

		/*
		 * We only care about conflicts for events that are
		 * actually scheduled in (and hence have a valid RMID).
		 */
		if (__conflict_event(iter, event) && __rmid_valid(rmid))
			conflict = true;
	}

	if (conflict)
		rmid = INVALID_RMID;
	else
		rmid = __get_rmid();

	event->hw.cqm_rmid = rmid;
}

static void intel_cqm_event_read(struct perf_event *event)
{
	unsigned long flags;
	u32 rmid;
	u64 val;

	/*
	 * Task events are handled by intel_cqm_event_count().
	 */
	if (event->cpu == -1)
		return;

	raw_spin_lock_irqsave(&cache_lock, flags);
	rmid = event->hw.cqm_rmid;

	if (!__rmid_valid(rmid))
		goto out;

	val = __rmid_read(rmid);

	/*
	 * Ignore this reading on error states and do not update the value.
	 */
	if (val & (RMID_VAL_ERROR | RMID_VAL_UNAVAIL))
		goto out;

	local64_set(&event->count, val);
out:
	raw_spin_unlock_irqrestore(&cache_lock, flags);
}

static void __intel_cqm_event_count(void *info)
{
	struct rmid_read *rr = info;
	u64 val;

	val = __rmid_read(rr->rmid);

	if (val & (RMID_VAL_ERROR | RMID_VAL_UNAVAIL))
		return;

	atomic64_add(val, &rr->value);
}

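/*
 * A group leader is an event that sits directly on cache_groups via
 * hw.cqm_groups_entry; the other members of its group hang off the
 * leader through hw.cqm_group_entry and share the leader's RMID.
 */
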
static inline bool cqm_group_leader(struct perf_event *event)
{
	return !list_empty(&event->hw.cqm_groups_entry);
}

static u64 intel_cqm_event_count(struct perf_event *event)
{
	unsigned long flags;
	struct rmid_read rr = {
		.value = ATOMIC64_INIT(0),
	};

	/*
	 * We only need to worry about task events. System-wide events
	 * are handled like usual, i.e. entirely with
	 * intel_cqm_event_read().
	 */
	if (event->cpu != -1)
		return __perf_event_count(event);

	/*
	 * Only the group leader gets to report values except in case of
	 * multiple events in the same group, we still need to read the
	 * other events. This stops us reporting duplicate values to
	 * userspace, and gives us a clear rule for which task gets to
	 * report the values.
	 *
	 * Note that it is impossible to attribute these values to
	 * specific packages - we forfeit that ability when we create
	 * task events.
	 */
	if (!cqm_group_leader(event) && !event->hw.is_group_event)
		return 0;

	/*
	 * Getting up-to-date values requires an SMP IPI which is not
	 * possible if we're being called in interrupt context. Return
	 * the cached values instead.
	 */
	if (unlikely(in_interrupt()))
		goto out;

	/*
	 * Notice that we don't perform the reading of an RMID
	 * atomically, because we can't hold a spin lock across the
	 * IPIs.
	 *
	 * Speculatively perform the read, since @event might be
	 * assigned a different (possibly invalid) RMID while we're
	 * busy performing the IPI calls. It's therefore necessary to
	 * check @event's RMID afterwards, and if it has changed,
	 * discard the result of the read.
	 */
	rr.rmid = ACCESS_ONCE(event->hw.cqm_rmid);

	if (!__rmid_valid(rr.rmid))
		goto out;

	on_each_cpu_mask(&cqm_cpumask, __intel_cqm_event_count, &rr, 1);

	raw_spin_lock_irqsave(&cache_lock, flags);
	if (event->hw.cqm_rmid == rr.rmid)
		local64_set(&event->count, atomic64_read(&rr.value));
	raw_spin_unlock_irqrestore(&cache_lock, flags);
out:
	return __perf_event_count(event);
}

static void intel_cqm_event_start(struct perf_event *event, int mode)
{
	struct intel_pqr_state *state = this_cpu_ptr(&pqr_state);
	u32 rmid = event->hw.cqm_rmid;

	if (!(event->hw.cqm_state & PERF_HES_STOPPED))
		return;

	event->hw.cqm_state &= ~PERF_HES_STOPPED;

	if (state->rmid_usecnt++) {
		if (!WARN_ON_ONCE(state->rmid != rmid))
			return;
	} else {
		WARN_ON_ONCE(state->rmid);
	}

	state->rmid = rmid;
	wrmsr(MSR_IA32_PQR_ASSOC, rmid, state->closid);
}

static void intel_cqm_event_stop(struct perf_event *event, int mode)
{
	struct intel_pqr_state *state = this_cpu_ptr(&pqr_state);

	if (event->hw.cqm_state & PERF_HES_STOPPED)
		return;

	event->hw.cqm_state |= PERF_HES_STOPPED;

	intel_cqm_event_read(event);

	if (!--state->rmid_usecnt) {
		state->rmid = 0;
		wrmsr(MSR_IA32_PQR_ASSOC, 0, state->closid);
	} else {
		WARN_ON_ONCE(!state->rmid);
	}
}

static int intel_cqm_event_add(struct perf_event *event, int mode)
{
	unsigned long flags;
	u32 rmid;

	raw_spin_lock_irqsave(&cache_lock, flags);

	event->hw.cqm_state = PERF_HES_STOPPED;
	rmid = event->hw.cqm_rmid;

	if (__rmid_valid(rmid) && (mode & PERF_EF_START))
		intel_cqm_event_start(event, mode);

	raw_spin_unlock_irqrestore(&cache_lock, flags);

	return 0;
}

static void intel_cqm_event_destroy(struct perf_event *event)
{
	struct perf_event *group_other = NULL;

	mutex_lock(&cache_mutex);

	/*
	 * If there's another event in this group...
	 */
	if (!list_empty(&event->hw.cqm_group_entry)) {
		group_other = list_first_entry(&event->hw.cqm_group_entry,
					       struct perf_event,
					       hw.cqm_group_entry);
		list_del(&event->hw.cqm_group_entry);
	}

	/*
	 * And we're the group leader..
	 */
	if (cqm_group_leader(event)) {
		/*
		 * If there was a group_other, make that leader, otherwise
		 * destroy the group and return the RMID.
		 */
		if (group_other) {
			list_replace(&event->hw.cqm_groups_entry,
				     &group_other->hw.cqm_groups_entry);
		} else {
			u32 rmid = event->hw.cqm_rmid;

			if (__rmid_valid(rmid))
				__put_rmid(rmid);
			list_del(&event->hw.cqm_groups_entry);
		}
	}

	mutex_unlock(&cache_mutex);
}

static int intel_cqm_event_init(struct perf_event *event)
{
	struct perf_event *group = NULL;
	bool rotate = false;

	if (event->attr.type != intel_cqm_pmu.type)
		return -ENOENT;

	if (event->attr.config & ~QOS_EVENT_MASK)
		return -EINVAL;

	/* unsupported modes and filters */
	if (event->attr.exclude_user   ||
	    event->attr.exclude_kernel ||
	    event->attr.exclude_hv     ||
	    event->attr.exclude_idle   ||
	    event->attr.exclude_host   ||
	    event->attr.exclude_guest  ||
	    event->attr.sample_period) /* no sampling */
		return -EINVAL;

	INIT_LIST_HEAD(&event->hw.cqm_group_entry);
	INIT_LIST_HEAD(&event->hw.cqm_groups_entry);

	event->destroy = intel_cqm_event_destroy;

	mutex_lock(&cache_mutex);

	/* Will also set rmid */
	intel_cqm_setup_event(event, &group);

	if (group) {
		list_add_tail(&event->hw.cqm_group_entry,
			      &group->hw.cqm_group_entry);
	} else {
		list_add_tail(&event->hw.cqm_groups_entry,
			      &cache_groups);

		/*
		 * All RMIDs are either in use or have recently been
		 * used. Kick the rotation worker to clean/free some.
		 *
		 * We only do this for the group leader, rather than for
		 * every event in a group to save on needless work.
		 */
		if (!__rmid_valid(event->hw.cqm_rmid))
			rotate = true;
	}

	mutex_unlock(&cache_mutex);

	if (rotate)
		schedule_delayed_work(&intel_cqm_rmid_work, 0);

	return 0;
}

EVENT_ATTR_STR(llc_occupancy, intel_cqm_llc, "event=0x01");
EVENT_ATTR_STR(llc_occupancy.per-pkg, intel_cqm_llc_pkg, "1");
EVENT_ATTR_STR(llc_occupancy.unit, intel_cqm_llc_unit, "Bytes");
EVENT_ATTR_STR(llc_occupancy.scale, intel_cqm_llc_scale, NULL);
EVENT_ATTR_STR(llc_occupancy.snapshot, intel_cqm_llc_snapshot, "1");

EVENT_ATTR_STR(total_bytes, intel_cqm_total_bytes, "event=0x02");
EVENT_ATTR_STR(total_bytes.per-pkg, intel_cqm_total_bytes_pkg, "1");
EVENT_ATTR_STR(total_bytes.unit, intel_cqm_total_bytes_unit, "MB");
EVENT_ATTR_STR(total_bytes.scale, intel_cqm_total_bytes_scale, "1e-6");

EVENT_ATTR_STR(local_bytes, intel_cqm_local_bytes, "event=0x03");
EVENT_ATTR_STR(local_bytes.per-pkg, intel_cqm_local_bytes_pkg, "1");
EVENT_ATTR_STR(local_bytes.unit, intel_cqm_local_bytes_unit, "MB");
EVENT_ATTR_STR(local_bytes.scale, intel_cqm_local_bytes_scale, "1e-6");

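/*
 * The attributes above are what userspace sees under sysfs, e.g.
 * (paths shown for illustration, assuming the PMU registers as
 * "intel_cqm"):
 *
 *   /sys/bus/event_source/devices/intel_cqm/events/llc_occupancy
 *	-> "event=0x01"
 *   /sys/bus/event_source/devices/intel_cqm/events/total_bytes.scale
 *	-> "1e-6"
 *
 * which is what lets e.g. "perf stat -e intel_cqm/llc_occupancy/"
 * resolve the event encoding.
 */
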
static struct attribute *intel_cqm_events_attr[] = {
	EVENT_PTR(intel_cqm_llc),
	EVENT_PTR(intel_cqm_llc_pkg),
	EVENT_PTR(intel_cqm_llc_unit),
	EVENT_PTR(intel_cqm_llc_scale),
	EVENT_PTR(intel_cqm_llc_snapshot),
	NULL,
};

static struct attribute *intel_mbm_events_attr[] = {
	EVENT_PTR(intel_cqm_total_bytes),
	EVENT_PTR(intel_cqm_local_bytes),
	EVENT_PTR(intel_cqm_total_bytes_pkg),
	EVENT_PTR(intel_cqm_local_bytes_pkg),
	EVENT_PTR(intel_cqm_total_bytes_unit),
	EVENT_PTR(intel_cqm_local_bytes_unit),
	EVENT_PTR(intel_cqm_total_bytes_scale),
	EVENT_PTR(intel_cqm_local_bytes_scale),
	NULL,
};

static struct attribute *intel_cmt_mbm_events_attr[] = {
	EVENT_PTR(intel_cqm_llc),
	EVENT_PTR(intel_cqm_total_bytes),
	EVENT_PTR(intel_cqm_local_bytes),
	EVENT_PTR(intel_cqm_llc_pkg),
	EVENT_PTR(intel_cqm_total_bytes_pkg),
	EVENT_PTR(intel_cqm_local_bytes_pkg),
	EVENT_PTR(intel_cqm_llc_unit),
	EVENT_PTR(intel_cqm_total_bytes_unit),
	EVENT_PTR(intel_cqm_local_bytes_unit),
	EVENT_PTR(intel_cqm_llc_scale),
	EVENT_PTR(intel_cqm_total_bytes_scale),
	EVENT_PTR(intel_cqm_local_bytes_scale),
	EVENT_PTR(intel_cqm_llc_snapshot),
	NULL,
};

static struct attribute_group intel_cqm_events_group = {
	.name = "events",
	.attrs = NULL, /* assigned in intel_cqm_init() */
};

PMU_FORMAT_ATTR(event, "config:0-7");
static struct attribute *intel_cqm_formats_attr[] = {
	&format_attr_event.attr,
	NULL,
};

static struct attribute_group intel_cqm_format_group = {
	.name = "format",
	.attrs = intel_cqm_formats_attr,
};

static ssize_t
max_recycle_threshold_show(struct device *dev, struct device_attribute *attr,
			   char *page)
{
	ssize_t rv;

	mutex_lock(&cache_mutex);
	rv = snprintf(page, PAGE_SIZE - 1, "%u\n", __intel_cqm_max_threshold);
	mutex_unlock(&cache_mutex);

	return rv;
}

static ssize_t
max_recycle_threshold_store(struct device *dev,
			    struct device_attribute *attr,
			    const char *buf, size_t count)
{
	unsigned int bytes, cachelines;
	int ret;

	ret = kstrtouint(buf, 0, &bytes);
	if (ret)
		return ret;

	mutex_lock(&cache_mutex);

	__intel_cqm_max_threshold = bytes;
	cachelines = bytes / cqm_l3_scale;

	/*
	 * The new maximum takes effect immediately.
	 */
	if (__intel_cqm_threshold > cachelines)
		__intel_cqm_threshold = cachelines;

	mutex_unlock(&cache_mutex);

	return count;
}

static DEVICE_ATTR_RW(max_recycle_threshold);

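/*
 * max_recycle_threshold is exported read-write through the PMU's sysfs
 * directory (typically
 * /sys/bus/event_source/devices/intel_cqm/max_recycle_threshold) and is
 * expressed in bytes, matching __intel_cqm_max_threshold above.
 */
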
static struct attribute *intel_cqm_attrs[] = {
	&dev_attr_max_recycle_threshold.attr,
	NULL,
};

static const struct attribute_group intel_cqm_group = {
	.attrs = intel_cqm_attrs,
};

static const struct attribute_group *intel_cqm_attr_groups[] = {
	&intel_cqm_events_group,
	&intel_cqm_format_group,
	&intel_cqm_group,
	NULL,
};

static struct pmu intel_cqm_pmu = {
	.hrtimer_interval_ms = RMID_DEFAULT_QUEUE_TIME,
	.attr_groups	     = intel_cqm_attr_groups,
	.task_ctx_nr	     = perf_sw_context,
	.event_init	     = intel_cqm_event_init,
	.add		     = intel_cqm_event_add,
	.del		     = intel_cqm_event_stop,
	.start		     = intel_cqm_event_start,
	.stop		     = intel_cqm_event_stop,
	.read		     = intel_cqm_event_read,
	.count		     = intel_cqm_event_count,
};

static inline void cqm_pick_event_reader(int cpu)
{
	int reader;

	/* First online cpu in package becomes the reader */
	reader = cpumask_any_and(&cqm_cpumask, topology_core_cpumask(cpu));
	if (reader >= nr_cpu_ids)
		cpumask_set_cpu(cpu, &cqm_cpumask);
}

static void intel_cqm_cpu_starting(unsigned int cpu)
{
	struct intel_pqr_state *state = &per_cpu(pqr_state, cpu);
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	state->rmid = 0;
	state->closid = 0;
	state->rmid_usecnt = 0;

	WARN_ON(c->x86_cache_max_rmid != cqm_max_rmid);
	WARN_ON(c->x86_cache_occ_scale != cqm_l3_scale);
}

static void intel_cqm_cpu_exit(unsigned int cpu)
{
	int target;

	/* Is @cpu the current cqm reader for this package ? */
	if (!cpumask_test_and_clear_cpu(cpu, &cqm_cpumask))
		return;

	/* Find another online reader in this package */
	target = cpumask_any_but(topology_core_cpumask(cpu), cpu);

	if (target < nr_cpu_ids)
		cpumask_set_cpu(target, &cqm_cpumask);
}

static int intel_cqm_cpu_notifier(struct notifier_block *nb,
				  unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_DOWN_PREPARE:
		intel_cqm_cpu_exit(cpu);
		break;
	case CPU_STARTING:
		intel_cqm_cpu_starting(cpu);
		cqm_pick_event_reader(cpu);
		break;
	}

	return NOTIFY_OK;
}

static const struct x86_cpu_id intel_cqm_match[] = {
	{ .vendor = X86_VENDOR_INTEL, .feature = X86_FEATURE_CQM_OCCUP_LLC },
	{}
};

static void mbm_cleanup(void)
{
	if (!mbm_enabled)
		return;

	kfree(mbm_local);
	kfree(mbm_total);
	mbm_enabled = false;
}

static const struct x86_cpu_id intel_mbm_local_match[] = {
	{ .vendor = X86_VENDOR_INTEL, .feature = X86_FEATURE_CQM_MBM_LOCAL },
	{}
};

static const struct x86_cpu_id intel_mbm_total_match[] = {
	{ .vendor = X86_VENDOR_INTEL, .feature = X86_FEATURE_CQM_MBM_TOTAL },
	{}
};

static int intel_mbm_init(void)
{
	int array_size, maxid = cqm_max_rmid + 1;

	array_size = sizeof(struct sample) * maxid * topology_max_packages();
	mbm_local = kmalloc(array_size, GFP_KERNEL);
	if (!mbm_local)
		return -ENOMEM;

	mbm_total = kmalloc(array_size, GFP_KERNEL);
	if (!mbm_total) {
		mbm_cleanup();
		return -ENOMEM;
	}

	return 0;
}

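/*
 * mbm_local and mbm_total are sized as one struct sample per RMID per
 * package (maxid * topology_max_packages() entries each), so a given
 * RMID's bandwidth state can be tracked independently on every socket.
 */
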
static int __init intel_cqm_init(void)
{
	char *str = NULL, scale[20];
	int i, cpu, ret;

	if (x86_match_cpu(intel_cqm_match))
		cqm_enabled = true;

	if (x86_match_cpu(intel_mbm_local_match) &&
	    x86_match_cpu(intel_mbm_total_match))
		mbm_enabled = true;

	if (!cqm_enabled && !mbm_enabled)
		return -ENODEV;

	cqm_l3_scale = boot_cpu_data.x86_cache_occ_scale;

	/*
	 * It's possible that not all resources support the same number
	 * of RMIDs. Instead of making scheduling much more complicated
	 * (where we have to match a task's RMID to a cpu that supports
	 * that many RMIDs) just find the minimum RMIDs supported across
	 * all cpus.
	 *
	 * Also, check that the scales match on all cpus.
	 */
	cpu_notifier_register_begin();

	for_each_online_cpu(cpu) {
		struct cpuinfo_x86 *c = &cpu_data(cpu);

		if (c->x86_cache_max_rmid < cqm_max_rmid)
			cqm_max_rmid = c->x86_cache_max_rmid;

		if (c->x86_cache_occ_scale != cqm_l3_scale) {
			pr_err("Multiple LLC scale values, disabling\n");
			ret = -EINVAL;
			goto out;
		}
	}

	/*
	 * A reasonable upper limit on the max threshold is the number
	 * of lines tagged per RMID if all RMIDs have the same number of
	 * lines tagged in the LLC.
	 *
	 * For a 35MB LLC and 56 RMIDs, this is ~1.8% of the LLC.
	 */
	__intel_cqm_max_threshold =
		boot_cpu_data.x86_cache_size * 1024 / (cqm_max_rmid + 1);

	snprintf(scale, sizeof(scale), "%u", cqm_l3_scale);
	str = kstrdup(scale, GFP_KERNEL);
	if (!str) {
		ret = -ENOMEM;
		goto out;
	}

	event_attr_intel_cqm_llc_scale.event_str = str;

	ret = intel_cqm_setup_rmid_cache();
	if (ret)
		goto out;

	for_each_online_cpu(i) {
		intel_cqm_cpu_starting(i);
		cqm_pick_event_reader(i);
	}

	if (mbm_enabled)
		ret = intel_mbm_init();
	if (ret && !cqm_enabled)
		goto out;

	if (cqm_enabled && mbm_enabled)
		intel_cqm_events_group.attrs = intel_cmt_mbm_events_attr;
	else if (!cqm_enabled && mbm_enabled)
		intel_cqm_events_group.attrs = intel_mbm_events_attr;
	else if (cqm_enabled && !mbm_enabled)
		intel_cqm_events_group.attrs = intel_cqm_events_attr;

	ret = perf_pmu_register(&intel_cqm_pmu, "intel_cqm", -1);
	if (ret) {
		pr_err("Intel CQM perf registration failed: %d\n", ret);
		goto out;
	}

	if (cqm_enabled)
		pr_info("Intel CQM monitoring enabled\n");
	if (mbm_enabled)
		pr_info("Intel MBM enabled\n");

	/*
	 * Register the hot cpu notifier once we are sure cqm
	 * is enabled to avoid notifier leak.
	 */
	__perf_cpu_notifier(intel_cqm_cpu_notifier);

out:
	cpu_notifier_register_done();
	if (ret) {
		kfree(str);
		cqm_cleanup();
		mbm_cleanup();
	}

	return ret;
}

device_initcall(intel_cqm_init);