/*
 * Intel Cache Quality-of-Service Monitoring (CQM) support.
 *
 * Based very, very heavily on work by Peter Zijlstra.
 */

#include <linux/perf_event.h>
#include <linux/slab.h>
#include <asm/cpu_device_id.h>
#include "../perf_event.h"

#define MSR_IA32_PQR_ASSOC	0x0c8f
#define MSR_IA32_QM_CTR		0x0c8e
#define MSR_IA32_QM_EVTSEL	0x0c8d

#define MBM_CNTR_WIDTH		24
/*
 * Guaranteed time in ms as per SDM where MBM counters will not overflow.
 */
#define MBM_CTR_OVERFLOW_TIME	1000

static u32 cqm_max_rmid = -1;
static unsigned int cqm_l3_scale; /* supposedly cacheline size */
static bool cqm_enabled, mbm_enabled;
unsigned int mbm_socket_max;
/**
 * struct intel_pqr_state - State cache for the PQR MSR
 * @rmid:		The cached Resource Monitoring ID
 * @closid:		The cached Class Of Service ID
 * @rmid_usecnt:	The usage counter for rmid
 *
 * The upper 32 bits of MSR_IA32_PQR_ASSOC contain closid and the
 * lower 10 bits rmid. The update to MSR_IA32_PQR_ASSOC always
 * contains both parts, so we need to cache them.
 *
 * The cache also helps to avoid pointless updates if the value does
 * not change.
 */
struct intel_pqr_state {
        u32     rmid;
        u32     closid;
        int     rmid_usecnt;
};

/*
 * The cached intel_pqr_state is strictly per CPU and can never be
 * updated from a remote CPU. Both functions which modify the state
 * (intel_cqm_event_start and intel_cqm_event_stop) are called with
 * interrupts disabled, which is sufficient for the protection.
 */
static DEFINE_PER_CPU(struct intel_pqr_state, pqr_state);
static struct hrtimer *mbm_timers;

/**
 * struct sample - mbm event's (local or total) data
 * @total_bytes:	#bytes since we began monitoring
 * @prev_msr:		previous value of MSR
 */
struct sample {
        u64     total_bytes;
        u64     prev_msr;
};

/*
 * samples profiled for total memory bandwidth type events
 */
static struct sample *mbm_total;
/*
 * samples profiled for local memory bandwidth type events
 */
static struct sample *mbm_local;

#define pkg_id	topology_physical_package_id(smp_processor_id())
/*
 * rmid_2_index returns the index for the rmid in mbm_local/mbm_total array.
 * mbm_total[] and mbm_local[] are linearly indexed by socket# * max number of
 * rmids per socket; an example is given below:
 * RMID1 of Socket0:  vrmid = 1
 * RMID1 of Socket1:  vrmid = 1 * (cqm_max_rmid + 1) + 1
 * RMID1 of Socket2:  vrmid = 2 * (cqm_max_rmid + 1) + 1
 */
#define rmid_2_index(rmid)	((pkg_id * (cqm_max_rmid + 1)) + rmid)
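/*
 * Illustrative example (numbers assumed, not taken from any particular
 * part): with cqm_max_rmid == 31 there are 32 RMIDs per socket, so RMID 5
 * read on a CPU in package 2 maps to index 2 * 32 + 5 == 69 in
 * mbm_local[]/mbm_total[].
 */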
/*
 * Protects cache_cgroups and cqm_rmid_free_lru and cqm_rmid_limbo_lru.
 * Also protects event->hw.cqm_rmid
 *
 * Hold either for stability, both for modification of ->hw.cqm_rmid.
 */
static DEFINE_MUTEX(cache_mutex);
static DEFINE_RAW_SPINLOCK(cache_lock);

/*
 * Groups of events that have the same target(s), one RMID per group.
 */
static LIST_HEAD(cache_groups);

/*
 * Mask of CPUs for reading CQM values. We only need one per-socket.
 */
static cpumask_t cqm_cpumask;

#define RMID_VAL_ERROR		(1ULL << 63)
#define RMID_VAL_UNAVAIL	(1ULL << 62)

/*
 * Event IDs are used to program IA32_QM_EVTSEL before reading event
 * counter from IA32_QM_CTR
 */
#define QOS_L3_OCCUP_EVENT_ID	0x01
#define QOS_MBM_TOTAL_EVENT_ID	0x02
#define QOS_MBM_LOCAL_EVENT_ID	0x03

/*
 * This is central to the rotation algorithm in __intel_cqm_rmid_rotate().
 *
 * This rmid is always free and is guaranteed to have an associated
 * near-zero occupancy value, i.e. no cachelines are tagged with this
 * RMID, once __intel_cqm_rmid_rotate() returns.
 */
static u32 intel_cqm_rotation_rmid;

#define INVALID_RMID		(-1)
/*
 * Is @rmid valid for programming the hardware?
 *
 * rmid 0 is reserved by the hardware for all non-monitored tasks, which
 * means that we should never come across an rmid with that value.
 * Likewise, an rmid value of -1 is used to indicate "no rmid currently
 * assigned" and is used as part of the rotation code.
 */
static inline bool __rmid_valid(u32 rmid)
{
        if (!rmid || rmid == INVALID_RMID)
                return false;

        return true;
}
static u64 __rmid_read(u32 rmid)
{
        u64 val;

        /*
         * Ignore the SDM, this thing is _NOTHING_ like a regular perfcnt,
         * it just says that to increase confusion.
         */
        wrmsr(MSR_IA32_QM_EVTSEL, QOS_L3_OCCUP_EVENT_ID, rmid);
        rdmsrl(MSR_IA32_QM_CTR, val);

        /*
         * Aside from the ERROR and UNAVAIL bits, assume this thing returns
         * the number of cachelines tagged with @rmid.
         */
        return val;
}
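/*
 * Note for callers: the value returned by __rmid_read() still carries the
 * RMID_VAL_ERROR/RMID_VAL_UNAVAIL bits; callers are expected to test them
 * before treating the result as an occupancy count.
 */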
enum rmid_recycle_state {
        RMID_YOUNG = 0,
        RMID_AVAILABLE,
        RMID_DIRTY,
};

struct cqm_rmid_entry {
        u32 rmid;
        enum rmid_recycle_state state;
        struct list_head list;
        unsigned long queue_time;
};
/*
 * cqm_rmid_free_lru - A least recently used list of RMIDs.
 *
 * Oldest entry at the head, newest (most recently used) entry at the
 * tail. This list is never traversed, it's only used to keep track of
 * the lru order. That is, we only pick entries off the head or insert
 * them on the tail.
 *
 * All entries on the list are 'free', and their RMIDs are not currently
 * in use. To mark an RMID as in use, remove its entry from the lru
 * list.
 *
 *
 * cqm_rmid_limbo_lru - list of currently unused but (potentially) dirty RMIDs.
 *
 * This list contains RMIDs that no one is currently using but that
 * may have a non-zero occupancy value associated with them. The
 * rotation worker moves RMIDs from the limbo list to the free list once
 * the occupancy value drops below __intel_cqm_threshold.
 *
 * Both lists are protected by cache_mutex.
 */
static LIST_HEAD(cqm_rmid_free_lru);
static LIST_HEAD(cqm_rmid_limbo_lru);
/*
 * We use a simple array of pointers so that we can lookup a struct
 * cqm_rmid_entry in O(1). This alleviates the callers of __get_rmid()
 * and __put_rmid() from having to worry about dealing with struct
 * cqm_rmid_entry - they just deal with rmids, i.e. integers.
 *
 * Once this array is initialized it is read-only. No locks are required
 * to access it.
 *
 * All entries for all RMIDs can be looked up in this array at all
 * times.
 */
static struct cqm_rmid_entry **cqm_rmid_ptrs;
static inline struct cqm_rmid_entry *__rmid_entry(u32 rmid)
{
        struct cqm_rmid_entry *entry;

        entry = cqm_rmid_ptrs[rmid];
        WARN_ON(entry->rmid != rmid);

        return entry;
}
/*
 * Returns < 0 on fail.
 *
 * We expect to be called with cache_mutex held.
 */
static u32 __get_rmid(void)
{
        struct cqm_rmid_entry *entry;

        lockdep_assert_held(&cache_mutex);

        if (list_empty(&cqm_rmid_free_lru))
                return INVALID_RMID;

        entry = list_first_entry(&cqm_rmid_free_lru, struct cqm_rmid_entry, list);
        list_del(&entry->list);

        return entry->rmid;
}
static void __put_rmid(u32 rmid)
{
        struct cqm_rmid_entry *entry;

        lockdep_assert_held(&cache_mutex);

        WARN_ON(!__rmid_valid(rmid));
        entry = __rmid_entry(rmid);

        entry->queue_time = jiffies;
        entry->state = RMID_YOUNG;

        list_add_tail(&entry->list, &cqm_rmid_limbo_lru);
}
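/*
 * Note that a "freed" RMID is parked on the limbo list rather than the
 * free list; the rotation worker only moves it to the free list once its
 * occupancy has dropped below __intel_cqm_threshold.
 */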
static void cqm_cleanup(void)
{
        int i;

        if (!cqm_rmid_ptrs)
                return;

        for (i = 0; i < cqm_max_rmid; i++)
                kfree(cqm_rmid_ptrs[i]);

        kfree(cqm_rmid_ptrs);
        cqm_rmid_ptrs = NULL;
}
static int intel_cqm_setup_rmid_cache(void)
{
        struct cqm_rmid_entry *entry;
        unsigned int nr_rmids;
        int r = 0;

        nr_rmids = cqm_max_rmid + 1;
        cqm_rmid_ptrs = kzalloc(sizeof(struct cqm_rmid_entry *) *
                                nr_rmids, GFP_KERNEL);
        if (!cqm_rmid_ptrs)
                return -ENOMEM;

        for (; r <= cqm_max_rmid; r++) {
                struct cqm_rmid_entry *entry;

                entry = kmalloc(sizeof(*entry), GFP_KERNEL);
                if (!entry)
                        goto fail;

                INIT_LIST_HEAD(&entry->list);
                entry->rmid = r;
                cqm_rmid_ptrs[r] = entry;

                list_add_tail(&entry->list, &cqm_rmid_free_lru);
        }

        /*
         * RMID 0 is special and is always allocated. It's used for all
         * tasks that are not monitored.
         */
        entry = __rmid_entry(0);
        list_del(&entry->list);

        mutex_lock(&cache_mutex);
        intel_cqm_rotation_rmid = __get_rmid();
        mutex_unlock(&cache_mutex);

        return 0;

fail:
        cqm_cleanup();
        return -ENOMEM;
}
/*
 * Determine if @a and @b measure the same set of tasks.
 *
 * If @a and @b measure the same set of tasks then we want to share a
 * single RMID.
 */
static bool __match_event(struct perf_event *a, struct perf_event *b)
{
        /* Per-cpu and task events don't mix */
        if ((a->attach_state & PERF_ATTACH_TASK) !=
            (b->attach_state & PERF_ATTACH_TASK))
                return false;

#ifdef CONFIG_CGROUP_PERF
        if (a->cgrp != b->cgrp)
                return false;
#endif

        /* If not task event, we're machine wide */
        if (!(b->attach_state & PERF_ATTACH_TASK))
                return true;

        /*
         * Events that target same task are placed into the same cache group.
         * Mark it as a multi event group, so that we update ->count
         * for every event rather than just the group leader later.
         */
        if (a->hw.target == b->hw.target) {
                b->hw.is_group_event = true;
                return true;
        }

        /*
         * Are we an inherited event?
         */
        if (b->parent == a)
                return true;

        return false;
}
#ifdef CONFIG_CGROUP_PERF
static inline struct perf_cgroup *event_to_cgroup(struct perf_event *event)
{
        if (event->attach_state & PERF_ATTACH_TASK)
                return perf_cgroup_from_task(event->hw.target, event->ctx);

        return event->cgrp;
}
#endif
/*
 * Determine if @a's tasks intersect with @b's tasks
 *
 * There are combinations of events that we explicitly prohibit,
 *
 *                 PROHIBITS
 *   system-wide -> cgroup and task
 *   cgroup      -> system-wide
 *               -> task in cgroup
 *   task        -> system-wide
 *               -> task in cgroup
 *
 * Call this function before allocating an RMID.
 */
static bool __conflict_event(struct perf_event *a, struct perf_event *b)
{
#ifdef CONFIG_CGROUP_PERF
        /*
         * We can have any number of cgroups but only one system-wide
         * event at a time.
         */
        if (a->cgrp && b->cgrp) {
                struct perf_cgroup *ac = a->cgrp;
                struct perf_cgroup *bc = b->cgrp;

                /*
                 * This condition should have been caught in
                 * __match_event() and we should be sharing an RMID.
                 */
                WARN_ON_ONCE(ac == bc);

                if (cgroup_is_descendant(ac->css.cgroup, bc->css.cgroup) ||
                    cgroup_is_descendant(bc->css.cgroup, ac->css.cgroup))
                        return true;

                return false;
        }

        if (a->cgrp || b->cgrp) {
                struct perf_cgroup *ac, *bc;

                /*
                 * cgroup and system-wide events are mutually exclusive
                 */
                if ((a->cgrp && !(b->attach_state & PERF_ATTACH_TASK)) ||
                    (b->cgrp && !(a->attach_state & PERF_ATTACH_TASK)))
                        return true;

                /*
                 * Ensure neither event is part of the other's cgroup
                 */
                ac = event_to_cgroup(a);
                bc = event_to_cgroup(b);

                /*
                 * Must have cgroup and non-intersecting task events.
                 */
                if (!ac || !bc)
                        return false;

                /*
                 * We have cgroup and task events, and the task belongs
                 * to a cgroup. Check for overlap.
                 */
                if (cgroup_is_descendant(ac->css.cgroup, bc->css.cgroup) ||
                    cgroup_is_descendant(bc->css.cgroup, ac->css.cgroup))
                        return true;

                return false;
        }
#endif
        /*
         * If one of them is not a task, same story as above with cgroups.
         */
        if (!(a->attach_state & PERF_ATTACH_TASK) ||
            !(b->attach_state & PERF_ATTACH_TASK))
                return true;

        /*
         * Must be non-overlapping.
         */
        return false;
}
static void __intel_cqm_event_count(void *info);
static void init_mbm_sample(u32 rmid, u32 evt_type);
static void __intel_mbm_event_count(void *info);

static bool is_mbm_event(int e)
{
        return (e >= QOS_MBM_TOTAL_EVENT_ID && e <= QOS_MBM_LOCAL_EVENT_ID);
}
static void cqm_mask_call(struct rmid_read *rr)
{
        if (is_mbm_event(rr->evt_type))
                on_each_cpu_mask(&cqm_cpumask, __intel_mbm_event_count, rr, 1);
        else
                on_each_cpu_mask(&cqm_cpumask, __intel_cqm_event_count, rr, 1);
}
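/*
 * The IPI handlers invoked above accumulate each socket's count into
 * rr->value, so once cqm_mask_call() returns, rr->value holds the sum
 * across all packages in cqm_cpumask.
 */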
/*
 * Exchange the RMID of a group of events.
 */
static u32 intel_cqm_xchg_rmid(struct perf_event *group, u32 rmid)
{
        struct perf_event *event;
        struct list_head *head = &group->hw.cqm_group_entry;
        u32 old_rmid = group->hw.cqm_rmid;

        lockdep_assert_held(&cache_mutex);

        /*
         * If our RMID is being deallocated, perform a read now.
         */
        if (__rmid_valid(old_rmid) && !__rmid_valid(rmid)) {
                struct rmid_read rr = {
                        .rmid = old_rmid,
                        .evt_type = group->attr.config,
                        .value = ATOMIC64_INIT(0),
                };

                cqm_mask_call(&rr);
                local64_set(&group->count, atomic64_read(&rr.value));
        }

        raw_spin_lock_irq(&cache_lock);

        group->hw.cqm_rmid = rmid;
        list_for_each_entry(event, head, hw.cqm_group_entry)
                event->hw.cqm_rmid = rmid;

        raw_spin_unlock_irq(&cache_lock);

        /*
         * If the allocation is for mbm, init the mbm stats.
         * Need to check if each event in the group is mbm event
         * because there could be multiple type of events in the same group.
         */
        if (__rmid_valid(rmid)) {
                event = group;
                if (is_mbm_event(event->attr.config))
                        init_mbm_sample(rmid, event->attr.config);

                list_for_each_entry(event, head, hw.cqm_group_entry) {
                        if (is_mbm_event(event->attr.config))
                                init_mbm_sample(rmid, event->attr.config);
                }
        }

        return old_rmid;
}
/*
 * If we fail to assign a new RMID for intel_cqm_rotation_rmid because
 * cachelines are still tagged with RMIDs in limbo, we progressively
 * increment the threshold until we find an RMID in limbo with <=
 * __intel_cqm_threshold lines tagged. This is designed to mitigate the
 * problem where cachelines tagged with an RMID are not steadily being
 * evicted.
 *
 * On successful rotations we decrease the threshold back towards zero.
 *
 * __intel_cqm_max_threshold provides an upper bound on the threshold,
 * and is measured in bytes because it's exposed to userland.
 */
static unsigned int __intel_cqm_threshold;
static unsigned int __intel_cqm_max_threshold;
/*
 * Test whether an RMID has a zero occupancy value on this cpu.
 */
static void intel_cqm_stable(void *arg)
{
        struct cqm_rmid_entry *entry;

        list_for_each_entry(entry, &cqm_rmid_limbo_lru, list) {
                if (entry->state != RMID_AVAILABLE)
                        break;

                if (__rmid_read(entry->rmid) > __intel_cqm_threshold)
                        entry->state = RMID_DIRTY;
        }
}
/*
 * If we have group events waiting for an RMID that don't conflict with
 * events already running, assign @rmid.
 */
static bool intel_cqm_sched_in_event(u32 rmid)
{
        struct perf_event *leader, *event;

        lockdep_assert_held(&cache_mutex);

        leader = list_first_entry(&cache_groups, struct perf_event,
                                  hw.cqm_groups_entry);
        event = leader;

        list_for_each_entry_continue(event, &cache_groups,
                                     hw.cqm_groups_entry) {
                if (__rmid_valid(event->hw.cqm_rmid))
                        continue;

                if (__conflict_event(event, leader))
                        continue;

                intel_cqm_xchg_rmid(event, rmid);
                return true;
        }

        return false;
}
/*
 * Initially use this constant for both the limbo queue time and the
 * rotation timer interval, pmu::hrtimer_interval_ms.
 *
 * They don't need to be the same, but the two are related since if you
 * rotate faster than you recycle RMIDs, you may run out of available
 * RMIDs.
 */
#define RMID_DEFAULT_QUEUE_TIME	250	/* ms */

static unsigned int __rmid_queue_time_ms = RMID_DEFAULT_QUEUE_TIME;
/*
 * intel_cqm_rmid_stabilize - move RMIDs from limbo to free list
 * @available: number of freeable RMIDs on the limbo list
 *
 * Quiescent state; wait for all 'freed' RMIDs to become unused, i.e. no
 * cachelines are tagged with those RMIDs. After this we can reuse them
 * and know that the current set of active RMIDs is stable.
 *
 * Return %true or %false depending on whether stabilization needs to be
 * reattempted.
 *
 * If we return %true then @available is updated to indicate the
 * number of RMIDs on the limbo list that have been queued for the
 * minimum queue time (RMID_AVAILABLE), but whose data occupancy values
 * are above __intel_cqm_threshold.
 */
static bool intel_cqm_rmid_stabilize(unsigned int *available)
{
        struct cqm_rmid_entry *entry, *tmp;

        lockdep_assert_held(&cache_mutex);

        *available = 0;
        list_for_each_entry(entry, &cqm_rmid_limbo_lru, list) {
                unsigned long min_queue_time;
                unsigned long now = jiffies;

                /*
                 * We hold RMIDs placed into limbo for a minimum queue
                 * time. Before the minimum queue time has elapsed we do
                 * not recycle RMIDs.
                 *
                 * The reasoning is that until a sufficient time has
                 * passed since we stopped using an RMID, any RMID
                 * placed onto the limbo list will likely still have
                 * data tagged in the cache, which means we'll probably
                 * fail to recycle it anyway.
                 *
                 * We can save ourselves an expensive IPI by skipping
                 * any RMIDs that have not been queued for the minimum
                 * time.
                 */
                min_queue_time = entry->queue_time +
                        msecs_to_jiffies(__rmid_queue_time_ms);

                if (time_after(min_queue_time, now))
                        break;

                entry->state = RMID_AVAILABLE;
                (*available)++;
        }

        /*
         * Fast return if none of the RMIDs on the limbo list have been
         * sitting on the queue for the minimum queue time.
         */
        if (!*available)
                return false;

        /*
         * Test whether an RMID is free for each package.
         */
        on_each_cpu_mask(&cqm_cpumask, intel_cqm_stable, NULL, true);

        list_for_each_entry_safe(entry, tmp, &cqm_rmid_limbo_lru, list) {
                /*
                 * Exhausted all RMIDs that have waited min queue time.
                 */
                if (entry->state == RMID_YOUNG)
                        break;

                if (entry->state == RMID_DIRTY)
                        continue;

                list_del(&entry->list); /* remove from limbo */

                /*
                 * The rotation RMID gets priority if it's
                 * currently invalid. In which case, skip adding
                 * the RMID to the free lru.
                 */
                if (!__rmid_valid(intel_cqm_rotation_rmid)) {
                        intel_cqm_rotation_rmid = entry->rmid;
                        continue;
                }

                /*
                 * If we have groups waiting for RMIDs, hand
                 * them one now provided they don't conflict.
                 */
                if (intel_cqm_sched_in_event(entry->rmid))
                        continue;

                /*
                 * Otherwise place it onto the free list.
                 */
                list_add_tail(&entry->list, &cqm_rmid_free_lru);
        }

        return __rmid_valid(intel_cqm_rotation_rmid);
}
/*
 * Pick a victim group and move it to the tail of the group list.
 * @next: The first group without an RMID
 */
static void __intel_cqm_pick_and_rotate(struct perf_event *next)
{
        struct perf_event *rotor;
        u32 rmid;

        lockdep_assert_held(&cache_mutex);

        rotor = list_first_entry(&cache_groups, struct perf_event,
                                 hw.cqm_groups_entry);

        /*
         * The group at the front of the list should always have a valid
         * RMID. If it doesn't then no groups have RMIDs assigned and we
         * don't need to rotate the list.
         */
        if (next == rotor)
                return;

        rmid = intel_cqm_xchg_rmid(rotor, INVALID_RMID);
        __put_rmid(rmid);

        list_rotate_left(&cache_groups);
}
/*
 * Deallocate the RMIDs from any events that conflict with @event, and
 * place them on the back of the group list.
 */
static void intel_cqm_sched_out_conflicting_events(struct perf_event *event)
{
        struct perf_event *group, *g;
        u32 rmid;

        lockdep_assert_held(&cache_mutex);

        list_for_each_entry_safe(group, g, &cache_groups, hw.cqm_groups_entry) {
                if (group == event)
                        continue;

                rmid = group->hw.cqm_rmid;

                /*
                 * Skip events that don't have a valid RMID.
                 */
                if (!__rmid_valid(rmid))
                        continue;

                /*
                 * No conflict? No problem! Leave the event alone.
                 */
                if (!__conflict_event(group, event))
                        continue;

                intel_cqm_xchg_rmid(group, INVALID_RMID);
                __put_rmid(rmid);
        }
}
/*
 * Attempt to rotate the groups and assign new RMIDs.
 *
 * We rotate for two reasons,
 *   1. To handle the scheduling of conflicting events
 *   2. To recycle RMIDs
 *
 * Rotating RMIDs is complicated because the hardware doesn't give us
 * any clues.
 *
 * There are problems with the hardware interface; when you change the
 * task:RMID map cachelines retain their 'old' tags, giving a skewed
 * picture. In order to work around this, we must always keep one free
 * RMID - intel_cqm_rotation_rmid.
 *
 * Rotation works by taking away an RMID from a group (the old RMID),
 * and assigning the free RMID to another group (the new RMID). We must
 * then wait for the old RMID to not be used (no cachelines tagged).
 * This ensures that all cachelines are tagged with 'active' RMIDs. At
 * this point we can start reading values for the new RMID and treat the
 * old RMID as the free RMID for the next rotation.
 *
 * Return %true or %false depending on whether we did any rotating.
 */
static bool __intel_cqm_rmid_rotate(void)
{
        struct perf_event *group, *start = NULL;
        unsigned int threshold_limit;
        unsigned int nr_needed = 0;
        unsigned int nr_available;
        bool rotated = false;

        mutex_lock(&cache_mutex);

again:
        /*
         * Fast path through this function if there are no groups and no
         * RMIDs that need cleaning.
         */
        if (list_empty(&cache_groups) && list_empty(&cqm_rmid_limbo_lru))
                goto out;

        list_for_each_entry(group, &cache_groups, hw.cqm_groups_entry) {
                if (!__rmid_valid(group->hw.cqm_rmid)) {
                        if (!start)
                                start = group;
                        nr_needed++;
                }
        }

        /*
         * We have some event groups, but they all have RMIDs assigned
         * and no RMIDs need cleaning.
         */
        if (!nr_needed && list_empty(&cqm_rmid_limbo_lru))
                goto out;

        if (!nr_needed)
                goto stabilize;

        /*
         * We have more event groups without RMIDs than available RMIDs,
         * or we have event groups that conflict with the ones currently
         * scheduled.
         *
         * We force deallocate the rmid of the group at the head of
         * cache_groups. The first event group without an RMID then gets
         * assigned intel_cqm_rotation_rmid. This ensures we always make
         * forward progress.
         *
         * Rotate the cache_groups list so the previous head is now the
         * tail.
         */
        __intel_cqm_pick_and_rotate(start);

        /*
         * If the rotation is going to succeed, reduce the threshold so
         * that we don't needlessly reuse dirty RMIDs.
         */
        if (__rmid_valid(intel_cqm_rotation_rmid)) {
                intel_cqm_xchg_rmid(start, intel_cqm_rotation_rmid);
                intel_cqm_rotation_rmid = __get_rmid();

                intel_cqm_sched_out_conflicting_events(start);

                if (__intel_cqm_threshold)
                        __intel_cqm_threshold--;
        }

        rotated = true;

stabilize:
        /*
         * We now need to stabilize the RMID we freed above (if any) to
         * ensure that the next time we rotate we have an RMID with zero
         * occupancy value.
         *
         * Alternatively, if we didn't need to perform any rotation,
         * we'll have a bunch of RMIDs in limbo that need stabilizing.
         */
        threshold_limit = __intel_cqm_max_threshold / cqm_l3_scale;

        while (intel_cqm_rmid_stabilize(&nr_available) &&
               __intel_cqm_threshold < threshold_limit) {
                unsigned int steal_limit;

                /*
                 * Don't spin if nobody is actively waiting for an RMID,
                 * the rotation worker will be kicked as soon as an
                 * event needs an RMID anyway.
                 */
                if (!nr_needed)
                        break;

                /* Allow max 25% of RMIDs to be in limbo. */
                steal_limit = (cqm_max_rmid + 1) / 4;

                /*
                 * We failed to stabilize any RMIDs so our rotation
                 * logic is now stuck. In order to make forward progress
                 * we have a few options:
                 *
                 *   1. rotate ("steal") another RMID
                 *   2. increase the threshold
                 *
                 * We do both of 1. and 2. until we hit the steal limit.
                 *
                 * The steal limit prevents all RMIDs ending up on the
                 * limbo list. This can happen if every RMID has a
                 * non-zero occupancy above threshold_limit, and the
                 * occupancy values aren't dropping fast enough.
                 *
                 * Note that there is prioritisation at work here - we'd
                 * rather increase the number of RMIDs on the limbo list
                 * than increase the threshold, because increasing the
                 * threshold skews the event data (because we reuse
                 * dirty RMIDs) - threshold bumps are a last resort.
                 */
                if (nr_available < steal_limit)
                        goto again;

                __intel_cqm_threshold++;
        }

out:
        mutex_unlock(&cache_mutex);
        return rotated;
}
static void intel_cqm_rmid_rotate(struct work_struct *work);

static DECLARE_DELAYED_WORK(intel_cqm_rmid_work, intel_cqm_rmid_rotate);

static struct pmu intel_cqm_pmu;

static void intel_cqm_rmid_rotate(struct work_struct *work)
{
        unsigned long delay;

        __intel_cqm_rmid_rotate();

        delay = msecs_to_jiffies(intel_cqm_pmu.hrtimer_interval_ms);
        schedule_delayed_work(&intel_cqm_rmid_work, delay);
}
static u64 update_sample(unsigned int rmid, u32 evt_type, int first)
{
        struct sample *mbm_current;
        u32 vrmid = rmid_2_index(rmid);
        u64 val, bytes, shift;
        u32 eventid;

        if (evt_type == QOS_MBM_LOCAL_EVENT_ID) {
                mbm_current = &mbm_local[vrmid];
                eventid = QOS_MBM_LOCAL_EVENT_ID;
        } else {
                mbm_current = &mbm_total[vrmid];
                eventid = QOS_MBM_TOTAL_EVENT_ID;
        }

        wrmsr(MSR_IA32_QM_EVTSEL, eventid, rmid);
        rdmsrl(MSR_IA32_QM_CTR, val);
        if (val & (RMID_VAL_ERROR | RMID_VAL_UNAVAIL))
                return mbm_current->total_bytes;

        if (first) {
                mbm_current->prev_msr = val;
                mbm_current->total_bytes = 0;
                return mbm_current->total_bytes;
        }

        /*
         * The h/w guarantees that counters will not overflow
         * so long as we poll them at least once per second.
         */
        shift = 64 - MBM_CNTR_WIDTH;
        bytes = (val << shift) - (mbm_current->prev_msr << shift);
        bytes >>= shift;

        bytes *= cqm_l3_scale;

        mbm_current->total_bytes += bytes;
        mbm_current->prev_msr = val;

        return mbm_current->total_bytes;
}
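/*
 * The wraparound handling above, spelled out: the hardware counter is only
 * MBM_CNTR_WIDTH (24) bits wide, so both the old and the new reading are
 * shifted into the top 24 bits before subtracting. The unsigned wrap of
 * that subtraction yields the correct delta even when the raw counter has
 * wrapped, and shifting back down gives the delta in counter units. For
 * example (illustrative numbers only), prev_msr = 0xfffffe and val =
 * 0x000004 produce a delta of 6.
 */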
static u64 rmid_read_mbm(unsigned int rmid, u32 evt_type)
{
        return update_sample(rmid, evt_type, 0);
}

static void __intel_mbm_event_init(void *info)
{
        struct rmid_read *rr = info;

        update_sample(rr->rmid, rr->evt_type, 1);
}
static void init_mbm_sample(u32 rmid, u32 evt_type)
{
        struct rmid_read rr = {
                .rmid = rmid,
                .evt_type = evt_type,
                .value = ATOMIC64_INIT(0),
        };

        /* on each socket, init sample */
        on_each_cpu_mask(&cqm_cpumask, __intel_mbm_event_init, &rr, 1);
}
/*
 * Find a group and setup RMID.
 *
 * If we're part of a group, we use the group's RMID.
 */
static void intel_cqm_setup_event(struct perf_event *event,
                                  struct perf_event **group)
{
        struct perf_event *iter;
        bool conflict = false;
        u32 rmid;

        event->hw.is_group_event = false;
        list_for_each_entry(iter, &cache_groups, hw.cqm_groups_entry) {
                rmid = iter->hw.cqm_rmid;

                if (__match_event(iter, event)) {
                        /* All tasks in a group share an RMID */
                        event->hw.cqm_rmid = rmid;
                        *group = iter;
                        if (is_mbm_event(event->attr.config) && __rmid_valid(rmid))
                                init_mbm_sample(rmid, event->attr.config);
                        return;
                }

                /*
                 * We only care about conflicts for events that are
                 * actually scheduled in (and hence have a valid RMID).
                 */
                if (__conflict_event(iter, event) && __rmid_valid(rmid))
                        conflict = true;
        }

        if (conflict)
                rmid = INVALID_RMID;
        else
                rmid = __get_rmid();

        if (is_mbm_event(event->attr.config) && __rmid_valid(rmid))
                init_mbm_sample(rmid, event->attr.config);

        event->hw.cqm_rmid = rmid;
}
static void intel_cqm_event_read(struct perf_event *event)
{
        unsigned long flags;
        u32 rmid;
        u64 val;

        /*
         * Task events are handled by intel_cqm_event_count().
         */
        if (event->cpu == -1)
                return;

        raw_spin_lock_irqsave(&cache_lock, flags);
        rmid = event->hw.cqm_rmid;

        if (!__rmid_valid(rmid))
                goto out;

        if (is_mbm_event(event->attr.config))
                val = rmid_read_mbm(rmid, event->attr.config);
        else
                val = __rmid_read(rmid);

        /*
         * Ignore this reading on error states and do not update the value.
         */
        if (val & (RMID_VAL_ERROR | RMID_VAL_UNAVAIL))
                goto out;

        local64_set(&event->count, val);
out:
        raw_spin_unlock_irqrestore(&cache_lock, flags);
}
static void __intel_cqm_event_count(void *info)
{
        struct rmid_read *rr = info;
        u64 val;

        val = __rmid_read(rr->rmid);

        if (val & (RMID_VAL_ERROR | RMID_VAL_UNAVAIL))
                return;

        atomic64_add(val, &rr->value);
}
static inline bool cqm_group_leader(struct perf_event *event)
{
        return !list_empty(&event->hw.cqm_groups_entry);
}
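/*
 * An event is a cache group leader iff it is linked on cache_groups via
 * hw.cqm_groups_entry; member events only sit on the leader's
 * hw.cqm_group_entry list and therefore fail this test.
 */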
static void __intel_mbm_event_count(void *info)
{
        struct rmid_read *rr = info;
        u64 val;

        val = rmid_read_mbm(rr->rmid, rr->evt_type);
        if (val & (RMID_VAL_ERROR | RMID_VAL_UNAVAIL))
                return;
        atomic64_add(val, &rr->value);
}
static enum hrtimer_restart mbm_hrtimer_handle(struct hrtimer *hrtimer)
{
        struct perf_event *iter, *iter1;
        int ret = HRTIMER_RESTART;
        struct list_head *head;
        unsigned long flags;
        u32 grp_rmid;

        /*
         * Need to take the cache_lock as the timer Event Select MSR reads
         * can race with the mbm/cqm count() and mbm_init() reads.
         */
        raw_spin_lock_irqsave(&cache_lock, flags);

        if (list_empty(&cache_groups)) {
                ret = HRTIMER_NORESTART;
                goto out;
        }

        list_for_each_entry(iter, &cache_groups, hw.cqm_groups_entry) {
                grp_rmid = iter->hw.cqm_rmid;
                if (!__rmid_valid(grp_rmid))
                        continue;
                if (is_mbm_event(iter->attr.config))
                        update_sample(grp_rmid, iter->attr.config, 0);

                head = &iter->hw.cqm_group_entry;
                if (list_empty(head))
                        continue;
                list_for_each_entry(iter1, head, hw.cqm_group_entry) {
                        if (!iter1->hw.is_group_event)
                                break;
                        if (is_mbm_event(iter1->attr.config))
                                update_sample(iter1->hw.cqm_rmid,
                                              iter1->attr.config, 0);
                }
        }

        hrtimer_forward_now(hrtimer, ms_to_ktime(MBM_CTR_OVERFLOW_TIME));
out:
        raw_spin_unlock_irqrestore(&cache_lock, flags);

        return ret;
}
static void __mbm_start_timer(void *info)
{
        hrtimer_start(&mbm_timers[pkg_id], ms_to_ktime(MBM_CTR_OVERFLOW_TIME),
                      HRTIMER_MODE_REL_PINNED);
}

static void __mbm_stop_timer(void *info)
{
        hrtimer_cancel(&mbm_timers[pkg_id]);
}

static void mbm_start_timers(void)
{
        on_each_cpu_mask(&cqm_cpumask, __mbm_start_timer, NULL, 1);
}

static void mbm_stop_timers(void)
{
        on_each_cpu_mask(&cqm_cpumask, __mbm_stop_timer, NULL, 1);
}
static void mbm_hrtimer_init(void)
{
        struct hrtimer *hr;
        int i;

        for (i = 0; i < mbm_socket_max; i++) {
                hr = &mbm_timers[i];
                hrtimer_init(hr, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
                hr->function = mbm_hrtimer_handle;
        }
}
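/*
 * One pinned hrtimer per socket: each package's designated reader CPU arms
 * its own timer (see __mbm_start_timer()), and mbm_hrtimer_handle() re-arms
 * it every MBM_CTR_OVERFLOW_TIME ms so the 24-bit MBM counters are read
 * before they can wrap.
 */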
static u64 intel_cqm_event_count(struct perf_event *event)
{
        unsigned long flags;
        struct rmid_read rr = {
                .evt_type = event->attr.config,
                .value = ATOMIC64_INIT(0),
        };

        /*
         * We only need to worry about task events. System-wide events
         * are handled like usual, i.e. entirely with
         * intel_cqm_event_read().
         */
        if (event->cpu != -1)
                return __perf_event_count(event);

        /*
         * Only the group leader gets to report values except in case of
         * multiple events in the same group, where we still need to read
         * the other events. This stops us reporting duplicate values to
         * userspace, and gives us a clear rule for which task gets to
         * report the values.
         *
         * Note that it is impossible to attribute these values to
         * specific packages - we forfeit that ability when we create
         * task events.
         */
        if (!cqm_group_leader(event) && !event->hw.is_group_event)
                return 0;

        /*
         * Getting up-to-date values requires an SMP IPI which is not
         * possible if we're being called in interrupt context. Return
         * the cached values instead.
         */
        if (unlikely(in_interrupt()))
                goto out;

        /*
         * Notice that we don't perform the reading of an RMID
         * atomically, because we can't hold a spin lock across the
         * IPIs.
         *
         * Speculatively perform the read, since @event might be
         * assigned a different (possibly invalid) RMID while we're
         * busy performing the IPI calls. It's therefore necessary to
         * check @event's RMID afterwards, and if it has changed,
         * discard the result of the read.
         */
        rr.rmid = ACCESS_ONCE(event->hw.cqm_rmid);

        if (!__rmid_valid(rr.rmid))
                goto out;

        cqm_mask_call(&rr);

        raw_spin_lock_irqsave(&cache_lock, flags);
        if (event->hw.cqm_rmid == rr.rmid)
                local64_set(&event->count, atomic64_read(&rr.value));
        raw_spin_unlock_irqrestore(&cache_lock, flags);
out:
        return __perf_event_count(event);
}
static void intel_cqm_event_start(struct perf_event *event, int mode)
{
        struct intel_pqr_state *state = this_cpu_ptr(&pqr_state);
        u32 rmid = event->hw.cqm_rmid;

        if (!(event->hw.cqm_state & PERF_HES_STOPPED))
                return;

        event->hw.cqm_state &= ~PERF_HES_STOPPED;

        if (state->rmid_usecnt++) {
                if (!WARN_ON_ONCE(state->rmid != rmid))
                        return;
        } else {
                WARN_ON_ONCE(state->rmid);
        }

        state->rmid = rmid;
        wrmsr(MSR_IA32_PQR_ASSOC, rmid, state->closid);
}
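/*
 * The PQR write above follows the layout documented for struct
 * intel_pqr_state: the RMID goes in the low half of MSR_IA32_PQR_ASSOC and
 * the cached CLOSID is rewritten unchanged in the upper half.
 */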
static void intel_cqm_event_stop(struct perf_event *event, int mode)
{
        struct intel_pqr_state *state = this_cpu_ptr(&pqr_state);

        if (event->hw.cqm_state & PERF_HES_STOPPED)
                return;

        event->hw.cqm_state |= PERF_HES_STOPPED;

        intel_cqm_event_read(event);

        if (!--state->rmid_usecnt) {
                state->rmid = 0;
                wrmsr(MSR_IA32_PQR_ASSOC, 0, state->closid);
        } else {
                WARN_ON_ONCE(!state->rmid);
        }
}
static int intel_cqm_event_add(struct perf_event *event, int mode)
{
        unsigned long flags;
        u32 rmid;

        raw_spin_lock_irqsave(&cache_lock, flags);

        event->hw.cqm_state = PERF_HES_STOPPED;
        rmid = event->hw.cqm_rmid;

        if (__rmid_valid(rmid) && (mode & PERF_EF_START))
                intel_cqm_event_start(event, mode);

        raw_spin_unlock_irqrestore(&cache_lock, flags);

        return 0;
}
static void intel_cqm_event_destroy(struct perf_event *event)
{
        struct perf_event *group_other = NULL;
        unsigned long flags;

        mutex_lock(&cache_mutex);
        /*
         * Hold the cache_lock as mbm timer handlers could be
         * scanning the list of events.
         */
        raw_spin_lock_irqsave(&cache_lock, flags);

        /*
         * If there's another event in this group...
         */
        if (!list_empty(&event->hw.cqm_group_entry)) {
                group_other = list_first_entry(&event->hw.cqm_group_entry,
                                               struct perf_event,
                                               hw.cqm_group_entry);
                list_del(&event->hw.cqm_group_entry);
        }

        /*
         * And we're the group leader..
         */
        if (cqm_group_leader(event)) {
                /*
                 * If there was a group_other, make that leader, otherwise
                 * destroy the group and return the RMID.
                 */
                if (group_other) {
                        list_replace(&event->hw.cqm_groups_entry,
                                     &group_other->hw.cqm_groups_entry);
                } else {
                        u32 rmid = event->hw.cqm_rmid;

                        if (__rmid_valid(rmid))
                                __put_rmid(rmid);
                        list_del(&event->hw.cqm_groups_entry);
                }
        }

        raw_spin_unlock_irqrestore(&cache_lock, flags);

        /*
         * Stop the mbm overflow timers when the last event is destroyed.
         */
        if (mbm_enabled && list_empty(&cache_groups))
                mbm_stop_timers();

        mutex_unlock(&cache_mutex);
}
static int intel_cqm_event_init(struct perf_event *event)
{
        struct perf_event *group = NULL;
        bool rotate = false;
        unsigned long flags;

        if (event->attr.type != intel_cqm_pmu.type)
                return -ENOENT;

        if ((event->attr.config < QOS_L3_OCCUP_EVENT_ID) ||
            (event->attr.config > QOS_MBM_LOCAL_EVENT_ID))
                return -EINVAL;

        /* unsupported modes and filters */
        if (event->attr.exclude_user   ||
            event->attr.exclude_kernel ||
            event->attr.exclude_hv     ||
            event->attr.exclude_idle   ||
            event->attr.exclude_host   ||
            event->attr.exclude_guest  ||
            event->attr.sample_period) /* no sampling */
                return -EINVAL;

        INIT_LIST_HEAD(&event->hw.cqm_group_entry);
        INIT_LIST_HEAD(&event->hw.cqm_groups_entry);

        event->destroy = intel_cqm_event_destroy;

        mutex_lock(&cache_mutex);

        /*
         * Start the mbm overflow timers when the first event is created.
         */
        if (mbm_enabled && list_empty(&cache_groups))
                mbm_start_timers();

        /* Will also set rmid */
        intel_cqm_setup_event(event, &group);

        /*
         * Hold the cache_lock as mbm timer handlers could be
         * scanning the list of events.
         */
        raw_spin_lock_irqsave(&cache_lock, flags);

        if (group) {
                list_add_tail(&event->hw.cqm_group_entry,
                              &group->hw.cqm_group_entry);
        } else {
                list_add_tail(&event->hw.cqm_groups_entry,
                              &cache_groups);

                /*
                 * All RMIDs are either in use or have recently been
                 * used. Kick the rotation worker to clean/free some.
                 *
                 * We only do this for the group leader, rather than for
                 * every event in a group to save on needless work.
                 */
                if (!__rmid_valid(event->hw.cqm_rmid))
                        rotate = true;
        }

        raw_spin_unlock_irqrestore(&cache_lock, flags);
        mutex_unlock(&cache_mutex);

        if (rotate)
                schedule_delayed_work(&intel_cqm_rmid_work, 0);

        return 0;
}
EVENT_ATTR_STR(llc_occupancy, intel_cqm_llc, "event=0x01");
EVENT_ATTR_STR(llc_occupancy.per-pkg, intel_cqm_llc_pkg, "1");
EVENT_ATTR_STR(llc_occupancy.unit, intel_cqm_llc_unit, "Bytes");
EVENT_ATTR_STR(llc_occupancy.scale, intel_cqm_llc_scale, NULL);
EVENT_ATTR_STR(llc_occupancy.snapshot, intel_cqm_llc_snapshot, "1");

EVENT_ATTR_STR(total_bytes, intel_cqm_total_bytes, "event=0x02");
EVENT_ATTR_STR(total_bytes.per-pkg, intel_cqm_total_bytes_pkg, "1");
EVENT_ATTR_STR(total_bytes.unit, intel_cqm_total_bytes_unit, "MB");
EVENT_ATTR_STR(total_bytes.scale, intel_cqm_total_bytes_scale, "1e-6");

EVENT_ATTR_STR(local_bytes, intel_cqm_local_bytes, "event=0x03");
EVENT_ATTR_STR(local_bytes.per-pkg, intel_cqm_local_bytes_pkg, "1");
EVENT_ATTR_STR(local_bytes.unit, intel_cqm_local_bytes_unit, "MB");
EVENT_ATTR_STR(local_bytes.scale, intel_cqm_local_bytes_scale, "1e-6");
static struct attribute *intel_cqm_events_attr[] = {
        EVENT_PTR(intel_cqm_llc),
        EVENT_PTR(intel_cqm_llc_pkg),
        EVENT_PTR(intel_cqm_llc_unit),
        EVENT_PTR(intel_cqm_llc_scale),
        EVENT_PTR(intel_cqm_llc_snapshot),
        NULL,
};

static struct attribute *intel_mbm_events_attr[] = {
        EVENT_PTR(intel_cqm_total_bytes),
        EVENT_PTR(intel_cqm_local_bytes),
        EVENT_PTR(intel_cqm_total_bytes_pkg),
        EVENT_PTR(intel_cqm_local_bytes_pkg),
        EVENT_PTR(intel_cqm_total_bytes_unit),
        EVENT_PTR(intel_cqm_local_bytes_unit),
        EVENT_PTR(intel_cqm_total_bytes_scale),
        EVENT_PTR(intel_cqm_local_bytes_scale),
        NULL,
};

static struct attribute *intel_cmt_mbm_events_attr[] = {
        EVENT_PTR(intel_cqm_llc),
        EVENT_PTR(intel_cqm_total_bytes),
        EVENT_PTR(intel_cqm_local_bytes),
        EVENT_PTR(intel_cqm_llc_pkg),
        EVENT_PTR(intel_cqm_total_bytes_pkg),
        EVENT_PTR(intel_cqm_local_bytes_pkg),
        EVENT_PTR(intel_cqm_llc_unit),
        EVENT_PTR(intel_cqm_total_bytes_unit),
        EVENT_PTR(intel_cqm_local_bytes_unit),
        EVENT_PTR(intel_cqm_llc_scale),
        EVENT_PTR(intel_cqm_total_bytes_scale),
        EVENT_PTR(intel_cqm_local_bytes_scale),
        EVENT_PTR(intel_cqm_llc_snapshot),
        NULL,
};
static struct attribute_group intel_cqm_events_group = {
        .name = "events",
        .attrs = NULL,
};

PMU_FORMAT_ATTR(event, "config:0-7");
static struct attribute *intel_cqm_formats_attr[] = {
        &format_attr_event.attr,
        NULL,
};

static struct attribute_group intel_cqm_format_group = {
        .name = "format",
        .attrs = intel_cqm_formats_attr,
};
static ssize_t
max_recycle_threshold_show(struct device *dev, struct device_attribute *attr,
                           char *page)
{
        ssize_t rv;

        mutex_lock(&cache_mutex);
        rv = snprintf(page, PAGE_SIZE - 1, "%u\n", __intel_cqm_max_threshold);
        mutex_unlock(&cache_mutex);

        return rv;
}

static ssize_t
max_recycle_threshold_store(struct device *dev,
                            struct device_attribute *attr,
                            const char *buf, size_t count)
{
        unsigned int bytes, cachelines;
        int ret;

        ret = kstrtouint(buf, 0, &bytes);
        if (ret)
                return ret;

        mutex_lock(&cache_mutex);

        __intel_cqm_max_threshold = bytes;
        cachelines = bytes / cqm_l3_scale;

        /*
         * The new maximum takes effect immediately.
         */
        if (__intel_cqm_threshold > cachelines)
                __intel_cqm_threshold = cachelines;

        mutex_unlock(&cache_mutex);

        return count;
}

static DEVICE_ATTR_RW(max_recycle_threshold);
static struct attribute *intel_cqm_attrs[] = {
        &dev_attr_max_recycle_threshold.attr,
        NULL,
};

static const struct attribute_group intel_cqm_group = {
        .attrs = intel_cqm_attrs,
};

static const struct attribute_group *intel_cqm_attr_groups[] = {
        &intel_cqm_events_group,
        &intel_cqm_format_group,
        &intel_cqm_group,
        NULL,
};
static struct pmu intel_cqm_pmu = {
        .hrtimer_interval_ms = RMID_DEFAULT_QUEUE_TIME,
        .attr_groups         = intel_cqm_attr_groups,
        .task_ctx_nr         = perf_sw_context,
        .event_init          = intel_cqm_event_init,
        .add                 = intel_cqm_event_add,
        .del                 = intel_cqm_event_stop,
        .start               = intel_cqm_event_start,
        .stop                = intel_cqm_event_stop,
        .read                = intel_cqm_event_read,
        .count               = intel_cqm_event_count,
};
static inline void cqm_pick_event_reader(int cpu)
{
        int reader;

        /* First online cpu in package becomes the reader */
        reader = cpumask_any_and(&cqm_cpumask, topology_core_cpumask(cpu));
        if (reader >= nr_cpu_ids)
                cpumask_set_cpu(cpu, &cqm_cpumask);
}
static int intel_cqm_cpu_starting(unsigned int cpu)
{
        struct intel_pqr_state *state = &per_cpu(pqr_state, cpu);
        struct cpuinfo_x86 *c = &cpu_data(cpu);

        state->rmid = 0;
        state->closid = 0;
        state->rmid_usecnt = 0;

        WARN_ON(c->x86_cache_max_rmid != cqm_max_rmid);
        WARN_ON(c->x86_cache_occ_scale != cqm_l3_scale);

        cqm_pick_event_reader(cpu);
        return 0;
}
static int intel_cqm_cpu_exit(unsigned int cpu)
{
        int target;

        /* Is @cpu the current cqm reader for this package ? */
        if (!cpumask_test_and_clear_cpu(cpu, &cqm_cpumask))
                return 0;

        /* Find another online reader in this package */
        target = cpumask_any_but(topology_core_cpumask(cpu), cpu);

        if (target < nr_cpu_ids)
                cpumask_set_cpu(target, &cqm_cpumask);

        return 0;
}
static const struct x86_cpu_id intel_cqm_match[] = {
        { .vendor = X86_VENDOR_INTEL, .feature = X86_FEATURE_CQM_OCCUP_LLC },
        {}
};
static void mbm_cleanup(void)
{
        if (!mbm_enabled)
                return;

        kfree(mbm_local);
        kfree(mbm_total);
        mbm_enabled = false;
}
static const struct x86_cpu_id intel_mbm_local_match[] = {
        { .vendor = X86_VENDOR_INTEL, .feature = X86_FEATURE_CQM_MBM_LOCAL },
        {}
};

static const struct x86_cpu_id intel_mbm_total_match[] = {
        { .vendor = X86_VENDOR_INTEL, .feature = X86_FEATURE_CQM_MBM_TOTAL },
        {}
};
static int intel_mbm_init(void)
{
        int ret = 0, array_size, maxid = cqm_max_rmid + 1;

        mbm_socket_max = topology_max_packages();
        array_size = sizeof(struct sample) * maxid * mbm_socket_max;
        mbm_local = kmalloc(array_size, GFP_KERNEL);
        if (!mbm_local)
                return -ENOMEM;

        mbm_total = kmalloc(array_size, GFP_KERNEL);
        if (!mbm_total) {
                ret = -ENOMEM;
                goto out;
        }

        array_size = sizeof(struct hrtimer) * mbm_socket_max;
        mbm_timers = kmalloc(array_size, GFP_KERNEL);
        if (!mbm_timers) {
                ret = -ENOMEM;
                goto out;
        }
        mbm_hrtimer_init();

out:
        if (ret)
                mbm_cleanup();

        return ret;
}
static int __init intel_cqm_init(void)
{
        char *str = NULL, scale[20];
        int cpu, ret;

        if (x86_match_cpu(intel_cqm_match))
                cqm_enabled = true;

        if (x86_match_cpu(intel_mbm_local_match) &&
            x86_match_cpu(intel_mbm_total_match))
                mbm_enabled = true;

        if (!cqm_enabled && !mbm_enabled)
                return -ENODEV;

        cqm_l3_scale = boot_cpu_data.x86_cache_occ_scale;

        /*
         * It's possible that not all resources support the same number
         * of RMIDs. Instead of making scheduling much more complicated
         * (where we have to match a task's RMID to a cpu that supports
         * that many RMIDs) just find the minimum RMIDs supported across
         * all cpus.
         *
         * Also, check that the scales match on all cpus.
         */
        get_online_cpus();
        for_each_online_cpu(cpu) {
                struct cpuinfo_x86 *c = &cpu_data(cpu);

                if (c->x86_cache_max_rmid < cqm_max_rmid)
                        cqm_max_rmid = c->x86_cache_max_rmid;

                if (c->x86_cache_occ_scale != cqm_l3_scale) {
                        pr_err("Multiple LLC scale values, disabling\n");
                        ret = -EINVAL;
                        goto out;
                }
        }

        /*
         * A reasonable upper limit on the max threshold is the number
         * of lines tagged per RMID if all RMIDs have the same number of
         * lines tagged in the LLC.
         *
         * For a 35MB LLC and 56 RMIDs, this is ~1.8% of the LLC.
         */
        __intel_cqm_max_threshold =
                boot_cpu_data.x86_cache_size * 1024 / (cqm_max_rmid + 1);

        snprintf(scale, sizeof(scale), "%u", cqm_l3_scale);
        str = kstrdup(scale, GFP_KERNEL);
        if (!str) {
                ret = -ENOMEM;
                goto out;
        }

        event_attr_intel_cqm_llc_scale.event_str = str;

        ret = intel_cqm_setup_rmid_cache();
        if (ret)
                goto out;

        if (mbm_enabled)
                ret = intel_mbm_init();
        if (ret && !cqm_enabled)
                goto out;

        if (cqm_enabled && mbm_enabled)
                intel_cqm_events_group.attrs = intel_cmt_mbm_events_attr;
        else if (!cqm_enabled && mbm_enabled)
                intel_cqm_events_group.attrs = intel_mbm_events_attr;
        else if (cqm_enabled && !mbm_enabled)
                intel_cqm_events_group.attrs = intel_cqm_events_attr;

        ret = perf_pmu_register(&intel_cqm_pmu, "intel_cqm", -1);
        if (ret) {
                pr_err("Intel CQM perf registration failed: %d\n", ret);
                goto out;
        }

        if (cqm_enabled)
                pr_info("Intel CQM monitoring enabled\n");
        if (mbm_enabled)
                pr_info("Intel MBM enabled\n");

        /*
         * Setup the hot cpu notifier once we are sure cqm
         * is enabled to avoid notifier leak.
         */
        cpuhp_setup_state(CPUHP_AP_PERF_X86_CQM_STARTING,
                          "AP_PERF_X86_CQM_STARTING",
                          intel_cqm_cpu_starting, NULL);
        cpuhp_setup_state(CPUHP_AP_PERF_X86_CQM_ONLINE, "AP_PERF_X86_CQM_ONLINE",
                          NULL, intel_cqm_cpu_exit);

out:
        put_online_cpus();

        if (ret) {
                kfree(str);
                cqm_cleanup();
                mbm_cleanup();
        }

        return ret;
}
device_initcall(intel_cqm_init);