	return (e >= QOS_MBM_TOTAL_EVENT_ID && e <= QOS_MBM_LOCAL_EVENT_ID);
}
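
+/*
+ * Dispatch an IPI-based counter read for @rr: MBM events are read with
+ * __intel_mbm_event_count(), non-MBM (llc_occupancy) events with
+ * __intel_cqm_event_count().
+ */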
+static void cqm_mask_call(struct rmid_read *rr)
+{
+	if (is_mbm_event(rr->evt_type))
+		on_each_cpu_mask(&cqm_cpumask, __intel_mbm_event_count, rr, 1);
+	else
+		on_each_cpu_mask(&cqm_cpumask, __intel_cqm_event_count, rr, 1);
+}
+
/*
* Exchange the RMID of a group of events.
*/
	/*
	 * If our RMID is being deallocated, perform a read now.
	 */
	if (__rmid_valid(old_rmid) && !__rmid_valid(rmid)) {
		struct rmid_read rr = {
-			.value = ATOMIC64_INIT(0),
			.rmid = old_rmid,
+			.evt_type = group->attr.config,
+			.value = ATOMIC64_INIT(0),
		};

-		if (is_mbm_event(group->attr.config)) {
-			rr.evt_type = group->attr.config;
-			on_each_cpu_mask(&cqm_cpumask, __intel_mbm_event_count,
-					 &rr, 1);
-		} else {
-			on_each_cpu_mask(&cqm_cpumask, __intel_cqm_event_count,
-					 &rr, 1);
-		}
+		cqm_mask_call(&rr);
		local64_set(&group->count, atomic64_read(&rr.value));
	}
static u64 intel_cqm_event_count(struct perf_event *event)
{
	unsigned long flags;
	struct rmid_read rr = {
+		.evt_type = event->attr.config,
		.value = ATOMIC64_INIT(0),
	};
	if (!__rmid_valid(rr.rmid))
		goto out;

-	if (is_mbm_event(event->attr.config)) {
-		rr.evt_type = event->attr.config;
-		on_each_cpu_mask(&cqm_cpumask, __intel_mbm_event_count, &rr, 1);
-	} else {
-		on_each_cpu_mask(&cqm_cpumask, __intel_cqm_event_count, &rr, 1);
-	}
+	cqm_mask_call(&rr);

	raw_spin_lock_irqsave(&cache_lock, flags);
	if (event->hw.cqm_rmid == rr.rmid)