/**
* struct intel_pqr_state - State cache for the PQR MSR
- * @rmid: The cached Resource Monitoring ID
- * @closid: The cached Class Of Service ID
+ * @cur_rmid: The cached Resource Monitoring ID
+ * @cur_closid: The cached Class Of Service ID
+ * @default_rmid: The user assigned Resource Monitoring ID
+ * @default_closid: The user assigned cached Class Of Service ID
*
* The upper 32 bits of IA32_PQR_ASSOC contain closid and the
* lower 10 bits rmid. The update to IA32_PQR_ASSOC always
- * contains both parts, so we need to cache them.
+ * contains both parts, so we need to cache them. This also
+ * stores the user configured per cpu CLOSID and RMID.
+ *
+ * The cache also helps to avoid pointless updates if the value does
+ * not change.
*/
struct intel_pqr_state {
- u32 rmid;
- u32 closid;
+ u32 cur_rmid;
+ u32 cur_closid;
+ u32 default_rmid;
+ u32 default_closid;
};
DECLARE_PER_CPU(struct intel_pqr_state, pqr_state);
-DECLARE_PER_CPU_READ_MOSTLY(struct intel_pqr_state, rdt_cpu_default);
DECLARE_STATIC_KEY_FALSE(rdt_enable_key);
DECLARE_STATIC_KEY_FALSE(rdt_alloc_enable_key);
*/
static void __intel_rdt_sched_in(void)
{
- struct intel_pqr_state newstate = this_cpu_read(rdt_cpu_default);
- struct intel_pqr_state *curstate = this_cpu_ptr(&pqr_state);
+ struct intel_pqr_state *state = this_cpu_ptr(&pqr_state);
+ u32 closid = state->default_closid;
+ u32 rmid = state->default_rmid;
/*
* If this task has a closid/rmid assigned, use it.
*/
if (static_branch_likely(&rdt_alloc_enable_key)) {
if (current->closid)
- newstate.closid = current->closid;
+ closid = current->closid;
}
if (static_branch_likely(&rdt_mon_enable_key)) {
if (current->rmid)
- newstate.rmid = current->rmid;
+ rmid = current->rmid;
}
+ /*
+ * Only touch IA32_PQR_ASSOC when the resolved closid/rmid differ
+ * from the cached values, so a context switch with unchanged IDs
+ * skips the wrmsr entirely.
+ */
- if (newstate.closid != curstate->closid ||
- newstate.rmid != curstate->rmid) {
- *curstate = newstate;
- wrmsr(IA32_PQR_ASSOC, newstate.rmid, newstate.closid);
+ if (closid != state->cur_closid || rmid != state->cur_rmid) {
+ state->cur_closid = closid;
+ state->cur_rmid = rmid;
+ wrmsr(IA32_PQR_ASSOC, rmid, closid);
}
}
*/
+/* Per-CPU cache of the PQR MSR state; see struct intel_pqr_state. */
DEFINE_PER_CPU(struct intel_pqr_state, pqr_state);
-DEFINE_PER_CPU_READ_MOSTLY(struct intel_pqr_state, rdt_cpu_default);
-
/*
* Used to store the max resource name width and max resource data width
* to display the schemata in a tabular format
{
struct intel_pqr_state *state = this_cpu_ptr(&pqr_state);
- per_cpu(rdt_cpu_default.closid, cpu) = 0;
- per_cpu(rdt_cpu_default.rmid, cpu) = 0;
- state->closid = 0;
- state->rmid = 0;
+ state->default_closid = 0;
+ state->default_rmid = 0;
+ state->cur_closid = 0;
+ state->cur_rmid = 0;
wrmsr(IA32_PQR_ASSOC, 0, 0);
}
struct rdtgroup *r = info;
if (r) {
- this_cpu_write(rdt_cpu_default.closid, r->closid);
- this_cpu_write(rdt_cpu_default.rmid, r->mon.rmid);
+ this_cpu_write(pqr_state.default_closid, r->closid);
+ this_cpu_write(pqr_state.default_rmid, r->mon.rmid);
}
/*
/* Update per cpu rmid of the moved CPUs first */
for_each_cpu(cpu, &rdtgrp->cpu_mask)
- per_cpu(rdt_cpu_default.rmid, cpu) = prdtgrp->mon.rmid;
+ per_cpu(pqr_state.default_rmid, cpu) = prdtgrp->mon.rmid;
/*
* Update the MSR on moved CPUs and CPUs which have moved
* task running on them.
/* Update per cpu closid and rmid of the moved CPUs first */
for_each_cpu(cpu, &rdtgrp->cpu_mask) {
- per_cpu(rdt_cpu_default.closid, cpu) = rdtgroup_default.closid;
- per_cpu(rdt_cpu_default.rmid, cpu) = rdtgroup_default.mon.rmid;
+ per_cpu(pqr_state.default_closid, cpu) = rdtgroup_default.closid;
+ per_cpu(pqr_state.default_rmid, cpu) = rdtgroup_default.mon.rmid;
}
/*