#define _ASM_X86_INTEL_RDT_H
#define IA32_L3_CBM_BASE 0xc90
+#define IA32_L2_CBM_BASE 0xd10
+/**
+ * struct rdt_resource - attributes of an RDT resource
+ * @enabled: Is this feature enabled on this machine
+ * @capable: Is this feature available on this machine
+ * @name: Name to use in "schemata" file
+ * @num_closid: Number of CLOSIDs available
+ * @cbm_len: Length of the cache bit mask
+ * @max_cbm: Largest Cache Bit Mask allowed
+ * @min_cbm_bits: Minimum number of consecutive bits to be set
+ *                in a cache bit mask
+ * @domains: All domains for this resource
+ * @num_domains: Number of domains active
+ * @msr_base: Base MSR address for CBMs
+ * @tmp_cbms: Scratch space when updating schemata
+ * @cache_level: Which cache level defines scope of this domain
+ * @cbm_idx_multi: Multiplier of CBM index
+ * @cbm_idx_offset: Offset of CBM index. CBM index is computed by:
+ *                  closid * cbm_idx_multi + cbm_idx_offset
+ */
+struct rdt_resource {
+        bool enabled;
+        bool capable;
+        char *name;
+        int num_closid;
+        int cbm_len;
+        int min_cbm_bits;
+        u32 max_cbm;
+        struct list_head domains;
+        int num_domains;
+        int msr_base;
+        u32 *tmp_cbms;
+        int cache_level;
+        int cbm_idx_multi;
+        int cbm_idx_offset;
+};
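For reference, the cbm_idx_multi/cbm_idx_offset pair together with msr_base determines which MSR holds the CBM for a given CLOSID. A minimal sketch of that mapping, using the formula from the kernel-doc above (the helper name cbm_idx() is illustrative only, not part of this patch):

/* Illustrative only: MSR index for a CLOSID, per the formula documented above. */
static inline int cbm_idx(struct rdt_resource *r, int closid)
{
        return closid * r->cbm_idx_multi + r->cbm_idx_offset;
}
/* The CBM MSR for that CLOSID is then r->msr_base + cbm_idx(r, closid). */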
+
+extern struct rdt_resource rdt_resources_all[];
+
+enum {
+        RDT_RESOURCE_L3,
+        RDT_RESOURCE_L3DATA,
+        RDT_RESOURCE_L3CODE,
+        RDT_RESOURCE_L2,
+
+        /* Must be the last */
+        RDT_NUM_RESOURCES,
+};
+
+#define for_each_capable_rdt_resource(r) \
+        for (r = rdt_resources_all; r < rdt_resources_all + RDT_NUM_RESOURCES;\
+             r++) \
+                if (r->capable)
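The macro expands to a for loop over rdt_resources_all[] whose body only runs for entries with ->capable set. A minimal usage sketch (the pr_info() body here is just an example, mirroring the intel_rdt_late_init() change below):

struct rdt_resource *r;

for_each_capable_rdt_resource(r)
        pr_info("resource %s: %d CLOSIDs\n", r->name, r->num_closid);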
+
+/* CPUID.(EAX=10H, ECX=ResID=1).EAX */
+union cpuid_0x10_1_eax {
+        struct {
+                unsigned int cbm_len:5;
+        } split;
+        unsigned int full;
+};
+
+/* CPUID.(EAX=10H, ECX=ResID=1).EDX */
+union cpuid_0x10_1_edx {
+        struct {
+                unsigned int cos_max:16;
+        } split;
+        unsigned int full;
+};
#endif /* _ASM_X86_INTEL_RDT_H */
#include <asm/intel-family.h>
#include <asm/intel_rdt.h>
+#define domain_init(id) LIST_HEAD_INIT(rdt_resources_all[id].domains)
+
+struct rdt_resource rdt_resources_all[] = {
+        {
+                .name = "L3",
+                .domains = domain_init(RDT_RESOURCE_L3),
+                .msr_base = IA32_L3_CBM_BASE,
+                .min_cbm_bits = 1,
+                .cache_level = 3,
+                .cbm_idx_multi = 1,
+                .cbm_idx_offset = 0
+        },
+        {
+                .name = "L3DATA",
+                .domains = domain_init(RDT_RESOURCE_L3DATA),
+                .msr_base = IA32_L3_CBM_BASE,
+                .min_cbm_bits = 1,
+                .cache_level = 3,
+                .cbm_idx_multi = 2,
+                .cbm_idx_offset = 0
+        },
+        {
+                .name = "L3CODE",
+                .domains = domain_init(RDT_RESOURCE_L3CODE),
+                .msr_base = IA32_L3_CBM_BASE,
+                .min_cbm_bits = 1,
+                .cache_level = 3,
+                .cbm_idx_multi = 2,
+                .cbm_idx_offset = 1
+        },
+        {
+                .name = "L2",
+                .domains = domain_init(RDT_RESOURCE_L2),
+                .msr_base = IA32_L2_CBM_BASE,
+                .min_cbm_bits = 1,
+                .cache_level = 2,
+                .cbm_idx_multi = 1,
+                .cbm_idx_offset = 0
+        },
+};
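As an aside on the table above, combining these multipliers and offsets with the index formula from the header gives the CBM MSR layout. The arithmetic below is illustrative only, not part of the patch:

/*
 * Illustrative arithmetic:
 *   L3 (no CDP):  CLOSID n -> MSR 0xc90 + n
 *   L3DATA (CDP): CLOSID n -> MSR 0xc90 + 2 * n
 *   L3CODE (CDP): CLOSID n -> MSR 0xc90 + 2 * n + 1
 * so with CDP enabled the data and code masks interleave in the MSR space.
 */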
+
/*
* cache_alloc_hsw_probe() - Have to probe for Intel haswell server CPUs
* as they do not have CPUID enumeration support for Cache allocation.
        if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
            boot_cpu_data.x86 == 6 &&
            boot_cpu_data.x86_model == INTEL_FAM6_HASWELL_X) {
+                struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_L3];
                u32 l, h, max_cbm = BIT_MASK(20) - 1;
                if (wrmsr_safe(IA32_L3_CBM_BASE, max_cbm, 0))
                        return false;
                rdmsr(IA32_L3_CBM_BASE, l, h);
                /* If all the bits were set in MSR, return success */
-                return l == max_cbm;
+                if (l != max_cbm)
+                        return false;
+
+                r->num_closid = 4;
+                r->cbm_len = 20;
+                r->max_cbm = max_cbm;
+                r->min_cbm_bits = 2;
+                r->capable = true;
+                r->enabled = true;
+
+                return true;
        }
        return false;
}
+static void rdt_get_config(int idx, struct rdt_resource *r)
+{
+        union cpuid_0x10_1_eax eax;
+        union cpuid_0x10_1_edx edx;
+        u32 ebx, ecx;
+
+        cpuid_count(0x00000010, idx, &eax.full, &ebx, &ecx, &edx.full);
+        r->num_closid = edx.split.cos_max + 1;
+        r->cbm_len = eax.split.cbm_len + 1;
+        r->max_cbm = BIT_MASK(eax.split.cbm_len + 1) - 1;
+        r->capable = true;
+        r->enabled = true;
+}
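The CPUID fields are reported as "value minus one", which is why rdt_get_config() adds 1 when filling in the resource. A worked example with assumed CPUID values (illustrative only):

/*
 * Assumed example values:
 *   eax.split.cbm_len == 19  ->  cbm_len = 20 bits,
 *                                max_cbm = BIT_MASK(20) - 1 = 0xfffff
 *   edx.split.cos_max == 15  ->  num_closid = 16
 */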
+
+static void rdt_get_cdp_l3_config(int type)
+{
+        struct rdt_resource *r_l3 = &rdt_resources_all[RDT_RESOURCE_L3];
+        struct rdt_resource *r = &rdt_resources_all[type];
+
+        r->num_closid = r_l3->num_closid / 2;
+        r->cbm_len = r_l3->cbm_len;
+        r->max_cbm = r_l3->max_cbm;
+        r->capable = true;
+        /*
+         * By default, CDP is disabled. CDP can be enabled by mount parameter
+         * "cdp" during resctrl file system mount time.
+         */
+        r->enabled = false;
+}
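Because CDP splits every CLOSID into a code/data pair, the CDP resources advertise half as many CLOSIDs as plain L3. For example (numbers illustrative), an L3 cache reporting 16 CLOSIDs ends up with 8 usable CLOSIDs each for L3DATA and L3CODE.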
+
static inline bool get_rdt_resources(void)
{
+        bool ret = false;
+
        if (cache_alloc_hsw_probe())
                return true;
        if (!boot_cpu_has(X86_FEATURE_RDT_A))
                return false;
-        if (!boot_cpu_has(X86_FEATURE_CAT_L3))
-                return false;
-        return true;
+        if (boot_cpu_has(X86_FEATURE_CAT_L3)) {
+                rdt_get_config(1, &rdt_resources_all[RDT_RESOURCE_L3]);
+                if (boot_cpu_has(X86_FEATURE_CDP_L3)) {
+                        rdt_get_cdp_l3_config(RDT_RESOURCE_L3DATA);
+                        rdt_get_cdp_l3_config(RDT_RESOURCE_L3CODE);
+                }
+                ret = true;
+        }
+        if (boot_cpu_has(X86_FEATURE_CAT_L2)) {
+                /* CPUID 0x10.2 fields are the same format as 0x10.1 */
+                rdt_get_config(2, &rdt_resources_all[RDT_RESOURCE_L2]);
+                ret = true;
+        }
+
+        return ret;
}
static int __init intel_rdt_late_init(void)
{
+        struct rdt_resource *r;
+
        if (!get_rdt_resources())
                return -ENODEV;
-        pr_info("Intel RDT cache allocation detected\n");
-        if (boot_cpu_has(X86_FEATURE_CDP_L3))
-                pr_info("Intel RDT code data prioritization detected\n");
+        for_each_capable_rdt_resource(r)
+                pr_info("Intel RDT %s allocation detected\n", r->name);
        return 0;
}