static void amd64_enable_ecc_error_reporting(struct mem_ctl_info *mci)
{
struct amd64_pvt *pvt = mci->pvt_info;
- const cpumask_t *cpumask = cpumask_of_node(pvt->mc_node_id);
+ const struct cpumask *cpumask = cpumask_of_node(pvt->mc_node_id);
int cpu, idx = 0, err = 0;
struct msr msrs[cpumask_weight(cpumask)];
u32 value;

static void amd64_restore_ecc_error_reporting(struct amd64_pvt *pvt)
{
- const cpumask_t *cpumask = cpumask_of_node(pvt->mc_node_id);
+ const struct cpumask *cpumask = cpumask_of_node(pvt->mc_node_id);
int cpu, idx = 0, err = 0;
struct msr msrs[cpumask_weight(cpumask)];
u32 value;
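Both functions above get the same treatment: the mask type moves from the obsolescent cpumask_t to struct cpumask, and, as the following hunks show, fixed on-stack masks give way to cpumask_var_t. With CONFIG_CPUMASK_OFFSTACK=y a cpumask_var_t is heap-allocated, so a kernel built with a large NR_CPUS no longer spends NR_CPUS/8 bytes of stack per mask. A minimal sketch of the resulting allocate/use/free discipline follows; the helper name example_walk_node_cpus is illustrative, while the cpumask calls are the real API:

/* Illustrative only: walk the cores of one node with a cpumask_var_t. */
static int example_walk_node_cpus(int nid)
{
	cpumask_var_t mask;
	int cpu;

	/* Heap-allocated when CONFIG_CPUMASK_OFFSTACK=y; zeroed either way. */
	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	get_cpus_on_this_dct_cpumask(mask, nid);

	for_each_cpu(cpu, mask)
		pr_debug("cpu %d is on node %d\n", cpu, nid);

	free_cpumask_var(mask);		/* every exit path must free the mask */
	return 0;
}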
/* get all cores on this DCT */
-static void get_cpus_on_this_dct_cpumask(cpumask_t *mask, int nid)
+static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, int nid)
{
int cpu;
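The body is elided in this hunk; presumably it walks the online CPUs and collects those whose northbridge id matches nid. A sketch under that assumption, with amd_get_nb_id() standing in as the CPU-to-node mapping (an assumption, not shown by the hunk):

static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, int nid)
{
	int cpu;

	/* Assumed body: pick every online core sitting behind NB @nid. */
	for_each_online_cpu(cpu)
		if (amd_get_nb_id(cpu) == nid)
			cpumask_set_cpu(cpu, mask);
}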
/* check MCG_CTL on all the cpus on this node */
static bool amd64_nb_mce_bank_enabled_on_node(int nid)
{
- cpumask_t mask;
+ cpumask_var_t mask;
struct msr *msrs;
int cpu, nbe, idx = 0;
bool ret = false;
- cpumask_clear(&mask);
+ if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
+ amd64_printk(KERN_WARNING, "%s: error allocating mask\n",
+ __func__);
+ return false;
+ }

- get_cpus_on_this_dct_cpumask(&mask, nid);
+ get_cpus_on_this_dct_cpumask(mask, nid);

- msrs = kzalloc(sizeof(struct msr) * cpumask_weight(&mask), GFP_KERNEL);
+ msrs = kzalloc(sizeof(struct msr) * cpumask_weight(mask), GFP_KERNEL);
if (!msrs) {
amd64_printk(KERN_WARNING, "%s: error allocating msrs\n",
__func__);
+ free_cpumask_var(mask);
return false;
}

- rdmsr_on_cpus(&mask, MSR_IA32_MCG_CTL, msrs);
+ rdmsr_on_cpus(mask, MSR_IA32_MCG_CTL, msrs);

- for_each_cpu(cpu, &mask) {
+ for_each_cpu(cpu, mask) {
nbe = msrs[idx].l & K8_MSR_MCGCTL_NBE;
debugf0("core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n",
cpu, msrs[idx].q,
(nbe ? "enabled" : "disabled"));

out:
kfree(msrs);
+ free_cpumask_var(mask);
return ret;
}
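To make the loop above easier to follow: rdmsr_on_cpus() reads the given MSR on every CPU in the mask and deposits one struct msr per CPU, so the driver's running idx advances in step with for_each_cpu(). A condensed sketch of that read-and-check pattern; example_nbe_set_everywhere is a hypothetical name, and the positional msrs[] indexing mirrors the driver code of this era (later kernels reworked rdmsr_on_cpus() around a per-CPU buffer):

static bool example_nbe_set_everywhere(const struct cpumask *mask)
{
	struct msr *msrs;
	int cpu, idx = 0;
	bool ret = true;

	/* One slot per CPU in the mask, visited in for_each_cpu() order. */
	msrs = kcalloc(cpumask_weight(mask), sizeof(*msrs), GFP_KERNEL);
	if (!msrs)
		return false;

	rdmsr_on_cpus(mask, MSR_IA32_MCG_CTL, msrs);

	for_each_cpu(cpu, mask)
		if (!(msrs[idx++].l & K8_MSR_MCGCTL_NBE))
			ret = false;

	kfree(msrs);
	return ret;
}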