#define APM_CPU_PART_POTENZA 0x000
+#define ID_AA64MMFR0_BIGENDEL0_SHIFT	16
+#define ID_AA64MMFR0_BIGENDEL0_MASK	(0xf << ID_AA64MMFR0_BIGENDEL0_SHIFT)
+#define ID_AA64MMFR0_BIGENDEL0(mmfr0)	\
+	(((mmfr0) & ID_AA64MMFR0_BIGENDEL0_MASK) >> ID_AA64MMFR0_BIGENDEL0_SHIFT)
+#define ID_AA64MMFR0_BIGEND_SHIFT	8
+#define ID_AA64MMFR0_BIGEND_MASK	(0xf << ID_AA64MMFR0_BIGEND_SHIFT)
+#define ID_AA64MMFR0_BIGEND(mmfr0)	\
+	(((mmfr0) & ID_AA64MMFR0_BIGEND_MASK) >> ID_AA64MMFR0_BIGEND_SHIFT)
+
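These macros extract two 4-bit fields of ID_AA64MMFR0_EL1: BigEnd (bits [11:8]) and BigEndEL0 (bits [19:16]). A minimal standalone sketch of how the extraction behaves; the mmfr0 value below is fabricated for illustration, not read from real hardware:

    #include <stdint.h>
    #include <stdio.h>

    /* Mirrors the definitions added above. */
    #define ID_AA64MMFR0_BIGENDEL0_SHIFT 16
    #define ID_AA64MMFR0_BIGENDEL0_MASK  (0xf << ID_AA64MMFR0_BIGENDEL0_SHIFT)
    #define ID_AA64MMFR0_BIGEND_SHIFT    8
    #define ID_AA64MMFR0_BIGEND_MASK     (0xf << ID_AA64MMFR0_BIGEND_SHIFT)

    int main(void)
    {
        /* Fabricated ID_AA64MMFR0_EL1 value: BigEnd == 0x1, BigEndEL0 == 0x0. */
        uint64_t mmfr0 = 0x122;

        printf("BigEnd    = %#llx\n", (unsigned long long)
               ((mmfr0 & ID_AA64MMFR0_BIGEND_MASK) >> ID_AA64MMFR0_BIGEND_SHIFT));
        printf("BigEndEL0 = %#llx\n", (unsigned long long)
               ((mmfr0 & ID_AA64MMFR0_BIGENDEL0_MASK) >> ID_AA64MMFR0_BIGENDEL0_SHIFT));
        return 0;
    }

This prints 0x1 for BigEnd and 0 for BigEndEL0.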
#ifndef __ASSEMBLY__
static inline u32 __attribute_const__ read_cpuid_cachetype(void)
{
	return read_cpuid(CTR_EL0);
}
+static inline bool id_aa64mmfr0_mixed_endian_el0(u64 mmfr0)
+{
+	return (ID_AA64MMFR0_BIGEND(mmfr0) == 0x1) ||
+		(ID_AA64MMFR0_BIGENDEL0(mmfr0) == 0x1);
+}
#endif /* __ASSEMBLY__ */
#endif
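In ID_AA64MMFR0_EL1, a field value of 0x1 means the endianness is configurable: BigEnd == 0x1 advertises mixed-endian support at all exception levels, while BigEndEL0 == 0x1 advertises mixed-endian support at EL0 only (per the Arm ARM, BigEndEL0 is only meaningful when BigEnd reads 0x0). The helper therefore reports true if either field advertises the capability. The remaining hunks, in cpuinfo.c, use it to maintain a system-wide view across all CPUs: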
DEFINE_PER_CPU(struct cpuinfo_arm64, cpu_data);
static struct cpuinfo_arm64 boot_cpu_data;
+static bool mixed_endian_el0 = true;
static char *icache_policy_str[] = {
[ICACHE_POLICY_RESERVED] = "RESERVED/UNKNOWN",
pr_info("Detected %s I-cache on CPU%d\n", icache_policy_str[l1ip], cpu);
}
+bool cpu_supports_mixed_endian_el0(void)
+{
+	return id_aa64mmfr0_mixed_endian_el0(read_cpuid(ID_AA64MMFR0_EL1));
+}
+
+bool system_supports_mixed_endian_el0(void)
+{
+	return mixed_endian_el0;
+}
+
+static void update_mixed_endian_el0_support(struct cpuinfo_arm64 *info)
+{
+	mixed_endian_el0 &= id_aa64mmfr0_mixed_endian_el0(info->reg_id_aa64mmfr0);
+}
+
+static void update_cpu_features(struct cpuinfo_arm64 *info)
+{
+	update_mixed_endian_el0_support(info);
+}
+
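cpu_supports_mixed_endian_el0() reads the current CPU's register directly, while system_supports_mixed_endian_el0() returns the cached system-wide state. Because mixed_endian_el0 starts out true and each CPU can only AND its own capability in, a single CPU without the feature permanently clears the system-wide flag. A standalone sketch of that fold, with fabricated per-CPU values:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Fabricated per-CPU ID_AA64MMFR0_EL1 snapshots; CPU2 lacks the feature. */
    static const uint64_t cpu_mmfr0[] = { 0x122, 0x122, 0x022 };

    static bool mixed_endian_el0_field(uint64_t mmfr0)
    {
        return (((mmfr0 >> 8) & 0xf) == 0x1) || (((mmfr0 >> 16) & 0xf) == 0x1);
    }

    int main(void)
    {
        bool mixed_endian_el0 = true;   /* optimistic start, as in the patch */

        for (unsigned int cpu = 0; cpu < 3; cpu++)
            mixed_endian_el0 &= mixed_endian_el0_field(cpu_mmfr0[cpu]);

        printf("%d\n", mixed_endian_el0);   /* 0: one incapable CPU clears it */
        return 0;
    }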
static int check_reg_mask(char *name, u64 mask, u64 boot, u64 cur, int cpu)
{
if ((boot & mask) == (cur & mask))
	cpuinfo_detect_icache_policy(info);
	check_local_cpu_errata();
+	update_cpu_features(info);
}
void cpuinfo_store_cpu(void)
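Both cpuinfo_store_cpu() (run as each secondary CPU is brought up) and the boot-CPU path funnel into __cpuinfo_store_cpu(), so every CPU that comes online folds its ID_AA64MMFR0_EL1 value into the flag. A consumer should gate an EL0 endianness switch on the system-wide check rather than the per-CPU one, since a task may migrate to any CPU. A hypothetical caller sketch (set_task_big_endian() is not part of this patch):

    /* Hypothetical, for illustration: refuse to switch a task's EL0
     * state to big-endian unless every CPU in the system supports it. */
    static int set_task_big_endian(struct task_struct *task)
    {
        if (!system_supports_mixed_endian_el0())
            return -EOPNOTSUPP;
        /* ... set the endianness (E) bit in the task's saved SPSR ... */
        return 0;
    }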