commit 6d99b68933fbcf51f84fcbba49246ce1209ec193 upstream.
Now that KVM uses tpidr_el2 in the same way as Linux's cpu_offset in
tpidr_el1, merge the two. This saves KVM from saving/restoring
tpidr_el1 on VHE hosts, and allows future code to blindly access
per-cpu variables without triggering world-switch.
Signed-off-by: James Morse <james.morse@arm.com>
Reviewed-by: Christoffer Dall <cdall@linaro.org>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
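
[Editorial note: to make the commit's payoff concrete, a minimal sketch
follows; the per-cpu variable and helper are illustrative, not part of
the patch. Once tpidr_el1 and tpidr_el2 hold the same per-cpu offset,
an ordinary per-cpu access works unchanged in the VHE host's EL2
context.]

#include <linux/percpu.h>

/* Hypothetical per-cpu variable, for illustration only. */
static DEFINE_PER_CPU(unsigned long, demo_counter);

static inline unsigned long demo_read(void)
{
	/*
	 * Expands to a tpidr-relative load via __my_cpu_offset(); after
	 * this patch the same sequence is also valid at EL2 on a VHE
	 * host, with no world-switch fix-up required.
	 */
	return __this_cpu_read(demo_counter);
}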
#include <linux/stddef.h>
#include <linux/stringify.h>
+extern int alternatives_applied;
+
struct alt_instr {
s32 orig_offset; /* offset to original instruction */
s32 alt_offset; /* offset to replacement instruction */
*/
.macro adr_this_cpu, dst, sym, tmp
adr_l \dst, \sym
+alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
mrs \tmp, tpidr_el1
+alternative_else
+ mrs \tmp, tpidr_el2
+alternative_endif
add \dst, \dst, \tmp
.endm
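
[Editorial note: the alternative_if_not/alternative_else/alternative_endif
block is resolved once, at boot, by the instruction patcher, so there is
no runtime branch. A rough C-level sketch of the selection logic follows;
the demo_ helper name is made up, while cpus_have_cap() and read_sysreg()
are the real arm64 helpers.]

#include <asm/cpufeature.h>
#include <asm/sysreg.h>

/* Sketch of what the patched adr_this_cpu sequence computes. */
static inline void *demo_adr_this_cpu(void *sym)
{
	unsigned long off;

	/* In the real macro this test is resolved by code patching. */
	if (cpus_have_cap(ARM64_HAS_VIRT_HOST_EXTN))
		off = read_sysreg(tpidr_el2);
	else
		off = read_sysreg(tpidr_el1);

	return (char *)sym + off;
}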
*/
.macro ldr_this_cpu dst, sym, tmp
adr_l \dst, \sym
+alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
mrs \tmp, tpidr_el1
+alternative_else
+ mrs \tmp, tpidr_el2
+alternative_endif
ldr \dst, [\dst, \tmp]
.endm
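
[Editorial note: ldr_this_cpu differs from adr_this_cpu only in its
final instruction, loading the per-cpu value rather than forming its
address. In C terms, reusing the hypothetical sketch above:]

/* Sketch: ldr_this_cpu is adr_this_cpu plus a dereference. */
static inline unsigned long demo_ldr_this_cpu(void *sym)
{
	return *(unsigned long *)demo_adr_this_cpu(sym);
}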
#ifndef __ASM_PERCPU_H
#define __ASM_PERCPU_H
+#include <asm/alternative.h>
+
static inline void set_my_cpu_offset(unsigned long off)
{
- asm volatile("msr tpidr_el1, %0" :: "r" (off) : "memory");
+ asm volatile(ALTERNATIVE("msr tpidr_el1, %0",
+ "msr tpidr_el2, %0",
+ ARM64_HAS_VIRT_HOST_EXTN)
+ :: "r" (off) : "memory");
}
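
[Editorial note: set_my_cpu_offset() is called once per CPU during
bring-up, so after patching, VHE hosts write the offset to tpidr_el2
instead of tpidr_el1. A sketch of the call-site pattern follows; the
wrapper name is illustrative, per_cpu_offset() is the generic helper
the arm64 boot path uses.]

#include <linux/percpu.h>

static void demo_init_cpu_offset(unsigned int cpu)
{
	/* On VHE hosts the ALTERNATIVE above targets tpidr_el2. */
	set_my_cpu_offset(per_cpu_offset(cpu));
}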
static inline unsigned long __my_cpu_offset(void)
* We want to allow caching the value, so avoid using volatile and
* instead use a fake stack read to hazard against barrier().
*/
- asm("mrs %0, tpidr_el1" : "=r" (off) :
+ asm(ALTERNATIVE("mrs %0, tpidr_el1",
+ "mrs %0, tpidr_el2",
+ ARM64_HAS_VIRT_HOST_EXTN)
+ : "=r" (off) :
"Q" (*(const unsigned long *)current_stack_pointer));
return off;
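
[Editorial note: the "Q" input constraint is what makes the comment
work. The asm is not volatile, so the compiler may reuse a previous
result; but because the asm appears to read memory at the stack
pointer, barrier()'s memory clobber forces a fresh mrs. An illustrative
caller, with made-up names:]

#include <linux/compiler.h>
#include <linux/percpu.h>
#include <linux/preempt.h>

static DEFINE_PER_CPU(int, demo_val);	/* hypothetical variable */

static void demo_offset_caching(void)
{
	int *p1, *p2, *p3;

	preempt_disable();
	p1 = this_cpu_ptr(&demo_val);	/* reads the tpidr register     */
	p2 = this_cpu_ptr(&demo_val);	/* compiler may reuse p1's read */
	barrier();			/* clobbers memory, hazarding   */
					/* the fake stack read          */
	p3 = this_cpu_ptr(&demo_val);	/* forced back to a fresh mrs   */
	preempt_enable();
}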
#define ALT_ORIG_PTR(a) __ALT_PTR(a, orig_offset)
#define ALT_REPL_PTR(a) __ALT_PTR(a, alt_offset)
+int alternatives_applied;
+
struct alt_region {
struct alt_instr *begin;
struct alt_instr *end;
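
[Editorial note: each alt_instr stores self-relative offsets, so
ALT_ORIG_PTR()/ALT_REPL_PTR() recover absolute pointers by adding the
stored offset to the field's own address. A simplified sketch of how a
region is walked; the function name is made up and the actual
instruction fix-ups are omitted:]

static void demo_walk_region(struct alt_region *region)
{
	struct alt_instr *alt;

	for (alt = region->begin; alt < region->end; alt++) {
		u32 *origptr = ALT_ORIG_PTR(alt);	/* insn to patch */
		u32 *replptr = ALT_REPL_PTR(alt);	/* replacement   */

		/* ...check the cpucap, then copy/fix up instructions
		 * from replptr to origptr... */
	}
}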
*/
static int __apply_alternatives_multi_stop(void *unused)
{
- static int patched = 0;
struct alt_region region = {
.begin = (struct alt_instr *)__alt_instructions,
.end = (struct alt_instr *)__alt_instructions_end,
/* We always have a CPU 0 at this point (__init) */
if (smp_processor_id()) {
- while (!READ_ONCE(patched))
+ while (!READ_ONCE(alternatives_applied))
cpu_relax();
isb();
} else {
- BUG_ON(patched);
+ BUG_ON(alternatives_applied);
__apply_alternatives(&region);
/* Barriers provided by the cache flushing */
- WRITE_ONCE(patched, 1);
+ WRITE_ONCE(alternatives_applied, 1);
}
return 0;
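
[Editorial note: __apply_alternatives_multi_stop() is a stop_machine()
callback. Every online CPU enters it with interrupts disabled; CPU 0
patches, and the others spin on alternatives_applied (now global, so
the cpufeature code below can read it too) before issuing an isb().
The invocation mirrors arm64's apply_alternatives_all():]

#include <linux/stop_machine.h>

void apply_alternatives_all(void)
{
	/* Rendezvous every online CPU so none runs half-patched code. */
	stop_machine(__apply_alternatives_multi_stop, NULL, cpu_online_mask);
}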
early_param("kpti", parse_kpti);
#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
+static int cpu_copy_el2regs(void *__unused)
+{
+ /*
+ * Copy register values that aren't redirected by hardware.
+ *
+ * Before code patching, we only set tpidr_el1, so all CPUs need to
+ * copy this value to tpidr_el2 before we patch the code. Once we've
+ * done that, freshly-onlined CPUs will set tpidr_el2, so we don't
+ * need to do anything here.
+ */
+ if (!alternatives_applied)
+ write_sysreg(read_sysreg(tpidr_el1), tpidr_el2);
+
+ return 0;
+}
+
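
[Editorial note: the .enable hook wired up below runs on every online
CPU; in this kernel it is driven through stop_machine(), which is why
cpu_copy_el2regs() has the int (*)(void *) shape. A condensed sketch,
simplified from arm64's enable_cpu_capabilities(), with a made-up
demo_ name:]

static void demo_enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps)
{
	for (; caps->matches; caps++)
		if (caps->enable && cpus_have_cap(caps->capability))
			/* Runs caps->enable() once on each online CPU. */
			stop_machine(caps->enable, NULL, cpu_online_mask);
}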
static const struct arm64_cpu_capabilities arm64_features[] = {
{
.desc = "GIC system register CPU interface",
.capability = ARM64_HAS_VIRT_HOST_EXTN,
.def_scope = SCOPE_SYSTEM,
.matches = runs_at_el2,
+ .enable = cpu_copy_el2regs,
},
{
.desc = "32-bit EL0 Support",