commit 986372c4367f46b34a3c0f6918d7fb95cbdf39d6 upstream.

In order to avoid checking arm64_ssbd_callback_required on each
kernel entry/exit even if no mitigation is required, let's
add yet another alternative that by default jumps over the mitigation,
and that gets nop'ed out if we're doing dynamic mitigation.
Think of it as a poor man's static key...

Reviewed-by: Julien Grall <julien.grall@arm.com>
Reviewed-by: Mark Rutland <mark.rutland@arm.com>
Acked-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
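
The "poor man's static key" idea from the message above can be modelled
in a few lines of plain C. This is a minimal userspace sketch, not
kernel code: every name in it (the insn enum, enable_wa2_handling_model,
kernel_entry_model) is invented for illustration, and it only mirrors
the arm64 alternatives callback added by the hunks below.

#include <stdbool.h>
#include <stdio.h>

/* The alternative emits a branch over the mitigation by default. */
enum insn { INSN_BRANCH_OVER, INSN_NOP };

/* Stand-in for arm64_get_ssbd_state() == ARM64_SSBD_KERNEL. */
static bool ssbd_state_is_dynamic = true;

/*
 * Stand-in for arm64_enable_wa2_handling(): run once at "boot", it
 * NOPs out the branch only if the mitigation state can be flipped
 * at runtime.
 */
static void enable_wa2_handling_model(enum insn *slot)
{
	if (ssbd_state_is_dynamic)
		*slot = INSN_NOP;
}

/* Stand-in for an entry/exit path that uses the apply_ssbd macro. */
static void kernel_entry_model(enum insn slot, bool callback_required)
{
	if (slot == INSN_BRANCH_OVER)
		return;		/* default: skip the mitigation entirely */
	if (!callback_required)
		return;		/* the pre-existing per-cpu check */
	printf("issue ARM_SMCCC_ARCH_WORKAROUND_2\n");
}

int main(void)
{
	enum insn slot = INSN_BRANCH_OVER;

	enable_wa2_handling_model(&slot);	/* one-time patch */
	kernel_entry_model(slot, true);		/* per-entry fast path */
	return 0;
}
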
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
 	*updptr = cpu_to_le32(insn);
 }
+void __init arm64_enable_wa2_handling(struct alt_instr *alt,
+				      __le32 *origptr, __le32 *updptr,
+				      int nr_inst)
+{
+	BUG_ON(nr_inst != 1);
+	/*
+	 * Only allow mitigation on EL1 entry/exit and guest
+	 * ARCH_WORKAROUND_2 handling if the SSBD state allows it to
+	 * be flipped.
+	 */
+	if (arm64_get_ssbd_state() == ARM64_SSBD_KERNEL)
+		*updptr = cpu_to_le32(aarch64_insn_gen_nop());
+}
+
 static void arm64_set_ssbd_mitigation(bool state)
 {
 	switch (psci_ops.conduit) {
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
 	// to save/restore them if required.
 	.macro	apply_ssbd, state, targ, tmp1, tmp2
 #ifdef CONFIG_ARM64_SSBD
+alternative_cb	arm64_enable_wa2_handling
+	b	\targ
+alternative_cb_end
 	ldr_this_cpu	\tmp2, arm64_ssbd_callback_required, \tmp1
 	cbz	\tmp2, \targ
 	mov	w0, #ARM_SMCCC_ARCH_WORKAROUND_2
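
A note on the resulting fast path (a reading of the hunk above, not
wording from the original patch): in the default state the callback
leaves the alternative untouched, so "b \targ" branches past the whole
sequence and no per-cpu load happens on kernel entry/exit. Only when
arm64_get_ssbd_state() returns ARM64_SSBD_KERNEL does
arm64_enable_wa2_handling() rewrite that branch to a NOP, letting
execution fall through to the existing arm64_ssbd_callback_required
check and, when the per-cpu flag is set, the
ARM_SMCCC_ARCH_WORKAROUND_2 call.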