Commit 5bee81c9 authored by Will Deacon, committed by Greg Kroah-Hartman

arm64: Add skeleton to harden the branch predictor against aliasing attacks


Commit 0f15adbb upstream.

Aliasing attacks against CPU branch predictors can allow an attacker to
redirect speculative control flow on some CPUs and potentially divulge
information from one context to another.

This patch adds initial skeleton code behind a new Kconfig option to
enable implementation-specific mitigations against these attacks for
CPUs that are affected.
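
To make the shape of the skeleton concrete: a follow-up, CPU-specific patch
would provide an invalidation sequence and register it through the hooks added
below. A minimal sketch, in which the callback, its enable method and the
vector symbols are all hypothetical placeholders (the real sequences land in
later patches):

static void hypothetical_bp_inval(void)
{
        /* CPU-specific branch predictor invalidation sequence goes here. */
}

static int enable_hypothetical_bp_hardening(void *data)
{
        const struct arm64_cpu_capabilities *entry = data;

        /*
         * install_bp_hardening_cb() is added by this patch; it records the
         * callback per CPU and, with KVM enabled, copies the hypothetical
         * EL2 vectors into a free 2KB slot.
         */
        install_bp_hardening_cb(entry, hypothetical_bp_inval,
                                __hypothetical_vecs_start,
                                __hypothetical_vecs_end);
        return 0;
}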
Co-developed-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent c10e4aa7
arch/arm64/Kconfig
@@ -818,6 +818,23 @@ config UNMAP_KERNEL_AT_EL0
          If unsure, say Y.

config HARDEN_BRANCH_PREDICTOR
        bool "Harden the branch predictor against aliasing attacks" if EXPERT
        default y
        help
          Speculation attacks against some high-performance processors rely on
          being able to manipulate the branch predictor for a victim context by
          executing aliasing branches in the attacker context. Such attacks
          can be partially mitigated against by clearing internal branch
          predictor state and limiting the prediction logic in some situations.

          This config option will take CPU-specific actions to harden the
          branch predictor against aliasing attacks and may rely on specific
          instruction sequences or control bits being set by the system
          firmware.

          If unsure, say Y.

menuconfig ARMV8_DEPRECATED
        bool "Emulate deprecated/obsolete ARMv8 instructions"
        depends on COMPAT
arch/arm64/include/asm/cpucaps.h
@@ -41,7 +41,8 @@
#define ARM64_WORKAROUND_CAVIUM_30115   20
#define ARM64_HAS_DCPOP                 21
#define ARM64_UNMAP_KERNEL_AT_EL0       23
#define ARM64_HARDEN_BRANCH_PREDICTOR   24
-#define ARM64_NCAPS                     24
+#define ARM64_NCAPS                     25
#endif /* __ASM_CPUCAPS_H */
arch/arm64/include/asm/mmu.h
@@ -41,6 +41,43 @@ static inline bool arm64_kernel_unmapped_at_el0(void)
        return cpus_have_const_cap(ARM64_UNMAP_KERNEL_AT_EL0);
}

typedef void (*bp_hardening_cb_t)(void);

struct bp_hardening_data {
        int                     hyp_vectors_slot;
        bp_hardening_cb_t       fn;
};

#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
extern char __bp_harden_hyp_vecs_start[], __bp_harden_hyp_vecs_end[];

DECLARE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);

static inline struct bp_hardening_data *arm64_get_bp_hardening_data(void)
{
        return this_cpu_ptr(&bp_hardening_data);
}

static inline void arm64_apply_bp_hardening(void)
{
        struct bp_hardening_data *d;

        if (!cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR))
                return;

        d = arm64_get_bp_hardening_data();
        if (d->fn)
                d->fn();
}
#else
static inline struct bp_hardening_data *arm64_get_bp_hardening_data(void)
{
        return NULL;
}

static inline void arm64_apply_bp_hardening(void) { }
#endif  /* CONFIG_HARDEN_BRANCH_PREDICTOR */

extern void paging_init(void);
extern void bootmem_init(void);
extern void __iomem *early_io_map(phys_addr_t phys, unsigned long virt);
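
The header above reduces to a guarded per-CPU indirect call: a constant-key
capability check cheap enough for hot paths, followed by an optional callback.
A compilable userspace analogy of that dispatch pattern (all names are
hypothetical; plain arrays stand in for the per-CPU machinery):

#include <stdio.h>
#include <stdbool.h>

typedef void (*bp_hardening_cb_t)(void);

struct bp_hardening_data {
        int hyp_vectors_slot;
        bp_hardening_cb_t fn;
};

static bool have_harden_cap;                /* cpus_have_const_cap() stand-in */
static struct bp_hardening_data bp_data[4]; /* one entry per "CPU" */
static int this_cpu;                        /* smp_processor_id() stand-in */

static void apply_bp_hardening(void)
{
        if (!have_harden_cap)      /* compiled/patched away on unaffected systems */
                return;
        if (bp_data[this_cpu].fn)  /* only affected CPUs install a callback */
                bp_data[this_cpu].fn();
}

static void fake_inval(void) { puts("invalidate branch predictor"); }

int main(void)
{
        apply_bp_hardening();           /* capability clear: does nothing */
        have_harden_cap = true;
        bp_data[this_cpu].fn = fake_inval;
        apply_bp_hardening();           /* runs the installed sequence */
        return 0;
}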
arch/arm64/include/asm/sysreg.h
@@ -333,6 +333,7 @@
/* id_aa64pfr0 */
#define ID_AA64PFR0_CSV3_SHIFT 60
#define ID_AA64PFR0_CSV2_SHIFT 56
#define ID_AA64PFR0_GIC_SHIFT 24
#define ID_AA64PFR0_ASIMD_SHIFT 20
#define ID_AA64PFR0_FP_SHIFT 16
arch/arm64/kernel/Makefile
@@ -55,6 +55,10 @@ arm64-obj-$(CONFIG_ARM64_RELOC_TEST) += arm64-reloc-test.o
arm64-reloc-test-y := reloc_test_core.o reloc_test_syms.o
arm64-obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
ifeq ($(CONFIG_KVM),y)
arm64-obj-$(CONFIG_HARDEN_BRANCH_PREDICTOR) += bpi.o
endif
obj-y += $(arm64-obj-y) vdso/ probes/
obj-m += $(arm64-obj-m)
head-y := head.o
arch/arm64/kernel/bpi.S (new file)
/*
* Contains CPU specific branch predictor invalidation sequences
*
* Copyright (C) 2018 ARM Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/linkage.h>
.macro ventry target
        .rept 31
        nop
        .endr
        b       \target
.endm

.macro vectors target
        ventry \target + 0x000
        ventry \target + 0x080
        ventry \target + 0x100
        ventry \target + 0x180
        ventry \target + 0x200
        ventry \target + 0x280
        ventry \target + 0x300
        ventry \target + 0x380
        ventry \target + 0x400
        ventry \target + 0x480
        ventry \target + 0x500
        ventry \target + 0x580
        ventry \target + 0x600
        ventry \target + 0x680
        ventry \target + 0x700
        ventry \target + 0x780
.endm

        .align  11
ENTRY(__bp_harden_hyp_vecs_start)
        .rept 4
        vectors __kvm_hyp_vector
        .endr
ENTRY(__bp_harden_hyp_vecs_end)
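
The layout arithmetic is worth spelling out: each ventry expands to 31 NOPs
plus a branch, i.e. 32 A64 instructions of 4 bytes each, so every entry
occupies exactly 0x80 bytes and the \target + 0x080 offsets line up with the
0x80 spacing of the architectural vector table entries. One expansion of the
vectors macro is therefore 16 * 0x80 = 0x800 bytes (2KB, matching the
.align 11), and the four .rept copies provide four independent 2KB slots
between __bp_harden_hyp_vecs_start and __bp_harden_hyp_vecs_end for
__copy_hyp_vect_bpi() below to patch.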
arch/arm64/kernel/cpu_errata.c
@@ -60,6 +60,80 @@ static int cpu_enable_trap_ctr_access(void *__unused)
        return 0;
}

#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);

#ifdef CONFIG_KVM
static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
                                const char *hyp_vecs_end)
{
        void *dst = lm_alias(__bp_harden_hyp_vecs_start + slot * SZ_2K);
        int i;

        /*
         * Copy the hardening sequence into each 0x80-byte vector entry of
         * the chosen 2K slot, then make it visible to instruction fetch.
         */
        for (i = 0; i < SZ_2K; i += 0x80)
                memcpy(dst + i, hyp_vecs_start, hyp_vecs_end - hyp_vecs_start);

        flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K);
}

static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
                                      const char *hyp_vecs_start,
                                      const char *hyp_vecs_end)
{
        static int last_slot = -1;
        static DEFINE_SPINLOCK(bp_lock);
        int cpu, slot = -1;

        spin_lock(&bp_lock);
        /*
         * Reuse an existing hyp vector slot if any CPU has already
         * installed this callback; otherwise claim the next free slot.
         */
        for_each_possible_cpu(cpu) {
                if (per_cpu(bp_hardening_data.fn, cpu) == fn) {
                        slot = per_cpu(bp_hardening_data.hyp_vectors_slot, cpu);
                        break;
                }
        }

        if (slot == -1) {
                last_slot++;
                BUG_ON(((__bp_harden_hyp_vecs_end - __bp_harden_hyp_vecs_start)
                        / SZ_2K) <= last_slot);
                slot = last_slot;
                __copy_hyp_vect_bpi(slot, hyp_vecs_start, hyp_vecs_end);
        }

        __this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
        __this_cpu_write(bp_hardening_data.fn, fn);
        spin_unlock(&bp_lock);
}
#else
static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
                                      const char *hyp_vecs_start,
                                      const char *hyp_vecs_end)
{
        __this_cpu_write(bp_hardening_data.fn, fn);
}
#endif  /* CONFIG_KVM */

static void install_bp_hardening_cb(const struct arm64_cpu_capabilities *entry,
                                    bp_hardening_cb_t fn,
                                    const char *hyp_vecs_start,
                                    const char *hyp_vecs_end)
{
        u64 pfr0;

        if (!entry->matches(entry, SCOPE_LOCAL_CPU))
                return;

        /*
         * A non-zero ID_AA64PFR0_EL1.CSV2 field means the CPU is not
         * susceptible to branch-target aliasing, so no callback is needed.
         */
        pfr0 = read_cpuid(ID_AA64PFR0_EL1);
        if (cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_CSV2_SHIFT))
                return;

        __install_bp_hardening_cb(fn, hyp_vecs_start, hyp_vecs_end);
}
#endif  /* CONFIG_HARDEN_BRANCH_PREDICTOR */

#define MIDR_RANGE(model, min, max) \
        .def_scope = SCOPE_LOCAL_CPU, \
        .matches = is_affected_midr_range, \
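
The reuse-or-claim logic in __install_bp_hardening_cb() is easy to check in
isolation. A small, compilable model of just that bookkeeping (per-CPU
variables flattened into arrays; the spinlock and the vector copy omitted):

#include <assert.h>
#include <stdio.h>

#define NR_CPUS  4
#define NR_SLOTS 4

typedef void (*cb_t)(void);

static cb_t cpu_fn[NR_CPUS];
static int  cpu_slot[NR_CPUS];

/* Mirrors __install_bp_hardening_cb(): reuse a slot if any CPU already
 * runs this callback, otherwise claim the next free one. */
static void install_cb(int this_cpu, cb_t fn)
{
        static int last_slot = -1;
        int cpu, slot = -1;

        for (cpu = 0; cpu < NR_CPUS; cpu++) {
                if (cpu_fn[cpu] == fn) {
                        slot = cpu_slot[cpu];
                        break;
                }
        }
        if (slot == -1) {
                slot = ++last_slot;
                assert(slot < NR_SLOTS);  /* BUG_ON() in the kernel */
                /* __copy_hyp_vect_bpi(slot, ...) would run here */
        }
        cpu_slot[this_cpu] = slot;
        cpu_fn[this_cpu] = fn;
}

static void seq_a(void) { }
static void seq_b(void) { }

int main(void)
{
        install_cb(0, seq_a);
        install_cb(1, seq_a);   /* same callback: slot 0 is reused */
        install_cb(2, seq_b);   /* new callback: claims slot 1 */
        printf("%d %d %d\n", cpu_slot[0], cpu_slot[1], cpu_slot[2]); /* 0 0 1 */
        return 0;
}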
arch/arm64/kernel/cpufeature.c
@@ -126,6 +126,7 @@ static const struct arm64_ftr_bits ftr_id_aa64isar1[] = {
static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = {
        ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV3_SHIFT, 4, 0),
        ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV2_SHIFT, 4, 0),
        ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64PFR0_GIC_SHIFT, 4, 0),
        S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_ASIMD_SHIFT, 4, ID_AA64PFR0_ASIMD_NI),
        S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_FP_SHIFT, 4, ID_AA64PFR0_FP_NI),
arch/arm64/kernel/entry.S
@@ -724,13 +724,15 @@ el0_ia:
        /*
         * Instruction abort handling
         */
        mrs     x26, far_el1
-       // enable interrupts before calling the main handler
-       enable_dbg_and_irq
+       enable_dbg
+#ifdef CONFIG_TRACE_IRQFLAGS
+       bl      trace_hardirqs_off
+#endif
        ct_user_exit
        mov     x0, x26
        mov     x1, x25
        mov     x2, sp
-       bl      do_mem_abort
+       bl      do_el0_ia_bp_hardening
        b       ret_to_user
el0_fpsimd_acc:
        /*
arch/arm64/mm/context.c
@@ -242,6 +242,8 @@ asmlinkage void post_ttbr_update_workaround(void)
                        "ic iallu; dsb nsh; isb",
                        ARM64_WORKAROUND_CAVIUM_27456,
                        CONFIG_CAVIUM_ERRATUM_27456));

        arm64_apply_bp_hardening();
}

static int asids_init(void)
arch/arm64/mm/fault.c
@@ -751,6 +751,23 @@ asmlinkage void __exception do_mem_abort(unsigned long addr, unsigned int esr,
        arm64_notify_die("", regs, &info, esr);
}

asmlinkage void __exception do_el0_ia_bp_hardening(unsigned long addr,
                                                   unsigned int esr,
                                                   struct pt_regs *regs)
{
        /*
         * We've taken an instruction abort from userspace and not yet
         * re-enabled IRQs. If the address is a kernel address, apply
         * BP hardening prior to enabling IRQs and pre-emption.
         */
        if (addr > TASK_SIZE)
                arm64_apply_bp_hardening();

        local_irq_enable();
        do_mem_abort(addr, esr, regs);
}

/*
 * Handle stack alignment exceptions.
 */