Commit 7fec1b57 authored by Catalin Marinas

ARM: Remove __ARCH_WANT_INTERRUPTS_ON_CTXSW on ASID-capable CPUs



Since the ASIDs must be unique to an mm across all the CPUs in a system,
the __new_context() function needs to broadcast a context reset event to
all the CPUs during ASID allocation if a roll-over occurs. Such IPIs
cannot be issued with interrupts disabled, so ARM had to define
__ARCH_WANT_INTERRUPTS_ON_CTXSW.
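
As an illustration of the roll-over constraint, here is a small
stand-alone C sketch (not the kernel code): the ASID_BITS value, the
context id layout and the helper names are assumptions chosen only to
show how the generation held in the upper bits of the context id is
compared against cpu_last_asid, and why a roll-over is the point at
which the other CPUs have to be told to reset their contexts.

/*
 * Stand-alone user-space sketch, not the kernel code.  ASID_BITS, the
 * context id layout and the helper names are assumptions used only to
 * illustrate the generation check and the roll-over point.
 */
#include <stdio.h>

#define ASID_BITS	8			/* hardware ASID width assumed here */
#define ASID_MASK	((1u << ASID_BITS) - 1)	/* low bits: hardware ASID */
						/* high bits: generation   */

static unsigned int cpu_last_asid = 1u << ASID_BITS;	/* first generation */

/* Hand out the next ASID; a roll-over starts a new generation. */
static unsigned int new_asid(void)
{
	unsigned int asid = ++cpu_last_asid;

	if ((asid & ASID_MASK) == 0) {
		/*
		 * Hardware ASIDs exhausted.  This is where the kernel
		 * has to IPI all other CPUs to reset their contexts,
		 * which is why interrupts must be enabled here.
		 */
		printf("roll-over: broadcast context reset\n");
		asid = ++cpu_last_asid;		/* skip hardware ASID 0 */
	}
	return asid;
}

/* Stale if the generation (upper) bits no longer match cpu_last_asid. */
static int asid_is_stale(unsigned int mm_asid)
{
	return ((mm_asid ^ cpu_last_asid) >> ASID_BITS) != 0;
}

int main(void)
{
	unsigned int mm_asid = new_asid();
	int i;

	/* Exhaust the current generation to force a roll-over. */
	for (i = 0; i < (1 << ASID_BITS); i++)
		new_asid();

	printf("stale after roll-over: %d\n", asid_is_stale(mm_asid));
	return 0;
}

After a roll-over, every mm still carrying an ASID from the old
generation fails the check and needs a new allocation, which is what
makes the reset broadcast (and hence enabled interrupts) necessary.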

This patch turns the check_context() function into
check_and_switch_context(), called from switch_mm(). On ASID-capable
CPUs (ARMv6 onwards), if a new ASID is needed and interrupts are
disabled, it defers the __new_context() and cpu_switch_mm() calls to
the post-lock switch hook, where interrupts are enabled. Setting the
reserved TTBR0 was also moved from cpu_v7_switch_mm() to
check_and_switch_context().
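
The deferral relies on the finish_arch_post_lock_switch() hook that
appears in the hunks below. The stand-alone sketch that follows (not
the kernel sources) shows the define-the-name-to-itself idiom used to
override it; the generic "#ifndef" fallback shown here is an assumption
about the generic side, included only to illustrate why the no-op
version is compiled out when the architecture provides a real hook.

/*
 * Stand-alone sketch of the override idiom, not the kernel sources.
 * The "arch" header defines the hook name to itself so that a generic
 * "#ifndef" fallback (assumed here) degrades to a no-op only when no
 * architecture hook is provided.
 */
#include <stdio.h>

/* "arch" side: real hook, announced via a self-referential define. */
#define finish_arch_post_lock_switch finish_arch_post_lock_switch
static inline void finish_arch_post_lock_switch(void)
{
	/* This is where the deferred __new_context()/cpu_switch_mm()
	 * pair would run, now that interrupts are enabled again. */
	printf("arch hook: complete the deferred mm switch\n");
}

/* "generic" side: fall back to a no-op only if no arch hook exists. */
#ifndef finish_arch_post_lock_switch
#define finish_arch_post_lock_switch()	do { } while (0)
#endif

int main(void)
{
	/* Called after the run-queue lock is dropped on context switch. */
	finish_arch_post_lock_switch();
	return 0;
}

With this in place, switch_mm() can run entirely with interrupts
disabled and merely set TIF_SWITCH_MM; the ASID allocation then happens
once the run-queue lock has been released and interrupts are back on.
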
Reviewed-by: Will Deacon <will.deacon@arm.com>
Tested-by: Will Deacon <will.deacon@arm.com>
Reviewed-by: Frank Rowand <frank.rowand@am.sony.com>
Tested-by: Marc Zyngier <Marc.Zyngier@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
parent 3c5f7e7b
@@ -39,6 +39,8 @@ typedef struct {
* so enable interrupts over the context switch to avoid high
* latency.
*/
#ifndef CONFIG_CPU_HAS_ASID
#define __ARCH_WANT_INTERRUPTS_ON_CTXSW
#endif
#endif
@@ -49,39 +49,80 @@ DECLARE_PER_CPU(struct mm_struct *, current_mm);
void __init_new_context(struct task_struct *tsk, struct mm_struct *mm);
void __new_context(struct mm_struct *mm);
void cpu_set_reserved_ttbr0(void);
static inline void check_context(struct mm_struct *mm)
static inline void switch_new_context(struct mm_struct *mm)
{
/*
* This code is executed with interrupts enabled. Therefore,
* mm->context.id cannot be updated to the latest ASID version
* on a different CPU (and condition below not triggered)
* without first getting an IPI to reset the context. The
* alternative is to take a read_lock on mm->context.id_lock
* (after changing its type to rwlock_t).
*/
if (unlikely((mm->context.id ^ cpu_last_asid) >> ASID_BITS))
__new_context(mm);
unsigned long flags;
__new_context(mm);
local_irq_save(flags);
cpu_switch_mm(mm->pgd, mm);
local_irq_restore(flags);
}
static inline void check_and_switch_context(struct mm_struct *mm,
struct task_struct *tsk)
{
if (unlikely(mm->context.kvm_seq != init_mm.context.kvm_seq))
__check_kvm_seq(mm);
/*
* Required during context switch to avoid speculative page table
* walking with the wrong TTBR.
*/
cpu_set_reserved_ttbr0();
if (!((mm->context.id ^ cpu_last_asid) >> ASID_BITS))
/*
* The ASID is from the current generation, just switch to the
* new pgd. This condition is only true for calls from
* context_switch() and interrupts are already disabled.
*/
cpu_switch_mm(mm->pgd, mm);
else if (irqs_disabled())
/*
* Defer the new ASID allocation until after the context
* switch critical region since __new_context() cannot be
* called with interrupts disabled (it sends IPIs).
*/
set_ti_thread_flag(task_thread_info(tsk), TIF_SWITCH_MM);
else
/*
* That is a direct call to switch_mm() or activate_mm() with
* interrupts enabled and a new context.
*/
switch_new_context(mm);
}
#define init_new_context(tsk,mm) (__init_new_context(tsk,mm),0)
#else
#define finish_arch_post_lock_switch \
finish_arch_post_lock_switch
static inline void finish_arch_post_lock_switch(void)
{
if (test_and_clear_thread_flag(TIF_SWITCH_MM))
switch_new_context(current->mm);
}
static inline void check_context(struct mm_struct *mm)
#else /* !CONFIG_CPU_HAS_ASID */
static inline void check_and_switch_context(struct mm_struct *mm,
struct task_struct *tsk)
{
#ifdef CONFIG_MMU
if (unlikely(mm->context.kvm_seq != init_mm.context.kvm_seq))
__check_kvm_seq(mm);
cpu_switch_mm(mm->pgd, mm);
#endif
}
#define init_new_context(tsk,mm) 0
#endif
#define finish_arch_post_lock_switch() do { } while (0)
#endif /* CONFIG_CPU_HAS_ASID */
#define destroy_context(mm) do { } while(0)
@@ -123,8 +164,7 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
struct mm_struct **crt_mm = &per_cpu(current_mm, cpu);
*crt_mm = next;
#endif
check_context(next);
cpu_switch_mm(next->pgd, next);
check_and_switch_context(next, tsk);
if (cache_is_vivt())
cpumask_clear_cpu(cpu, mm_cpumask(prev));
}
@@ -146,6 +146,7 @@ extern void vfp_flush_hwstate(struct thread_info *);
#define TIF_MEMDIE 18 /* is terminating due to OOM killer */
#define TIF_RESTORE_SIGMASK 20
#define TIF_SECCOMP 21
#define TIF_SWITCH_MM 22 /* deferred switch_mm */
#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
@@ -23,7 +23,7 @@ DEFINE_PER_CPU(struct mm_struct *, current_mm);
#endif
#ifdef CONFIG_ARM_LPAE
static void cpu_set_reserved_ttbr0(void)
void cpu_set_reserved_ttbr0(void)
{
unsigned long ttbl = __pa(swapper_pg_dir);
unsigned long ttbh = 0;
@@ -39,7 +39,7 @@ static void cpu_set_reserved_ttbr0(void)
isb();
}
#else
static void cpu_set_reserved_ttbr0(void)
void cpu_set_reserved_ttbr0(void)
{
u32 ttb;
/* Copy TTBR1 into TTBR0 */
@@ -46,9 +46,6 @@ ENTRY(cpu_v7_switch_mm)
#ifdef CONFIG_ARM_ERRATA_430973
mcr p15, 0, r2, c7, c5, 6 @ flush BTAC/BTB
#endif
mrc p15, 0, r2, c2, c0, 1 @ load TTB 1
mcr p15, 0, r2, c2, c0, 0 @ into TTB 0
isb
#ifdef CONFIG_ARM_ERRATA_754322
dsb
#endif