/*
 *  arch/arm/include/asm/mmu_context.h
 *
 *  Copyright (C) 1996 Russell King.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  Changelog:
 *   27-06-1996	RMK	Created
 */
#ifndef __ASM_ARM_MMU_CONTEXT_H
#define __ASM_ARM_MMU_CONTEXT_H

#include <linux/compiler.h>
#include <linux/sched.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/proc-fns.h>
#include <asm-generic/mm_hooks.h>
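
/*
 * Context for the declaration below (editorial sketch, not in the
 * original header): init_mm.context.vmalloc_seq is bumped whenever the
 * kernel's vmalloc/ioremap page tables change; __check_vmalloc_seq()
 * (defined in arch/arm/mm/ioremap.c) copies the updated kernel entries
 * from init_mm into this mm's page tables so they stay in sync.
 */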

void __check_vmalloc_seq(struct mm_struct *mm);

#ifdef CONFIG_CPU_HAS_ASID

void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk);
#define init_new_context(tsk,mm)	({ mm->context.id = 0; })
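/*
 * Editorial note (a sketch of the assumed ASID allocator behaviour):
 * context.id == 0 marks "no ASID allocated yet", so the first
 * check_and_switch_context() on this mm hands out a fresh ASID from
 * the current generation, flushing TLBs when the generation rolls over.
 */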

#else	/* !CONFIG_CPU_HAS_ASID */

#ifdef CONFIG_MMU

static inline void check_and_switch_context(struct mm_struct *mm,
					    struct task_struct *tsk)
{
	if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq))
		__check_vmalloc_seq(mm);

	if (irqs_disabled())
		/*
		 * cpu_switch_mm() needs to flush the VIVT caches. To avoid
		 * high interrupt latencies, defer the call and continue
		 * running with the old mm. Since we only support UP systems
		 * on non-ASID CPUs, the old mm will remain valid until the
		 * finish_arch_post_lock_switch() call.
		 */
		set_ti_thread_flag(task_thread_info(tsk), TIF_SWITCH_MM);
	else
		cpu_switch_mm(mm->pgd, mm);
}

#define finish_arch_post_lock_switch \
	finish_arch_post_lock_switch
static inline void finish_arch_post_lock_switch(void)
{
	if (test_and_clear_thread_flag(TIF_SWITCH_MM)) {
		struct mm_struct *mm = current->mm;
		cpu_switch_mm(mm->pgd, mm);
	}
}
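
/*
 * Illustrative timeline for the deferred switch above (assumed call
 * path in the core scheduler, for illustration only):
 *
 *	context_switch()			// IRQs disabled here
 *	  switch_mm(prev, next, tsk)
 *	    check_and_switch_context(next, tsk)	// sets TIF_SWITCH_MM,
 *						// keeps running on old mm
 *	finish_task_switch()
 *	  finish_arch_post_lock_switch()	// IRQs enabled again
 *	    cpu_switch_mm(mm->pgd, mm)		// VIVT flush happens here
 */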

#endif	/* CONFIG_MMU */

#define init_new_context(tsk,mm)	0

#endif	/* CONFIG_CPU_HAS_ASID */

#define destroy_context(mm)		do { } while(0)
#define activate_mm(prev,next)		switch_mm(prev, next, NULL)
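
/*
 * Editorial note: activate_mm() is what exec_mmap() (fs/exec.c) uses to
 * install a freshly built mm; on ARM it is simply switch_mm() with no
 * task. destroy_context() is a no-op because there is no per-mm state
 * that needs freeing here.
 */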

/*
 * This is called when "tsk" is about to enter lazy TLB mode.
 *
 * mm:  describes the currently active mm context
 * tsk: task which is entering lazy tlb
 *
 * tsk->mm will be NULL
 */
static inline void
enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}
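
/*
 * Editorial note: this is empty on ARM because a kernel thread simply
 * keeps borrowing the previous task's active_mm; nothing architectural
 * has to change until the next real switch_mm().
 */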

/*
 * This is the actual mm switch as far as the scheduler
 * is concerned.  No registers are touched.  We avoid
 * calling the CPU specific function when the mm hasn't
 * actually changed.
 */
static inline void
switch_mm(struct mm_struct *prev, struct mm_struct *next,
	  struct task_struct *tsk)
{
#ifdef CONFIG_MMU
	unsigned int cpu = smp_processor_id();

#ifdef CONFIG_SMP
	/*
	 * Check for possible thread migration: if this mm is live on
	 * other CPUs but has never run on this one, the local I-cache
	 * may hold stale lines for next's code, so invalidate it before
	 * executing from next here.
	 */
	if (!cpumask_empty(mm_cpumask(next)) &&
	    !cpumask_test_cpu(cpu, mm_cpumask(next)))
		__flush_icache_all();
#endif
	if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next) {
		check_and_switch_context(next, tsk);
		if (cache_is_vivt())
			cpumask_clear_cpu(cpu, mm_cpumask(prev));
	}
#endif
}
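
/*
 * Editorial note on the cpumask dance above: mm_cpumask(mm) records
 * which CPUs may still hold TLB or cache state for mm. switch_mm()
 * marks this CPU before switching in, and on VIVT caches drops prev's
 * bit, since cpu_switch_mm() flushes any of prev's cache lines held
 * locally. TLB shootdown code uses this mask to limit cross-CPU
 * maintenance to CPUs that actually need it.
 */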

#define deactivate_mm(tsk,mm)	do { } while (0)

#endif