/*
 *  linux/arch/arm/mm/context.c
 *
 *  Copyright (C) 2002-2003 Deep Blue Solutions Ltd, all rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
10 | #include <linux/init.h> | |
11 | #include <linux/sched.h> | |
12 | #include <linux/mm.h> | |
11805bcf CM |
13 | #include <linux/smp.h> |
14 | #include <linux/percpu.h> | |
1da177e4 LT |
15 | |
16 | #include <asm/mmu_context.h> | |
575320d6 | 17 | #include <asm/thread_notify.h> |
1da177e4 LT |
18 | #include <asm/tlbflush.h> |
19 | ||
bd31b859 | 20 | static DEFINE_RAW_SPINLOCK(cpu_asid_lock); |
8678c1f0 | 21 | unsigned int cpu_last_asid = ASID_FIRST_VERSION; |
1da177e4 | 22 | |
14d8c951 | 23 | #ifdef CONFIG_ARM_LPAE |
7fec1b57 | 24 | void cpu_set_reserved_ttbr0(void) |
3c5f7e7b WD |
25 | { |
26 | unsigned long ttbl = __pa(swapper_pg_dir); | |
27 | unsigned long ttbh = 0; | |
28 | ||
29 | /* | |
30 | * Set TTBR0 to swapper_pg_dir which contains only global entries. The | |
31 | * ASID is set to 0. | |
32 | */ | |
33 | asm volatile( | |
34 | " mcrr p15, 0, %0, %1, c2 @ set TTBR0\n" | |
35 | : | |
36 | : "r" (ttbl), "r" (ttbh)); | |
37 | isb(); | |
14d8c951 CM |
38 | } |
39 | #else | |
7fec1b57 | 40 | void cpu_set_reserved_ttbr0(void) |
3c5f7e7b WD |
41 | { |
42 | u32 ttb; | |
43 | /* Copy TTBR1 into TTBR0 */ | |
44 | asm volatile( | |
45 | " mrc p15, 0, %0, c2, c0, 1 @ read TTBR1\n" | |
46 | " mcr p15, 0, %0, c2, c0, 0 @ set TTBR0\n" | |
47 | : "=r" (ttb)); | |
48 | isb(); | |
49 | } | |
14d8c951 CM |
50 | #endif |
51 | ||
#ifdef CONFIG_PID_IN_CONTEXTIDR
/*
 * On every thread switch, mirror the incoming task's PID into the
 * PROCID field of CONTEXTIDR (bits above ASID_BITS) so external
 * debug/trace hardware can identify the running process.  The low
 * ASID field is left untouched.
 */
static int contextidr_notifier(struct notifier_block *unused, unsigned long cmd,
			       void *t)
{
	u32 contextidr;
	pid_t pid;
	struct thread_info *thread = t;

	/* Only thread-switch events are of interest here. */
	if (cmd != THREAD_NOTIFY_SWITCH)
		return NOTIFY_DONE;

	pid = task_pid_nr(thread->task) << ASID_BITS;
	asm volatile(
	"	mrc	p15, 0, %0, c13, c0, 1\n"
	"	and	%0, %0, %2\n"
	"	orr	%0, %0, %1\n"
	"	mcr	p15, 0, %0, c13, c0, 1\n"
	: "=r" (contextidr), "+r" (pid)
	: "I" (~ASID_MASK));
	isb();

	return NOTIFY_OK;
}

static struct notifier_block contextidr_notifier_block = {
	.notifier_call = contextidr_notifier,
};

/* Hook the notifier up once the thread-notify machinery is ready. */
static int __init contextidr_notifier_init(void)
{
	return thread_register_notifier(&contextidr_notifier_block);
}
arch_initcall(contextidr_notifier_init);
#endif
86 | ||
1da177e4 LT |
87 | /* |
88 | * We fork()ed a process, and we need a new context for the child | |
3c5f7e7b | 89 | * to run in. |
1da177e4 LT |
90 | */ |
91 | void __init_new_context(struct task_struct *tsk, struct mm_struct *mm) | |
92 | { | |
93 | mm->context.id = 0; | |
bd31b859 | 94 | raw_spin_lock_init(&mm->context.id_lock); |
1da177e4 LT |
95 | } |
96 | ||
/*
 * Invalidate this CPU's TLB for an ASID rollover.  TTBR0 is first
 * parked on the reserved (global-only) tables so nothing stale can be
 * refetched while the flush is in progress.
 */
static void flush_context(void)
{
	cpu_set_reserved_ttbr0();
	local_flush_tlb_all();
	/* VIVT ASID-tagged I-caches must be flushed alongside the TLB. */
	if (icache_is_vivt_asid_tagged()) {
		__flush_icache_all();
		dsb();
	}
}
106 | ||
107 | #ifdef CONFIG_SMP | |
108 | ||
109 | static void set_mm_context(struct mm_struct *mm, unsigned int asid) | |
110 | { | |
111 | unsigned long flags; | |
112 | ||
113 | /* | |
114 | * Locking needed for multi-threaded applications where the | |
115 | * same mm->context.id could be set from different CPUs during | |
116 | * the broadcast. This function is also called via IPI so the | |
117 | * mm->context.id_lock has to be IRQ-safe. | |
118 | */ | |
bd31b859 | 119 | raw_spin_lock_irqsave(&mm->context.id_lock, flags); |
11805bcf CM |
120 | if (likely((mm->context.id ^ cpu_last_asid) >> ASID_BITS)) { |
121 | /* | |
122 | * Old version of ASID found. Set the new one and | |
123 | * reset mm_cpumask(mm). | |
124 | */ | |
125 | mm->context.id = asid; | |
126 | cpumask_clear(mm_cpumask(mm)); | |
127 | } | |
bd31b859 | 128 | raw_spin_unlock_irqrestore(&mm->context.id_lock, flags); |
11805bcf CM |
129 | |
130 | /* | |
131 | * Set the mm_cpumask(mm) bit for the current CPU. | |
132 | */ | |
133 | cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm)); | |
134 | } | |
135 | ||
136 | /* | |
137 | * Reset the ASID on the current CPU. This function call is broadcast | |
138 | * from the CPU handling the ASID rollover and holding cpu_asid_lock. | |
139 | */ | |
140 | static void reset_context(void *info) | |
141 | { | |
142 | unsigned int asid; | |
143 | unsigned int cpu = smp_processor_id(); | |
e323969c | 144 | struct mm_struct *mm = current->active_mm; |
11805bcf CM |
145 | |
146 | smp_rmb(); | |
a0a54d37 | 147 | asid = cpu_last_asid + cpu + 1; |
11805bcf CM |
148 | |
149 | flush_context(); | |
150 | set_mm_context(mm, asid); | |
151 | ||
152 | /* set the new ASID */ | |
3c5f7e7b | 153 | cpu_switch_mm(mm->pgd, mm); |
11805bcf CM |
154 | } |
155 | ||
156 | #else | |
157 | ||
158 | static inline void set_mm_context(struct mm_struct *mm, unsigned int asid) | |
159 | { | |
160 | mm->context.id = asid; | |
161 | cpumask_copy(mm_cpumask(mm), cpumask_of(smp_processor_id())); | |
162 | } | |
163 | ||
164 | #endif | |
165 | ||
1da177e4 LT |
166 | void __new_context(struct mm_struct *mm) |
167 | { | |
168 | unsigned int asid; | |
169 | ||
bd31b859 | 170 | raw_spin_lock(&cpu_asid_lock); |
11805bcf CM |
171 | #ifdef CONFIG_SMP |
172 | /* | |
173 | * Check the ASID again, in case the change was broadcast from | |
174 | * another CPU before we acquired the lock. | |
175 | */ | |
176 | if (unlikely(((mm->context.id ^ cpu_last_asid) >> ASID_BITS) == 0)) { | |
177 | cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm)); | |
bd31b859 | 178 | raw_spin_unlock(&cpu_asid_lock); |
11805bcf CM |
179 | return; |
180 | } | |
181 | #endif | |
182 | /* | |
183 | * At this point, it is guaranteed that the current mm (with | |
184 | * an old ASID) isn't active on any other CPU since the ASIDs | |
185 | * are changed simultaneously via IPI. | |
186 | */ | |
1da177e4 LT |
187 | asid = ++cpu_last_asid; |
188 | if (asid == 0) | |
8678c1f0 | 189 | asid = cpu_last_asid = ASID_FIRST_VERSION; |
1da177e4 LT |
190 | |
191 | /* | |
192 | * If we've used up all our ASIDs, we need | |
193 | * to start a new version and flush the TLB. | |
194 | */ | |
8678c1f0 | 195 | if (unlikely((asid & ~ASID_MASK) == 0)) { |
a0a54d37 | 196 | asid = cpu_last_asid + smp_processor_id() + 1; |
11805bcf CM |
197 | flush_context(); |
198 | #ifdef CONFIG_SMP | |
199 | smp_wmb(); | |
200 | smp_call_function(reset_context, NULL, 1); | |
201 | #endif | |
a0a54d37 | 202 | cpu_last_asid += NR_CPUS; |
9d99df4b | 203 | } |
1da177e4 | 204 | |
11805bcf | 205 | set_mm_context(mm, asid); |
bd31b859 | 206 | raw_spin_unlock(&cpu_asid_lock); |
1da177e4 | 207 | } |