/*
 *  linux/arch/arm/mm/context.c
 *
 *  Copyright (C) 2002-2003 Deep Blue Solutions Ltd, all rights reserved.
 *  Copyright (C) 2012 ARM Limited
 *
 *  Author: Will Deacon <will.deacon@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/percpu.h>

#include <asm/mmu_context.h>
#include <asm/smp_plat.h>
#include <asm/thread_notify.h>
#include <asm/tlbflush.h>

/*
 * On ARMv6, we have the following structure in the Context ID:
 *
 * 31                         7          0
 * +-------------------------+-----------+
 * |      process ID         |   ASID    |
 * +-------------------------+-----------+
 * |              context ID             |
 * +-------------------------------------+
 *
 * The ASID is used to tag entries in the CPU caches and TLBs.
 * The context ID is used by debuggers and trace logic, and
 * should be unique within all running processes.
 *
 * In big endian operation, the two 32 bit words are swapped if accessed
 * by non-64-bit operations.
 */
#define ASID_FIRST_VERSION	(1ULL << ASID_BITS)
#define NUM_USER_ASIDS		ASID_FIRST_VERSION
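
/*
 * A sketch of the ID encoding (assuming ASID_BITS == 8, as on ARMv6/v7):
 * mm->context.id combines a rollover generation in its upper bits with
 * the hardware ASID in the low ASID_BITS. An id of 0x305 thus means
 * generation 0x300 and hardware ASID 0x05; an mm is current whenever
 * (id ^ asid_generation) >> ASID_BITS == 0, i.e. the generations match.
 */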

static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS);

/* ID each CPU is currently running with; zeroed by a rollover. */
static DEFINE_PER_CPU(atomic64_t, active_asids);
/* Per-CPU snapshot of the ID that was live at the last rollover. */
static DEFINE_PER_CPU(u64, reserved_asids);
/* CPUs that must flush their TLB before the next user switch. */
static cpumask_t tlb_flush_pending;

#ifdef CONFIG_ARM_ERRATA_798181
void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
			     cpumask_t *mask)
{
	int cpu;
	unsigned long flags;
	u64 context_id, asid;

	raw_spin_lock_irqsave(&cpu_asid_lock, flags);
	context_id = mm->context.id.counter;
	for_each_online_cpu(cpu) {
		if (cpu == this_cpu)
			continue;
		/*
		 * We only need to send an IPI if the other CPUs are
		 * running the same ASID as the one being invalidated.
		 */
		asid = per_cpu(active_asids, cpu).counter;
		if (asid == 0)
			asid = per_cpu(reserved_asids, cpu);
		if (context_id == asid)
			cpumask_set_cpu(cpu, mask);
	}
	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
}
#endif

#ifdef CONFIG_ARM_LPAE
static void cpu_set_reserved_ttbr0(void)
{
	unsigned long ttbl = __pa(swapper_pg_dir);
	unsigned long ttbh = 0;

	/*
	 * Set TTBR0 to swapper_pg_dir which contains only global entries. The
	 * ASID is set to 0.
	 */
	asm volatile(
	"	mcrr	p15, 0, %0, %1, c2	@ set TTBR0\n"
	:
	: "r" (ttbl), "r" (ttbh));
	isb();
}
#else
static void cpu_set_reserved_ttbr0(void)
{
	u32 ttb;
	/* Copy TTBR1 into TTBR0 */
	asm volatile(
	"	mrc	p15, 0, %0, c2, c0, 1	@ read TTBR1\n"
	"	mcr	p15, 0, %0, c2, c0, 0	@ set TTBR0\n"
	: "=r" (ttb));
	isb();
}
#endif

#ifdef CONFIG_PID_IN_CONTEXTIDR
static int contextidr_notifier(struct notifier_block *unused, unsigned long cmd,
			       void *t)
{
	u32 contextidr;
	pid_t pid;
	struct thread_info *thread = t;

	if (cmd != THREAD_NOTIFY_SWITCH)
		return NOTIFY_DONE;

	pid = task_pid_nr(thread->task) << ASID_BITS;
	asm volatile(
	"	mrc	p15, 0, %0, c13, c0, 1\n"
	"	and	%0, %0, %2\n"
	"	orr	%0, %0, %1\n"
	"	mcr	p15, 0, %0, c13, c0, 1\n"
	: "=r" (contextidr), "+r" (pid)
	: "I" (~ASID_MASK));
	isb();

	return NOTIFY_OK;
}

static struct notifier_block contextidr_notifier_block = {
	.notifier_call = contextidr_notifier,
};

static int __init contextidr_notifier_init(void)
{
	return thread_register_notifier(&contextidr_notifier_block);
}
arch_initcall(contextidr_notifier_init);
#endif

static void flush_context(unsigned int cpu)
{
	int i;
	u64 asid;

	/* Update the list of reserved ASIDs and the ASID bitmap. */
	bitmap_clear(asid_map, 0, NUM_USER_ASIDS);
	for_each_possible_cpu(i) {
		if (i == cpu) {
			asid = 0;
		} else {
			asid = atomic64_xchg(&per_cpu(active_asids, i), 0);
			/*
			 * If this CPU has already been through a
			 * rollover, but hasn't run another task in
			 * the meantime, we must preserve its reserved
			 * ASID, as this is the only trace we have of
			 * the process it is still running.
			 */
			if (asid == 0)
				asid = per_cpu(reserved_asids, i);
			__set_bit(asid & ~ASID_MASK, asid_map);
		}
		per_cpu(reserved_asids, i) = asid;
	}

	/* Queue a TLB invalidate and flush the I-cache if necessary. */
	if (!tlb_ops_need_broadcast())
		cpumask_set_cpu(cpu, &tlb_flush_pending);
	else
		cpumask_setall(&tlb_flush_pending);

	if (icache_is_vivt_asid_tagged())
		__flush_icache_all();
}

static int is_reserved_asid(u64 asid)
{
	int cpu;
	for_each_possible_cpu(cpu)
		if (per_cpu(reserved_asids, cpu) == asid)
			return 1;
	return 0;
}

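/*
 * Allocate a context ID for @mm, called with cpu_asid_lock held. If the
 * old ID was live at the last rollover we keep its hardware ASID and
 * just stamp it with the current generation; otherwise we take the
 * first free ASID from the bitmap (always from #1 upwards, since #0 is
 * reserved), triggering a rollover if none is left.
 */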
static u64 new_context(struct mm_struct *mm, unsigned int cpu)
{
	u64 asid = atomic64_read(&mm->context.id);
	u64 generation = atomic64_read(&asid_generation);

	if (asid != 0 && is_reserved_asid(asid)) {
		/*
		 * Our current ASID was active during a rollover, we can
		 * continue to use it and this was just a false alarm.
		 */
		asid = generation | (asid & ~ASID_MASK);
	} else {
		/*
		 * Allocate a free ASID. If we can't find one, take a
		 * note of the currently active ASIDs and mark the TLBs
		 * as requiring flushes. We always count from ASID #1,
		 * as we reserve ASID #0 to switch via TTBR0 and indicate
		 * rollover events.
		 */
		asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
		if (asid == NUM_USER_ASIDS) {
			generation = atomic64_add_return(ASID_FIRST_VERSION,
							 &asid_generation);
			flush_context(cpu);
			asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
		}
		__set_bit(asid, asid_map);
		asid |= generation;
		cpumask_clear(mm_cpumask(mm));
	}

	return asid;
}

void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
{
	unsigned long flags;
	unsigned int cpu = smp_processor_id();
	u64 asid;

	if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq))
		__check_vmalloc_seq(mm);

	/*
	 * Required during context switch to avoid speculative page table
	 * walking with the wrong TTBR.
	 */
	cpu_set_reserved_ttbr0();

	asid = atomic64_read(&mm->context.id);
	if (!((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS)
	    && atomic64_xchg(&per_cpu(active_asids, cpu), asid))
		goto switch_mm_fastpath;

	raw_spin_lock_irqsave(&cpu_asid_lock, flags);
	/* Check that our ASID belongs to the current generation. */
	asid = atomic64_read(&mm->context.id);
	if ((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS) {
		asid = new_context(mm, cpu);
		atomic64_set(&mm->context.id, asid);
	}

	if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending)) {
		local_flush_bp_all();
		local_flush_tlb_all();
		erratum_a15_798181();
	}

	atomic64_set(&per_cpu(active_asids, cpu), asid);
	cpumask_set_cpu(cpu, mm_cpumask(mm));
	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);

switch_mm_fastpath:
	cpu_switch_mm(mm->pgd, mm);
}