/*
 * Based on arch/arm/mm/context.c
 *
 * Copyright (C) 2002-2003 Deep Blue Solutions Ltd, all rights reserved.
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/init.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/percpu.h>

#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/cachetype.h>

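/*
 * Added commentary: asid_bits() reads the ASIDBits field (bits [7:4]) of
 * ID_AA64MMFR0_EL1. The architected values are 0 (8-bit ASIDs) and 2
 * (16-bit ASIDs); the expression below computes field * 4 + 8, giving 8 or
 * 16 accordingly. The 'reg' macro parameter is unused.
 */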
#define asid_bits(reg) \
	(((read_cpuid(ID_AA64MMFR0_EL1) & 0xf0) >> 2) + 8)

#define ASID_FIRST_VERSION	(1 << MAX_ASID_BITS)

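/*
 * Added commentary: mm->context.id packs a rollover "version" in the bits
 * above MAX_ASID_BITS and the hardware ASID in the bits below it.
 * cpu_last_asid starts at ASID_FIRST_VERSION so that a freshly initialised
 * context.id of 0 (see __init_new_context) always looks stale and forces a
 * real allocation.
 */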
static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
unsigned int cpu_last_asid = ASID_FIRST_VERSION;

/*
 * We fork()ed a process, and we need a new context for the child to run in.
 */
void __init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	mm->context.id = 0;
	raw_spin_lock_init(&mm->context.id_lock);
}

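/*
 * Added commentary: flush_context() runs with TTBR0 pointed at the reserved
 * (empty) tables so that no new TLB entries can be fetched for the outgoing
 * ASID while the flush is in progress; user mappings are re-installed later
 * through cpu_switch_mm().
 */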
static void flush_context(void)
{
	/* set the reserved TTBR0 before flushing the TLB */
	cpu_set_reserved_ttbr0();
	flush_tlb_all();
	if (icache_is_aivivt())
		__flush_icache_all();
}

#ifdef CONFIG_SMP

static void set_mm_context(struct mm_struct *mm, unsigned int asid)
{
	unsigned long flags;

	/*
	 * Locking needed for multi-threaded applications where the same
	 * mm->context.id could be set from different CPUs during the
	 * broadcast. This function is also called via IPI so the
	 * mm->context.id_lock has to be IRQ-safe.
	 */
	raw_spin_lock_irqsave(&mm->context.id_lock, flags);
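	/*
	 * Added commentary: the check below compares only the version fields.
	 * XOR-ing context.id with cpu_last_asid and shifting out the low
	 * MAX_ASID_BITS discards the hardware ASID, so a non-zero result
	 * means this mm still holds an ASID from an older rollover period.
	 */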
	if (likely((mm->context.id ^ cpu_last_asid) >> MAX_ASID_BITS)) {
		/*
		 * Old version of ASID found. Set the new one and reset
		 * mm_cpumask(mm).
		 */
		mm->context.id = asid;
		cpumask_clear(mm_cpumask(mm));
	}
	raw_spin_unlock_irqrestore(&mm->context.id_lock, flags);

	/*
	 * Set the mm_cpumask(mm) bit for the current CPU.
	 */
	cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
}

/*
 * Reset the ASID on the current CPU. This function call is broadcast from the
 * CPU handling the ASID rollover and holding cpu_asid_lock.
 */
static void reset_context(void *info)
{
	unsigned int asid;
	unsigned int cpu = smp_processor_id();
	struct mm_struct *mm = current->active_mm;

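	/*
	 * Added commentary: the smp_rmb() pairs with the smp_wmb() in
	 * __new_context(), ensuring the cpu_last_asid update made by the CPU
	 * driving the rollover is visible before this CPU derives its new
	 * ASID from it.
	 */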
	smp_rmb();
	asid = cpu_last_asid + cpu;

	flush_context();
	set_mm_context(mm, asid);

	/* set the new ASID */
	cpu_switch_mm(mm->pgd, mm);
}

#else

static inline void set_mm_context(struct mm_struct *mm, unsigned int asid)
{
	mm->context.id = asid;
	cpumask_copy(mm_cpumask(mm), cpumask_of(smp_processor_id()));
}

#endif

void __new_context(struct mm_struct *mm)
{
	unsigned int asid;
	unsigned int bits = asid_bits();

	raw_spin_lock(&cpu_asid_lock);
#ifdef CONFIG_SMP
	/*
	 * Check the ASID again, in case the change was broadcast from another
	 * CPU before we acquired the lock.
	 */
	if (!unlikely((mm->context.id ^ cpu_last_asid) >> MAX_ASID_BITS)) {
		cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
		raw_spin_unlock(&cpu_asid_lock);
		return;
	}
#endif
	/*
	 * At this point, it is guaranteed that the current mm (with an old
	 * ASID) isn't active on any other CPU since the ASIDs are changed
	 * simultaneously via IPI.
	 */
	asid = ++cpu_last_asid;

	/*
	 * If we've used up all our ASIDs, we need to start a new version and
	 * flush the TLB.
	 */
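	/*
	 * Added commentary, illustrative example (assuming MAX_ASID_BITS is
	 * 16): with 8-bit hardware ASIDs (bits == 8), once the low 8 bits of
	 * cpu_last_asid wrap to zero we add 0x10000 - 0x100 so cpu_last_asid
	 * lands at the start of the next version. Each CPU (via
	 * reset_context()) then takes cpu_last_asid + cpu_id as its ASID, and
	 * the final "cpu_last_asid += NR_CPUS - 1" accounts for that
	 * per-CPU block.
	 */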
	if (unlikely((asid & ((1 << bits) - 1)) == 0)) {
		/* increment the ASID version */
		cpu_last_asid += (1 << MAX_ASID_BITS) - (1 << bits);
		if (cpu_last_asid == 0)
			cpu_last_asid = ASID_FIRST_VERSION;
		asid = cpu_last_asid + smp_processor_id();
		flush_context();
#ifdef CONFIG_SMP
		smp_wmb();
		smp_call_function(reset_context, NULL, 1);
#endif
		cpu_last_asid += NR_CPUS - 1;
	}

	set_mm_context(mm, asid);
	raw_spin_unlock(&cpu_asid_lock);
}