Commit | Line | Data |
---|---|---|
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _PARISC_TLBFLUSH_H
#define _PARISC_TLBFLUSH_H

/* TLB flushing routines.... */

#include <linux/mm.h>
#include <linux/sched.h>
#include <asm/mmu_context.h>


/* This is for the serialisation of PxTLB broadcasts. At least on the
 * N class systems, only one PxTLB inter processor broadcast can be
 * active at any one time on the Merced bus. This tlb purge
 * synchronisation is fairly lightweight and harmless so we activate
 * it on all systems not just the N class.

 * It is also used to ensure PTE updates are atomic and consistent
 * with the TLB.
 */
extern spinlock_t pa_tlb_lock;

/* Bracket every pdtlb/pitlb purge sequence with this lock so that only
 * one PxTLB broadcast is in flight at a time (see comment above). */
#define purge_tlb_start(flags)	spin_lock_irqsave(&pa_tlb_lock, flags)
#define purge_tlb_end(flags)	spin_unlock_irqrestore(&pa_tlb_lock, flags)

extern void flush_tlb_all(void);
extern void flush_tlb_all_local(void *);

#define smp_flush_tlb_all()	flush_tlb_all()

/* Range flush within one space id (sid); implemented out of line. */
int __flush_tlb_range(unsigned long sid,
	unsigned long start, unsigned long end);

/* User range flush: sid comes from the vma's mm context. */
#define flush_tlb_range(vma, start, end) \
	__flush_tlb_range((vma)->vm_mm->context, start, end)

/* Kernel range flush: kernel mappings live in space id 0. */
#define flush_tlb_kernel_range(start, end) \
	__flush_tlb_range(0, start, end)

/*
 * flush_tlb_mm()
 *
 * The code to switch to a new context is NOT valid for processes
 * which play with the space id's.  Thus, we have to preserve the
 * space and just flush the entire tlb.  However, the compilers,
 * dynamic linker, etc, do not manipulate space id's, so there
 * could be a significant performance benefit in switching contexts
 * and not flushing the whole tlb.
 */

/* Flush all TLB entries belonging to @mm.  In practice this always
 * flushes the entire TLB (see the "#if 1" below); the alternative of
 * recycling the space id is kept for reference but known broken. */
static inline void flush_tlb_mm(struct mm_struct *mm)
{
	BUG_ON(mm == &init_mm); /* Should never happen */

#if 1 || defined(CONFIG_SMP)
	/* Except for very small threads, flushing the whole TLB is
	 * faster than using __flush_tlb_range.  The pdtlb and pitlb
	 * instructions are very slow because of the TLB broadcast.
	 * It might be faster to do local range flushes on all CPUs
	 * on PA 2.0 systems.
	 */
	flush_tlb_all();
#else
	/* FIXME: currently broken, causing space id and protection ids
	 * to go out of sync, resulting in faults on userspace accesses.
	 * This approach needs further investigation since running many
	 * small applications (e.g., GCC testsuite) is faster on HP-UX.
	 */
	if (mm) {
		/* Drop the old space id (0 means "never allocated")... */
		if (mm->context != 0)
			free_sid(mm->context);
		/* ...and give the mm a fresh one, implicitly invalidating
		 * every stale TLB entry tagged with the old sid. */
		mm->context = alloc_sid();
		/* If this mm is live on this CPU, load the new sid now. */
		if (mm == current->active_mm)
			load_context(mm->context);
	}
#endif
}
78 | ||
/* Flush the TLB entry (data, and instruction if the TLB is split) for
 * a single user page at @addr in @vma's address space. */
static inline void flush_tlb_page(struct vm_area_struct *vma,
	unsigned long addr)
{
	unsigned long flags, sid;

	sid = vma->vm_mm->context;
	/* Serialise the PxTLB broadcast under pa_tlb_lock (irqs off). */
	purge_tlb_start(flags);
	/* Point space register 1 at the vma's space so the purges below
	 * hit the right address space. */
	mtsp(sid, 1);
	pdtlb(addr);
	/* Split I/D TLBs need a separate instruction-TLB purge. */
	if (unlikely(split_tlb))
		pitlb(addr);
	purge_tlb_end(flags);
}
#endif