Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | #ifndef _PARISC_TLBFLUSH_H |
2 | #define _PARISC_TLBFLUSH_H | |
3 | ||
4 | /* TLB flushing routines.... */ | |
5 | ||
1da177e4 | 6 | #include <linux/mm.h> |
e8edc6e0 | 7 | #include <linux/sched.h> |
1da177e4 LT |
8 | #include <asm/mmu_context.h> |
9 | ||
/* This is for the serialisation of PxTLB broadcasts.  At least on the
 * N class systems, only one PxTLB inter processor broadcast can be
 * active at any one time on the Merced bus.  This tlb purge
 * synchronisation is fairly lightweight and harmless so we activate
 * it on all systems not just the N class.
 *
 * It is also used to ensure PTE updates are atomic and consistent
 * with the TLB.
 */
/* Lock serialising PxTLB purges and PTE updates (see comment above). */
extern spinlock_t pa_tlb_lock;

/* Bracket a TLB purge sequence: take/release pa_tlb_lock with local
 * interrupts disabled; `flags` holds the saved IRQ state between the
 * two calls. */
#define purge_tlb_start(flags)	spin_lock_irqsave(&pa_tlb_lock, flags)
#define purge_tlb_end(flags)	spin_unlock_irqrestore(&pa_tlb_lock, flags)

extern void flush_tlb_all(void);
extern void flush_tlb_all_local(void *);

/* An SMP-wide flush is implemented by the global flush. */
#define smp_flush_tlb_all()	flush_tlb_all()

/* Flush the TLB for [start, end) in the space identified by `sid`. */
int __flush_tlb_range(unsigned long sid,
	unsigned long start, unsigned long end);

/* User range flush: the space id comes from the vma's mm context. */
#define flush_tlb_range(vma, start, end) \
	__flush_tlb_range((vma)->vm_mm->context, start, end)

/* Kernel range flush: passes space id 0. */
#define flush_tlb_kernel_range(start, end) \
	__flush_tlb_range(0, start, end)
/*
 * flush_tlb_mm()
 *
 * The code to switch to a new context is NOT valid for processes
 * which play with the space id's.  Thus, we have to preserve the
 * space and just flush the entire tlb.  However, the compilers,
 * dynamic linker, etc, do not manipulate space id's, so there
 * could be a significant performance benefit in switching contexts
 * and not flushing the whole tlb.
 */
592ac93a RC |
50 | static inline void flush_tlb_mm(struct mm_struct *mm) |
51 | { | |
04532c4f KM |
52 | BUG_ON(mm == &init_mm); /* Should never happen */ |
53 | ||
5289f46b | 54 | #if 1 || defined(CONFIG_SMP) |
01ab6057 JDA |
55 | /* Except for very small threads, flushing the whole TLB is |
56 | * faster than using __flush_tlb_range. The pdtlb and pitlb | |
57 | * instructions are very slow because of the TLB broadcast. | |
58 | * It might be faster to do local range flushes on all CPUs | |
59 | * on PA 2.0 systems. | |
60 | */ | |
04532c4f KM |
61 | flush_tlb_all(); |
62 | #else | |
5289f46b | 63 | /* FIXME: currently broken, causing space id and protection ids |
01ab6057 JDA |
64 | * to go out of sync, resulting in faults on userspace accesses. |
65 | * This approach needs further investigation since running many | |
66 | * small applications (e.g., GCC testsuite) is faster on HP-UX. | |
5289f46b | 67 | */ |
04532c4f KM |
68 | if (mm) { |
69 | if (mm->context != 0) | |
70 | free_sid(mm->context); | |
71 | mm->context = alloc_sid(); | |
72 | if (mm == current->active_mm) | |
73 | load_context(mm->context); | |
74 | } | |
75 | #endif | |
1da177e4 LT |
76 | } |
77 | ||
1da177e4 LT |
78 | static inline void flush_tlb_page(struct vm_area_struct *vma, |
79 | unsigned long addr) | |
80 | { | |
e8d8fc21 | 81 | unsigned long flags, sid; |
e82a3b75 | 82 | |
e8d8fc21 | 83 | sid = vma->vm_mm->context; |
e82a3b75 | 84 | purge_tlb_start(flags); |
e8d8fc21 | 85 | mtsp(sid, 1); |
1da177e4 | 86 | pdtlb(addr); |
01ab6057 JDA |
87 | if (unlikely(split_tlb)) |
88 | pitlb(addr); | |
e82a3b75 | 89 | purge_tlb_end(flags); |
1da177e4 | 90 | } |
1da177e4 | 91 | #endif |