Commit | Line | Data |
---|---|---|
1970282f SR |
1 | #ifndef _ASM_POWERPC_TLBFLUSH_H |
2 | #define _ASM_POWERPC_TLBFLUSH_H | |
3 | /* | |
4 | * TLB flushing: | |
5 | * | |
6 | * - flush_tlb_mm(mm) flushes the specified mm context TLB's | |
7 | * - flush_tlb_page(vma, vmaddr) flushes one page | |
8 | * - flush_tlb_page_nohash(vma, vmaddr) flushes one page if SW loaded TLB | |
9 | * - flush_tlb_range(vma, start, end) flushes a range of pages | |
10 | * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages | |
11 | * - flush_tlb_pgtables(mm, start, end) flushes a range of page tables | |
12 | * | |
13 | * This program is free software; you can redistribute it and/or | |
14 | * modify it under the terms of the GNU General Public License | |
15 | * as published by the Free Software Foundation; either version | |
16 | * 2 of the License, or (at your option) any later version. | |
17 | */ | |
18 | #ifdef __KERNEL__ | |
19 | ||
1970282f SR |
20 | |
21 | struct mm_struct; | |
22 | ||
23 | #ifdef CONFIG_PPC64 | |
24 | ||
25 | #include <linux/percpu.h> | |
26 | #include <asm/page.h> | |
27 | ||
28 | #define PPC64_TLB_BATCH_NR 192 | |
29 | ||
/*
 * Per-CPU batch of hash-page-table invalidations.  Entries are queued
 * here while lazy MMU mode is active and pushed out together via
 * __flush_tlb_pending().
 */
struct ppc64_tlb_batch {
	int active;				/* non-zero while lazy MMU mode is entered */
	unsigned long index;			/* number of entries currently queued */
	struct mm_struct *mm;			/* address space the queued entries belong to */
	real_pte_t pte[PPC64_TLB_BATCH_NR];	/* queued PTE values */
	unsigned long vaddr[PPC64_TLB_BATCH_NR];/* matching virtual addresses */
	unsigned int psize;			/* presumably the page size shared by the
						 * queued entries -- TODO confirm */
};
38 | DECLARE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch); | |
39 | ||
40 | extern void __flush_tlb_pending(struct ppc64_tlb_batch *batch); | |
41 | ||
a741e679 BH |
42 | extern void hpte_need_flush(struct mm_struct *mm, unsigned long addr, |
43 | pte_t *ptep, unsigned long pte, int huge); | |
44 | ||
45 | #define __HAVE_ARCH_ENTER_LAZY_MMU_MODE | |
46 | ||
47 | static inline void arch_enter_lazy_mmu_mode(void) | |
48 | { | |
49 | struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch); | |
50 | ||
51 | batch->active = 1; | |
52 | } | |
53 | ||
54 | static inline void arch_leave_lazy_mmu_mode(void) | |
1970282f | 55 | { |
a741e679 | 56 | struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch); |
1970282f SR |
57 | |
58 | if (batch->index) | |
59 | __flush_tlb_pending(batch); | |
a741e679 | 60 | batch->active = 0; |
1970282f SR |
61 | } |
62 | ||
a741e679 BH |
63 | #define arch_flush_lazy_mmu_mode() do {} while (0) |
64 | ||
65 | ||
3c726f8d BH |
66 | extern void flush_hash_page(unsigned long va, real_pte_t pte, int psize, |
67 | int local); | |
68 | extern void flush_hash_range(unsigned long number, int local); | |
1970282f SR |
69 | |
70 | #else /* CONFIG_PPC64 */ | |
71 | ||
72 | #include <linux/mm.h> | |
73 | ||
74 | extern void _tlbie(unsigned long address); | |
75 | extern void _tlbia(void); | |
76 | ||
77 | /* | |
78 | * TODO: (CONFIG_FSL_BOOKE) determine if flush_tlb_range & | |
79 | * flush_tlb_kernel_range are best implemented as tlbia vs | |
80 | * specific tlbie's | |
81 | */ | |
82 | ||
/*
 * flush_tlb_pending(): flush all pending TLB entries.
 * 4xx (other than 44x) and 8xx can issue the tlbia instruction
 * directly; 44x and FSL BookE go through the _tlbia() helper.
 */
#if (defined(CONFIG_4xx) && !defined(CONFIG_44x)) || defined(CONFIG_8xx)
#define flush_tlb_pending()	asm volatile ("tlbia; sync" : : : "memory")
#elif defined(CONFIG_4xx) || defined(CONFIG_FSL_BOOKE)
#define flush_tlb_pending()	_tlbia()
#endif
88 | ||
89 | /* | |
90 | * This gets called at the end of handling a page fault, when | |
91 | * the kernel has put a new PTE into the page table for the process. | |
92 | * We use it to ensure coherency between the i-cache and d-cache | |
93 | * for the page which has just been mapped in. | |
94 | * On machines which use an MMU hash table, we use this to put a | |
95 | * corresponding HPTE into the hash table ahead of time, instead of | |
96 | * waiting for the inevitable extra hash-table miss exception. | |
97 | */ | |
98 | extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t); | |
99 | ||
100 | #endif /* CONFIG_PPC64 */ | |
101 | ||
102 | #if defined(CONFIG_PPC64) || defined(CONFIG_4xx) || \ | |
103 | defined(CONFIG_FSL_BOOKE) || defined(CONFIG_8xx) | |
104 | ||
/*
 * Flush all TLB entries for an address space.  Intentionally a no-op
 * on these platforms (PPC64 and the software-loaded-TLB 4xx/FSL
 * BookE/8xx cores); presumably invalidation is handled through other
 * paths, e.g. the hash-table flush batching on PPC64 -- TODO confirm.
 */
static inline void flush_tlb_mm(struct mm_struct *mm)
{
}
108 | ||
/*
 * Flush the TLB entry for a single user page.  A no-op on PPC64;
 * on the 32-bit software-loaded-TLB cores the entry is invalidated
 * directly with a tlbie.  'vma' is unused here.
 */
static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long vmaddr)
{
#ifndef CONFIG_PPC64
	_tlbie(vmaddr);
#endif
}
116 | ||
/*
 * Flush one page's TLB entry on software-loaded-TLB cores; nothing
 * to do on PPC64.  'vma' is unused here.
 */
static inline void flush_tlb_page_nohash(struct vm_area_struct *vma,
					 unsigned long vmaddr)
{
#ifdef CONFIG_PPC64
	/* nothing to do */
#else
	_tlbie(vmaddr);
#endif
}
124 | ||
/*
 * Flush TLB entries for a range of user pages.  Intentionally a no-op
 * on these platforms; presumably range invalidation is unnecessary or
 * handled elsewhere -- TODO confirm.  All parameters are unused.
 */
static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
}
129 | ||
/*
 * Flush TLB entries for a range of kernel pages.  Intentionally a
 * no-op on these platforms; both parameters are unused.
 */
static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{
}
134 | ||
135 | #else /* 6xx, 7xx, 7xxx cpus */ | |
136 | ||
137 | extern void flush_tlb_mm(struct mm_struct *mm); | |
138 | extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr); | |
139 | extern void flush_tlb_page_nohash(struct vm_area_struct *vma, unsigned long addr); | |
140 | extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, | |
141 | unsigned long end); | |
142 | extern void flush_tlb_kernel_range(unsigned long start, unsigned long end); | |
143 | ||
144 | #endif | |
145 | ||
/*
 * Called from munmap after page-table pages have been freed.  We don't
 * need to do anything here; there's nothing special about our
 * page-table pages. -- paulus
 *
 * All parameters are deliberately unused.
 */
static inline void flush_tlb_pgtables(struct mm_struct *mm,
				      unsigned long start, unsigned long end)
{
}
155 | ||
156 | #endif /*__KERNEL__ */ | |
157 | #endif /* _ASM_POWERPC_TLBFLUSH_H */ |