#ifndef _ASM_POWERPC_HUGETLB_H
#define _ASM_POWERPC_HUGETLB_H

#ifdef CONFIG_HUGETLB_PAGE
#include <asm/page.h>
#include <asm-generic/hugetlb.h>

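/*
 * Slab cache backing the huge-PTE tables that hugepd entries point at;
 * used by the embedded (FSL Book3E) hugepage code.
 */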
extern struct kmem_cache *hugepte_cache;

#ifdef CONFIG_PPC_BOOK3S_64

#include <asm/book3s/64/hugetlb.h>
/*
 * This should work for other subarchs too. But right now we use the
 * new format only for 64-bit book3s.
 */
static inline pte_t *hugepd_page(hugepd_t hpd)
{
        BUG_ON(!hugepd_ok(hpd));
        /*
         * We have only four bits to encode the MMU page size.
         */
        BUILD_BUG_ON((MMU_PAGE_COUNT - 1) > 0xf);
        return __va(hpd.pd & HUGEPD_ADDR_MASK);
}

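/*
 * Recover the MMU page size index (into mmu_psize_defs[]) encoded in
 * the low bits of the hugepd; the BUILD_BUG_ON() above guarantees the
 * index fits in the four bits this encoding provides.
 */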
static inline unsigned int hugepd_mmu_psize(hugepd_t hpd)
{
        return (hpd.pd & HUGEPD_SHIFT_MASK) >> 2;
}

static inline unsigned int hugepd_shift(hugepd_t hpd)
{
        return mmu_psize_to_shift(hugepd_mmu_psize(hpd));
}

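/*
 * With the hash MMU, TLB invalidation for hugepages is driven from the
 * hash-page-table update path, so the flush helpers below only have
 * work to do when the radix MMU is enabled.
 */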
static inline void flush_hugetlb_page(struct vm_area_struct *vma,
                                      unsigned long vmaddr)
{
        if (radix_enabled())
                return radix__flush_hugetlb_page(vma, vmaddr);
}

static inline void __local_flush_hugetlb_page(struct vm_area_struct *vma,
                                              unsigned long vmaddr)
{
        if (radix_enabled())
                return radix__local_flush_hugetlb_page(vma, vmaddr);
}

#else

static inline pte_t *hugepd_page(hugepd_t hpd)
{
        BUG_ON(!hugepd_ok(hpd));
#ifdef CONFIG_PPC_8xx
        return (pte_t *)__va(hpd.pd & ~(_PMD_PAGE_MASK | _PMD_PRESENT_MASK));
#else
        return (pte_t *)((hpd.pd & ~HUGEPD_SHIFT_MASK) | PD_HUGE);
#endif
}

static inline unsigned int hugepd_shift(hugepd_t hpd)
{
#ifdef CONFIG_PPC_8xx
        return ((hpd.pd & _PMD_PAGE_MASK) >> 1) + 17;
#else
        return hpd.pd & HUGEPD_SHIFT_MASK;
#endif
}

#endif /* CONFIG_PPC_BOOK3S_64 */

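/*
 * A hugepd entry points not at a normal page-table page but at an
 * array of huge PTEs; hugepte_offset() below selects the entry
 * covering addr within the region mapped by the pdshift-level table
 * entry.
 */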
static inline pte_t *hugepte_offset(hugepd_t hpd, unsigned long addr,
                                    unsigned pdshift)
{
        /*
         * On FSL BookE, we have multiple higher-level table entries that
         * point to the same hugepte. Just use the first one since they're
         * all identical. So for that case, idx = 0.
         */
        unsigned long idx = 0;

        pte_t *dir = hugepd_page(hpd);
#ifndef CONFIG_PPC_FSL_BOOK3E
        idx = (addr & ((1UL << pdshift) - 1)) >> hugepd_shift(hpd);
#endif

        return dir + idx;
}

pte_t *huge_pte_offset_and_shift(struct mm_struct *mm,
                                 unsigned long addr, unsigned *shift);

void flush_dcache_icache_hugepage(struct page *page);

#if defined(CONFIG_PPC_MM_SLICES)
int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
                           unsigned long len);
#else
static inline int is_hugepage_only_range(struct mm_struct *mm,
                                         unsigned long addr,
                                         unsigned long len)
{
        return 0;
}
#endif

void book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea,
                            pte_t pte);

#ifdef CONFIG_PPC_8xx
static inline void flush_hugetlb_page(struct vm_area_struct *vma,
                                      unsigned long vmaddr)
{
        flush_tlb_page(vma, vmaddr);
}
#else
void flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
#endif

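/*
 * powerpc supplies its own hugetlb_free_pgd_range() because hugepage
 * regions may be mapped through hugepd directories that the generic
 * free_pgd_range() does not know how to tear down.
 */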
void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
                            unsigned long end, unsigned long floor,
                            unsigned long ceiling);

/*
 * The version of vma_mmu_pagesize() in arch/powerpc/mm/hugetlbpage.c needs
 * to override the version in mm/hugetlb.c
 */
#define vma_mmu_pagesize vma_mmu_pagesize

/*
 * If the arch doesn't supply something else, assume that hugepage
 * size aligned regions are ok without further preparation.
 */
static inline int prepare_hugepage_range(struct file *file,
                                         unsigned long addr, unsigned long len)
{
        struct hstate *h = hstate_file(file);

        if (len & ~huge_page_mask(h))
                return -EINVAL;
        if (addr & ~huge_page_mask(h))
                return -EINVAL;

        return 0;
}

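/*
 * Example: with a 16M hugepage size, ~huge_page_mask(h) is 0xffffff,
 * so the checks above reject with -EINVAL any addr or len that has a
 * nonzero offset inside a 16M page.
 */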
static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
                                   pte_t *ptep, pte_t pte)
{
        set_pte_at(mm, addr, ptep, pte);
}

static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
                                            unsigned long addr, pte_t *ptep)
{
#ifdef CONFIG_PPC64
        return __pte(pte_update(mm, addr, ptep, ~0UL, 0, 1));
#else
        return __pte(pte_update(ptep, ~0UL, 0));
#endif
}

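/*
 * Both pte_update() variants above atomically clear every PTE bit
 * (the ~0UL mask), set nothing, and return the previous value, so the
 * caller gets the old huge PTE back while the entry becomes
 * pte_none(); the trailing 1 in the 64-bit call is the "huge" flag.
 */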
static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
                                         unsigned long addr, pte_t *ptep)
{
        pte_t pte;

        pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
        flush_hugetlb_page(vma, addr);
}

static inline int huge_pte_none(pte_t pte)
{
        return pte_none(pte);
}

static inline pte_t huge_pte_wrprotect(pte_t pte)
{
        return pte_wrprotect(pte);
}

static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
                                             unsigned long addr, pte_t *ptep,
                                             pte_t pte, int dirty)
{
#ifdef HUGETLB_NEED_PRELOAD
        /*
         * The "return 1" forces a call of update_mmu_cache, which will write
         * a TLB entry. Without this, platforms that don't do a write of the
         * TLB entry in the TLB miss handler asm will fault ad infinitum.
         */
        ptep_set_access_flags(vma, addr, ptep, pte, dirty);
        return 1;
#else
        return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
#endif
}

static inline pte_t huge_ptep_get(pte_t *ptep)
{
        return *ptep;
}

static inline void arch_clear_hugepage_flags(struct page *page)
{
}

#else /* ! CONFIG_HUGETLB_PAGE */

static inline void flush_hugetlb_page(struct vm_area_struct *vma,
                                      unsigned long vmaddr)
{
}

#define hugepd_shift(x) 0
static inline pte_t *hugepte_offset(hugepd_t hpd, unsigned long addr,
                                    unsigned pdshift)
{
        return NULL;
}
#endif /* CONFIG_HUGETLB_PAGE */

/*
 * FSL Book3E platforms require special gpage handling - the gpages
 * are reserved early in the boot process by memblock instead of via
 * the .dts as on IBM platforms.
 */
#if defined(CONFIG_HUGETLB_PAGE) && (defined(CONFIG_PPC_FSL_BOOK3E) || \
    defined(CONFIG_PPC_8xx))
extern void __init reserve_hugetlb_gpages(void);
#else
static inline void reserve_hugetlb_gpages(void)
{
}
#endif

#endif /* _ASM_POWERPC_HUGETLB_H */