return NULL;
}
-/*
- * Do nothing, until we've worked out what to do! To allow build, we
- * must remove reference to clear_page_range since it no longer exists.
- */
-void hugetlb_free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *prev,
- unsigned long start, unsigned long end)
+void hugetlb_free_pgd_range(struct mmu_gather **tlb,
+ unsigned long addr, unsigned long end,
+ unsigned long floor, unsigned long ceiling)
{
+ /*
+ * This is called only when is_hugepage_only_range(addr,),
+	 * and it follows that is_hugepage_only_range(end,) holds too.
+ *
+ * The offset of these addresses from the base of the hugetlb
+ * region must be scaled down by HPAGE_SIZE/PAGE_SIZE so that
+ * the standard free_pgd_range will free the right page tables.
+ *
+ * If floor and ceiling are also in the hugetlb region, they
+ * must likewise be scaled down; but if outside, left unchanged.
+ */
+
+ addr = htlbpage_to_page(addr);
+ end = htlbpage_to_page(end);
+	if (is_hugepage_only_range((*tlb)->mm, floor, HPAGE_SIZE))
+		floor = htlbpage_to_page(floor);
+	if (is_hugepage_only_range((*tlb)->mm, ceiling, HPAGE_SIZE))
+		ceiling = htlbpage_to_page(ceiling);
+
+ free_pgd_range(tlb, addr, end, floor, ceiling);
}
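
The scaling described in the comment can be checked with a small stand-alone model: an address in the hugetlb region keeps its region bits while its offset shrinks by HPAGE_SIZE/PAGE_SIZE, so addr and end map to a proportionally smaller range for the standard page-table walker. The shift values and the htlbpage_to_page() shape below are illustrative stand-ins patterned on ia64, not copied from the kernel headers.

#include <stdio.h>

#define PAGE_SHIFT	14			/* illustrative: 16K base pages */
#define HPAGE_SHIFT	28			/* illustrative: 256M huge pages */
#define REGION_SHIFT	61
#define REGION_NUMBER(a)	((unsigned long)(a) >> REGION_SHIFT)
#define REGION_OFFSET(a)	((unsigned long)(a) & ((1UL << REGION_SHIFT) - 1))

/* same shape as the ia64 helper: keep the region bits, scale the offset */
static unsigned long htlbpage_to_page(unsigned long a)
{
	return (REGION_NUMBER(a) << REGION_SHIFT) |
	       (REGION_OFFSET(a) >> (HPAGE_SHIFT - PAGE_SHIFT));
}

int main(void)
{
	/* region 4 modelled as the hugetlb region */
	unsigned long addr = (4UL << REGION_SHIFT) + (1UL << HPAGE_SHIFT);
	unsigned long end  = addr + (2UL << HPAGE_SHIFT);

	printf("%#lx..%#lx -> %#lx..%#lx\n",
	       addr, end, htlbpage_to_page(addr), htlbpage_to_page(end));
	return 0;
}
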
void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
flush_tlb_pending();
}
-void hugetlb_free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *prev,
- unsigned long start, unsigned long end)
-{
- /* Because the huge pgtables are only 2 level, they can take
- * at most around 4M, much less than one hugepage which the
- * process is presumably entitled to use. So we don't bother
- * freeing up the pagetables on unmap, and wait until
- * destroy_context() to clean up the lot. */
-}
-
int hugetlb_prefault(struct address_space *mapping, struct vm_area_struct *vma)
{
struct mm_struct *mm = current->mm;
# define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
# define is_hugepage_only_range(mm, addr, len) \
(REGION_NUMBER(addr) == REGION_HPAGE && \
- REGION_NUMBER((addr)+(len)) == REGION_HPAGE)
+ REGION_NUMBER((addr)+(len)-1) == REGION_HPAGE)
extern unsigned int hpage_shift;
#endif
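
The extra -1 matters when a mapping ends exactly at the top of the hugetlb region: (addr)+(len) is the first byte beyond the range and already carries the next region's number, so the old test wrongly reported such a range as not hugepage-only. A throwaway user-space check of that boundary case, with the region layout and REGION_HPAGE value modelled rather than taken from the real headers:

#include <stdio.h>

#define REGION_SHIFT	61
#define REGION_HPAGE	4UL			/* modelled hugetlb region number */
#define REGION_NUMBER(a)	((unsigned long)(a) >> REGION_SHIFT)

int main(void)
{
	unsigned long addr = REGION_HPAGE << REGION_SHIFT;	/* start of region */
	unsigned long len  = 1UL << REGION_SHIFT;		/* fills the region */

	/* prints 0: addr+len already lies in the next region */
	printf("old test: %d\n", REGION_NUMBER(addr) == REGION_HPAGE &&
				 REGION_NUMBER(addr + len) == REGION_HPAGE);
	/* prints 1: addr+len-1 is the last byte actually covered */
	printf("new test: %d\n", REGION_NUMBER(addr) == REGION_HPAGE &&
				 REGION_NUMBER(addr + len - 1) == REGION_HPAGE);
	return 0;
}
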
#define HUGETLB_PGDIR_SIZE (__IA64_UL(1) << HUGETLB_PGDIR_SHIFT)
#define HUGETLB_PGDIR_MASK (~(HUGETLB_PGDIR_SIZE-1))
struct mmu_gather;
-extern void hugetlb_free_pgtables(struct mmu_gather *tlb,
- struct vm_area_struct * prev, unsigned long start, unsigned long end);
+void hugetlb_free_pgd_range(struct mmu_gather **tlb, unsigned long addr,
+ unsigned long end, unsigned long floor, unsigned long ceiling);
#endif
/*
extern void paging_init(void);
-struct mmu_gather;
-void hugetlb_free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *prev,
- unsigned long start, unsigned long end);
+/*
+ * Because the huge pgtables are only 2 level, they can take
+ * at most around 4M, much less than one hugepage which the
+ * process is presumably entitled to use. So we don't bother
+ * freeing up the pagetables on unmap, and wait until
+ * destroy_context() to clean up the lot.
+ */
+#define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) \
+ do { } while (0)
/*
* This gets called at the end of handling a page fault, when
#ifndef ARCH_HAS_HUGEPAGE_ONLY_RANGE
#define is_hugepage_only_range(mm, addr, len) 0
-#define hugetlb_free_pgtables(tlb, prev, start, end) do { } while (0)
+#define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) \
+ do { } while (0)
#endif
#ifndef ARCH_HAS_PREPARE_HUGEPAGE_RANGE
#define prepare_hugepage_range(addr, len) (-EINVAL)
#define pmd_huge(x) 0
#define is_hugepage_only_range(mm, addr, len) 0
-#define hugetlb_free_pgtables(tlb, prev, start, end) do { } while (0)
+#define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) \
+ do { } while (0)
#define alloc_huge_page() ({ NULL; })
#define free_huge_page(p) ({ (void)(p); BUG(); })
struct vm_area_struct *start_vma, unsigned long start_addr,
unsigned long end_addr, unsigned long *nr_accounted,
struct zap_details *);
-void free_pgtables(struct mmu_gather **tlb, struct vm_area_struct *vma,
+void free_pgd_range(struct mmu_gather **tlb, unsigned long addr,
+ unsigned long end, unsigned long floor, unsigned long ceiling);
+void free_pgtables(struct mmu_gather **tlb, struct vm_area_struct *start_vma,
unsigned long floor, unsigned long ceiling);
int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
struct vm_area_struct *vma);
*
* Must be called with pagetable lock held.
*/
-static inline void free_pgd_range(struct mmu_gather *tlb,
+void free_pgd_range(struct mmu_gather **tlb,
unsigned long addr, unsigned long end,
unsigned long floor, unsigned long ceiling)
{
return;
start = addr;
- pgd = pgd_offset(tlb->mm, addr);
+ pgd = pgd_offset((*tlb)->mm, addr);
do {
next = pgd_addr_end(addr, end);
if (pgd_none_or_clear_bad(pgd))
continue;
- free_pud_range(tlb, pgd, addr, next, floor, ceiling);
+ free_pud_range(*tlb, pgd, addr, next, floor, ceiling);
} while (pgd++, addr = next, addr != end);
- if (!tlb_is_full_mm(tlb))
- flush_tlb_pgtables(tlb->mm, start, end);
+ if (!tlb_is_full_mm(*tlb))
+ flush_tlb_pgtables((*tlb)->mm, start, end);
}
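
On the interface change itself: free_pgd_range() now takes struct mmu_gather **, so free_pgtables() and the arch hugetlb hook can hand the caller's gather pointer straight through, as the free_pgtables() hunk below does. A toy user-space model of that forwarding (names invented, not kernel API):

#include <stdio.h>

struct gather { int fullmm; };		/* stand-in for struct mmu_gather */

static void core_free_pgd_range(struct gather **tlb,
				unsigned long addr, unsigned long end)
{
	printf("free page tables for [%#lx, %#lx), fullmm=%d\n",
	       addr, end, (*tlb)->fullmm);
}

static void arch_hugetlb_hook(struct gather **tlb,
			      unsigned long addr, unsigned long end)
{
	/* forward the caller's double pointer unchanged, as ia64 does above */
	core_free_pgd_range(tlb, addr, end);
}

int main(void)
{
	struct gather g = { .fullmm = 1 };
	struct gather *tlb = &g;

	arch_hugetlb_hook(&tlb, 0x100000UL, 0x200000UL);
	return 0;
}
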
void free_pgtables(struct mmu_gather **tlb, struct vm_area_struct *vma,
- unsigned long floor, unsigned long ceiling)
+ unsigned long floor, unsigned long ceiling)
{
while (vma) {
struct vm_area_struct *next = vma->vm_next;
unsigned long addr = vma->vm_start;
- /* Optimization: gather nearby vmas into a single call down */
- while (next && next->vm_start <= vma->vm_end + PMD_SIZE) {
- vma = next;
- next = vma->vm_next;
- }
- free_pgd_range(*tlb, addr, vma->vm_end,
+ if (is_hugepage_only_range(vma->vm_mm, addr, HPAGE_SIZE)) {
+ hugetlb_free_pgd_range(tlb, addr, vma->vm_end,
+ floor, next? next->vm_start: ceiling);
+ } else {
+ /*
+ * Optimization: gather nearby vmas into one call down
+ */
+ while (next && next->vm_start <= vma->vm_end + PMD_SIZE
+ && !is_hugepage_only_range(vma->vm_mm, next->vm_start,
+ HPAGE_SIZE)) {
+ vma = next;
+ next = vma->vm_next;
+ }
+ free_pgd_range(tlb, addr, vma->vm_end,
floor, next? next->vm_start: ceiling);
+ }
vma = next;
}
}
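
The reworked loop can be exercised outside the kernel with toy types to see both paths: neighbouring ordinary vmas are folded into a single free_pgd_range() span, while a hugepage-only vma is handed to the hugetlb variant on its own. Everything below (the struct, the flag, the addresses) is invented for illustration, and floor/ceiling handling is left out:

#include <stdio.h>
#include <stddef.h>

#define PMD_SIZE	0x200000UL
#define HPAGE_ONLY	1		/* toy flag marking a hugepage-only vma */

struct vma {
	unsigned long start, end;
	int flags;
	struct vma *next;
};

static int is_hugepage_only(const struct vma *v)
{
	return v->flags & HPAGE_ONLY;
}

static void free_range(const char *how, unsigned long s, unsigned long e)
{
	printf("%s: [%#lx, %#lx)\n", how, s, e);
}

static void model_free_pgtables(struct vma *vma)
{
	while (vma) {
		struct vma *next = vma->next;
		unsigned long addr = vma->start;

		if (is_hugepage_only(vma)) {
			free_range("hugetlb_free_pgd_range", addr, vma->end);
		} else {
			/* gather nearby non-hugepage vmas, as above */
			while (next && next->start <= vma->end + PMD_SIZE
					&& !is_hugepage_only(next)) {
				vma = next;
				next = vma->next;
			}
			free_range("free_pgd_range", addr, vma->end);
		}
		vma = next;
	}
}

int main(void)
{
	/* two ordinary vmas followed by an adjacent hugepage-only one */
	struct vma c = { 0x10200000, 0x10400000, HPAGE_ONLY, NULL };
	struct vma b = { 0x10100000, 0x10200000, 0, &c };
	struct vma a = { 0x10000000, 0x10010000, 0, &b };

	model_free_pgtables(&a);
	return 0;
}
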
-pte_t fastcall * pte_alloc_map(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
+pte_t fastcall *pte_alloc_map(struct mm_struct *mm, pmd_t *pmd,
+ unsigned long address)
{
if (!pmd_present(*pmd)) {
struct page *new;