secondary_data.pgdir = 0;
*pmd_offset(pgd, PHYS_OFFSET) = __pmd(0);
- pgd_free(pgd);
+ pgd_free(&init_mm, pgd);
if (ret) {
printk(KERN_CRIT "CPU%u: processor failed to boot\n", cpu);
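This first hunk shows the shape of the whole change: the page-table allocation functions already took a struct mm_struct pointer, but the freeing functions did not, so architectures that need the owning mm at free time had no way to get at it. Every *_free primitive now takes the mm as its first argument; call sites tearing down kernel page tables, like this secondary-CPU boot failure path, pass &init_mm. A minimal sketch of the now-symmetric convention (illustrative only, not taken from the patch):

	static int pgd_cycle_sketch(void)
	{
		pgd_t *pgd = pgd_alloc(&init_mm);	/* alloc already carried the mm */

		if (!pgd)
			return -ENOMEM;
		pgd_free(&init_mm, pgd);		/* free now carries it too */
		return 0;
	}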
* Free the page table, if there was one.
*/
if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE)
- pte_free_kernel(pmd_page_vaddr(pmd));
+ pte_free_kernel(&init_mm, pmd_page_vaddr(pmd));
}
addr += PGDIR_SIZE;
return new_pgd;
no_pte:
- pmd_free(new_pmd);
+ pmd_free(mm, new_pmd);
no_pmd:
free_pages((unsigned long)new_pgd, 2);
no_pgd:
return NULL;
}
-void free_pgd_slow(pgd_t *pgd)
+void free_pgd_slow(struct mm_struct *mm, pgd_t *pgd)
{
pmd_t *pmd;
struct page *pte;
pmd_clear(pmd);
dec_zone_page_state(virt_to_page((unsigned long *)pgd), NR_PAGETABLE);
pte_lock_deinit(pte);
- pte_free(pte);
- pmd_free(pmd);
+ pte_free(mm, pte);
+ pmd_free(mm, pmd);
free:
free_pages((unsigned long) pgd, 2);
}
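ARM's free_pgd_slow() is one of the few nontrivial conversions: the pgd is an order-2 (16KB) allocation, and get_pgd_slow() may have pre-attached a first pmd/pte pair (used for the low vectors mapping), which must be torn down by hand before the pgd pages are released, with both frees now receiving the mm. A hedged reconstruction of the teardown tail from the fragments above (the none/bad checks on the pmd are elided):

	pte = pmd_page(*pmd);		/* the pre-attached pte table, if any */
	pmd_clear(pmd);
	dec_zone_page_state(virt_to_page((unsigned long *)pgd), NR_PAGETABLE);
	pte_lock_deinit(pte);		/* undo split-ptlock init before freeing */
	pte_free(mm, pte);
	pmd_free(mm, pmd);
free:
	free_pages((unsigned long) pgd, 2);	/* order 2: the 16KB ARM pgd */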
return pgd;
}
-void pgd_free(pgd_t *pgd)
+void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
/* in the non-PAE case, clear_page_tables() clears user pgd entries */
quicklist_free(0, pgd_dtor, pgd);
return ret;
}
-void pgd_free(pgd_t *pgd)
+void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
free_pages((unsigned long)pgd, PGDIR_ORDER);
}
return ptepage;
}
-void pte_free_kernel(pte_t *pte)
+void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
#ifdef CONFIG_SMP
hash_page_sync();
free_page((unsigned long)pte);
}
-void pte_free(struct page *ptepage)
+void pte_free(struct mm_struct *mm, struct page *ptepage)
{
#ifdef CONFIG_SMP
hash_page_sync();
return ret;
}
-void pgd_free(pgd_t *pgd)
+void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
free_pages((unsigned long)pgd, PGDIR_ORDER);
}
return ptepage;
}
-void pte_free_kernel(pte_t *pte)
+void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
#ifdef CONFIG_SMP
hash_page_sync();
free_page((unsigned long)pte);
}
-void pte_free(struct page *ptepage)
+void pte_free(struct mm_struct *mm, struct page *ptepage)
{
#ifdef CONFIG_SMP
hash_page_sync();
return pgd;
}
-void pgd_free(pgd_t *pgd)
+void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
free_page((unsigned long) pgd);
}
return 0;
out_pmd:
- pud_free(pud);
+ pud_free(mm, pud);
out_pte:
- pmd_free(pmd);
+ pmd_free(mm, pmd);
out:
return -ENOMEM;
}
if (!proc_mm || !ptrace_faultinfo) {
free_page(mmu->id.stack);
pte_lock_deinit(virt_to_page(mmu->last_page_table));
- pte_free_kernel((pte_t *) mmu->last_page_table);
+ pte_free_kernel(mm, (pte_t *) mmu->last_page_table);
dec_zone_page_state(virt_to_page(mmu->last_page_table), NR_PAGETABLE);
#ifdef CONFIG_3_LEVEL_PGTABLES
- pmd_free((pmd_t *) mmu->last_pmd);
+ pmd_free(mm, (pmd_t *) mmu->last_pmd);
#endif
}
* preallocate which never got a corresponding vma will need to be
* freed manually.
*/
-static void pgd_mop_up_pmds(pgd_t *pgdp)
+static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
{
int i;
pgdp[i] = native_make_pgd(0);
paravirt_release_pd(pgd_val(pgd) >> PAGE_SHIFT);
- pmd_free(pmd);
+ pmd_free(mm, pmd);
}
}
}
pmd_t *pmd = pmd_alloc_one(mm, addr);
if (!pmd) {
- pgd_mop_up_pmds(pgd);
+ pgd_mop_up_pmds(mm, pgd);
return 0;
}
return 1;
}
-static void pgd_mop_up_pmds(pgd_t *pgd)
+static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
{
}
#endif /* CONFIG_X86_PAE */
return pgd;
}
-void pgd_free(pgd_t *pgd)
+void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
- pgd_mop_up_pmds(pgd);
+ pgd_mop_up_pmds(mm, pgd);
quicklist_free(0, pgd_dtor, pgd);
}
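The CONFIG_X86_PAE block deals with the preallocation described in the comment above it: pgd_alloc() attaches a pmd to each user pgd slot up front, so pmds that never got a corresponding vma are still hanging off the pgd when it dies, and pgd_mop_up_pmds() must release them, now via pmd_free(mm, pmd). A hedged reconstruction of the walk from the visible fragments (the loop bound and exact helper names are assumed from this era's pgtable_32.c):

	static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
	{
		int i;

		for (i = 0; i < USER_PTRS_PER_PGD; i++) {	/* assumed bound */
			pgd_t pgd = pgdp[i];

			if (pgd_val(pgd) != 0) {
				pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);

				pgdp[i] = native_make_pgd(0);
				paravirt_release_pd(pgd_val(pgd) >> PAGE_SHIFT);
				pmd_free(mm, pmd);
			}
		}
	}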
extern pgd_t *pgd_alloc(struct mm_struct *mm);
static inline void
-pgd_free(pgd_t *pgd)
+pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
free_page((unsigned long)pgd);
}
}
static inline void
-pmd_free(pmd_t *pmd)
+pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
free_page((unsigned long)pmd);
}
extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr);
static inline void
-pte_free_kernel(pte_t *pte)
+pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
free_page((unsigned long)pte);
}
}
static inline void
-pte_free(struct page *page)
+pte_free(struct mm_struct *mm, struct page *page)
{
__free_page(page);
}
#include <asm-generic/tlb.h>
-#define __pte_free_tlb(tlb,pte) pte_free(pte)
-#define __pmd_free_tlb(tlb,pmd) pmd_free(pmd)
+#define __pte_free_tlb(tlb, pte) pte_free((tlb)->mm, pte)
+#define __pmd_free_tlb(tlb, pmd) pmd_free((tlb)->mm, pmd)
#endif
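The alpha tlb.h hunk above sets the pattern for every mmu_gather-based conversion in this patch: the gather structure driving a batched unmap already records which address space is being torn down, so the __pte_free_tlb/__pmd_free_tlb macros can forward (tlb)->mm instead of growing a new parameter at each call site. Sketch of the dependency, assuming the generic gather layout of the period:

	struct mmu_gather {
		struct mm_struct *mm;	/* recorded at tlb_gather_mmu() time */
		/* ... flush and batching state elided ... */
	};

	/* the macro forwards the gather's mm to the freeing primitive */
	#define __pte_free_tlb(tlb, pte)	pte_free((tlb)->mm, pte)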
* Since we have only two-level page tables, these are trivial
*/
#define pmd_alloc_one(mm,addr) ({ BUG(); ((pmd_t *)2); })
-#define pmd_free(pmd) do { } while (0)
+#define pmd_free(mm, pmd) do { } while (0)
#define pgd_populate(mm,pmd,pte) BUG()
extern pgd_t *get_pgd_slow(struct mm_struct *mm);
-extern void free_pgd_slow(pgd_t *pgd);
+extern void free_pgd_slow(struct mm_struct *mm, pgd_t *pgd);
#define pgd_alloc(mm) get_pgd_slow(mm)
-#define pgd_free(pgd) free_pgd_slow(pgd)
+#define pgd_free(mm, pgd) free_pgd_slow(mm, pgd)
/*
* Allocate one PTE table.
/*
* Free one PTE table.
*/
-static inline void pte_free_kernel(pte_t *pte)
+static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
if (pte) {
pte -= PTRS_PER_PTE;
}
}
-static inline void pte_free(struct page *pte)
+static inline void pte_free(struct mm_struct *mm, struct page *pte)
{
__free_page(pte);
}
}
#define tlb_remove_page(tlb,page) free_page_and_swap_cache(page)
-#define pte_free_tlb(tlb,ptep) pte_free(ptep)
-#define pmd_free_tlb(tlb,pmdp) pmd_free(pmdp)
+#define pte_free_tlb(tlb, ptep) pte_free((tlb)->mm, ptep)
+#define pmd_free_tlb(tlb, pmdp) pmd_free((tlb)->mm, pmdp)
#define tlb_migrate_finish(mm) do { } while (0)
return kcalloc(USER_PTRS_PER_PGD, sizeof(pgd_t), GFP_KERNEL);
}
-static inline void pgd_free(pgd_t *pgd)
+static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
kfree(pgd);
}
return pte;
}
-static inline void pte_free_kernel(pte_t *pte)
+static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
free_page((unsigned long)pte);
}
-static inline void pte_free(struct page *pte)
+static inline void pte_free(struct mm_struct *mm, struct page *pte)
{
__free_page(pte);
}
return (pgd_t *)get_zeroed_page(GFP_KERNEL);
}
-static inline void pgd_free (pgd_t *pgd)
+static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
free_page((unsigned long)pgd);
}
return pte;
}
-static inline void pte_free_kernel(pte_t *pte)
+static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
free_page((unsigned long)pte);
}
-static inline void pte_free(struct page *pte)
+static inline void pte_free(struct mm_struct *mm, struct page *pte)
{
__free_page(pte);
}
*/
extern pgd_t *pgd_alloc(struct mm_struct *);
-extern void pgd_free(pgd_t *);
+extern void pgd_free(struct mm_struct *mm, pgd_t *);
extern pte_t *pte_alloc_one_kernel(struct mm_struct *, unsigned long);
extern struct page *pte_alloc_one(struct mm_struct *, unsigned long);
-static inline void pte_free_kernel(pte_t *pte)
+static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
free_page((unsigned long)pte);
}
-static inline void pte_free(struct page *pte)
+static inline void pte_free(struct mm_struct *mm, struct page *pte)
{
__free_page(pte);
}
* (In the PAE case we free the pmds as part of the pgd.)
*/
#define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *) 2); })
-#define pmd_free(x) do { } while (0)
+#define pmd_free(mm, x) do { } while (0)
#define __pmd_free_tlb(tlb,x) do { } while (0)
#endif /* CONFIG_MMU */
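When a page-table level is folded into the one above it, as in the stubs above, the free routine is an empty macro; it still gains the mm parameter so generic code can free any level unconditionally, with folded configurations compiling the call away. A sketch of what that uniformity buys (folded stub plus the generic caller pattern from mm/memory.c at the end of this patch):

	/* folded configuration: nothing to free, but same arity as a real pmd_free */
	#define pmd_free(mm, x)		do { } while (0)

	/* generic caller: frees the loser of a populate race without
	 * knowing whether this architecture has a real pmd level */
	if (pud_present(*pud))
		pmd_free(mm, new);
	else
		pud_populate(mm, pud, new);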
* inside the pgd, so has no extra memory associated with it.
*/
#define pud_alloc_one(mm, address) NULL
-#define pud_free(x) do { } while (0)
+#define pud_free(mm, x) do { } while (0)
#define __pud_free_tlb(tlb, x) do { } while (0)
/*
#undef pud_free_tlb
#define pud_free_tlb(tlb, x) do { } while (0)
-#define pud_free(x) do { } while (0)
+#define pud_free(mm, x) do { } while (0)
#define __pud_free_tlb(tlb, x) do { } while (0)
#undef pud_addr_end
* inside the pud, so has no extra memory associated with it.
*/
#define pmd_alloc_one(mm, address) NULL
-#define pmd_free(x) do { } while (0)
+#define pmd_free(mm, x) do { } while (0)
#define __pmd_free_tlb(tlb, x) do { } while (0)
#undef pmd_addr_end
* inside the pgd, so has no extra memory associated with it.
*/
#define pud_alloc_one(mm, address) NULL
-#define pud_free(x) do { } while (0)
+#define pud_free(mm, x) do { } while (0)
#define __pud_free_tlb(tlb, x) do { } while (0)
#undef pud_addr_end
return quicklist_alloc(0, GFP_KERNEL, NULL);
}
-static inline void pgd_free(pgd_t * pgd)
+static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
quicklist_free(0, NULL, pgd);
}
return quicklist_alloc(0, GFP_KERNEL, NULL);
}
-static inline void pud_free(pud_t * pud)
+static inline void pud_free(struct mm_struct *mm, pud_t *pud)
{
quicklist_free(0, NULL, pud);
}
-#define __pud_free_tlb(tlb, pud) pud_free(pud)
+#define __pud_free_tlb(tlb, pud) pud_free((tlb)->mm, pud)
#endif /* CONFIG_PGTABLE_4 */
static inline void
return quicklist_alloc(0, GFP_KERNEL, NULL);
}
-static inline void pmd_free(pmd_t * pmd)
+static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
quicklist_free(0, NULL, pmd);
}
-#define __pmd_free_tlb(tlb, pmd) pmd_free(pmd)
+#define __pmd_free_tlb(tlb, pmd) pmd_free((tlb)->mm, pmd)
static inline void
pmd_populate(struct mm_struct *mm, pmd_t * pmd_entry, struct page *pte)
return quicklist_alloc(0, GFP_KERNEL, NULL);
}
-static inline void pte_free(struct page *pte)
+static inline void pte_free(struct mm_struct *mm, struct page *pte)
{
quicklist_free_page(0, NULL, pte);
}
-static inline void pte_free_kernel(pte_t * pte)
+static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
quicklist_free(0, NULL, pte);
}
quicklist_trim(0, NULL, 25, 16);
}
-#define __pte_free_tlb(tlb, pte) pte_free(pte)
+#define __pte_free_tlb(tlb, pte) pte_free((tlb)->mm, pte)
#endif /* _ASM_IA64_PGALLOC_H */
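The ia64, sh, sparc64, and x86 conversions all sit on top of quicklists: per-CPU caches of ready-to-use page-table pages, so a free usually just pushes the page onto the local list for quick reuse, and surplus pages are handed back to the page allocator later, running the dtor (if any) to undo constructor state first. A sketch of the two halves, grounded in the calls visible above (the trim thresholds are the ones shown in the sh hunk):

	/* freeing pushes the page onto a per-CPU list for reuse */
	quicklist_free(0, NULL, pgd);

	/* periodic trimming returns surplus cached pages to the allocator */
	static inline void check_pgt_cache(void)
	{
		quicklist_trim(0, NULL, 25, 16);
	}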
return pgd;
}
-static __inline__ void pgd_free(pgd_t *pgd)
+static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
free_page((unsigned long)pgd);
}
return pte;
}
-static __inline__ void pte_free_kernel(pte_t *pte)
+static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
free_page((unsigned long)pte);
}
-static __inline__ void pte_free(struct page *pte)
+static inline void pte_free(struct mm_struct *mm, struct page *pte)
{
__free_page(pte);
}
-#define __pte_free_tlb(tlb, pte) pte_free((pte))
+#define __pte_free_tlb(tlb, pte) pte_free((tlb)->mm, (pte))
/*
* allocating and freeing a pmd is trivial: the 1-entry pmd is
*/
#define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); })
-#define pmd_free(x) do { } while (0)
+#define pmd_free(mm, x) do { } while (0)
#define __pmd_free_tlb(tlb, x) do { } while (0)
#define pgd_populate(mm, pmd, pte) BUG()
return pte;
}
-static inline void pte_free_kernel(pte_t *pte)
+static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
cache_page(pte);
free_page((unsigned long) pte);
return page;
}
-static inline void pte_free(struct page *page)
+static inline void pte_free(struct mm_struct *mm, struct page *page)
{
cache_page(kmap(page));
kunmap(page);
return get_pointer_table();
}
-static inline int pmd_free(pmd_t *pmd)
+static inline int pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
return free_pointer_table(pmd);
}
}
-static inline void pgd_free(pgd_t *pgd)
+static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
- pmd_free((pmd_t *)pgd);
+ pmd_free(mm, (pmd_t *)pgd);
}
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
#define pmd_alloc_one(mm,address) ({ BUG(); ((pmd_t *)2); })
-static inline void pte_free_kernel(pte_t * pte)
+static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
free_page((unsigned long) pte);
}
-static inline void pte_free(struct page *page)
+static inline void pte_free(struct mm_struct *mm, struct page *page)
{
__free_page(page);
}
* allocating and freeing a pmd is trivial: the 1-entry pmd is
* inside the pgd, so has no extra memory associated with it.
*/
-#define pmd_free(x) do { } while (0)
+#define pmd_free(mm, x) do { } while (0)
#define __pmd_free_tlb(tlb, x) do { } while (0)
-static inline void pgd_free(pgd_t * pgd)
+static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
free_page((unsigned long) pgd);
}
return ret;
}
-static inline void pgd_free(pgd_t *pgd)
+static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
free_pages((unsigned long)pgd, PGD_ORDER);
}
return pte;
}
-static inline void pte_free_kernel(pte_t *pte)
+static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
free_pages((unsigned long)pte, PTE_ORDER);
}
-static inline void pte_free(struct page *pte)
+static inline void pte_free(struct mm_struct *mm, struct page *pte)
{
__free_pages(pte, PTE_ORDER);
}
* allocating and freeing a pmd is trivial: the 1-entry pmd is
* inside the pgd, so has no extra memory associated with it.
*/
-#define pmd_free(x) do { } while (0)
+#define pmd_free(mm, x) do { } while (0)
#define __pmd_free_tlb(tlb, x) do { } while (0)
#endif
return pmd;
}
-static inline void pmd_free(pmd_t *pmd)
+static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
free_pages((unsigned long)pmd, PMD_ORDER);
}
-#define __pmd_free_tlb(tlb, x) pmd_free(x)
+#define __pmd_free_tlb(tlb, x) pmd_free((tlb)->mm, x)
#endif
return actual_pgd;
}
-static inline void pgd_free(pgd_t *pgd)
+static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
#ifdef CONFIG_64BIT
pgd -= PTRS_PER_PGD;
return pmd;
}
-static inline void pmd_free(pmd_t *pmd)
+static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
#ifdef CONFIG_64BIT
if(pmd_flag(*pmd) & PxD_FLAG_ATTACHED)
*/
#define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); })
-#define pmd_free(x) do { } while (0)
+#define pmd_free(mm, x) do { } while (0)
#define pgd_populate(mm, pmd, pte) BUG()
#endif
return pte;
}
-static inline void pte_free_kernel(pte_t *pte)
+static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
free_page((unsigned long)pte);
}
-#define pte_free(page) pte_free_kernel(page_address(page))
+#define pte_free(mm, page) pte_free_kernel(mm, page_address(page))
#define check_pgt_cache() do { } while (0)
#include <asm-generic/tlb.h>
-#define __pmd_free_tlb(tlb, pmd) pmd_free(pmd)
-#define __pte_free_tlb(tlb, pte) pte_free(pte)
+#define __pmd_free_tlb(tlb, pmd) pmd_free((tlb)->mm, pmd)
+#define __pte_free_tlb(tlb, pte) pte_free((tlb)->mm, pte)
#endif
extern void __bad_pte(pmd_t *pmd);
extern pgd_t *pgd_alloc(struct mm_struct *mm);
-extern void pgd_free(pgd_t *pgd);
+extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
/*
* We don't have any real pmd's, and this code never triggers because
* the pgd will always be present.
*/
/* #define pmd_alloc_one(mm,address) ({ BUG(); ((pmd_t *)2); }) */
-#define pmd_free(x) do { } while (0)
+#define pmd_free(mm, x) do { } while (0)
#define __pmd_free_tlb(tlb,x) do { } while (0)
/* #define pgd_populate(mm, pmd, pte) BUG() */
extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr);
extern struct page *pte_alloc_one(struct mm_struct *mm, unsigned long addr);
-extern void pte_free_kernel(pte_t *pte);
-extern void pte_free(struct page *pte);
+extern void pte_free_kernel(struct mm_struct *mm, pte_t *pte);
+extern void pte_free(struct mm_struct *mm, struct page *pte);
-#define __pte_free_tlb(tlb, pte) pte_free((pte))
+#define __pte_free_tlb(tlb, pte) pte_free((tlb)->mm, (pte))
#define check_pgt_cache() do { } while (0)
return kmem_cache_alloc(pgtable_cache[PGD_CACHE_NUM], GFP_KERNEL);
}
-static inline void pgd_free(pgd_t *pgd)
+static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
subpage_prot_free(pgd);
kmem_cache_free(pgtable_cache[PGD_CACHE_NUM], pgd);
GFP_KERNEL|__GFP_REPEAT);
}
-static inline void pud_free(pud_t *pud)
+static inline void pud_free(struct mm_struct *mm, pud_t *pud)
{
kmem_cache_free(pgtable_cache[PUD_CACHE_NUM], pud);
}
GFP_KERNEL|__GFP_REPEAT);
}
-static inline void pmd_free(pmd_t *pmd)
+static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
kmem_cache_free(pgtable_cache[PMD_CACHE_NUM], pmd);
}
return pte ? virt_to_page(pte) : NULL;
}
-static inline void pte_free_kernel(pte_t *pte)
+static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
free_page((unsigned long)pte);
}
-static inline void pte_free(struct page *ptepage)
+static inline void pte_free(struct mm_struct *mm, struct page *ptepage)
{
__free_page(ptepage);
}
extern void __bad_pte(pmd_t *pmd);
extern pgd_t *pgd_alloc(struct mm_struct *mm);
-extern void pgd_free(pgd_t *pgd);
+extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
/*
* We don't have any real pmd's, and this code never triggers because
* the pgd will always be present.
*/
#define pmd_alloc_one(mm,address) ({ BUG(); ((pmd_t *)2); })
-#define pmd_free(x) do { } while (0)
+#define pmd_free(mm, x) do { } while (0)
#define __pmd_free_tlb(tlb,x) do { } while (0)
#define pgd_populate(mm, pmd, pte) BUG()
extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr);
extern struct page *pte_alloc_one(struct mm_struct *mm, unsigned long addr);
-extern void pte_free_kernel(pte_t *pte);
-extern void pte_free(struct page *pte);
+extern void pte_free_kernel(struct mm_struct *mm, pte_t *pte);
+extern void pte_free(struct mm_struct *mm, struct page *pte);
-#define __pte_free_tlb(tlb, pte) pte_free((pte))
+#define __pte_free_tlb(tlb, pte) pte_free((tlb)->mm, (pte))
#define check_pgt_cache() do { } while (0)
}
#define pud_alloc_one(mm,address) ({ BUG(); ((pud_t *)2); })
-#define pud_free(x) do { } while (0)
+#define pud_free(mm, x) do { } while (0)
#define pmd_alloc_one(mm,address) ({ BUG(); ((pmd_t *)2); })
-#define pmd_free(x) do { } while (0)
+#define pmd_free(mm, x) do { } while (0)
#define pgd_populate(mm, pgd, pud) BUG()
#define pgd_populate_kernel(mm, pgd, pud) BUG()
}
#define pud_alloc_one(mm,address) ({ BUG(); ((pud_t *)2); })
-#define pud_free(x) do { } while (0)
+#define pud_free(mm, x) do { } while (0)
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
{
crst_table_init(crst, _SEGMENT_ENTRY_EMPTY);
return (pmd_t *) crst;
}
-#define pmd_free(pmd) crst_table_free((unsigned long *) pmd)
+#define pmd_free(mm, pmd) crst_table_free((unsigned long *) pmd)
#define pgd_populate(mm, pgd, pud) BUG()
#define pgd_populate_kernel(mm, pgd, pud) BUG()
crst_table_init(crst, pgd_entry_type(mm));
return (pgd_t *) crst;
}
-#define pgd_free(pgd) crst_table_free((unsigned long *) pgd)
+#define pgd_free(mm, pgd) crst_table_free((unsigned long *) pgd)
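On s390 the upper levels (pmd and pgd alike) are carved from crst tables, combined region-and-segment tables initialized by crst_table_init() above, and both free paths funnel into the same crst_table_free(); the macros accept the mm only for interface uniformity and drop it. The resulting pair:

	/* both levels share one backing allocator; mm is accepted and ignored */
	#define pmd_free(mm, pmd)	crst_table_free((unsigned long *) pmd)
	#define pgd_free(mm, pgd)	crst_table_free((unsigned long *) pgd)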
static inline void
pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
#define pte_alloc_one(mm, vmaddr) \
virt_to_page(page_table_alloc(s390_noexec))
-#define pte_free_kernel(pte) \
+#define pte_free_kernel(mm, pte) \
page_table_free((unsigned long *) pte)
-#define pte_free(pte) \
+#define pte_free(mm, pte) \
page_table_free((unsigned long *) page_to_phys((struct page *) pte))
#endif /* _S390_PGALLOC_H */
if (!tlb->fullmm && (tlb->nr_ptes > 0 || tlb->nr_pmds < TLB_NR_PTRS))
__tlb_flush_mm(tlb->mm);
while (tlb->nr_ptes > 0)
- pte_free(tlb->array[--tlb->nr_ptes]);
+ pte_free(tlb->mm, tlb->array[--tlb->nr_ptes]);
while (tlb->nr_pmds < TLB_NR_PTRS)
- pmd_free((pmd_t *) tlb->array[tlb->nr_pmds++]);
+ pmd_free(tlb->mm, (pmd_t *) tlb->array[tlb->nr_pmds++]);
}
static inline void tlb_finish_mmu(struct mmu_gather *tlb,
if (tlb->nr_ptes >= tlb->nr_pmds)
tlb_flush_mmu(tlb, 0, 0);
} else
- pte_free(page);
+ pte_free(tlb->mm, page);
}
/*
if (tlb->nr_ptes >= tlb->nr_pmds)
tlb_flush_mmu(tlb, 0, 0);
} else
- pmd_free(pmd);
+ pmd_free(tlb->mm, pmd);
#endif
}
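s390 supplies its own mmu_gather: freed pte pages and pmd tables share a single array, ptes filling it from the bottom (nr_ptes counts up) and pmds from the top (nr_pmds counts down from TLB_NR_PTRS), and the drain loops above run only after the whole-mm flush, passing tlb->mm straight through to pte_free()/pmd_free(). The insertion side pairs with the else tails visible above; a hedged reconstruction:

	static inline void pte_free_tlb(struct mmu_gather *tlb, struct page *page)
	{
		if (!tlb->fullmm) {
			tlb->array[tlb->nr_ptes++] = page;	/* batch from the bottom */
			if (tlb->nr_ptes >= tlb->nr_pmds)	/* the two ends met: flush */
				tlb_flush_mmu(tlb, 0, 0);
		} else
			pte_free(tlb->mm, page);	/* full-mm teardown frees directly */
	}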
return quicklist_alloc(QUICK_PGD, GFP_KERNEL | __GFP_REPEAT, pgd_ctor);
}
-static inline void pgd_free(pgd_t *pgd)
+static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
quicklist_free(QUICK_PGD, NULL, pgd);
}
return pg ? virt_to_page(pg) : NULL;
}
-static inline void pte_free_kernel(pte_t *pte)
+static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
quicklist_free(QUICK_PT, NULL, pte);
}
-static inline void pte_free(struct page *pte)
+static inline void pte_free(struct mm_struct *mm, struct page *pte)
{
quicklist_free_page(QUICK_PT, NULL, pte);
}
* inside the pgd, so has no extra memory associated with it.
*/
-#define pmd_free(x) do { } while (0)
+#define pmd_free(mm, x) do { } while (0)
#define __pmd_free_tlb(tlb,x) do { } while (0)
static inline void check_pgt_cache(void)
BTFIXUPDEF_CALL(void, free_pgd_fast, pgd_t *)
#define free_pgd_fast(pgd) BTFIXUP_CALL(free_pgd_fast)(pgd)
-#define pgd_free(pgd) free_pgd_fast(pgd)
+#define pgd_free(mm, pgd) free_pgd_fast(pgd)
#define pgd_alloc(mm) get_pgd_fast()
BTFIXUPDEF_CALL(void, pgd_set, pgd_t *, pmd_t *)
BTFIXUPDEF_CALL(void, free_pmd_fast, pmd_t *)
#define free_pmd_fast(pmd) BTFIXUP_CALL(free_pmd_fast)(pmd)
-#define pmd_free(pmd) free_pmd_fast(pmd)
-#define __pmd_free_tlb(tlb, pmd) pmd_free(pmd)
+#define pmd_free(mm, pmd) free_pmd_fast(pmd)
+#define __pmd_free_tlb(tlb, pmd) pmd_free((tlb)->mm, pmd)
BTFIXUPDEF_CALL(void, pmd_populate, pmd_t *, struct page *)
#define pmd_populate(MM, PMD, PTE) BTFIXUP_CALL(pmd_populate)(PMD, PTE)
#define pte_alloc_one_kernel(mm, addr) BTFIXUP_CALL(pte_alloc_one_kernel)(mm, addr)
BTFIXUPDEF_CALL(void, free_pte_fast, pte_t *)
-#define pte_free_kernel(pte) BTFIXUP_CALL(free_pte_fast)(pte)
+#define pte_free_kernel(mm, pte) BTFIXUP_CALL(free_pte_fast)(pte)
BTFIXUPDEF_CALL(void, pte_free, struct page *)
-#define pte_free(pte) BTFIXUP_CALL(pte_free)(pte)
-#define __pte_free_tlb(tlb, pte) pte_free(pte)
+#define pte_free(mm, pte) BTFIXUP_CALL(pte_free)(pte)
+#define __pte_free_tlb(tlb, pte) pte_free((tlb)->mm, pte)
#endif /* _SPARC_PGALLOC_H */
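sparc32 routes these operations through BTFIXUP, its boot-time patching scheme that rewrites call sites once the MMU variant is known; the patched callees keep their old one-argument form, so the new mm is accepted at the macro boundary purely for API compatibility and discarded there:

	/* mm stops at the macro; the btfixup'd callee never sees it */
	#define pte_free(mm, pte)	BTFIXUP_CALL(pte_free)(pte)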
return quicklist_alloc(0, GFP_KERNEL, NULL);
}
-static inline void pgd_free(pgd_t *pgd)
+static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
quicklist_free(0, NULL, pgd);
}
return quicklist_alloc(0, GFP_KERNEL, NULL);
}
-static inline void pmd_free(pmd_t *pmd)
+static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
quicklist_free(0, NULL, pmd);
}
return pg ? virt_to_page(pg) : NULL;
}
-static inline void pte_free_kernel(pte_t *pte)
+static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
quicklist_free(0, NULL, pte);
}
-static inline void pte_free(struct page *ptepage)
+static inline void pte_free(struct mm_struct *mm, struct page *ptepage)
{
quicklist_free_page(0, NULL, ptepage);
}
}
#define tlb_remove_tlb_entry(mp,ptep,addr) do { } while (0)
-#define pte_free_tlb(mp,ptepage) pte_free(ptepage)
-#define pmd_free_tlb(mp,pmdp) pmd_free(pmdp)
+#define pte_free_tlb(mp, ptepage) pte_free((mp)->mm, ptepage)
+#define pmd_free_tlb(mp, pmdp) pmd_free((mp)->mm, pmdp)
#define pud_free_tlb(tlb,pudp) __pud_free_tlb(tlb,pudp)
#define tlb_migrate_finish(mm) do { } while (0)
* Allocate and free page tables.
*/
extern pgd_t *pgd_alloc(struct mm_struct *);
-extern void pgd_free(pgd_t *pgd);
+extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
extern pte_t *pte_alloc_one_kernel(struct mm_struct *, unsigned long);
extern struct page *pte_alloc_one(struct mm_struct *, unsigned long);
-static inline void pte_free_kernel(pte_t *pte)
+static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
free_page((unsigned long) pte);
}
-static inline void pte_free(struct page *pte)
+static inline void pte_free(struct mm_struct *mm, struct page *pte)
{
__free_page(pte);
}
#ifdef CONFIG_3_LEVEL_PGTABLES
-static inline void pmd_free(pmd_t *pmd)
+static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
free_page((unsigned long)pmd);
}
* Allocate and free page tables.
*/
extern pgd_t *pgd_alloc(struct mm_struct *);
-extern void pgd_free(pgd_t *pgd);
+extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
extern pte_t *pte_alloc_one_kernel(struct mm_struct *, unsigned long);
extern struct page *pte_alloc_one(struct mm_struct *, unsigned long);
-static inline void pte_free_kernel(pte_t *pte)
+static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
free_page((unsigned long)pte);
}
-static inline void pte_free(struct page *pte)
+static inline void pte_free(struct mm_struct *mm, struct page *pte)
{
__free_page(pte);
}
return (pmd_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
}
-static inline void pmd_free(pmd_t *pmd)
+static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
BUG_ON((unsigned long)pmd & (PAGE_SIZE-1));
free_page((unsigned long)pmd);
set_pmd(pmd, __pmd(_PAGE_TABLE | (page_to_pfn(pte) << PAGE_SHIFT)));
}
-static inline void pmd_free(pmd_t *pmd)
+static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
BUG_ON((unsigned long)pmd & (PAGE_SIZE-1));
free_page((unsigned long)pmd);
return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
}
-static inline void pud_free (pud_t *pud)
+static inline void pud_free(struct mm_struct *mm, pud_t *pud)
{
BUG_ON((unsigned long)pud & (PAGE_SIZE-1));
free_page((unsigned long)pud);
return pgd;
}
-static inline void pgd_free(pgd_t *pgd)
+static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
BUG_ON((unsigned long)pgd & (PAGE_SIZE-1));
pgd_list_del(pgd);
/* Should really implement gc for free page table pages. This could be
done with a reference count in struct page. */
-static inline void pte_free_kernel(pte_t *pte)
+static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
BUG_ON((unsigned long)pte & (PAGE_SIZE-1));
free_page((unsigned long)pte);
}
-static inline void pte_free(struct page *pte)
+static inline void pte_free(struct mm_struct *mm, struct page *pte)
{
__free_page(pte);
}
return (pgd_t*) __get_free_pages(GFP_KERNEL | __GFP_ZERO, PGD_ORDER);
}
-static inline void pgd_free(pgd_t *pgd)
+static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
free_page((unsigned long)pgd);
}
return virt_to_page(pte_alloc_one_kernel(mm, addr));
}
-static inline void pte_free_kernel(pte_t *pte)
+static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
kmem_cache_free(pgtable_cache, pte);
}
-static inline void pte_free(struct page *page)
+static inline void pte_free(struct mm_struct *mm, struct page *page)
{
kmem_cache_free(pgtable_cache, page_address(page));
}
#include <asm-generic/tlb.h>
-#define __pte_free_tlb(tlb,pte) pte_free(pte)
+#define __pte_free_tlb(tlb, pte) pte_free((tlb)->mm, pte)
#endif /* _XTENSA_TLB_H */
static inline void mm_free_pgd(struct mm_struct * mm)
{
- pgd_free(mm->pgd);
+ pgd_free(mm, mm->pgd);
}
#else
#define dup_mmap(mm, oldmm) (0)
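In core code the conversion is trivial because the owner is always at hand: mm_free_pgd() is destroying mm itself, so the mm simply rides along with its own pgd. The complete helper after the patch is the one-liner above:

	static inline void mm_free_pgd(struct mm_struct *mm)
	{
		pgd_free(mm, mm->pgd);	/* the pgd's owner is the mm being torn down */
	}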
spin_lock(&mm->page_table_lock);
if (pmd_present(*pmd)) { /* Another has populated it */
pte_lock_deinit(new);
- pte_free(new);
+ pte_free(mm, new);
} else {
mm->nr_ptes++;
inc_zone_page_state(new, NR_PAGETABLE);
spin_lock(&init_mm.page_table_lock);
if (pmd_present(*pmd)) /* Another has populated it */
- pte_free_kernel(new);
+ pte_free_kernel(&init_mm, new);
else
pmd_populate_kernel(&init_mm, pmd, new);
spin_unlock(&init_mm.page_table_lock);
spin_lock(&mm->page_table_lock);
if (pgd_present(*pgd)) /* Another has populated it */
- pud_free(new);
+ pud_free(mm, new);
else
pgd_populate(mm, pgd, new);
spin_unlock(&mm->page_table_lock);
spin_lock(&mm->page_table_lock);
#ifndef __ARCH_HAS_4LEVEL_HACK
if (pud_present(*pud)) /* Another has populated it */
- pmd_free(new);
+ pmd_free(mm, new);
else
pud_populate(mm, pud, new);
#else
if (pgd_present(*pud)) /* Another has populated it */
- pmd_free(new);
+ pmd_free(mm, new);
else
pgd_populate(mm, pud, new);
#endif /* __ARCH_HAS_4LEVEL_HACK */
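All the mm/memory.c hunks follow one pattern: __pte_alloc(), __pud_alloc(), and __pmd_alloc() allocate the new table outside the lock, retake page_table_lock, and recheck the entry; if another thread populated it in the meantime, the speculative table is freed, and that free is exactly where the new mm argument lands. A hedged sketch of the whole pattern, based on the __pmd_alloc fragment above (memory barrier and the 4LEVEL_HACK branch elided):

	int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
	{
		pmd_t *new = pmd_alloc_one(mm, address);	/* allocate unlocked */

		if (!new)
			return -ENOMEM;

		spin_lock(&mm->page_table_lock);
		if (pud_present(*pud))		/* another thread won the race */
			pmd_free(mm, new);	/* free with the same owning mm */
		else
			pud_populate(mm, pud, new);
		spin_unlock(&mm->page_table_lock);
		return 0;
	}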