static unsigned int nr_huge_pages_node[MAX_NUMNODES];
static unsigned int free_huge_pages_node[MAX_NUMNODES];
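+/*
+ * Clear a hugepage one base page at a time.  A huge page spans
+ * HPAGE_SIZE/PAGE_SIZE base pages, so the loop calls cond_resched()
+ * between subpages to keep the long clear preemptible.
+ */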
+static void clear_huge_page(struct page *page, unsigned long addr)
+{
+ int i;
+
+ might_sleep();
+ for (i = 0; i < (HPAGE_SIZE/PAGE_SIZE); i++) {
+ cond_resched();
+ clear_user_highpage(page + i, addr);
+ }
+}
+
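+/*
+ * Copy a hugepage subpage by subpage.  Like clear_huge_page(), this
+ * can take a while, so it reschedules between subpages; might_sleep()
+ * documents that callers must not hold a spinlock across the copy.
+ */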
+static void copy_huge_page(struct page *dst, struct page *src,
+ unsigned long addr)
+{
+ int i;
+
+ might_sleep();
+ for (i = 0; i < HPAGE_SIZE/PAGE_SIZE; i++) {
+ cond_resched();
+ copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE);
+ }
+}
+
/*
* Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages
*/
@@ ... @@
struct page *alloc_huge_page(struct vm_area_struct *vma, unsigned long addr)
{
struct page *page;
- int i;
spin_lock(&hugetlb_lock);
page = dequeue_huge_page(vma, addr);
@@ ... @@
}
spin_unlock(&hugetlb_lock);
set_page_refcounted(page);
- for (i = 0; i < (HPAGE_SIZE/PAGE_SIZE); ++i)
- clear_user_highpage(&page[i], addr);
return page;
}
@@ ... @@ static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, pte_t *ptep, pte_t pte)
{
struct page *old_page, *new_page;
- int i, avoidcopy;
+ int avoidcopy;
old_page = pte_page(pte);
@@ ... @@
}
spin_unlock(&mm->page_table_lock);
- for (i = 0; i < HPAGE_SIZE/PAGE_SIZE; i++)
- copy_user_highpage(new_page + i, old_page + i,
- address + i*PAGE_SIZE);
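+ /* page_table_lock was dropped above: copy_huge_page() may sleep */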
+ copy_huge_page(new_page, old_page, address);
spin_lock(&mm->page_table_lock);
ptep = huge_pte_offset(mm, address & HPAGE_MASK);
@@ ... @@ static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
ret = VM_FAULT_OOM;
goto out;
}
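+ /* the page is no longer cleared at allocation time; do it here,
+ * with no locks held, since clear_huge_page() may reschedule */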
+ clear_huge_page(page, address);
if (vma->vm_flags & VM_SHARED) {
int err;