Our copy_user_highpage() implementations may require cache maintenance.
Ensure that implementations have all necessary details to perform this
maintenance.
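
As an illustration of the kind of maintenance this enables, a VIVT
implementation could now write back the user-space mapping of the source
page before copying through the kernel mapping. A minimal sketch only
(the function name is hypothetical and the flush placement illustrative;
assumes linux/highmem.h and asm/cacheflush.h):

	void example_copy_user_highpage(struct page *to, struct page *from,
		unsigned long vaddr, struct vm_area_struct *vma)
	{
		void *kto, *kfrom;

		kto = kmap_atomic(to, KM_USER0);
		kfrom = kmap_atomic(from, KM_USER1);
		/*
		 * Write back any dirty user-space cache lines for the
		 * source page so the copy below sees current data; this
		 * is only possible now that the VMA is available.
		 */
		flush_cache_page(vma, vaddr, page_to_pfn(from));
		copy_page(kto, kfrom);
		kunmap_atomic(kfrom, KM_USER1);
		kunmap_atomic(kto, KM_USER0);
	}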
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
#endif
struct page;
+struct vm_area_struct;
struct cpu_user_fns {
void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
- unsigned long vaddr);
+ unsigned long vaddr, struct vm_area_struct *vma);
};
#ifdef MULTI_USER
extern void __cpu_clear_user_highpage(struct page *page, unsigned long vaddr);
extern void __cpu_copy_user_highpage(struct page *to, struct page *from,
- unsigned long vaddr);
+ unsigned long vaddr, struct vm_area_struct *vma);
#endif
#define clear_user_highpage(page,vaddr)		\
	__cpu_clear_user_highpage(page, vaddr)
#define __HAVE_ARCH_COPY_USER_HIGHPAGE
#define copy_user_highpage(to,from,vaddr,vma) \
- __cpu_copy_user_highpage(to, from, vaddr)
+ __cpu_copy_user_highpage(to, from, vaddr, vma)
#define clear_page(page) memset((void *)(page), 0, PAGE_SIZE)
extern void copy_page(void *to, const void *from);
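
Callers need no changes for this: the generic copy_user_highpage()
interface in linux/highmem.h already takes the VMA, so core mm call
sites have it in hand; the __HAVE_ARCH_COPY_USER_HIGHPAGE override now
forwards it instead of dropping it. A simplified, hypothetical call
site in the style of the copy-on-write path:

	/*
	 * cow_one_page() is illustrative only; in mm/memory.c the
	 * equivalent call is made with the faulting address and the
	 * VMA that was already looked up for the fault.
	 */
	static void cow_one_page(struct page *dst, struct page *src,
				 unsigned long address,
				 struct vm_area_struct *vma)
	{
		copy_user_highpage(dst, src, address, vma);
	}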
}
void feroceon_copy_user_highpage(struct page *to, struct page *from,
- unsigned long vaddr)
+ unsigned long vaddr, struct vm_area_struct *vma)
{
void *kto, *kfrom;
}
void v3_copy_user_highpage(struct page *to, struct page *from,
- unsigned long vaddr)
+ unsigned long vaddr, struct vm_area_struct *vma)
{
void *kto, *kfrom;
}
void v4_mc_copy_user_highpage(struct page *to, struct page *from,
- unsigned long vaddr)
+ unsigned long vaddr, struct vm_area_struct *vma)
{
void *kto = kmap_atomic(to, KM_USER1);
}
void v4wb_copy_user_highpage(struct page *to, struct page *from,
- unsigned long vaddr)
+ unsigned long vaddr, struct vm_area_struct *vma)
{
void *kto, *kfrom;
}
void v4wt_copy_user_highpage(struct page *to, struct page *from,
- unsigned long vaddr)
+ unsigned long vaddr, struct vm_area_struct *vma)
{
void *kto, *kfrom;
* attack the kernel's existing mapping of these pages.
*/
static void v6_copy_user_highpage_nonaliasing(struct page *to,
- struct page *from, unsigned long vaddr)
+ struct page *from, unsigned long vaddr, struct vm_area_struct *vma)
{
void *kto, *kfrom;
* Copy the page, taking account of the cache colour.
*/
static void v6_copy_user_highpage_aliasing(struct page *to,
- struct page *from, unsigned long vaddr)
+ struct page *from, unsigned long vaddr, struct vm_area_struct *vma)
{
unsigned int offset = CACHE_COLOUR(vaddr);
unsigned long kfrom, kto;
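
The aliasing variant shows why per-call context matters here: on an
aliasing VIPT cache the temporary kernel mappings must land on the same
cache colour as the user address, and the colour is derived from vaddr.
An illustrative definition only (the real macro lives with the ARM
aliasing support code and is keyed off SHMLBA):

	/*
	 * Two virtual mappings can alias in the cache iff they share a
	 * colour: the address bits that index the cache beyond a page.
	 */
	#define EXAMPLE_CACHE_COLOUR(vaddr) \
		(((vaddr) & (SHMLBA - 1)) >> PAGE_SHIFT)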
}
void xsc3_mc_copy_user_highpage(struct page *to, struct page *from,
- unsigned long vaddr)
+ unsigned long vaddr, struct vm_area_struct *vma)
{
void *kto, *kfrom;
}
void xscale_mc_copy_user_highpage(struct page *to, struct page *from,
- unsigned long vaddr)
+ unsigned long vaddr, struct vm_area_struct *vma)
{
void *kto = kmap_atomic(to, KM_USER1);