	unsigned long offset = address - vma->vm_start;
	struct page *pg;
#ifdef CONFIG_PPC64
-	void *vbase = test_thread_flag(TIF_32BIT) ?
-		vdso32_kbase : vdso64_kbase;
+	void *vbase = (vma->vm_mm->task_size > TASK_SIZE_USER32) ?
+		vdso64_kbase : vdso32_kbase;
#else
	void *vbase = vdso32_kbase;
#endif
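
The hunk above stops consulting current: a ->nopage handler can run on behalf of a task other than the one that owns the mapping (for example, a debugger poking the vDSO through ptrace ends up in get_user_pages() against the traced task's mm), and during exec current's TIF_32BIT flag may not yet describe the new image. Comparing vma->vm_mm->task_size against TASK_SIZE_USER32 ties the choice of vDSO image to the mm itself. A minimal sketch of the same decision, with a hypothetical helper name, assuming mm->task_size has been filled in as in the fs/exec.c hunk below:

/* Hypothetical helper, for illustration only: pick the vDSO flavour from the
 * mm that owns the vma, never from whatever task happens to be current. */
static inline void *vdso_base_for(struct mm_struct *mm)
{
	return (mm->task_size > TASK_SIZE_USER32) ? vdso64_kbase : vdso32_kbase;
}
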
	current->flags &= ~PF_RANDOMIZE;
	flush_thread();
+	/* Set the new mm task size. We have to do that late because it may
+	 * depend on TIF_32BIT which is only updated in flush_thread() on
+	 * some architectures like powerpc
+	 */
+	current->mm->task_size = TASK_SIZE;
+
	if (bprm->e_uid != current->euid || bprm->e_gid != current->egid ||
	    file_permission(bprm->file, MAY_READ) ||
	    (bprm->interp_flags & BINPRM_FLAGS_ENFORCE_NONDUMP)) {
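
Setting mm->task_size at this point rather than earlier is the whole trick: on ppc64, TASK_SIZE is not a constant but expands through the thread's TIF_32BIT flag, and that flag is only brought up to date for the image being exec'd inside flush_thread(). Roughly how the ppc64 definition of the era reads (paraphrased sketch, not quoted from the headers):

#define TASK_SIZE	(test_thread_flag(TIF_32BIT) ? \
			 TASK_SIZE_USER32 : TASK_SIZE_USER64)

Evaluating TASK_SIZE before flush_thread() would therefore capture the previous image's limit, e.g. recording a 64-bit task size for a 32-bit binary exec'd from a 64-bit shell.
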
				unsigned long addr, unsigned long len,
				unsigned long pgoff, unsigned long flags);
	void (*unmap_area) (struct mm_struct *mm, unsigned long addr);
-	unsigned long mmap_base;		/* base of mmap area */
-	unsigned long cached_hole_size;		/* if non-zero, the largest hole below free_area_cache */
+	unsigned long mmap_base;		/* base of mmap area */
+	unsigned long task_size;		/* size of task vm space */
+	unsigned long cached_hole_size;		/* if non-zero, the largest hole below free_area_cache */
	unsigned long free_area_cache;		/* first hole of size cached_hole_size or larger */
	pgd_t * pgd;
	atomic_t mm_users;			/* How many users with user space? */
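
With task_size stored in struct mm_struct, the address-space limit travels with the mm itself, so code that inspects another process's memory map can bound addresses without inferring anything from current. A hypothetical consumer sketch (the helper name is invented for illustration):

/* Illustrative only: check that [addr, addr + len) fits below the limit of
 * the mm being examined, which need not belong to current. Written so the
 * arithmetic cannot wrap around. */
static inline int range_within_task(struct mm_struct *mm,
				    unsigned long addr, unsigned long len)
{
	return len <= mm->task_size && addr <= mm->task_size - len;
}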