arm64: vdso: move to _install_special_mapping and remove arch_vma_name
author: Will Deacon <will.deacon@arm.com>
Wed, 9 Jul 2014 18:22:12 +0000 (19:22 +0100)
committer: Catalin Marinas <catalin.marinas@arm.com>
Thu, 17 Jul 2014 15:18:46 +0000 (16:18 +0100)
_install_special_mapping replaces install_special_mapping and removes
the need to detect special VMAs in arch_vma_name.

This patch moves the vdso and compat vectors page code over to the new
API.

Cc: Andy Lutomirski <luto@amacapital.net>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
arch/arm64/kernel/vdso.c

index 84cafbc3eb5452ca621190c3cde0305852b2f315..60ae12087d9f325e440a7e71b0ca793361221c9e 100644 (file)
@@ -88,22 +88,29 @@ int aarch32_setup_vectors_page(struct linux_binprm *bprm, int uses_interp)
 {
        struct mm_struct *mm = current->mm;
        unsigned long addr = AARCH32_VECTORS_BASE;
-       int ret;
+       static struct vm_special_mapping spec = {
+               .name   = "[vectors]",
+               .pages  = vectors_page,
+
+       };
+       void *ret;
 
        down_write(&mm->mmap_sem);
        current->mm->context.vdso = (void *)addr;
 
        /* Map vectors page at the high address. */
-       ret = install_special_mapping(mm, addr, PAGE_SIZE,
-                                     VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYEXEC,
-                                     vectors_page);
+       ret = _install_special_mapping(mm, addr, PAGE_SIZE,
+                                      VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYEXEC,
+                                      &spec);
 
        up_write(&mm->mmap_sem);
 
-       return ret;
+       return PTR_ERR_OR_ZERO(ret);
 }
 #endif /* CONFIG_COMPAT */
 
+static struct vm_special_mapping vdso_spec[2];
+
 static int __init vdso_init(void)
 {
        int i;
@@ -130,6 +137,17 @@ static int __init vdso_init(void)
        /* Grab the vDSO data page. */
        vdso_pagelist[i] = virt_to_page(vdso_data);
 
+       /* Populate the special mapping structures */
+       vdso_spec[0] = (struct vm_special_mapping) {
+               .name   = "[vdso]",
+               .pages  = vdso_pagelist,
+       };
+
+       vdso_spec[1] = (struct vm_special_mapping) {
+               .name   = "[vvar]",
+               .pages  = vdso_pagelist + vdso_pages,
+       };
+
        return 0;
 }
 arch_initcall(vdso_init);
@@ -139,7 +157,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
 {
        struct mm_struct *mm = current->mm;
        unsigned long vdso_base, vdso_text_len, vdso_mapping_len;
-       int ret;
+       void *ret;
 
        vdso_text_len = vdso_pages << PAGE_SHIFT;
        /* Be sure to map the data page */
@@ -148,23 +166,23 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
        down_write(&mm->mmap_sem);
        vdso_base = get_unmapped_area(NULL, 0, vdso_mapping_len, 0, 0);
        if (IS_ERR_VALUE(vdso_base)) {
-               ret = vdso_base;
+               ret = ERR_PTR(vdso_base);
                goto up_fail;
        }
        mm->context.vdso = (void *)vdso_base;
 
-       ret = install_special_mapping(mm, vdso_base, vdso_text_len,
-                                     VM_READ|VM_EXEC|
-                                     VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
-                                     vdso_pagelist);
-       if (ret)
+       ret = _install_special_mapping(mm, vdso_base, vdso_text_len,
+                                      VM_READ|VM_EXEC|
+                                      VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
+                                      &vdso_spec[0]);
+       if (IS_ERR(ret))
                goto up_fail;
 
        vdso_base += vdso_text_len;
-       ret = install_special_mapping(mm, vdso_base, PAGE_SIZE,
-                                     VM_READ|VM_MAYREAD,
-                                     vdso_pagelist + vdso_pages);
-       if (ret)
+       ret = _install_special_mapping(mm, vdso_base, PAGE_SIZE,
+                                      VM_READ|VM_MAYREAD,
+                                      &vdso_spec[1]);
+       if (IS_ERR(ret))
                goto up_fail;
 
        up_write(&mm->mmap_sem);
@@ -173,35 +191,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
 up_fail:
        mm->context.vdso = NULL;
        up_write(&mm->mmap_sem);
-       return ret;
-}
-
-const char *arch_vma_name(struct vm_area_struct *vma)
-{
-       unsigned long vdso_text;
-
-       if (!vma->vm_mm)
-               return NULL;
-
-       vdso_text = (unsigned long)vma->vm_mm->context.vdso;
-
-       /*
-        * We can re-use the vdso pointer in mm_context_t for identifying
-        * the vectors page for compat applications. The vDSO will always
-        * sit above TASK_UNMAPPED_BASE and so we don't need to worry about
-        * it conflicting with the vectors base.
-        */
-       if (vma->vm_start == vdso_text) {
-#ifdef CONFIG_COMPAT
-               if (vma->vm_start == AARCH32_VECTORS_BASE)
-                       return "[vectors]";
-#endif
-               return "[vdso]";
-       } else if (vma->vm_start == (vdso_text + (vdso_pages << PAGE_SHIFT))) {
-               return "[vvar]";
-       }
-
-       return NULL;
+       return PTR_ERR(ret);
 }
 
 /*