MIPS: Move arch_get_unmapped_area and gang to new file.
author    Ralf Baechle <ralf@linux-mips.org>
          Thu, 19 May 2011 08:21:33 +0000 (09:21 +0100)
committer Ralf Baechle <ralf@linux-mips.org>
          Thu, 19 May 2011 08:55:49 +0000 (09:55 +0100)
It never really belonged in syscall.c, and it's about to become significantly more
complex.

Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
arch/mips/kernel/syscall.c
arch/mips/mm/Makefile
arch/mips/mm/mmap.c [new file with mode: 0644]

diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c
index 0c207e8ee6018a05953010e9821f7f4b7e675423..d02765708ddb9672a8ebe787068f1709d0e8d09b 100644
--- a/arch/mips/kernel/syscall.c
+++ b/arch/mips/kernel/syscall.c
 #include <linux/capability.h>
 #include <linux/errno.h>
 #include <linux/linkage.h>
-#include <linux/mm.h>
 #include <linux/fs.h>
 #include <linux/smp.h>
-#include <linux/mman.h>
 #include <linux/ptrace.h>
-#include <linux/sched.h>
 #include <linux/string.h>
 #include <linux/syscalls.h>
 #include <linux/file.h>
 #include <linux/msg.h>
 #include <linux/shm.h>
 #include <linux/compiler.h>
-#include <linux/module.h>
 #include <linux/ipc.h>
 #include <linux/uaccess.h>
 #include <linux/slab.h>
-#include <linux/random.h>
 #include <linux/elf.h>
 
 #include <asm/asm.h>
@@ -66,114 +61,6 @@ out:
        return res;
 }
 
-unsigned long shm_align_mask = PAGE_SIZE - 1;  /* Sane caches */
-
-EXPORT_SYMBOL(shm_align_mask);
-
-#define COLOUR_ALIGN(addr,pgoff)                               \
-       ((((addr) + shm_align_mask) & ~shm_align_mask) +        \
-        (((pgoff) << PAGE_SHIFT) & shm_align_mask))
-
-unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
-       unsigned long len, unsigned long pgoff, unsigned long flags)
-{
-       struct vm_area_struct * vmm;
-       int do_color_align;
-
-       if (len > TASK_SIZE)
-               return -ENOMEM;
-
-       if (flags & MAP_FIXED) {
-               /* Even MAP_FIXED mappings must reside within TASK_SIZE.  */
-               if (TASK_SIZE - len < addr)
-                       return -EINVAL;
-
-               /*
-                * We do not accept a shared mapping if it would violate
-                * cache aliasing constraints.
-                */
-               if ((flags & MAP_SHARED) &&
-                   ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
-                       return -EINVAL;
-               return addr;
-       }
-
-       do_color_align = 0;
-       if (filp || (flags & MAP_SHARED))
-               do_color_align = 1;
-       if (addr) {
-               if (do_color_align)
-                       addr = COLOUR_ALIGN(addr, pgoff);
-               else
-                       addr = PAGE_ALIGN(addr);
-               vmm = find_vma(current->mm, addr);
-               if (TASK_SIZE - len >= addr &&
-                   (!vmm || addr + len <= vmm->vm_start))
-                       return addr;
-       }
-       addr = current->mm->mmap_base;
-       if (do_color_align)
-               addr = COLOUR_ALIGN(addr, pgoff);
-       else
-               addr = PAGE_ALIGN(addr);
-
-       for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) {
-               /* At this point:  (!vmm || addr < vmm->vm_end). */
-               if (TASK_SIZE - len < addr)
-                       return -ENOMEM;
-               if (!vmm || addr + len <= vmm->vm_start)
-                       return addr;
-               addr = vmm->vm_end;
-               if (do_color_align)
-                       addr = COLOUR_ALIGN(addr, pgoff);
-       }
-}
-
-void arch_pick_mmap_layout(struct mm_struct *mm)
-{
-       unsigned long random_factor = 0UL;
-
-       if (current->flags & PF_RANDOMIZE) {
-               random_factor = get_random_int();
-               random_factor = random_factor << PAGE_SHIFT;
-               if (TASK_IS_32BIT_ADDR)
-                       random_factor &= 0xfffffful;
-               else
-                       random_factor &= 0xffffffful;
-       }
-
-       mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
-       mm->get_unmapped_area = arch_get_unmapped_area;
-       mm->unmap_area = arch_unmap_area;
-}
-
-static inline unsigned long brk_rnd(void)
-{
-       unsigned long rnd = get_random_int();
-
-       rnd = rnd << PAGE_SHIFT;
-       /* 8MB for 32bit, 256MB for 64bit */
-       if (TASK_IS_32BIT_ADDR)
-               rnd = rnd & 0x7ffffful;
-       else
-               rnd = rnd & 0xffffffful;
-
-       return rnd;
-}
-
-unsigned long arch_randomize_brk(struct mm_struct *mm)
-{
-       unsigned long base = mm->brk;
-       unsigned long ret;
-
-       ret = PAGE_ALIGN(base + brk_rnd());
-
-       if (ret < mm->brk)
-               return mm->brk;
-
-       return ret;
-}
-
 SYSCALL_DEFINE6(mips_mmap, unsigned long, addr, unsigned long, len,
        unsigned long, prot, unsigned long, flags, unsigned long,
        fd, off_t, offset)
diff --git a/arch/mips/mm/Makefile b/arch/mips/mm/Makefile
index eb4463689faa0bcc44529f234634f4ef46e55843..4d8c1623eee2978a00bb792a6e714bfe668f27d7 100644
--- a/arch/mips/mm/Makefile
+++ b/arch/mips/mm/Makefile
@@ -3,7 +3,8 @@
 #
 
 obj-y                          += cache.o dma-default.o extable.o fault.o \
-                                  init.o tlbex.o tlbex-fault.o uasm.o page.o
+                                  init.o mmap.o tlbex.o tlbex-fault.o uasm.o \
+                                  page.o
 
 obj-$(CONFIG_32BIT)            += ioremap.o pgtable-32.o
 obj-$(CONFIG_64BIT)            += pgtable-64.o
diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
new file mode 100644
index 0000000..ae3c20a
--- /dev/null
+++ b/arch/mips/mm/mmap.c
@@ -0,0 +1,122 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2011 Wind River Systems,
+ *   written by Ralf Baechle <ralf@linux-mips.org>
+ */
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/module.h>
+#include <linux/random.h>
+#include <linux/sched.h>
+
+unsigned long shm_align_mask = PAGE_SIZE - 1;  /* Sane caches */
+
+EXPORT_SYMBOL(shm_align_mask);
+
+#define COLOUR_ALIGN(addr,pgoff)                               \
+       ((((addr) + shm_align_mask) & ~shm_align_mask) +        \
+        (((pgoff) << PAGE_SHIFT) & shm_align_mask))
+
+unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
+       unsigned long len, unsigned long pgoff, unsigned long flags)
+{
+       struct vm_area_struct * vmm;
+       int do_color_align;
+
+       if (len > TASK_SIZE)
+               return -ENOMEM;
+
+       if (flags & MAP_FIXED) {
+               /* Even MAP_FIXED mappings must reside within TASK_SIZE.  */
+               if (TASK_SIZE - len < addr)
+                       return -EINVAL;
+
+               /*
+                * We do not accept a shared mapping if it would violate
+                * cache aliasing constraints.
+                */
+               if ((flags & MAP_SHARED) &&
+                   ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
+                       return -EINVAL;
+               return addr;
+       }
+
+       do_color_align = 0;
+       if (filp || (flags & MAP_SHARED))
+               do_color_align = 1;
+       if (addr) {
+               if (do_color_align)
+                       addr = COLOUR_ALIGN(addr, pgoff);
+               else
+                       addr = PAGE_ALIGN(addr);
+               vmm = find_vma(current->mm, addr);
+               if (TASK_SIZE - len >= addr &&
+                   (!vmm || addr + len <= vmm->vm_start))
+                       return addr;
+       }
+       addr = current->mm->mmap_base;
+       if (do_color_align)
+               addr = COLOUR_ALIGN(addr, pgoff);
+       else
+               addr = PAGE_ALIGN(addr);
+
+       for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) {
+               /* At this point:  (!vmm || addr < vmm->vm_end). */
+               if (TASK_SIZE - len < addr)
+                       return -ENOMEM;
+               if (!vmm || addr + len <= vmm->vm_start)
+                       return addr;
+               addr = vmm->vm_end;
+               if (do_color_align)
+                       addr = COLOUR_ALIGN(addr, pgoff);
+       }
+}
+
+void arch_pick_mmap_layout(struct mm_struct *mm)
+{
+       unsigned long random_factor = 0UL;
+
+       if (current->flags & PF_RANDOMIZE) {
+               random_factor = get_random_int();
+               random_factor = random_factor << PAGE_SHIFT;
+               if (TASK_IS_32BIT_ADDR)
+                       random_factor &= 0xfffffful;
+               else
+                       random_factor &= 0xffffffful;
+       }
+
+       mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
+       mm->get_unmapped_area = arch_get_unmapped_area;
+       mm->unmap_area = arch_unmap_area;
+}
+
+static inline unsigned long brk_rnd(void)
+{
+       unsigned long rnd = get_random_int();
+
+       rnd = rnd << PAGE_SHIFT;
+       /* 8MB for 32bit, 256MB for 64bit */
+       if (TASK_IS_32BIT_ADDR)
+               rnd = rnd & 0x7ffffful;
+       else
+               rnd = rnd & 0xffffffful;
+
+       return rnd;
+}
+
+unsigned long arch_randomize_brk(struct mm_struct *mm)
+{
+       unsigned long base = mm->brk;
+       unsigned long ret;
+
+       ret = PAGE_ALIGN(base + brk_rnd());
+
+       if (ret < mm->brk)
+               return mm->brk;
+
+       return ret;
+}
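
For reference, here is a minimal user-space sketch (not part of the commit) of the
colour-alignment arithmetic the new mmap.c applies to shared mappings. The
shm_align_mask value, the hint address and the pgoff below are made-up example
values; the kernel derives shm_align_mask from the detected D-cache alias span.

#include <stdio.h>

#define PAGE_SHIFT      12

/* Assumed 32 KB alias span; purely illustrative. */
static unsigned long shm_align_mask = 0x7fff;

#define COLOUR_ALIGN(addr, pgoff)                               \
        ((((addr) + shm_align_mask) & ~shm_align_mask) +        \
         (((pgoff) << PAGE_SHIFT) & shm_align_mask))

int main(void)
{
        unsigned long addr  = 0x2aab1000;       /* hypothetical mmap hint */
        unsigned long pgoff = 3;                /* file offset, in pages  */
        unsigned long chosen = COLOUR_ALIGN(addr, pgoff);

        /* A shared mapping lands on the same cache colour as the file
         * offset, so virtual aliases of the same page index to the same
         * lines of a virtually indexed D-cache. */
        printf("hint    %#lx  colour %#lx\n", addr,   addr   & shm_align_mask);
        printf("chosen  %#lx  colour %#lx\n", chosen, chosen & shm_align_mask);
        printf("pgoff   colour %#lx\n", (pgoff << PAGE_SHIFT) & shm_align_mask);
        return 0;
}

Running it shows the chosen address picking up the same colour (offset within the
alias span) as the file offset, which is the same constraint the MAP_FIXED branch
rejects violations of.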
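
Similarly, a quick standalone check (again illustration only, not kernel code) of
the randomization spans implied by the masks in arch_pick_mmap_layout() and
brk_rnd():

#include <stdio.h>

int main(void)
{
        /* The masks are applied after get_random_int() has been shifted
         * left by PAGE_SHIFT, so each mask is the maximum byte offset
         * added to TASK_UNMAPPED_BASE (mmap) or to the current brk. */
        unsigned long mmap32 = 0xfffffful;      /* 32-bit tasks */
        unsigned long mmap64 = 0xffffffful;     /* 64-bit tasks */
        unsigned long brk32  = 0x7ffffful;
        unsigned long brk64  = 0xffffffful;

        printf("mmap_base span: %lu MB (32-bit), %lu MB (64-bit)\n",
               (mmap32 + 1) >> 20, (mmap64 + 1) >> 20);
        printf("brk span:       %lu MB (32-bit), %lu MB (64-bit)\n",
               (brk32 + 1) >> 20, (brk64 + 1) >> 20);
        return 0;
}

This prints 16 MB/256 MB for mmap_base and 8 MB/256 MB for brk, matching the
"8MB for 32bit, 256MB for 64bit" comment in brk_rnd().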