/*
 *	(C) Copyright 1996 Linus Torvalds
 *
 *	Address space accounting code	<alan@redhat.com>
 *	(C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/syscalls.h>

#include <asm/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
static pmd_t *get_old_pmd(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	/*
	 * We don't need page_table_lock: we have mmap_sem exclusively.
	 */
	pgd = pgd_offset(mm, addr);
	if (pgd_none_or_clear_bad(pgd))
		return NULL;

	pud = pud_offset(pgd, addr);
	if (pud_none_or_clear_bad(pud))
		return NULL;

	pmd = pmd_offset(pud, addr);
	if (pmd_none_or_clear_bad(pmd))
		return NULL;

	return pmd;
}
static pmd_t *alloc_new_pmd(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd = NULL;
	pte_t *pte;

	/*
	 * We do need page_table_lock: because allocators expect that.
	 */
	spin_lock(&mm->page_table_lock);
	pgd = pgd_offset(mm, addr);
	pud = pud_alloc(mm, pgd, addr);
	if (!pud)
		goto out;

	pmd = pmd_alloc(mm, pud, addr);
	if (!pmd)
		goto out;

	pte = pte_alloc_map(mm, pmd, addr);
	if (!pte) {
		pmd = NULL;
		goto out;
	}
	pte_unmap(pte);
out:
	spin_unlock(&mm->page_table_lock);
	return pmd;
}
static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
		unsigned long old_addr, unsigned long old_end,
		struct vm_area_struct *new_vma, pmd_t *new_pmd,
		unsigned long new_addr)
{
	struct address_space *mapping = NULL;
	struct mm_struct *mm = vma->vm_mm;
	pte_t *old_pte, *new_pte, pte;

	if (vma->vm_file) {
		/*
		 * Subtle point from Rajesh Venkatasubramanian: before
		 * moving file-based ptes, we must lock vmtruncate out,
		 * since it might clean the dst vma before the src vma,
		 * and we propagate stale pages into the dst afterward.
		 */
		mapping = vma->vm_file->f_mapping;
		spin_lock(&mapping->i_mmap_lock);
		if (new_vma->vm_truncate_count &&
		    new_vma->vm_truncate_count != vma->vm_truncate_count)
			new_vma->vm_truncate_count = 0;
	}
	spin_lock(&mm->page_table_lock);
	old_pte = pte_offset_map(old_pmd, old_addr);
	new_pte = pte_offset_map_nested(new_pmd, new_addr);

	for (; old_addr < old_end; old_pte++, old_addr += PAGE_SIZE,
				   new_pte++, new_addr += PAGE_SIZE) {
		if (pte_none(*old_pte))
			continue;
		pte = ptep_clear_flush(vma, old_addr, old_pte);
		/* ZERO_PAGE can be dependent on virtual addr */
		pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
		set_pte_at(mm, new_addr, new_pte, pte);
	}

	pte_unmap_nested(new_pte - 1);
	pte_unmap(old_pte - 1);
	spin_unlock(&mm->page_table_lock);
	if (mapping)
		spin_unlock(&mapping->i_mmap_lock);
}
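/*
 * Illustrative timeline of the truncate race guarded against above (an
 * editor's sketch, not original source): without i_mmap_lock, vmtruncate
 * could clean new_vma's ptes first, then move_ptes() copies still-live
 * ptes from the old range into new_vma, and only afterwards does
 * vmtruncate clean the old range -- leaving stale pages mapped at the
 * destination. Holding i_mmap_lock across the whole move makes the move
 * atomic with respect to truncation.
 */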
#define LATENCY_LIMIT	(64 * PAGE_SIZE)
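/*
 * Worked example (editor's note, assuming 4K pages): LATENCY_LIMIT caps
 * each move_ptes() batch at 64 * 4K = 256K, i.e. at most 64 ptes copied
 * per acquisition of page_table_lock (and i_mmap_lock), bounding lock
 * hold times even when a pmd covers far more than that.
 */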
static unsigned long move_page_tables(struct vm_area_struct *vma,
		unsigned long old_addr, struct vm_area_struct *new_vma,
		unsigned long new_addr, unsigned long len)
{
	unsigned long extent, next, old_end;
	pmd_t *old_pmd, *new_pmd;

	old_end = old_addr + len;
	flush_cache_range(vma, old_addr, old_end);

	for (; old_addr < old_end; old_addr += extent, new_addr += extent) {
		cond_resched();
		next = (old_addr + PMD_SIZE) & PMD_MASK;
		if (next - 1 > old_end)
			next = old_end;
		extent = next - old_addr;
		old_pmd = get_old_pmd(vma->vm_mm, old_addr);
		if (!old_pmd)
			continue;
		new_pmd = alloc_new_pmd(vma->vm_mm, new_addr);
		if (!new_pmd)
			break;
		next = (new_addr + PMD_SIZE) & PMD_MASK;
		if (extent > next - new_addr)
			extent = next - new_addr;
		if (extent > LATENCY_LIMIT)
			extent = LATENCY_LIMIT;
		move_ptes(vma, old_pmd, old_addr, old_addr + extent,
				new_vma, new_pmd, new_addr);
	}

	return len + old_addr - old_end;	/* how much done */
}
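/*
 * Editor's worked example for the return value above: with len of 16
 * pages and the loop broken off (new pmd allocation failed) after 10
 * pages, old_addr == old_end - 6 pages, so the function reports 10
 * pages moved; move_vma() compares this against old_len to detect the
 * partial move and back out.
 */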
static unsigned long move_vma(struct vm_area_struct *vma,
		unsigned long old_addr, unsigned long old_len,
		unsigned long new_len, unsigned long new_addr)
{
	struct mm_struct *mm = vma->vm_mm;
	struct vm_area_struct *new_vma;
	unsigned long vm_flags = vma->vm_flags;
	unsigned long new_pgoff;
	unsigned long moved_len;
	unsigned long excess = 0;
	unsigned long hiwater_vm;
	int split = 0;

	/*
	 * We'd prefer to avoid failure later on in do_munmap,
	 * which may split one vma into three before unmapping.
	 */
	if (mm->map_count >= sysctl_max_map_count - 3)
		return -ENOMEM;

	new_pgoff = vma->vm_pgoff + ((old_addr - vma->vm_start) >> PAGE_SHIFT);
	new_vma = copy_vma(&vma, new_addr, new_len, new_pgoff);
	if (!new_vma)
		return -ENOMEM;

	moved_len = move_page_tables(vma, old_addr, new_vma, new_addr, old_len);
	if (moved_len < old_len) {
		/*
		 * On error, move entries back from new area to old,
		 * which will succeed since page tables still there,
		 * and then proceed to unmap new area instead of old.
		 */
		move_page_tables(new_vma, new_addr, vma, old_addr, moved_len);
		vma = new_vma;
		old_len = new_len;
		old_addr = new_addr;
		new_addr = -ENOMEM;
	}

	/* Conceal VM_ACCOUNT so old reservation is not undone */
	if (vm_flags & VM_ACCOUNT) {
		vma->vm_flags &= ~VM_ACCOUNT;
		excess = vma->vm_end - vma->vm_start - old_len;
		if (old_addr > vma->vm_start &&
		    old_addr + old_len < vma->vm_end)
			split = 1;
	}

	/*
	 * If we failed to move page tables we still do total_vm increment
	 * since do_munmap() will decrement it by old_len == new_len.
	 *
	 * Since total_vm is about to be raised artificially high for a
	 * moment, we need to restore high watermark afterwards: if stats
	 * are taken meanwhile, total_vm and hiwater_vm appear too high.
	 * If this were a serious issue, we'd add a flag to do_munmap().
	 */
	hiwater_vm = mm->hiwater_vm;
	mm->total_vm += new_len >> PAGE_SHIFT;
	vm_stat_account(mm, vma->vm_flags, vma->vm_file, new_len >> PAGE_SHIFT);

	if (do_munmap(mm, old_addr, old_len) < 0) {
		/* OOM: unable to split vma, just get accounts right */
		vm_unacct_memory(excess >> PAGE_SHIFT);
		excess = 0;
	}
	mm->hiwater_vm = hiwater_vm;

	/* Restore VM_ACCOUNT if one or two pieces of vma left */
	if (excess) {
		vma->vm_flags |= VM_ACCOUNT;
		if (split)
			vma->vm_next->vm_flags |= VM_ACCOUNT;
	}

	if (vm_flags & VM_LOCKED) {
		mm->locked_vm += new_len >> PAGE_SHIFT;
		if (new_len > old_len)
			make_pages_present(new_addr + old_len,
					   new_addr + new_len);
	}

	return new_addr;
}
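/*
 * Editor's worked example for the VM_ACCOUNT handling above: if the
 * middle 2 pages of a 6-page accounted vma are moved, excess is 4 pages
 * and split is set; do_munmap() leaves two pieces (before and after the
 * hole), and both get VM_ACCOUNT restored so their 4 pages' worth of
 * reservation is released by a later munmap rather than undone now.
 */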
/*
 * Expand (or shrink) an existing mapping, potentially moving it at the
 * same time (controlled by the MREMAP_MAYMOVE flag and available VM space)
 *
 * MREMAP_FIXED option added 5-Dec-1999 by Benjamin LaHaise
 * This option implies MREMAP_MAYMOVE.
 */
unsigned long do_mremap(unsigned long addr,
	unsigned long old_len, unsigned long new_len,
	unsigned long flags, unsigned long new_addr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long ret = -EINVAL;
	unsigned long charged = 0;

	if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
		goto out;

	if (addr & ~PAGE_MASK)
		goto out;

	old_len = PAGE_ALIGN(old_len);
	new_len = PAGE_ALIGN(new_len);

	/*
	 * We allow a zero old-len as a special case
	 * for DOS-emu "duplicate shm area" thing. But
	 * a zero new-len is nonsensical.
	 */
	if (!new_len)
		goto out;

	/* new_addr is only valid if MREMAP_FIXED is specified */
	if (flags & MREMAP_FIXED) {
		if (new_addr & ~PAGE_MASK)
			goto out;
		if (!(flags & MREMAP_MAYMOVE))
			goto out;

		if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
			goto out;

		/* Check if the location we're moving into overlaps the
		 * old location at all, and fail if it does.
		 * (e.g. new_addr 0x2000 with new_len 0x3000 overlaps an
		 * old range starting at addr 0x4000, since 0x5000 > 0x4000.)
		 */
		if ((new_addr <= addr) && (new_addr+new_len) > addr)
			goto out;

		if ((addr <= new_addr) && (addr+old_len) > new_addr)
			goto out;

		ret = do_munmap(mm, new_addr, new_len);
		if (ret)
			goto out;
	}

	/*
	 * Always allow a shrinking remap: that just unmaps
	 * the unnecessary pages..
	 * do_munmap does all the needed commit accounting
	 */
	if (old_len >= new_len) {
		ret = do_munmap(mm, addr+new_len, old_len - new_len);
		if (ret && old_len != new_len)
			goto out;
		ret = addr;
		if (!(flags & MREMAP_FIXED) || (new_addr == addr))
			goto out;
		old_len = new_len;
	}

	/*
	 * Ok, we need to grow.. or relocate.
	 */
	ret = -EFAULT;
	vma = find_vma(mm, addr);
	if (!vma || vma->vm_start > addr)
		goto out;
	if (is_vm_hugetlb_page(vma)) {
		ret = -EINVAL;
		goto out;
	}
	/* We can't remap across vm area boundaries */
	if (old_len > vma->vm_end - addr)
		goto out;
	if (vma->vm_flags & VM_DONTEXPAND) {
		if (new_len > old_len)
			goto out;
	}
	if (vma->vm_flags & VM_LOCKED) {
		unsigned long locked, lock_limit;
		locked = mm->locked_vm << PAGE_SHIFT;
		lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
		locked += new_len - old_len;
		ret = -EAGAIN;
		if (locked > lock_limit && !capable(CAP_IPC_LOCK))
			goto out;
	}
	if (!may_expand_vm(mm, (new_len - old_len) >> PAGE_SHIFT)) {
		ret = -ENOMEM;
		goto out;
	}

	if (vma->vm_flags & VM_ACCOUNT) {
		charged = (new_len - old_len) >> PAGE_SHIFT;
		if (security_vm_enough_memory(charged))
			goto out_nc;
	}

	/* old_len exactly to the end of the area..
	 * And we're not relocating the area.
	 */
	if (old_len == vma->vm_end - addr &&
	    !((flags & MREMAP_FIXED) && (addr != new_addr)) &&
	    (old_len != new_len || !(flags & MREMAP_MAYMOVE))) {
		unsigned long max_addr = TASK_SIZE;
		if (vma->vm_next)
			max_addr = vma->vm_next->vm_start;
		/* can we just expand the current mapping? */
		if (max_addr - addr >= new_len) {
			int pages = (new_len - old_len) >> PAGE_SHIFT;

			vma_adjust(vma, vma->vm_start,
				addr + new_len, vma->vm_pgoff, NULL);

			mm->total_vm += pages;
			vm_stat_account(mm, vma->vm_flags, vma->vm_file, pages);
			if (vma->vm_flags & VM_LOCKED) {
				mm->locked_vm += pages;
				make_pages_present(addr + old_len,
						   addr + new_len);
			}
			ret = addr;
			goto out;
		}
	}

	/*
	 * We weren't able to just expand or shrink the area,
	 * we need to create a new one and move it..
	 */
	ret = -ENOMEM;
	if (flags & MREMAP_MAYMOVE) {
		if (!(flags & MREMAP_FIXED)) {
			unsigned long map_flags = 0;
			if (vma->vm_flags & VM_MAYSHARE)
				map_flags |= MAP_SHARED;

			new_addr = get_unmapped_area(vma->vm_file, 0, new_len,
						vma->vm_pgoff, map_flags);
			ret = new_addr;
			if (new_addr & ~PAGE_MASK)
				goto out;
		}
		ret = move_vma(vma, addr, old_len, new_len, new_addr);
	}
out:
	if (ret & ~PAGE_MASK)
		vm_unacct_memory(charged);
out_nc:
	return ret;
}
asmlinkage unsigned long sys_mremap(unsigned long addr,
	unsigned long old_len, unsigned long new_len,
	unsigned long flags, unsigned long new_addr)
{
	unsigned long ret;

	down_write(&current->mm->mmap_sem);
	ret = do_mremap(addr, old_len, new_len, flags, new_addr);
	up_write(&current->mm->mmap_sem);
	return ret;
}
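/*
 * Example userspace usage (editor's illustration, not part of this file):
 * growing an anonymous mapping and letting the kernel relocate it when
 * the pages after it are occupied.
 *
 *	#include <sys/mman.h>
 *
 *	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	void *q = mremap(p, 4096, 8192, MREMAP_MAYMOVE);
 *	if (q == MAP_FAILED)
 *		perror("mremap");
 *
 * With MREMAP_MAYMOVE clear, only the in-place expansion path in
 * do_mremap() above is tried, and the call fails with ENOMEM when the
 * next vma is in the way.
 */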