}
EXPORT_SYMBOL(vm_get_page_prot);
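+/*
+ * Apply the protections implied by vm_flags while preserving any
+ * cachability attributes (e.g. noncached, writecombine) a driver may
+ * already have set in oldprot; pgprot_modify() carries those over.
+ */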
+static pgprot_t vm_pgprot_modify(pgprot_t oldprot, unsigned long vm_flags)
+{
+ return pgprot_modify(oldprot, vm_get_page_prot(vm_flags));
+}
+
+/* Update vma->vm_page_prot to reflect vma->vm_flags. */
+void vma_set_page_prot(struct vm_area_struct *vma)
+{
+ unsigned long vm_flags = vma->vm_flags;
+
+ vma->vm_page_prot = vm_pgprot_modify(vma->vm_page_prot, vm_flags);
+ if (vma_wants_writenotify(vma)) {
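+ /*
+ * Clear VM_SHARED to pick the write-protected variant: shared
+ * writable pages then fault on first write, so ->page_mkwrite
+ * and dirty accounting get to see them.
+ */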
+ vm_flags &= ~VM_SHARED;
+ vma->vm_page_prot = vm_pgprot_modify(vma->vm_page_prot,
+ vm_flags);
+ }
+}
+
int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS; /* heuristic overcommit */
int sysctl_overcommit_ratio __read_mostly = 50; /* default is 50% */
unsigned long sysctl_overcommit_kbytes __read_mostly;
struct vm_area_struct *vma;
vma = rb_entry(nd, struct vm_area_struct, vm_rb);
if (vma->vm_start < prev) {
- pr_emerg("vm_start %lx prev %lx\n", vma->vm_start, prev);
+ pr_emerg("vm_start %lx < prev %lx\n",
+ vma->vm_start, prev);
bug = 1;
}
if (vma->vm_start < pend) {
- pr_emerg("vm_start %lx pend %lx\n", vma->vm_start, pend);
+ pr_emerg("vm_start %lx < pend %lx\n",
+ vma->vm_start, pend);
bug = 1;
}
if (vma->vm_start > vma->vm_end) {
- pr_emerg("vm_end %lx < vm_start %lx\n",
- vma->vm_end, vma->vm_start);
+ pr_emerg("vm_start %lx > vm_end %lx\n",
+ vma->vm_start, vma->vm_end);
bug = 1;
}
if (vma->rb_subtree_gap != vma_compute_subtree_gap(vma)) {
for (nd = rb_first(root); nd; nd = rb_next(nd)) {
struct vm_area_struct *vma;
vma = rb_entry(nd, struct vm_area_struct, vm_rb);
- BUG_ON(vma != ignore &&
- vma->rb_subtree_gap != vma_compute_subtree_gap(vma));
+ VM_BUG_ON_VMA(vma != ignore &&
+ vma->rb_subtree_gap != vma_compute_subtree_gap(vma),
+ vma);
}
}
int i = 0;
unsigned long highest_address = 0;
struct vm_area_struct *vma = mm->mmap;
+
while (vma) {
struct anon_vma_chain *avc;
+
vma_lock_anon_vma(vma);
list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
anon_vma_interval_tree_verify(avc);
}
if (highest_address != mm->highest_vm_end) {
pr_emerg("mm->highest_vm_end %lx, found %lx\n",
- mm->highest_vm_end, highest_address);
+ mm->highest_vm_end, highest_address);
bug = 1;
}
i = browse_rb(&mm->mm_rb);
if (i != mm->map_count) {
- pr_emerg("map_count %d rb %d\n", mm->map_count, i);
+ if (i != -1)
+ pr_emerg("map_count %d rb %d\n", mm->map_count, i);
bug = 1;
}
- BUG_ON(bug);
+ VM_BUG_ON_MM(bug, mm);
}
#else
#define validate_mm_rb(root, ignore) do { } while (0)
if (!anon_vma && adjust_next)
anon_vma = next->anon_vma;
if (anon_vma) {
- VM_BUG_ON(adjust_next && next->anon_vma &&
- anon_vma != next->anon_vma);
+ VM_BUG_ON_VMA(adjust_next && next->anon_vma &&
+ anon_vma != next->anon_vma, next);
anon_vma_lock_write(anon_vma);
anon_vma_interval_tree_pre_update_vma(vma);
if (adjust_next)
if (vma->vm_ops && vma->vm_ops->page_mkwrite)
return 1;
- /* The open routine did something to the protections already? */
+ /*
+ * The open routine did something to the protections that
+ * pgprot_modify() won't preserve?
+ */
if (pgprot_val(vma->vm_page_prot) !=
- pgprot_val(vm_get_page_prot(vm_flags)))
+ pgprot_val(vm_pgprot_modify(vma->vm_page_prot, vm_flags)))
return 0;
+ /*
+ * Do we need to track softdirty? Soft-dirty bits are set via write
+ * faults, so writenotify must stay enabled until VM_SOFTDIRTY is set
+ * on the vma.
+ */
+ if (IS_ENABLED(CONFIG_MEM_SOFT_DIRTY) && !(vm_flags & VM_SOFTDIRTY))
+ return 1;
+
/* Specialty mapping? */
if (vm_flags & VM_PFNMAP)
return 0;
goto free_vma;
}
- if (vma_wants_writenotify(vma)) {
- pgprot_t pprot = vma->vm_page_prot;
-
- /* Can vma->vm_page_prot have changed??
- *
- * Answer: Yes, drivers may have changed it in their
- * f_op->mmap method.
- *
- * Ensures that vmas marked as uncached stay that way.
- */
- vma->vm_page_prot = vm_get_page_prot(vm_flags & ~VM_SHARED);
- if (pgprot_val(pprot) == pgprot_val(pgprot_noncached(pprot)))
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- }
-
vma_link(mm, vma, prev, rb_link, rb_parent);
/* Once vma denies write, undo our temporary denial count */
if (file) {
*/
vma->vm_flags |= VM_SOFTDIRTY;
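+ /*
+ * Derive vm_page_prot from the final vm_flags; protections set by
+ * f_op->mmap (e.g. noncached) are preserved via vm_pgprot_modify().
+ */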
+ vma_set_page_prot(vma);
+
return addr;
unmap_and_free_vma:
* safe. It is only safe to keep the vm_pgoff
* linear if there are no pages mapped yet.
*/
- VM_BUG_ON(faulted_in_anon_vma);
+ VM_BUG_ON_VMA(faulted_in_anon_vma, new_vma);
*vmap = vma = new_vma;
}
*need_rmap_locks = (new_vma->vm_pgoff <= vma->vm_pgoff);
{
int ret;
- ret = percpu_counter_init(&vm_committed_as, 0);
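+ /*
+ * percpu_counter_init() now takes an explicit gfp for its per-cpu
+ * allocation; GFP_KERNEL keeps the previous blocking behaviour.
+ */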
+ ret = percpu_counter_init(&vm_committed_as, 0, GFP_KERNEL);
VM_BUG_ON(ret);
}