 	return error;
 }
+/*
+ * Walk the vma list and sum the size of the mlocked vma pages.
+ * Note that the deferred memory locking case (mlock2() with
+ * MLOCK_ONFAULT) is counted as well.
+ *
+ * Return value: the number of previously mlocked pages.
+ */
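+/*
+ * Illustrative example, assuming 4K pages: with a single mlocked
+ * vma covering [0x1000, 0x5000), a request of start = 0x2000 and
+ * len = 0x4000 overlaps it over [0x2000, 0x5000), i.e. 0x3000
+ * bytes, so 3 pages are reported.
+ */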
+static unsigned long count_mm_mlocked_page_nr(struct mm_struct *mm,
+		unsigned long start, size_t len)
+{
+	struct vm_area_struct *vma;
+	unsigned long count = 0;	/* byte count; unsigned long avoids int overflow */
+
+	if (mm == NULL)
+		mm = current->mm;
+
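+	/*
+	 * find_vma() returns the first vma whose vm_end is greater than
+	 * start; if the range lies beyond every vma, walk from the list
+	 * head and let the range checks below end the scan.
+	 */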
+	vma = find_vma(mm, start);
+	if (vma == NULL)
+		vma = mm->mmap;
+
+	for (; vma ; vma = vma->vm_next) {
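+		/* vmas are sorted: skip those below start, stop past the range */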
+		if (start >= vma->vm_end)
+			continue;
+		if (start + len <= vma->vm_start)
+			break;
+		if (vma->vm_flags & VM_LOCKED) {
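+			/* trim any part of the first vma that lies below start */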
+			if (start > vma->vm_start)
+				count -= (start - vma->vm_start);
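+			/* range ends inside this vma: count up to start + len */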
+			if (start + len < vma->vm_end) {
+				count += start + len - vma->vm_start;
+				break;
+			}
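+			/* otherwise this vma is covered through to its end */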
+			count += vma->vm_end - vma->vm_start;
+		}
+	}
+
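+	/* count was accumulated in bytes; convert to pages */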
+	return count >> PAGE_SHIFT;
+}
+
 static __must_check int do_mlock(unsigned long start, size_t len, vm_flags_t flags)
 {
 	unsigned long locked;
@@ ... @@
 		return -EINTR;
 
 	locked += current->mm->locked_vm;
+	if (locked > lock_limit && !capable(CAP_IPC_LOCK)) {
+		/*
+		 * The requested region may intersect with previously
+		 * mlocked areas; that part is already accounted for in
+		 * "mm->locked_vm" and must not be counted again toward
+		 * the new mlock total, so adjust the locked count
+		 * accordingly.
+		 */
+		locked -= count_mm_mlocked_page_nr(current->mm,
+				start, len);
+	}
 
 	/* check against resource limits */
 	if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))