Currently vb2 core acquires mmap_sem just around call to
__qbuf_userptr(). However since commit
f035eb4e976ef5 (videobuf2: fix
lockdep warning) it isn't necessary to acquire it so early as we no
longer have to drop queue mutex before acquiring mmap_sem. So push
acquisition of mmap_sem down into .get_userptr and .put_userptr memops
so that the semaphore is acquired for a shorter time and it is clearer
what it is needed for.
Signed-off-by: Jan Kara <jack@suse.cz>
Signed-off-by: Hans Verkuil <hans.verkuil@cisco.com>
Signed-off-by: Mauro Carvalho Chehab <mchehab@osg.samsung.com>
ret = __qbuf_mmap(vb, b);
break;
case V4L2_MEMORY_USERPTR:
- down_read(&current->mm->mmap_sem);
ret = __qbuf_userptr(vb, b);
- up_read(&current->mm->mmap_sem);
break;
case V4L2_MEMORY_DMABUF:
ret = __qbuf_dmabuf(vb, b);
sg_free_table(sgt);
kfree(sgt);
}
+ down_read(&current->mm->mmap_sem);
vb2_put_vma(buf->vma);
+ up_read(&current->mm->mmap_sem);
kfree(buf);
}
goto fail_buf;
}
+ down_read(&current->mm->mmap_sem);
/* current->mm->mmap_sem is taken by videobuf2 core */
vma = find_vma(current->mm, vaddr);
if (!vma) {
if (ret) {
unsigned long pfn;
if (vb2_dc_get_user_pfn(start, n_pages, vma, &pfn) == 0) {
+ up_read(&current->mm->mmap_sem);
buf->dma_addr = vb2_dc_pfn_to_dma(buf->dev, pfn);
buf->size = size;
kfree(pages);
pr_err("failed to get user pages\n");
goto fail_vma;
}
+ up_read(&current->mm->mmap_sem);
sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
if (!sgt) {
while (n_pages)
put_page(pages[--n_pages]);
+ down_read(&current->mm->mmap_sem);
fail_vma:
vb2_put_vma(buf->vma);
fail_pages:
+ up_read(&current->mm->mmap_sem);
kfree(pages); /* kfree is NULL-proof */
fail_buf:
if (!buf->pages)
goto userptr_fail_alloc_pages;
+ down_read(&current->mm->mmap_sem);
vma = find_vma(current->mm, vaddr);
if (!vma) {
dprintk(1, "no vma for address %lu\n", vaddr);
1, /* force */
buf->pages,
NULL);
+ up_read(&current->mm->mmap_sem);
if (num_pages_from_user != buf->num_pages)
goto userptr_fail_get_user_pages;
if (!vma_is_io(buf->vma))
while (--num_pages_from_user >= 0)
put_page(buf->pages[num_pages_from_user]);
+ down_read(&current->mm->mmap_sem);
vb2_put_vma(buf->vma);
userptr_fail_find_vma:
+ up_read(&current->mm->mmap_sem);
kfree(buf->pages);
userptr_fail_alloc_pages:
kfree(buf);
put_page(buf->pages[i]);
}
kfree(buf->pages);
+ down_read(&current->mm->mmap_sem);
vb2_put_vma(buf->vma);
+ up_read(&current->mm->mmap_sem);
kfree(buf);
}
offset = vaddr & ~PAGE_MASK;
buf->size = size;
-
+ down_read(&current->mm->mmap_sem);
vma = find_vma(current->mm, vaddr);
if (vma && (vma->vm_flags & VM_PFNMAP) && (vma->vm_pgoff)) {
if (vb2_get_contig_userptr(vaddr, size, &vma, &physp))
if (!buf->vaddr)
goto fail_get_user_pages;
}
+ up_read(&current->mm->mmap_sem);
buf->vaddr += offset;
return buf;
kfree(buf->pages);
fail_pages_array_alloc:
+ up_read(&current->mm->mmap_sem);
kfree(buf);
return NULL;
unsigned long vaddr = (unsigned long)buf->vaddr & PAGE_MASK;
unsigned int i;
+ down_read(&current->mm->mmap_sem);
if (buf->pages) {
if (vaddr)
vm_unmap_ram((void *)vaddr, buf->n_pages);
vb2_put_vma(buf->vma);
iounmap((__force void __iomem *)buf->vaddr);
}
+ up_read(&current->mm->mmap_sem);
kfree(buf);
}