/* drivers/media/video/videobuf2-memops.c */
1 /*
2 * videobuf2-memops.c - generic memory handling routines for videobuf2
3 *
4 * Copyright (C) 2010 Samsung Electronics
5 *
6 * Author: Pawel Osciak <pawel@osciak.com>
7 * Marek Szyprowski <m.szyprowski@samsung.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation.
12 */
13
14 #include <linux/slab.h>
15 #include <linux/module.h>
16 #include <linux/dma-mapping.h>
17 #include <linux/vmalloc.h>
18 #include <linux/mm.h>
19 #include <linux/sched.h>
20 #include <linux/file.h>
21 #include <linux/slab.h>
22
23 #include <media/videobuf2-core.h>
24 #include <media/videobuf2-memops.h>
25
/**
 * vb2_get_vma() - acquire and lock the virtual memory area
 * @vma: given virtual memory area
 *
 * This function attempts to acquire an area mapped in the userspace for
 * the duration of a hardware operation. The area is "locked" by performing
 * the same set of operation that are done when process calls fork() and
 * memory areas are duplicated.
 *
 * Returns a copy of a virtual memory region on success or NULL.
 */
struct vm_area_struct *vb2_get_vma(struct vm_area_struct *vma)
{
	struct vm_area_struct *vma_copy;

	/*
	 * Allocate first, before taking any references, so that a failure
	 * here needs no unwinding of the ->open()/get_file() calls below.
	 */
	vma_copy = kmalloc(sizeof(*vma_copy), GFP_KERNEL);
	if (vma_copy == NULL)
		return NULL;

	/* Mimic fork(): tell the area's owner it gained another user. */
	if (vma->vm_ops && vma->vm_ops->open)
		vma->vm_ops->open(vma);

	/* Pin the backing file so it cannot go away under the copy. */
	if (vma->vm_file)
		get_file(vma->vm_file);

	memcpy(vma_copy, vma, sizeof(*vma));

	/*
	 * The copy is standalone: it belongs to no mm and sits on no vma
	 * list, so clear the links to prevent anyone following them.
	 */
	vma_copy->vm_mm = NULL;
	vma_copy->vm_next = NULL;
	vma_copy->vm_prev = NULL;

	return vma_copy;
}
59
60 /**
61 * vb2_put_userptr() - release a userspace virtual memory area
62 * @vma: virtual memory region associated with the area to be released
63 *
64 * This function releases the previously acquired memory area after a hardware
65 * operation.
66 */
67 void vb2_put_vma(struct vm_area_struct *vma)
68 {
69 if (!vma)
70 return;
71
72 if (vma->vm_file)
73 fput(vma->vm_file);
74
75 if (vma->vm_ops && vma->vm_ops->close)
76 vma->vm_ops->close(vma);
77
78 kfree(vma);
79 }
80 EXPORT_SYMBOL_GPL(vb2_put_vma);
81
/**
 * vb2_get_contig_userptr() - lock physically contiguous userspace mapped memory
 * @vaddr: starting virtual address of the area to be verified
 * @size: size of the area
 * @res_vma: will return locked copy of struct vm_area for the given area
 * @res_pa: will return physical address for the given vaddr
 *
 * This function will go through memory area of size @size mapped at @vaddr and
 * verify that the underlying physical pages are contiguous. If they are
 * contiguous the virtual memory area is locked and a @res_vma is filled with
 * the copy and @res_pa set to the physical address of the buffer.
 *
 * Returns 0 on success.
 */
int vb2_get_contig_userptr(unsigned long vaddr, unsigned long size,
			   struct vm_area_struct **res_vma, dma_addr_t *res_pa)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long offset, start, end;
	unsigned long this_pfn, prev_pfn;
	dma_addr_t pa = 0;
	int ret = -EFAULT;

	/* Remember the sub-page offset; the pfn walk below is page granular. */
	start = vaddr;
	offset = start & ~PAGE_MASK;
	end = start + size;

	/* find_vma()/follow_pfn() require mmap_sem held for reading. */
	down_read(&mm->mmap_sem);
	vma = find_vma(mm, start);

	/* The whole [start, end) range must lie within a single vma. */
	if (vma == NULL || vma->vm_end < end)
		goto done;

	for (prev_pfn = 0; start < end; start += PAGE_SIZE) {
		ret = follow_pfn(vma, start, &this_pfn);
		if (ret)
			goto done;

		/*
		 * prev_pfn == 0 marks the first iteration (pfn 0 is
		 * assumed never to back a user mapping): record the base
		 * physical address there; on later iterations require
		 * each page to immediately follow the previous one.
		 */
		if (prev_pfn == 0)
			pa = this_pfn << PAGE_SHIFT;
		else if (this_pfn != prev_pfn + 1) {
			ret = -EFAULT;
			goto done;
		}
		prev_pfn = this_pfn;
	}

	/*
	 * Memory is contiguous, lock vma and return to the caller
	 */
	*res_vma = vb2_get_vma(vma);
	if (*res_vma == NULL) {
		ret = -ENOMEM;
		goto done;
	}
	*res_pa = pa + offset;
	ret = 0;

done:
	up_read(&mm->mmap_sem);
	return ret;
}
EXPORT_SYMBOL_GPL(vb2_get_contig_userptr);
146
147 /**
148 * vb2_mmap_pfn_range() - map physical pages to userspace
149 * @vma: virtual memory region for the mapping
150 * @paddr: starting physical address of the memory to be mapped
151 * @size: size of the memory to be mapped
152 * @vm_ops: vm operations to be assigned to the created area
153 * @priv: private data to be associated with the area
154 *
155 * Returns 0 on success.
156 */
157 int vb2_mmap_pfn_range(struct vm_area_struct *vma, unsigned long paddr,
158 unsigned long size,
159 const struct vm_operations_struct *vm_ops,
160 void *priv)
161 {
162 int ret;
163
164 size = min_t(unsigned long, vma->vm_end - vma->vm_start, size);
165
166 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
167 ret = remap_pfn_range(vma, vma->vm_start, paddr >> PAGE_SHIFT,
168 size, vma->vm_page_prot);
169 if (ret) {
170 printk(KERN_ERR "Remapping memory failed, error: %d\n", ret);
171 return ret;
172 }
173
174 vma->vm_flags |= VM_DONTEXPAND | VM_RESERVED;
175 vma->vm_private_data = priv;
176 vma->vm_ops = vm_ops;
177
178 vma->vm_ops->open(vma);
179
180 printk(KERN_DEBUG "%s: mapped paddr 0x%08lx at 0x%08lx, size %ld\n",
181 __func__, paddr, vma->vm_start, size);
182
183 return 0;
184 }
185 EXPORT_SYMBOL_GPL(vb2_mmap_pfn_range);
186
187 /**
188 * vb2_common_vm_open() - increase refcount of the vma
189 * @vma: virtual memory region for the mapping
190 *
191 * This function adds another user to the provided vma. It expects
192 * struct vb2_vmarea_handler pointer in vma->vm_private_data.
193 */
194 static void vb2_common_vm_open(struct vm_area_struct *vma)
195 {
196 struct vb2_vmarea_handler *h = vma->vm_private_data;
197
198 printk(KERN_DEBUG "%s: %p, refcount: %d, vma: %08lx-%08lx\n",
199 __func__, h, atomic_read(h->refcount), vma->vm_start,
200 vma->vm_end);
201
202 atomic_inc(h->refcount);
203 }
204
205 /**
206 * vb2_common_vm_close() - decrease refcount of the vma
207 * @vma: virtual memory region for the mapping
208 *
209 * This function releases the user from the provided vma. It expects
210 * struct vb2_vmarea_handler pointer in vma->vm_private_data.
211 */
212 static void vb2_common_vm_close(struct vm_area_struct *vma)
213 {
214 struct vb2_vmarea_handler *h = vma->vm_private_data;
215
216 printk(KERN_DEBUG "%s: %p, refcount: %d, vma: %08lx-%08lx\n",
217 __func__, h, atomic_read(h->refcount), vma->vm_start,
218 vma->vm_end);
219
220 h->put(h->arg);
221 }
222
/**
 * vb2_common_vm_ops - common vm_ops used for tracking refcount of mmapped
 * video buffers
 *
 * ->open and ->close pair up so every duplication of the mapping (fork,
 * vb2_get_vma) and every teardown adjusts the handler's refcount.
 */
const struct vm_operations_struct vb2_common_vm_ops = {
	.open = vb2_common_vm_open,
	.close = vb2_common_vm_close,
};
EXPORT_SYMBOL_GPL(vb2_common_vm_ops);
232
233 MODULE_DESCRIPTION("common memory handling routines for videobuf2");
234 MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
235 MODULE_LICENSE("GPL");