[media] videobuf-dma-contig: use gfp_t for GFP flags
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / drivers / media / video / videobuf-dma-contig.c
CommitLineData
2cc45cf2
MD
1/*
2 * helper functions for physically contiguous capture buffers
3 *
4 * The functions support hardware lacking scatter gather support
5 * (i.e. the buffers must be linear in physical memory)
6 *
7 * Copyright (c) 2008 Magnus Damm
8 *
9 * Based on videobuf-vmalloc.c,
10 * (c) 2007 Mauro Carvalho Chehab, <mchehab@infradead.org>
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2
15 */
16
17#include <linux/init.h>
18#include <linux/module.h>
f19ad390 19#include <linux/mm.h>
720b17e7 20#include <linux/pagemap.h>
2cc45cf2 21#include <linux/dma-mapping.h>
f39c1ab3 22#include <linux/sched.h>
5a0e3ad6 23#include <linux/slab.h>
2cc45cf2
MD
24#include <media/videobuf-dma-contig.h>
25
/* Per-buffer private data for the dma-contig allocator. */
struct videobuf_dma_contig_memory {
	u32 magic;		/* MAGIC_DC_MEM, checked before every use */
	void *vaddr;		/* kernel virtual address, NULL if not allocated */
	dma_addr_t dma_handle;	/* bus address handed to the device */
	bool cached;		/* cacheable pages: needs explicit sync ops */
	unsigned long size;	/* size of the allocation/mapping in bytes */
};
33
#define MAGIC_DC_MEM 0x0733ac61

/*
 * Verify a magic field and BUG() on mismatch.  Wrapped in
 * do { } while (0) so the macro expands to a single statement and is
 * safe inside un-braced if/else bodies.
 */
#define MAGIC_CHECK(is, should)						\
	do {								\
		if (unlikely((is) != (should))) {			\
			pr_err("magic mismatch: %x expected %x\n",	\
			       (is), (should));				\
			BUG();						\
		}							\
	} while (0)
40
a8f3c203
FV
41static int __videobuf_dc_alloc(struct device *dev,
42 struct videobuf_dma_contig_memory *mem,
50fbe32c 43 unsigned long size, gfp_t flags)
a8f3c203
FV
44{
45 mem->size = size;
46 if (mem->cached) {
47 mem->vaddr = alloc_pages_exact(mem->size, flags | GFP_DMA);
48 if (mem->vaddr) {
49 int err;
50
51 mem->dma_handle = dma_map_single(dev, mem->vaddr,
52 mem->size,
53 DMA_FROM_DEVICE);
54 err = dma_mapping_error(dev, mem->dma_handle);
55 if (err) {
56 dev_err(dev, "dma_map_single failed\n");
57
58 free_pages_exact(mem->vaddr, mem->size);
59 mem->vaddr = 0;
60 return err;
61 }
62 }
63 } else
64 mem->vaddr = dma_alloc_coherent(dev, mem->size,
65 &mem->dma_handle, flags);
66
67 if (!mem->vaddr) {
68 dev_err(dev, "memory alloc size %ld failed\n", mem->size);
69 return -ENOMEM;
70 }
71
72 dev_dbg(dev, "dma mapped data is at %p (%ld)\n", mem->vaddr, mem->size);
73
74 return 0;
75}
76
77static void __videobuf_dc_free(struct device *dev,
78 struct videobuf_dma_contig_memory *mem)
79{
80 if (mem->cached) {
81 if (!mem->vaddr)
82 return;
83 dma_unmap_single(dev, mem->dma_handle, mem->size,
84 DMA_FROM_DEVICE);
85 free_pages_exact(mem->vaddr, mem->size);
86 } else
87 dma_free_coherent(dev, mem->size, mem->vaddr, mem->dma_handle);
88
89 mem->vaddr = NULL;
90}
91
92static void videobuf_vm_open(struct vm_area_struct *vma)
2cc45cf2
MD
93{
94 struct videobuf_mapping *map = vma->vm_private_data;
95
96 dev_dbg(map->q->dev, "vm_open %p [count=%u,vma=%08lx-%08lx]\n",
97 map, map->count, vma->vm_start, vma->vm_end);
98
99 map->count++;
100}
101
/*
 * VMA close: drop one reference on the mapping.  When the last
 * userspace mapping goes away, cancel any active streaming and free
 * every kernel-allocated buffer that belongs to this map, all under
 * the queue lock.
 */
static void videobuf_vm_close(struct vm_area_struct *vma)
{
	struct videobuf_mapping *map = vma->vm_private_data;
	struct videobuf_queue *q = map->q;
	int i;

	dev_dbg(q->dev, "vm_close %p [count=%u,vma=%08lx-%08lx]\n",
		map, map->count, vma->vm_start, vma->vm_end);

	map->count--;
	if (0 == map->count) {
		struct videobuf_dma_contig_memory *mem;

		dev_dbg(q->dev, "munmap %p q=%p\n", map, q);
		videobuf_queue_lock(q);

		/* We need first to cancel streams, before unmapping */
		if (q->streaming)
			videobuf_queue_cancel(q);

		for (i = 0; i < VIDEO_MAX_FRAME; i++) {
			if (NULL == q->bufs[i])
				continue;

			/* only touch buffers owned by this mapping */
			if (q->bufs[i]->map != map)
				continue;

			mem = q->bufs[i]->priv;
			if (mem) {
				/* This callback is called only if kernel has
				   allocated memory and this memory is mmapped.
				   In this case, memory should be freed,
				   in order to do memory unmap.
				 */

				MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);

				/* vfree is not atomic - can't be
				   called with IRQ's disabled
				 */
				dev_dbg(q->dev, "buf[%d] freeing %p\n",
					i, mem->vaddr);

				__videobuf_dc_free(q->dev, mem);
				mem->vaddr = NULL;
			}

			/* detach the buffer from the now-dead mapping */
			q->bufs[i]->map = NULL;
			q->bufs[i]->baddr = 0;
		}

		kfree(map);

		videobuf_queue_unlock(q);
	}
}
158
/* Refcount userspace mappings: open/close adjust map->count. */
static const struct vm_operations_struct videobuf_vm_ops = {
	.open = videobuf_vm_open,
	.close = videobuf_vm_close,
};
163
720b17e7
MD
/**
 * videobuf_dma_contig_user_put() - reset pointer to user space buffer
 * @mem: per-buffer private videobuf-dma-contig data
 *
 * This function resets the user space pointer
 */
static void videobuf_dma_contig_user_put(struct videobuf_dma_contig_memory *mem)
{
	/* user_get() mapped nothing and allocated nothing, so there is
	 * nothing to unmap or free: just forget the address and size */
	mem->dma_handle = 0;
	mem->size = 0;
}
175
176/**
177 * videobuf_dma_contig_user_get() - setup user space memory pointer
178 * @mem: per-buffer private videobuf-dma-contig data
179 * @vb: video buffer to map
180 *
181 * This function validates and sets up a pointer to user space memory.
182 * Only physically contiguous pfn-mapped memory is accepted.
183 *
184 * Returns 0 if successful.
185 */
186static int videobuf_dma_contig_user_get(struct videobuf_dma_contig_memory *mem,
187 struct videobuf_buffer *vb)
188{
189 struct mm_struct *mm = current->mm;
190 struct vm_area_struct *vma;
191 unsigned long prev_pfn, this_pfn;
192 unsigned long pages_done, user_address;
31bedfa5 193 unsigned int offset;
720b17e7
MD
194 int ret;
195
31bedfa5
MK
196 offset = vb->baddr & ~PAGE_MASK;
197 mem->size = PAGE_ALIGN(vb->size + offset);
720b17e7
MD
198 ret = -EINVAL;
199
200 down_read(&mm->mmap_sem);
201
202 vma = find_vma(mm, vb->baddr);
203 if (!vma)
204 goto out_up;
205
206 if ((vb->baddr + mem->size) > vma->vm_end)
207 goto out_up;
208
209 pages_done = 0;
210 prev_pfn = 0; /* kill warning */
211 user_address = vb->baddr;
212
213 while (pages_done < (mem->size >> PAGE_SHIFT)) {
214 ret = follow_pfn(vma, user_address, &this_pfn);
215 if (ret)
216 break;
217
218 if (pages_done == 0)
31bedfa5 219 mem->dma_handle = (this_pfn << PAGE_SHIFT) + offset;
720b17e7
MD
220 else if (this_pfn != (prev_pfn + 1))
221 ret = -EFAULT;
222
223 if (ret)
224 break;
225
226 prev_pfn = this_pfn;
227 user_address += PAGE_SIZE;
228 pages_done++;
229 }
230
a8f3c203 231out_up:
720b17e7
MD
232 up_read(&current->mm->mmap_sem);
233
234 return ret;
235}
236
a8f3c203 237static struct videobuf_buffer *__videobuf_alloc_vb(size_t size, bool cached)
2cc45cf2
MD
238{
239 struct videobuf_dma_contig_memory *mem;
240 struct videobuf_buffer *vb;
241
242 vb = kzalloc(size + sizeof(*mem), GFP_KERNEL);
243 if (vb) {
a8f3c203
FV
244 vb->priv = ((char *)vb) + size;
245 mem = vb->priv;
2cc45cf2 246 mem->magic = MAGIC_DC_MEM;
a8f3c203 247 mem->cached = cached;
2cc45cf2
MD
248 }
249
250 return vb;
251}
252
a8f3c203
FV
/* alloc_vb hook for the uncached (dma-coherent) queue ops */
static struct videobuf_buffer *__videobuf_alloc_uncached(size_t size)
{
	return __videobuf_alloc_vb(size, false);
}
257
/* alloc_vb hook for the cached queue ops (requires explicit sync) */
static struct videobuf_buffer *__videobuf_alloc_cached(size_t size)
{
	return __videobuf_alloc_vb(size, true);
}
262
/* Return the kernel virtual address of the buffer (NULL if unallocated). */
static void *__videobuf_to_vaddr(struct videobuf_buffer *buf)
{
	struct videobuf_dma_contig_memory *mem = buf->priv;

	BUG_ON(!mem);
	MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);

	return mem->vaddr;
}
272
/*
 * Prepare buffer memory according to the buffer's memory type:
 *
 *   MMAP:    memory must already exist (set up by __videobuf_mmap_mapper());
 *            only validate it here.
 *   USERPTR: with a user address, resolve it to a contiguous physical
 *            region; without one (read() method), allocate kernel memory.
 *   OVERLAY: not supported by this allocator.
 *
 * Returns 0 on success or a negative errno.
 */
static int __videobuf_iolock(struct videobuf_queue *q,
			     struct videobuf_buffer *vb,
			     struct v4l2_framebuffer *fbuf)
{
	struct videobuf_dma_contig_memory *mem = vb->priv;

	BUG_ON(!mem);
	MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);

	switch (vb->memory) {
	case V4L2_MEMORY_MMAP:
		dev_dbg(q->dev, "%s memory method MMAP\n", __func__);

		/* All handling should be done by __videobuf_mmap_mapper() */
		if (!mem->vaddr) {
			dev_err(q->dev, "memory is not alloced/mmapped.\n");
			return -EINVAL;
		}
		break;
	case V4L2_MEMORY_USERPTR:
		dev_dbg(q->dev, "%s memory method USERPTR\n", __func__);

		/* handle pointer from user space */
		if (vb->baddr)
			return videobuf_dma_contig_user_get(mem, vb);

		/* allocate memory for the read() method */
		if (__videobuf_dc_alloc(q->dev, mem, PAGE_ALIGN(vb->size),
					GFP_KERNEL))
			return -ENOMEM;
		break;
	case V4L2_MEMORY_OVERLAY:
	default:
		dev_dbg(q->dev, "%s memory method OVERLAY/unknown\n", __func__);
		return -EINVAL;
	}

	return 0;
}
312
/*
 * Hand ownership of the (cached) buffer back to the CPU so that data
 * written by the device becomes visible.  Only wired into qops_cached;
 * coherent buffers need no sync.
 */
static int __videobuf_sync(struct videobuf_queue *q,
			   struct videobuf_buffer *buf)
{
	struct videobuf_dma_contig_memory *mem = buf->priv;
	BUG_ON(!mem);
	MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);

	dma_sync_single_for_cpu(q->dev, mem->dma_handle, mem->size,
				DMA_FROM_DEVICE);

	return 0;
}
325
2cc45cf2 326static int __videobuf_mmap_mapper(struct videobuf_queue *q,
0b62b737 327 struct videobuf_buffer *buf,
2cc45cf2
MD
328 struct vm_area_struct *vma)
329{
330 struct videobuf_dma_contig_memory *mem;
331 struct videobuf_mapping *map;
2cc45cf2 332 int retval;
0b62b737 333 unsigned long size;
a8f3c203
FV
334 unsigned long pos, start = vma->vm_start;
335 struct page *page;
2cc45cf2
MD
336
337 dev_dbg(q->dev, "%s\n", __func__);
2cc45cf2
MD
338
339 /* create mapping + update buffer list */
340 map = kzalloc(sizeof(struct videobuf_mapping), GFP_KERNEL);
341 if (!map)
342 return -ENOMEM;
343
0b62b737 344 buf->map = map;
2cc45cf2
MD
345 map->q = q;
346
0b62b737 347 buf->baddr = vma->vm_start;
2cc45cf2 348
0b62b737 349 mem = buf->priv;
2cc45cf2
MD
350 BUG_ON(!mem);
351 MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);
352
a8f3c203
FV
353 if (__videobuf_dc_alloc(q->dev, mem, PAGE_ALIGN(buf->bsize),
354 GFP_KERNEL | __GFP_COMP))
2cc45cf2 355 goto error;
2cc45cf2
MD
356
357 /* Try to remap memory */
358
359 size = vma->vm_end - vma->vm_start;
360 size = (size < mem->size) ? size : mem->size;
361
a8f3c203
FV
362 if (!mem->cached)
363 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
364
365 pos = (unsigned long)mem->vaddr;
366
367 while (size > 0) {
368 page = virt_to_page((void *)pos);
369 if (NULL == page) {
370 dev_err(q->dev, "mmap: virt_to_page failed\n");
371 __videobuf_dc_free(q->dev, mem);
372 goto error;
373 }
374 retval = vm_insert_page(vma, start, page);
375 if (retval) {
376 dev_err(q->dev, "mmap: insert failed with error %d\n",
377 retval);
378 __videobuf_dc_free(q->dev, mem);
379 goto error;
380 }
381 start += PAGE_SIZE;
382 pos += PAGE_SIZE;
383
384 if (size > PAGE_SIZE)
385 size -= PAGE_SIZE;
386 else
387 size = 0;
2cc45cf2
MD
388 }
389
a8f3c203
FV
390 vma->vm_ops = &videobuf_vm_ops;
391 vma->vm_flags |= VM_DONTEXPAND;
2cc45cf2
MD
392 vma->vm_private_data = map;
393
394 dev_dbg(q->dev, "mmap %p: q=%p %08lx-%08lx (%lx) pgoff %08lx buf %d\n",
395 map, q, vma->vm_start, vma->vm_end,
a8f3c203 396 (long int)buf->bsize, vma->vm_pgoff, buf->i);
2cc45cf2
MD
397
398 videobuf_vm_open(vma);
399
400 return 0;
401
402error:
403 kfree(map);
404 return -ENOMEM;
405}
406
/* qtype ops for the default, uncached (dma-coherent) allocator */
static struct videobuf_qtype_ops qops = {
	.magic = MAGIC_QTYPE_OPS,
	.alloc_vb = __videobuf_alloc_uncached,
	.iolock = __videobuf_iolock,
	.mmap_mapper = __videobuf_mmap_mapper,
	.vaddr = __videobuf_to_vaddr,
};
2cc45cf2 414
a8f3c203
FV
/* qtype ops for cached buffers; adds a .sync hook for CPU visibility */
static struct videobuf_qtype_ops qops_cached = {
	.magic = MAGIC_QTYPE_OPS,
	.alloc_vb = __videobuf_alloc_cached,
	.iolock = __videobuf_iolock,
	.sync = __videobuf_sync,
	.mmap_mapper = __videobuf_mmap_mapper,
	.vaddr = __videobuf_to_vaddr,
};
423
/**
 * videobuf_queue_dma_contig_init - initialize a queue backed by
 * uncached (dma-coherent) physically contiguous buffers
 *
 * Thin wrapper around videobuf_queue_core_init() selecting &qops.
 */
void videobuf_queue_dma_contig_init(struct videobuf_queue *q,
				    const struct videobuf_queue_ops *ops,
				    struct device *dev,
				    spinlock_t *irqlock,
				    enum v4l2_buf_type type,
				    enum v4l2_field field,
				    unsigned int msize,
				    void *priv,
				    struct mutex *ext_lock)
{
	videobuf_queue_core_init(q, ops, dev, irqlock, type, field, msize,
				 priv, &qops, ext_lock);
}
EXPORT_SYMBOL_GPL(videobuf_queue_dma_contig_init);
438
a8f3c203
FV
/**
 * videobuf_queue_dma_contig_init_cached - initialize a queue backed by
 * cached contiguous buffers (drivers must rely on the .sync op)
 *
 * Thin wrapper around videobuf_queue_core_init() selecting &qops_cached.
 */
void videobuf_queue_dma_contig_init_cached(struct videobuf_queue *q,
					   const struct videobuf_queue_ops *ops,
					   struct device *dev,
					   spinlock_t *irqlock,
					   enum v4l2_buf_type type,
					   enum v4l2_field field,
					   unsigned int msize,
					   void *priv, struct mutex *ext_lock)
{
	videobuf_queue_core_init(q, ops, dev, irqlock, type, field, msize,
				 priv, &qops_cached, ext_lock);
}
EXPORT_SYMBOL_GPL(videobuf_queue_dma_contig_init_cached);
452
2cc45cf2
MD
/**
 * videobuf_to_dma_contig - return the bus/DMA address of a buffer
 * @buf: buffer whose backing memory was set up by this allocator
 */
dma_addr_t videobuf_to_dma_contig(struct videobuf_buffer *buf)
{
	struct videobuf_dma_contig_memory *mem = buf->priv;

	BUG_ON(!mem);
	MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);

	return mem->dma_handle;
}
EXPORT_SYMBOL_GPL(videobuf_to_dma_contig);
463
/**
 * videobuf_dma_contig_free - release USERPTR buffer resources
 * @q: queue the buffer belongs to
 * @buf: buffer to release
 *
 * Frees only memory allocated for the read() method (USERPTR with no
 * user address); mmapped memory is freed by videobuf_vm_close().
 */
void videobuf_dma_contig_free(struct videobuf_queue *q,
			      struct videobuf_buffer *buf)
{
	struct videobuf_dma_contig_memory *mem = buf->priv;

	/* mmapped memory can't be freed here, otherwise mmapped region
	   would be released, while still needed. In this case, the memory
	   release should happen inside videobuf_vm_close().
	   So, it should free memory only if the memory were allocated for
	   read() operation.
	 */
	if (buf->memory != V4L2_MEMORY_USERPTR)
		return;

	if (!mem)
		return;

	MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);

	/* handle user space pointer case */
	if (buf->baddr) {
		videobuf_dma_contig_user_put(mem);
		return;
	}

	/* read() method */
	if (mem->vaddr) {
		__videobuf_dc_free(q->dev, mem);
		mem->vaddr = NULL;
	}
}
EXPORT_SYMBOL_GPL(videobuf_dma_contig_free);
496
497MODULE_DESCRIPTION("helper module to manage video4linux dma contig buffers");
498MODULE_AUTHOR("Magnus Damm");
499MODULE_LICENSE("GPL");