1 /*
2 * ispvideo.c
3 *
4 * TI OMAP3 ISP - Generic video node
5 *
6 * Copyright (C) 2009-2010 Nokia Corporation
7 *
8 * Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
9 * Sakari Ailus <sakari.ailus@iki.fi>
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License version 2 as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
23 * 02110-1301 USA
24 */
25
26 #include <asm/cacheflush.h>
27 #include <linux/clk.h>
28 #include <linux/mm.h>
29 #include <linux/module.h>
30 #include <linux/omap-iommu.h>
31 #include <linux/pagemap.h>
32 #include <linux/scatterlist.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <linux/vmalloc.h>
36 #include <media/v4l2-dev.h>
37 #include <media/v4l2-ioctl.h>
38 #include <plat/iommu.h>
39 #include <plat/omap-pm.h>
40
41 #include "ispvideo.h"
42 #include "isp.h"
43
44
45 /* -----------------------------------------------------------------------------
46 * Helper functions
47 */
48
49 /*
50 * NOTE: When adding new media bus codes, always remember to add
51 * corresponding in-memory formats to the table below!!!
52 */
53 static struct isp_format_info formats[] = {
54 { V4L2_MBUS_FMT_Y8_1X8, V4L2_MBUS_FMT_Y8_1X8,
55 V4L2_MBUS_FMT_Y8_1X8, V4L2_MBUS_FMT_Y8_1X8,
56 V4L2_PIX_FMT_GREY, 8, 1, },
57 { V4L2_MBUS_FMT_Y10_1X10, V4L2_MBUS_FMT_Y10_1X10,
58 V4L2_MBUS_FMT_Y10_1X10, V4L2_MBUS_FMT_Y8_1X8,
59 V4L2_PIX_FMT_Y10, 10, 2, },
60 { V4L2_MBUS_FMT_Y12_1X12, V4L2_MBUS_FMT_Y10_1X10,
61 V4L2_MBUS_FMT_Y12_1X12, V4L2_MBUS_FMT_Y8_1X8,
62 V4L2_PIX_FMT_Y12, 12, 2, },
63 { V4L2_MBUS_FMT_SBGGR8_1X8, V4L2_MBUS_FMT_SBGGR8_1X8,
64 V4L2_MBUS_FMT_SBGGR8_1X8, V4L2_MBUS_FMT_SBGGR8_1X8,
65 V4L2_PIX_FMT_SBGGR8, 8, 1, },
66 { V4L2_MBUS_FMT_SGBRG8_1X8, V4L2_MBUS_FMT_SGBRG8_1X8,
67 V4L2_MBUS_FMT_SGBRG8_1X8, V4L2_MBUS_FMT_SGBRG8_1X8,
68 V4L2_PIX_FMT_SGBRG8, 8, 1, },
69 { V4L2_MBUS_FMT_SGRBG8_1X8, V4L2_MBUS_FMT_SGRBG8_1X8,
70 V4L2_MBUS_FMT_SGRBG8_1X8, V4L2_MBUS_FMT_SGRBG8_1X8,
71 V4L2_PIX_FMT_SGRBG8, 8, 1, },
72 { V4L2_MBUS_FMT_SRGGB8_1X8, V4L2_MBUS_FMT_SRGGB8_1X8,
73 V4L2_MBUS_FMT_SRGGB8_1X8, V4L2_MBUS_FMT_SRGGB8_1X8,
74 V4L2_PIX_FMT_SRGGB8, 8, 1, },
75 { V4L2_MBUS_FMT_SBGGR10_DPCM8_1X8, V4L2_MBUS_FMT_SBGGR10_DPCM8_1X8,
76 V4L2_MBUS_FMT_SBGGR10_1X10, 0,
77 V4L2_PIX_FMT_SBGGR10DPCM8, 8, 1, },
78 { V4L2_MBUS_FMT_SGBRG10_DPCM8_1X8, V4L2_MBUS_FMT_SGBRG10_DPCM8_1X8,
79 V4L2_MBUS_FMT_SGBRG10_1X10, 0,
80 V4L2_PIX_FMT_SGBRG10DPCM8, 8, 1, },
81 { V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8, V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8,
82 V4L2_MBUS_FMT_SGRBG10_1X10, 0,
83 V4L2_PIX_FMT_SGRBG10DPCM8, 8, 1, },
84 { V4L2_MBUS_FMT_SRGGB10_DPCM8_1X8, V4L2_MBUS_FMT_SRGGB10_DPCM8_1X8,
85 V4L2_MBUS_FMT_SRGGB10_1X10, 0,
86 V4L2_PIX_FMT_SRGGB10DPCM8, 8, 1, },
87 { V4L2_MBUS_FMT_SBGGR10_1X10, V4L2_MBUS_FMT_SBGGR10_1X10,
88 V4L2_MBUS_FMT_SBGGR10_1X10, V4L2_MBUS_FMT_SBGGR8_1X8,
89 V4L2_PIX_FMT_SBGGR10, 10, 2, },
90 { V4L2_MBUS_FMT_SGBRG10_1X10, V4L2_MBUS_FMT_SGBRG10_1X10,
91 V4L2_MBUS_FMT_SGBRG10_1X10, V4L2_MBUS_FMT_SGBRG8_1X8,
92 V4L2_PIX_FMT_SGBRG10, 10, 2, },
93 { V4L2_MBUS_FMT_SGRBG10_1X10, V4L2_MBUS_FMT_SGRBG10_1X10,
94 V4L2_MBUS_FMT_SGRBG10_1X10, V4L2_MBUS_FMT_SGRBG8_1X8,
95 V4L2_PIX_FMT_SGRBG10, 10, 2, },
96 { V4L2_MBUS_FMT_SRGGB10_1X10, V4L2_MBUS_FMT_SRGGB10_1X10,
97 V4L2_MBUS_FMT_SRGGB10_1X10, V4L2_MBUS_FMT_SRGGB8_1X8,
98 V4L2_PIX_FMT_SRGGB10, 10, 2, },
99 { V4L2_MBUS_FMT_SBGGR12_1X12, V4L2_MBUS_FMT_SBGGR10_1X10,
100 V4L2_MBUS_FMT_SBGGR12_1X12, V4L2_MBUS_FMT_SBGGR8_1X8,
101 V4L2_PIX_FMT_SBGGR12, 12, 2, },
102 { V4L2_MBUS_FMT_SGBRG12_1X12, V4L2_MBUS_FMT_SGBRG10_1X10,
103 V4L2_MBUS_FMT_SGBRG12_1X12, V4L2_MBUS_FMT_SGBRG8_1X8,
104 V4L2_PIX_FMT_SGBRG12, 12, 2, },
105 { V4L2_MBUS_FMT_SGRBG12_1X12, V4L2_MBUS_FMT_SGRBG10_1X10,
106 V4L2_MBUS_FMT_SGRBG12_1X12, V4L2_MBUS_FMT_SGRBG8_1X8,
107 V4L2_PIX_FMT_SGRBG12, 12, 2, },
108 { V4L2_MBUS_FMT_SRGGB12_1X12, V4L2_MBUS_FMT_SRGGB10_1X10,
109 V4L2_MBUS_FMT_SRGGB12_1X12, V4L2_MBUS_FMT_SRGGB8_1X8,
110 V4L2_PIX_FMT_SRGGB12, 12, 2, },
111 { V4L2_MBUS_FMT_UYVY8_1X16, V4L2_MBUS_FMT_UYVY8_1X16,
112 V4L2_MBUS_FMT_UYVY8_1X16, 0,
113 V4L2_PIX_FMT_UYVY, 16, 2, },
114 { V4L2_MBUS_FMT_YUYV8_1X16, V4L2_MBUS_FMT_YUYV8_1X16,
115 V4L2_MBUS_FMT_YUYV8_1X16, 0,
116 V4L2_PIX_FMT_YUYV, 16, 2, },
117 { V4L2_MBUS_FMT_UYVY8_2X8, V4L2_MBUS_FMT_UYVY8_2X8,
118 V4L2_MBUS_FMT_UYVY8_2X8, 0,
119 V4L2_PIX_FMT_UYVY, 8, 2, },
120 { V4L2_MBUS_FMT_YUYV8_2X8, V4L2_MBUS_FMT_YUYV8_2X8,
121 V4L2_MBUS_FMT_YUYV8_2X8, 0,
122 V4L2_PIX_FMT_YUYV, 8, 2, },
123 /* Empty entry to catch the unsupported pixel code (0) used by the CCDC
124 * module and avoid NULL pointer dereferences.
125 */
126 { 0, }
127 };
128
129 const struct isp_format_info *
130 omap3isp_video_format_info(enum v4l2_mbus_pixelcode code)
131 {
132 unsigned int i;
133
134 for (i = 0; i < ARRAY_SIZE(formats); ++i) {
135 if (formats[i].code == code)
136 return &formats[i];
137 }
138
139 return NULL;
140 }
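/*
 * Illustrative sketch (not part of the driver): callers look up the in-memory
 * format matching a media bus code and derive line sizes from its bpp field,
 * as isp_video_mbus_to_pix() does below. The bus code and the 'width'
 * variable here are hypothetical.
 *
 *	const struct isp_format_info *info;
 *	unsigned int min_bpl;
 *
 *	info = omap3isp_video_format_info(V4L2_MBUS_FMT_SGRBG10_1X10);
 *	if (info == NULL)
 *		return -EINVAL;
 *
 *	min_bpl = width * info->bpp;	(info->bpp is 2 for 10-bit Bayer)
 */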
141
142 /*
143 * isp_video_mbus_to_pix - Convert v4l2_mbus_framefmt to v4l2_pix_format
144 * @video: ISP video instance
145 * @mbus: v4l2_mbus_framefmt format (input)
146 * @pix: v4l2_pix_format format (output)
147 *
148 * Fill the output pix structure with information from the input mbus format.
149 * The bytesperline and sizeimage fields are computed from the requested bytes
150 * per line value in the pix format and information from the video instance.
151 *
152 * Return the number of padding bytes at end of line.
153 */
154 static unsigned int isp_video_mbus_to_pix(const struct isp_video *video,
155 const struct v4l2_mbus_framefmt *mbus,
156 struct v4l2_pix_format *pix)
157 {
158 unsigned int bpl = pix->bytesperline;
159 unsigned int min_bpl;
160 unsigned int i;
161
162 memset(pix, 0, sizeof(*pix));
163 pix->width = mbus->width;
164 pix->height = mbus->height;
165
166 for (i = 0; i < ARRAY_SIZE(formats); ++i) {
167 if (formats[i].code == mbus->code)
168 break;
169 }
170
171 if (WARN_ON(i == ARRAY_SIZE(formats)))
172 return 0;
173
174 min_bpl = pix->width * formats[i].bpp;
175
176 /* Clamp the requested bytes per line value. If the maximum bytes per
177 * line value is zero, the module doesn't support user configurable line
178 * sizes. Override the requested value with the minimum in that case.
179 */
180 if (video->bpl_max)
181 bpl = clamp(bpl, min_bpl, video->bpl_max);
182 else
183 bpl = min_bpl;
184
185 if (!video->bpl_zero_padding || bpl != min_bpl)
186 bpl = ALIGN(bpl, video->bpl_alignment);
187
188 pix->pixelformat = formats[i].pixelformat;
189 pix->bytesperline = bpl;
190 pix->sizeimage = pix->bytesperline * pix->height;
191 pix->colorspace = mbus->colorspace;
192 pix->field = mbus->field;
193
194 return bpl - min_bpl;
195 }
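/*
 * Worked example for the computation above (illustrative only, the bpl_max
 * and bpl_alignment values are hypothetical): for a 1280x720 SGRBG10 frame
 * (bpp = 2) the minimum bytes per line is 1280 * 2 = 2560. If userspace
 * requests bytesperline = 2570 on a video node with bpl_max = 0x10000 and
 * bpl_alignment = 32, the value is clamped to [2560, 0x10000], giving 2570,
 * and then aligned up to 2592. The function sets sizeimage = 2592 * 720 and
 * returns 2592 - 2560 = 32 bytes of padding per line.
 */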
196
197 static void isp_video_pix_to_mbus(const struct v4l2_pix_format *pix,
198 struct v4l2_mbus_framefmt *mbus)
199 {
200 unsigned int i;
201
202 memset(mbus, 0, sizeof(*mbus));
203 mbus->width = pix->width;
204 mbus->height = pix->height;
205
206 /* Skip the last format in the loop so that it will be selected if no
207 * match is found.
208 */
209 for (i = 0; i < ARRAY_SIZE(formats) - 1; ++i) {
210 if (formats[i].pixelformat == pix->pixelformat)
211 break;
212 }
213
214 mbus->code = formats[i].code;
215 mbus->colorspace = pix->colorspace;
216 mbus->field = pix->field;
217 }
218
219 static struct v4l2_subdev *
220 isp_video_remote_subdev(struct isp_video *video, u32 *pad)
221 {
222 struct media_pad *remote;
223
224 remote = media_entity_remote_source(&video->pad);
225
226 if (remote == NULL ||
227 media_entity_type(remote->entity) != MEDIA_ENT_T_V4L2_SUBDEV)
228 return NULL;
229
230 if (pad)
231 *pad = remote->index;
232
233 return media_entity_to_v4l2_subdev(remote->entity);
234 }
235
236 /* Walk the media graph to fill the pipeline entities mask and find the ISP video instance at the far end of the pipeline. */
237 static int isp_video_get_graph_data(struct isp_video *video,
238 struct isp_pipeline *pipe)
239 {
240 struct media_entity_graph graph;
241 struct media_entity *entity = &video->video.entity;
242 struct media_device *mdev = entity->parent;
243 struct isp_video *far_end = NULL;
244
245 mutex_lock(&mdev->graph_mutex);
246 media_entity_graph_walk_start(&graph, entity);
247
248 while ((entity = media_entity_graph_walk_next(&graph))) {
249 struct isp_video *__video;
250
251 pipe->entities |= 1 << entity->id;
252
253 if (far_end != NULL)
254 continue;
255
256 if (entity == &video->video.entity)
257 continue;
258
259 if (media_entity_type(entity) != MEDIA_ENT_T_DEVNODE)
260 continue;
261
262 __video = to_isp_video(media_entity_to_video_device(entity));
263 if (__video->type != video->type)
264 far_end = __video;
265 }
266
267 mutex_unlock(&mdev->graph_mutex);
268
269 if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
270 pipe->input = far_end;
271 pipe->output = video;
272 } else {
273 if (far_end == NULL)
274 return -EPIPE;
275
276 pipe->input = video;
277 pipe->output = far_end;
278 }
279
280 return 0;
281 }
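/*
 * Illustrative sketch (not part of the driver): after the walk above,
 * pipe->entities is a bitmask indexed by media entity ID, so a caller can
 * test whether a given module is part of the pipeline, e.g.
 *
 *	bool has_ccdc = pipe->entities & (1 << isp->isp_ccdc.subdev.entity.id);
 *
 * This is the same test used by isp_video_check_external_subdevs() later in
 * this file.
 */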
282
283 /*
284 * Validate a pipeline by walking it from the output video node towards
285 * its video source.
286 *
287 * Update the pipeline maximum pixel rate with the rate limits reported by
288 * the blocks in the pipeline (currently only the resizer reports a limit).
289 *
290 * Return 0 on success, or -EPIPE if the pipeline doesn't start with a
291 * video source (either a subdev with no input pad, or a non-subdev
292 * entity).
293 */
294 static int isp_video_validate_pipeline(struct isp_pipeline *pipe)
295 {
296 struct isp_device *isp = pipe->output->isp;
297 struct media_pad *pad;
298 struct v4l2_subdev *subdev;
299
300 subdev = isp_video_remote_subdev(pipe->output, NULL);
301 if (subdev == NULL)
302 return -EPIPE;
303
304 while (1) {
305 /* Stop when the subdev's first pad isn't a sink pad. */
306 pad = &subdev->entity.pads[0];
307 if (!(pad->flags & MEDIA_PAD_FL_SINK))
308 break;
309
310 /* Update the maximum frame rate */
311 if (subdev == &isp->isp_res.subdev)
312 omap3isp_resizer_max_rate(&isp->isp_res,
313 &pipe->max_rate);
314
315 /* Retrieve the remote source pad. Return an error if no source
316 * entity can be found, and stop walking the pipeline if the
317 * source entity isn't a subdev.
318 */
319 pad = media_entity_remote_source(pad);
320 if (pad == NULL)
321 return -EPIPE;
322
323 if (media_entity_type(pad->entity) != MEDIA_ENT_T_V4L2_SUBDEV)
324 break;
325
326 subdev = media_entity_to_v4l2_subdev(pad->entity);
327 }
328
329 return 0;
330 }
331
332 static int
333 __isp_video_get_format(struct isp_video *video, struct v4l2_format *format)
334 {
335 struct v4l2_subdev_format fmt;
336 struct v4l2_subdev *subdev;
337 u32 pad;
338 int ret;
339
340 subdev = isp_video_remote_subdev(video, &pad);
341 if (subdev == NULL)
342 return -EINVAL;
343
344 mutex_lock(&video->mutex);
345
346 fmt.pad = pad;
347 fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
348 ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
349 if (ret == -ENOIOCTLCMD)
350 ret = -EINVAL;
351
352 mutex_unlock(&video->mutex);
353
354 if (ret)
355 return ret;
356
357 format->type = video->type;
358 return isp_video_mbus_to_pix(video, &fmt.format, &format->fmt.pix);
359 }
360
361 static int
362 isp_video_check_format(struct isp_video *video, struct isp_video_fh *vfh)
363 {
364 struct v4l2_format format;
365 int ret;
366
367 memcpy(&format, &vfh->format, sizeof(format));
368 ret = __isp_video_get_format(video, &format);
369 if (ret < 0)
370 return ret;
371
372 if (vfh->format.fmt.pix.pixelformat != format.fmt.pix.pixelformat ||
373 vfh->format.fmt.pix.height != format.fmt.pix.height ||
374 vfh->format.fmt.pix.width != format.fmt.pix.width ||
375 vfh->format.fmt.pix.bytesperline != format.fmt.pix.bytesperline ||
376 vfh->format.fmt.pix.sizeimage != format.fmt.pix.sizeimage)
377 return -EINVAL;
378
379 return ret;
380 }
381
382 /* -----------------------------------------------------------------------------
383 * IOMMU management
384 */
385
386 #define IOMMU_FLAG (IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_8)
387
388 /*
389 * ispmmu_vmap - Wrapper for Virtual memory mapping of a scatter gather list
390 * @isp: Pointer to the OMAP3 ISP device.
391 * @sglist: Pointer to the source scatter-gather list to map.
392 * @sglen: Number of elements in the scatter-gather list.
393 *
394 * Return the device address at which the list has been mapped by the ISP MMU,
395 * or -ENOMEM if we ran out of memory.
396 */
397 static dma_addr_t
398 ispmmu_vmap(struct isp_device *isp, const struct scatterlist *sglist, int sglen)
399 {
400 struct sg_table *sgt;
401 u32 da;
402
403 sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
404 if (sgt == NULL)
405 return -ENOMEM;
406
407 sgt->sgl = (struct scatterlist *)sglist;
408 sgt->nents = sglen;
409 sgt->orig_nents = sglen;
410
411 da = omap_iommu_vmap(isp->domain, isp->dev, 0, sgt, IOMMU_FLAG);
412 if (IS_ERR_VALUE(da))
413 kfree(sgt);
414
415 return da;
416 }
417
418 /*
419 * ispmmu_vunmap - Unmap a device address from the ISP MMU
420 * @isp: Pointer to the OMAP3 ISP device.
421 * @da: Device address generated by an ispmmu_vmap call.
422 */
423 static void ispmmu_vunmap(struct isp_device *isp, dma_addr_t da)
424 {
425 struct sg_table *sgt;
426
427 sgt = omap_iommu_vunmap(isp->domain, isp->dev, (u32)da);
428 kfree(sgt);
429 }
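/*
 * Typical usage of the two wrappers above (illustrative sketch, mirroring the
 * buffer prepare and cleanup handlers below): map the buffer's scatterlist
 * into the ISP address space, check the returned address, and unmap it when
 * the buffer is released.
 *
 *	dma_addr_t da;
 *
 *	da = ispmmu_vmap(isp, buf->sglist, buf->sglen);
 *	if (IS_ERR_VALUE(da))
 *		return -EIO;
 *
 *	(use da as the ISP-visible buffer address while the buffer is queued)
 *
 *	ispmmu_vunmap(isp, da);
 */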
430
431 /* -----------------------------------------------------------------------------
432 * Video queue operations
433 */
434
435 static void isp_video_queue_prepare(struct isp_video_queue *queue,
436 unsigned int *nbuffers, unsigned int *size)
437 {
438 struct isp_video_fh *vfh =
439 container_of(queue, struct isp_video_fh, queue);
440 struct isp_video *video = vfh->video;
441
442 *size = vfh->format.fmt.pix.sizeimage;
443 if (*size == 0)
444 return;
445
446 *nbuffers = min(*nbuffers, video->capture_mem / PAGE_ALIGN(*size));
447 }
448
449 static void isp_video_buffer_cleanup(struct isp_video_buffer *buf)
450 {
451 struct isp_video_fh *vfh = isp_video_queue_to_isp_video_fh(buf->queue);
452 struct isp_buffer *buffer = to_isp_buffer(buf);
453 struct isp_video *video = vfh->video;
454
455 if (buffer->isp_addr) {
456 ispmmu_vunmap(video->isp, buffer->isp_addr);
457 buffer->isp_addr = 0;
458 }
459 }
460
461 static int isp_video_buffer_prepare(struct isp_video_buffer *buf)
462 {
463 struct isp_video_fh *vfh = isp_video_queue_to_isp_video_fh(buf->queue);
464 struct isp_buffer *buffer = to_isp_buffer(buf);
465 struct isp_video *video = vfh->video;
466 unsigned long addr;
467
468 addr = ispmmu_vmap(video->isp, buf->sglist, buf->sglen);
469 if (IS_ERR_VALUE(addr))
470 return -EIO;
471
472 if (!IS_ALIGNED(addr, 32)) {
473 dev_dbg(video->isp->dev, "Buffer address must be "
474 "aligned to a 32 byte boundary.\n");
475 ispmmu_vunmap(video->isp, buffer->isp_addr);
476 return -EINVAL;
477 }
478
479 buf->vbuf.bytesused = vfh->format.fmt.pix.sizeimage;
480 buffer->isp_addr = addr;
481 return 0;
482 }
483
484 /*
485 * isp_video_buffer_queue - Add buffer to streaming queue
486 * @buf: Video buffer
487 *
488 * In memory-to-memory mode, start streaming on the pipeline if buffers are
489 * queued on both the input and the output and the pipeline isn't already busy.
490 * If the pipeline is busy, it will be restarted in the output module interrupt
491 * handler.
492 */
493 static void isp_video_buffer_queue(struct isp_video_buffer *buf)
494 {
495 struct isp_video_fh *vfh = isp_video_queue_to_isp_video_fh(buf->queue);
496 struct isp_buffer *buffer = to_isp_buffer(buf);
497 struct isp_video *video = vfh->video;
498 struct isp_pipeline *pipe = to_isp_pipeline(&video->video.entity);
499 enum isp_pipeline_state state;
500 unsigned long flags;
501 unsigned int empty;
502 unsigned int start;
503
504 empty = list_empty(&video->dmaqueue);
505 list_add_tail(&buffer->buffer.irqlist, &video->dmaqueue);
506
507 if (empty) {
508 if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
509 state = ISP_PIPELINE_QUEUE_OUTPUT;
510 else
511 state = ISP_PIPELINE_QUEUE_INPUT;
512
513 spin_lock_irqsave(&pipe->lock, flags);
514 pipe->state |= state;
515 video->ops->queue(video, buffer);
516 video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_QUEUED;
517
518 start = isp_pipeline_ready(pipe);
519 if (start)
520 pipe->state |= ISP_PIPELINE_STREAM;
521 spin_unlock_irqrestore(&pipe->lock, flags);
522
523 if (start)
524 omap3isp_pipeline_set_stream(pipe,
525 ISP_PIPELINE_STREAM_SINGLESHOT);
526 }
527 }
528
529 static const struct isp_video_queue_operations isp_video_queue_ops = {
530 .queue_prepare = &isp_video_queue_prepare,
531 .buffer_prepare = &isp_video_buffer_prepare,
532 .buffer_queue = &isp_video_buffer_queue,
533 .buffer_cleanup = &isp_video_buffer_cleanup,
534 };
535
536 /*
537 * omap3isp_video_buffer_next - Complete the current buffer and return the next
538 * @video: ISP video object
539 *
540 * Remove the current video buffer from the DMA queue and fill its timestamp,
541 * field count and state fields before waking up its completion handler.
542 *
543 * For capture video nodes the buffer state is set to ISP_BUF_STATE_DONE if no
544 * error has been flagged in the pipeline, or to ISP_BUF_STATE_ERROR otherwise.
545 * For video output nodes the buffer state is always set to ISP_BUF_STATE_DONE.
546 *
547 * The DMA queue is expected to contain at least one buffer.
548 *
549 * Return a pointer to the next buffer in the DMA queue, or NULL if the queue is
550 * empty.
551 */
552 struct isp_buffer *omap3isp_video_buffer_next(struct isp_video *video)
553 {
554 struct isp_pipeline *pipe = to_isp_pipeline(&video->video.entity);
555 struct isp_video_queue *queue = video->queue;
556 enum isp_pipeline_state state;
557 struct isp_video_buffer *buf;
558 unsigned long flags;
559 struct timespec ts;
560
561 spin_lock_irqsave(&queue->irqlock, flags);
562 if (WARN_ON(list_empty(&video->dmaqueue))) {
563 spin_unlock_irqrestore(&queue->irqlock, flags);
564 return NULL;
565 }
566
567 buf = list_first_entry(&video->dmaqueue, struct isp_video_buffer,
568 irqlist);
569 list_del(&buf->irqlist);
570 spin_unlock_irqrestore(&queue->irqlock, flags);
571
572 ktime_get_ts(&ts);
573 buf->vbuf.timestamp.tv_sec = ts.tv_sec;
574 buf->vbuf.timestamp.tv_usec = ts.tv_nsec / NSEC_PER_USEC;
575
576 /* Do frame number propagation only if this is the output video node.
577 * Frame number either comes from the CSI receivers or it gets
578 * incremented here if H3A is not active.
579 * Note: There is no guarantee that the output buffer will finish
580 * first, so the input number might lag behind by 1 in some cases.
581 */
582 if (video == pipe->output && !pipe->do_propagation)
583 buf->vbuf.sequence = atomic_inc_return(&pipe->frame_number);
584 else
585 buf->vbuf.sequence = atomic_read(&pipe->frame_number);
586
587 /* Report pipeline errors to userspace on the capture device side. */
588 if (queue->type == V4L2_BUF_TYPE_VIDEO_CAPTURE && pipe->error) {
589 buf->state = ISP_BUF_STATE_ERROR;
590 pipe->error = false;
591 } else {
592 buf->state = ISP_BUF_STATE_DONE;
593 }
594
595 wake_up(&buf->wait);
596
597 if (list_empty(&video->dmaqueue)) {
598 if (queue->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
599 state = ISP_PIPELINE_QUEUE_OUTPUT
600 | ISP_PIPELINE_STREAM;
601 else
602 state = ISP_PIPELINE_QUEUE_INPUT
603 | ISP_PIPELINE_STREAM;
604
605 spin_lock_irqsave(&pipe->lock, flags);
606 pipe->state &= ~state;
607 if (video->pipe.stream_state == ISP_PIPELINE_STREAM_CONTINUOUS)
608 video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_UNDERRUN;
609 spin_unlock_irqrestore(&pipe->lock, flags);
610 return NULL;
611 }
612
613 if (queue->type == V4L2_BUF_TYPE_VIDEO_CAPTURE && pipe->input != NULL) {
614 spin_lock_irqsave(&pipe->lock, flags);
615 pipe->state &= ~ISP_PIPELINE_STREAM;
616 spin_unlock_irqrestore(&pipe->lock, flags);
617 }
618
619 buf = list_first_entry(&video->dmaqueue, struct isp_video_buffer,
620 irqlist);
621 buf->state = ISP_BUF_STATE_ACTIVE;
622 return to_isp_buffer(buf);
623 }
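/*
 * Illustrative sketch (not part of this file): an ISP module's frame
 * completion interrupt handler typically calls omap3isp_video_buffer_next()
 * on its video node and, if a new buffer is returned, programs its DMA
 * address into the hardware. The module and helper names below are
 * hypothetical.
 *
 *	struct isp_buffer *buffer;
 *
 *	buffer = omap3isp_video_buffer_next(&module->video_out);
 *	if (buffer != NULL)
 *		module_set_outaddr(module, buffer->isp_addr);
 */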
624
625 /*
626 * omap3isp_video_resume - Perform resume operation on the buffers
627 * @video: ISP video object
628 * @continuous: Pipeline is in single shot mode if 0 or continuous mode otherwise
629 *
630 * This function is intended to be used in suspend/resume scenarios. It
631 * requests the video queue layer to discard buffers marked as DONE if the
632 * pipeline is in continuous mode, and requests the ISP modules to requeue
633 * the ACTIVE buffer, if there is any.
634 */
635 void omap3isp_video_resume(struct isp_video *video, int continuous)
636 {
637 struct isp_buffer *buf = NULL;
638
639 if (continuous && video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
640 omap3isp_video_queue_discard_done(video->queue);
641
642 if (!list_empty(&video->dmaqueue)) {
643 buf = list_first_entry(&video->dmaqueue,
644 struct isp_buffer, buffer.irqlist);
645 video->ops->queue(video, buf);
646 video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_QUEUED;
647 } else {
648 if (continuous)
649 video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_UNDERRUN;
650 }
651 }
652
653 /* -----------------------------------------------------------------------------
654 * V4L2 ioctls
655 */
656
657 static int
658 isp_video_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
659 {
660 struct isp_video *video = video_drvdata(file);
661
662 strlcpy(cap->driver, ISP_VIDEO_DRIVER_NAME, sizeof(cap->driver));
663 strlcpy(cap->card, video->video.name, sizeof(cap->card));
664 strlcpy(cap->bus_info, "media", sizeof(cap->bus_info));
665
666 if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
667 cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
668 else
669 cap->capabilities = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING;
670
671 return 0;
672 }
673
674 static int
675 isp_video_get_format(struct file *file, void *fh, struct v4l2_format *format)
676 {
677 struct isp_video_fh *vfh = to_isp_video_fh(fh);
678 struct isp_video *video = video_drvdata(file);
679
680 if (format->type != video->type)
681 return -EINVAL;
682
683 mutex_lock(&video->mutex);
684 *format = vfh->format;
685 mutex_unlock(&video->mutex);
686
687 return 0;
688 }
689
690 static int
691 isp_video_set_format(struct file *file, void *fh, struct v4l2_format *format)
692 {
693 struct isp_video_fh *vfh = to_isp_video_fh(fh);
694 struct isp_video *video = video_drvdata(file);
695 struct v4l2_mbus_framefmt fmt;
696
697 if (format->type != video->type)
698 return -EINVAL;
699
700 mutex_lock(&video->mutex);
701
702 /* Fill the bytesperline and sizeimage fields by converting to media bus
703 * format and back to pixel format.
704 */
705 isp_video_pix_to_mbus(&format->fmt.pix, &fmt);
706 isp_video_mbus_to_pix(video, &fmt, &format->fmt.pix);
707
708 vfh->format = *format;
709
710 mutex_unlock(&video->mutex);
711 return 0;
712 }
713
714 static int
715 isp_video_try_format(struct file *file, void *fh, struct v4l2_format *format)
716 {
717 struct isp_video *video = video_drvdata(file);
718 struct v4l2_subdev_format fmt;
719 struct v4l2_subdev *subdev;
720 u32 pad;
721 int ret;
722
723 if (format->type != video->type)
724 return -EINVAL;
725
726 subdev = isp_video_remote_subdev(video, &pad);
727 if (subdev == NULL)
728 return -EINVAL;
729
730 isp_video_pix_to_mbus(&format->fmt.pix, &fmt.format);
731
732 fmt.pad = pad;
733 fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
734 ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
735 if (ret)
736 return ret == -ENOIOCTLCMD ? -ENOTTY : ret;
737
738 isp_video_mbus_to_pix(video, &fmt.format, &format->fmt.pix);
739 return 0;
740 }
741
742 static int
743 isp_video_cropcap(struct file *file, void *fh, struct v4l2_cropcap *cropcap)
744 {
745 struct isp_video *video = video_drvdata(file);
746 struct v4l2_subdev *subdev;
747 int ret;
748
749 subdev = isp_video_remote_subdev(video, NULL);
750 if (subdev == NULL)
751 return -EINVAL;
752
753 mutex_lock(&video->mutex);
754 ret = v4l2_subdev_call(subdev, video, cropcap, cropcap);
755 mutex_unlock(&video->mutex);
756
757 return ret == -ENOIOCTLCMD ? -ENOTTY : ret;
758 }
759
760 static int
761 isp_video_get_crop(struct file *file, void *fh, struct v4l2_crop *crop)
762 {
763 struct isp_video *video = video_drvdata(file);
764 struct v4l2_subdev_format format;
765 struct v4l2_subdev *subdev;
766 u32 pad;
767 int ret;
768
769 subdev = isp_video_remote_subdev(video, &pad);
770 if (subdev == NULL)
771 return -EINVAL;
772
773 /* Try the get crop operation first and fallback to get format if not
774 * implemented.
775 */
776 ret = v4l2_subdev_call(subdev, video, g_crop, crop);
777 if (ret != -ENOIOCTLCMD)
778 return ret;
779
780 format.pad = pad;
781 format.which = V4L2_SUBDEV_FORMAT_ACTIVE;
782 ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &format);
783 if (ret < 0)
784 return ret == -ENOIOCTLCMD ? -ENOTTY : ret;
785
786 crop->c.left = 0;
787 crop->c.top = 0;
788 crop->c.width = format.format.width;
789 crop->c.height = format.format.height;
790
791 return 0;
792 }
793
794 static int
795 isp_video_set_crop(struct file *file, void *fh, struct v4l2_crop *crop)
796 {
797 struct isp_video *video = video_drvdata(file);
798 struct v4l2_subdev *subdev;
799 int ret;
800
801 subdev = isp_video_remote_subdev(video, NULL);
802 if (subdev == NULL)
803 return -EINVAL;
804
805 mutex_lock(&video->mutex);
806 ret = v4l2_subdev_call(subdev, video, s_crop, crop);
807 mutex_unlock(&video->mutex);
808
809 return ret == -ENOIOCTLCMD ? -ENOTTY : ret;
810 }
811
812 static int
813 isp_video_get_param(struct file *file, void *fh, struct v4l2_streamparm *a)
814 {
815 struct isp_video_fh *vfh = to_isp_video_fh(fh);
816 struct isp_video *video = video_drvdata(file);
817
818 if (video->type != V4L2_BUF_TYPE_VIDEO_OUTPUT ||
819 video->type != a->type)
820 return -EINVAL;
821
822 memset(a, 0, sizeof(*a));
823 a->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
824 a->parm.output.capability = V4L2_CAP_TIMEPERFRAME;
825 a->parm.output.timeperframe = vfh->timeperframe;
826
827 return 0;
828 }
829
830 static int
831 isp_video_set_param(struct file *file, void *fh, struct v4l2_streamparm *a)
832 {
833 struct isp_video_fh *vfh = to_isp_video_fh(fh);
834 struct isp_video *video = video_drvdata(file);
835
836 if (video->type != V4L2_BUF_TYPE_VIDEO_OUTPUT ||
837 video->type != a->type)
838 return -EINVAL;
839
840 if (a->parm.output.timeperframe.denominator == 0)
841 a->parm.output.timeperframe.denominator = 1;
842
843 vfh->timeperframe = a->parm.output.timeperframe;
844
845 return 0;
846 }
847
848 static int
849 isp_video_reqbufs(struct file *file, void *fh, struct v4l2_requestbuffers *rb)
850 {
851 struct isp_video_fh *vfh = to_isp_video_fh(fh);
852
853 return omap3isp_video_queue_reqbufs(&vfh->queue, rb);
854 }
855
856 static int
857 isp_video_querybuf(struct file *file, void *fh, struct v4l2_buffer *b)
858 {
859 struct isp_video_fh *vfh = to_isp_video_fh(fh);
860
861 return omap3isp_video_queue_querybuf(&vfh->queue, b);
862 }
863
864 static int
865 isp_video_qbuf(struct file *file, void *fh, struct v4l2_buffer *b)
866 {
867 struct isp_video_fh *vfh = to_isp_video_fh(fh);
868
869 return omap3isp_video_queue_qbuf(&vfh->queue, b);
870 }
871
872 static int
873 isp_video_dqbuf(struct file *file, void *fh, struct v4l2_buffer *b)
874 {
875 struct isp_video_fh *vfh = to_isp_video_fh(fh);
876
877 return omap3isp_video_queue_dqbuf(&vfh->queue, b,
878 file->f_flags & O_NONBLOCK);
879 }
880
881 static int isp_video_check_external_subdevs(struct isp_video *video,
882 struct isp_pipeline *pipe)
883 {
884 struct isp_device *isp = video->isp;
885 struct media_entity *ents[] = {
886 &isp->isp_csi2a.subdev.entity,
887 &isp->isp_csi2c.subdev.entity,
888 &isp->isp_ccp2.subdev.entity,
889 &isp->isp_ccdc.subdev.entity
890 };
891 struct media_pad *source_pad;
892 struct media_entity *source = NULL;
893 struct media_entity *sink;
894 struct v4l2_subdev_format fmt;
895 struct v4l2_ext_controls ctrls;
896 struct v4l2_ext_control ctrl;
897 unsigned int i;
898 int ret = 0;
899
900 for (i = 0; i < ARRAY_SIZE(ents); i++) {
901 /* Is the entity part of the pipeline? */
902 if (!(pipe->entities & (1 << ents[i]->id)))
903 continue;
904
905 /* ISP entities always have their sink on pad 0. Find the source. */
906 source_pad = media_entity_remote_source(&ents[i]->pads[0]);
907 if (source_pad == NULL)
908 continue;
909
910 source = source_pad->entity;
911 sink = ents[i];
912 break;
913 }
914
915 if (!source) {
916 dev_warn(isp->dev, "can't find source, failing now\n");
917 return ret;
918 }
919
920 if (media_entity_type(source) != MEDIA_ENT_T_V4L2_SUBDEV)
921 return 0;
922
923 pipe->external = media_entity_to_v4l2_subdev(source);
924
925 fmt.pad = source_pad->index;
926 fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
927 ret = v4l2_subdev_call(media_entity_to_v4l2_subdev(sink),
928 pad, get_fmt, NULL, &fmt);
929 if (unlikely(ret < 0)) {
930 dev_warn(isp->dev, "get_fmt returned null!\n");
931 return ret;
932 }
933
934 pipe->external_width =
935 omap3isp_video_format_info(fmt.format.code)->width;
936
937 memset(&ctrls, 0, sizeof(ctrls));
938 memset(&ctrl, 0, sizeof(ctrl));
939
940 ctrl.id = V4L2_CID_PIXEL_RATE;
941
942 ctrls.count = 1;
943 ctrls.controls = &ctrl;
944
945 ret = v4l2_g_ext_ctrls(pipe->external->ctrl_handler, &ctrls);
946 if (ret < 0) {
947 dev_warn(isp->dev, "no pixel rate control in subdev %s\n",
948 pipe->external->name);
949 return ret;
950 }
951
952 pipe->external_rate = ctrl.value64;
953
954 if (pipe->entities & (1 << isp->isp_ccdc.subdev.entity.id)) {
955 unsigned int rate = UINT_MAX;
956 /*
957 * Check that maximum allowed CCDC pixel rate isn't
958 * exceeded by the pixel rate.
959 */
960 omap3isp_ccdc_max_rate(&isp->isp_ccdc, &rate);
961 if (pipe->external_rate > rate)
962 return -ENOSPC;
963 }
964
965 return 0;
966 }
967
968 /*
969 * Stream management
970 *
971 * Every ISP pipeline has a single input and a single output. The input can be
972 * either a sensor or a video node. The output is always a video node.
973 *
974 * As every pipeline has an output video node, the ISP video object at the
975 * pipeline output stores the pipeline state. It tracks the streaming state of
976 * both the input and output, as well as the availability of buffers.
977 *
978 * In sensor-to-memory mode, frames are always available at the pipeline input.
979 * Starting the sensor usually requires I2C transfers and must be done in
980 * interruptible context. The pipeline is started and stopped synchronously
981 * with the stream on/off commands. All modules in the pipeline will get their
982 * subdev set stream handler called. The module at the end of the pipeline must
983 * delay starting the hardware until buffers are available at its output.
984 *
985 * In memory-to-memory mode, starting/stopping the stream requires
986 * synchronization between the input and output. ISP modules can't be stopped
987 * in the middle of a frame, and at least some of the modules seem to become
988 * busy as soon as they're started, even if they don't receive a frame start
989 * event. For that reason frames need to be processed in single-shot mode. The
990 * driver needs to wait until a frame is completely processed and written to
991 * memory before restarting the pipeline for the next frame. Pipelined
992 * processing might be possible but requires more testing.
993 *
994 * Stream start must be delayed until buffers are available at both the input
995 * and output. The pipeline must be started in the videobuf queue callback with
996 * the buffer queue spinlock held. The modules' subdev set stream operation must
997 * not sleep.
998 */
999 static int
1000 isp_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
1001 {
1002 struct isp_video_fh *vfh = to_isp_video_fh(fh);
1003 struct isp_video *video = video_drvdata(file);
1004 enum isp_pipeline_state state;
1005 struct isp_pipeline *pipe;
1006 unsigned long flags;
1007 int ret;
1008
1009 if (type != video->type)
1010 return -EINVAL;
1011
1012 mutex_lock(&video->stream_lock);
1013
1014 if (video->streaming) {
1015 mutex_unlock(&video->stream_lock);
1016 return -EBUSY;
1017 }
1018
1019 /* Start streaming on the pipeline. No link touching an entity in the
1020 * pipeline can be activated or deactivated once streaming is started.
1021 */
1022 pipe = video->video.entity.pipe
1023 ? to_isp_pipeline(&video->video.entity) : &video->pipe;
1024
1025 pipe->entities = 0;
1026
1027 if (video->isp->pdata->set_constraints)
1028 video->isp->pdata->set_constraints(video->isp, true);
1029 pipe->l3_ick = clk_get_rate(video->isp->clock[ISP_CLK_L3_ICK]);
1030 pipe->max_rate = pipe->l3_ick;
1031
1032 ret = media_entity_pipeline_start(&video->video.entity, &pipe->pipe);
1033 if (ret < 0)
1034 goto err_pipeline_start;
1035
1036 /* Verify that the currently configured format matches the output of
1037 * the connected subdev.
1038 */
1039 ret = isp_video_check_format(video, vfh);
1040 if (ret < 0)
1041 goto err_check_format;
1042
1043 video->bpl_padding = ret;
1044 video->bpl_value = vfh->format.fmt.pix.bytesperline;
1045
1046 ret = isp_video_get_graph_data(video, pipe);
1047 if (ret < 0)
1048 goto err_check_format;
1049
1050 if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
1051 state = ISP_PIPELINE_STREAM_OUTPUT | ISP_PIPELINE_IDLE_OUTPUT;
1052 else
1053 state = ISP_PIPELINE_STREAM_INPUT | ISP_PIPELINE_IDLE_INPUT;
1054
1055 ret = isp_video_check_external_subdevs(video, pipe);
1056 if (ret < 0)
1057 goto err_check_format;
1058
1059 /* Validate the pipeline and update its state. */
1060 ret = isp_video_validate_pipeline(pipe);
1061 if (ret < 0)
1062 goto err_check_format;
1063
1064 pipe->error = false;
1065
1066 spin_lock_irqsave(&pipe->lock, flags);
1067 pipe->state &= ~ISP_PIPELINE_STREAM;
1068 pipe->state |= state;
1069 spin_unlock_irqrestore(&pipe->lock, flags);
1070
1071 /* Set the maximum time per frame as the value requested by userspace.
1072 * This is a soft limit that can be overridden if the hardware doesn't
1073 * support the request limit.
1074 */
1075 if (video->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
1076 pipe->max_timeperframe = vfh->timeperframe;
1077
1078 video->queue = &vfh->queue;
1079 INIT_LIST_HEAD(&video->dmaqueue);
1080 atomic_set(&pipe->frame_number, -1);
1081
1082 ret = omap3isp_video_queue_streamon(&vfh->queue);
1083 if (ret < 0)
1084 goto err_check_format;
1085
1086 /* In sensor-to-memory mode, the stream can be started synchronously
1087 * with the stream on command. In memory-to-memory mode, it will be
1088 * started when buffers are queued on both the input and output.
1089 */
1090 if (pipe->input == NULL) {
1091 ret = omap3isp_pipeline_set_stream(pipe,
1092 ISP_PIPELINE_STREAM_CONTINUOUS);
1093 if (ret < 0)
1094 goto err_set_stream;
1095 spin_lock_irqsave(&video->queue->irqlock, flags);
1096 if (list_empty(&video->dmaqueue))
1097 video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_UNDERRUN;
1098 spin_unlock_irqrestore(&video->queue->irqlock, flags);
1099 }
1100
1101 video->streaming = 1;
1102
1103 mutex_unlock(&video->stream_lock);
1104 return 0;
1105
1106 err_set_stream:
1107 omap3isp_video_queue_streamoff(&vfh->queue);
1108 err_check_format:
1109 media_entity_pipeline_stop(&video->video.entity);
1110 err_pipeline_start:
1111 if (video->isp->pdata->set_constraints)
1112 video->isp->pdata->set_constraints(video->isp, false);
1113 /* The DMA queue must be emptied here, otherwise CCDC interrupts that
1114 * will get triggered the next time the CCDC is powered up will try to
1115 * access buffers that might have been freed but are still present in the
1116 * DMA queue. This can easily get triggered if the above
1117 * omap3isp_pipeline_set_stream() call fails on a system with a
1118 * free-running sensor.
1119 */
1120 INIT_LIST_HEAD(&video->dmaqueue);
1121 video->queue = NULL;
1122
1123 mutex_unlock(&video->stream_lock);
1124 return ret;
1125 }
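/*
 * Illustrative userspace sketch (not part of the driver), assuming a capture
 * video node exposed as /dev/videoN (hypothetical path): the sequence that
 * exercises the stream management described above is the standard V4L2
 * streaming dance.
 *
 *	struct v4l2_format fmt = { .type = V4L2_BUF_TYPE_VIDEO_CAPTURE };
 *	struct v4l2_requestbuffers rb = {
 *		.count = 4,
 *		.type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
 *		.memory = V4L2_MEMORY_MMAP,
 *	};
 *	enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
 *
 *	ioctl(fd, VIDIOC_S_FMT, &fmt);
 *	ioctl(fd, VIDIOC_REQBUFS, &rb);
 *	(queue buffers with VIDIOC_QBUF)
 *	ioctl(fd, VIDIOC_STREAMON, &type);
 *	(dequeue filled buffers with VIDIOC_DQBUF)
 *	ioctl(fd, VIDIOC_STREAMOFF, &type);
 */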
1126
1127 static int
1128 isp_video_streamoff(struct file *file, void *fh, enum v4l2_buf_type type)
1129 {
1130 struct isp_video_fh *vfh = to_isp_video_fh(fh);
1131 struct isp_video *video = video_drvdata(file);
1132 struct isp_pipeline *pipe = to_isp_pipeline(&video->video.entity);
1133 enum isp_pipeline_state state;
1134 unsigned int streaming;
1135 unsigned long flags;
1136
1137 if (type != video->type)
1138 return -EINVAL;
1139
1140 mutex_lock(&video->stream_lock);
1141
1142 /* Bail out early if the queue isn't streaming. */
1143 mutex_lock(&vfh->queue.lock);
1144 streaming = vfh->queue.streaming;
1145 mutex_unlock(&vfh->queue.lock);
1146
1147 if (!streaming)
1148 goto done;
1149
1150 /* Update the pipeline state. */
1151 if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
1152 state = ISP_PIPELINE_STREAM_OUTPUT
1153 | ISP_PIPELINE_QUEUE_OUTPUT;
1154 else
1155 state = ISP_PIPELINE_STREAM_INPUT
1156 | ISP_PIPELINE_QUEUE_INPUT;
1157
1158 spin_lock_irqsave(&pipe->lock, flags);
1159 pipe->state &= ~state;
1160 spin_unlock_irqrestore(&pipe->lock, flags);
1161
1162 /* Stop the stream. */
1163 omap3isp_pipeline_set_stream(pipe, ISP_PIPELINE_STREAM_STOPPED);
1164 omap3isp_video_queue_streamoff(&vfh->queue);
1165 video->queue = NULL;
1166 video->streaming = 0;
1167
1168 if (video->isp->pdata->set_constraints)
1169 video->isp->pdata->set_constraints(video->isp, false);
1170 media_entity_pipeline_stop(&video->video.entity);
1171
1172 done:
1173 mutex_unlock(&video->stream_lock);
1174 return 0;
1175 }
1176
1177 static int
1178 isp_video_enum_input(struct file *file, void *fh, struct v4l2_input *input)
1179 {
1180 if (input->index > 0)
1181 return -EINVAL;
1182
1183 strlcpy(input->name, "camera", sizeof(input->name));
1184 input->type = V4L2_INPUT_TYPE_CAMERA;
1185
1186 return 0;
1187 }
1188
1189 static int
1190 isp_video_g_input(struct file *file, void *fh, unsigned int *input)
1191 {
1192 *input = 0;
1193
1194 return 0;
1195 }
1196
1197 static int
1198 isp_video_s_input(struct file *file, void *fh, unsigned int input)
1199 {
1200 return input == 0 ? 0 : -EINVAL;
1201 }
1202
1203 static const struct v4l2_ioctl_ops isp_video_ioctl_ops = {
1204 .vidioc_querycap = isp_video_querycap,
1205 .vidioc_g_fmt_vid_cap = isp_video_get_format,
1206 .vidioc_s_fmt_vid_cap = isp_video_set_format,
1207 .vidioc_try_fmt_vid_cap = isp_video_try_format,
1208 .vidioc_g_fmt_vid_out = isp_video_get_format,
1209 .vidioc_s_fmt_vid_out = isp_video_set_format,
1210 .vidioc_try_fmt_vid_out = isp_video_try_format,
1211 .vidioc_cropcap = isp_video_cropcap,
1212 .vidioc_g_crop = isp_video_get_crop,
1213 .vidioc_s_crop = isp_video_set_crop,
1214 .vidioc_g_parm = isp_video_get_param,
1215 .vidioc_s_parm = isp_video_set_param,
1216 .vidioc_reqbufs = isp_video_reqbufs,
1217 .vidioc_querybuf = isp_video_querybuf,
1218 .vidioc_qbuf = isp_video_qbuf,
1219 .vidioc_dqbuf = isp_video_dqbuf,
1220 .vidioc_streamon = isp_video_streamon,
1221 .vidioc_streamoff = isp_video_streamoff,
1222 .vidioc_enum_input = isp_video_enum_input,
1223 .vidioc_g_input = isp_video_g_input,
1224 .vidioc_s_input = isp_video_s_input,
1225 };
1226
1227 /* -----------------------------------------------------------------------------
1228 * V4L2 file operations
1229 */
1230
1231 static int isp_video_open(struct file *file)
1232 {
1233 struct isp_video *video = video_drvdata(file);
1234 struct isp_video_fh *handle;
1235 int ret = 0;
1236
1237 handle = kzalloc(sizeof(*handle), GFP_KERNEL);
1238 if (handle == NULL)
1239 return -ENOMEM;
1240
1241 v4l2_fh_init(&handle->vfh, &video->video);
1242 v4l2_fh_add(&handle->vfh);
1243
1244 /* If this is the first user, initialise the pipeline. */
1245 if (omap3isp_get(video->isp) == NULL) {
1246 ret = -EBUSY;
1247 goto done;
1248 }
1249
1250 ret = omap3isp_pipeline_pm_use(&video->video.entity, 1);
1251 if (ret < 0) {
1252 omap3isp_put(video->isp);
1253 goto done;
1254 }
1255
1256 omap3isp_video_queue_init(&handle->queue, video->type,
1257 &isp_video_queue_ops, video->isp->dev,
1258 sizeof(struct isp_buffer));
1259
1260 memset(&handle->format, 0, sizeof(handle->format));
1261 handle->format.type = video->type;
1262 handle->timeperframe.denominator = 1;
1263
1264 handle->video = video;
1265 file->private_data = &handle->vfh;
1266
1267 done:
1268 if (ret < 0) {
1269 v4l2_fh_del(&handle->vfh);
1270 kfree(handle);
1271 }
1272
1273 return ret;
1274 }
1275
1276 static int isp_video_release(struct file *file)
1277 {
1278 struct isp_video *video = video_drvdata(file);
1279 struct v4l2_fh *vfh = file->private_data;
1280 struct isp_video_fh *handle = to_isp_video_fh(vfh);
1281
1282 /* Disable streaming and free the buffers queue resources. */
1283 isp_video_streamoff(file, vfh, video->type);
1284
1285 mutex_lock(&handle->queue.lock);
1286 omap3isp_video_queue_cleanup(&handle->queue);
1287 mutex_unlock(&handle->queue.lock);
1288
1289 omap3isp_pipeline_pm_use(&video->video.entity, 0);
1290
1291 /* Release the file handle. */
1292 v4l2_fh_del(vfh);
1293 kfree(handle);
1294 file->private_data = NULL;
1295
1296 omap3isp_put(video->isp);
1297
1298 return 0;
1299 }
1300
1301 static unsigned int isp_video_poll(struct file *file, poll_table *wait)
1302 {
1303 struct isp_video_fh *vfh = to_isp_video_fh(file->private_data);
1304 struct isp_video_queue *queue = &vfh->queue;
1305
1306 return omap3isp_video_queue_poll(queue, file, wait);
1307 }
1308
1309 static int isp_video_mmap(struct file *file, struct vm_area_struct *vma)
1310 {
1311 struct isp_video_fh *vfh = to_isp_video_fh(file->private_data);
1312
1313 return omap3isp_video_queue_mmap(&vfh->queue, vma);
1314 }
1315
1316 static struct v4l2_file_operations isp_video_fops = {
1317 .owner = THIS_MODULE,
1318 .unlocked_ioctl = video_ioctl2,
1319 .open = isp_video_open,
1320 .release = isp_video_release,
1321 .poll = isp_video_poll,
1322 .mmap = isp_video_mmap,
1323 };
1324
1325 /* -----------------------------------------------------------------------------
1326 * ISP video core
1327 */
1328
1329 static const struct isp_video_operations isp_video_dummy_ops = {
1330 };
1331
1332 int omap3isp_video_init(struct isp_video *video, const char *name)
1333 {
1334 const char *direction;
1335 int ret;
1336
1337 switch (video->type) {
1338 case V4L2_BUF_TYPE_VIDEO_CAPTURE:
1339 direction = "output";
1340 video->pad.flags = MEDIA_PAD_FL_SINK;
1341 break;
1342 case V4L2_BUF_TYPE_VIDEO_OUTPUT:
1343 direction = "input";
1344 video->pad.flags = MEDIA_PAD_FL_SOURCE;
1345 video->video.vfl_dir = VFL_DIR_TX;
1346 break;
1347
1348 default:
1349 return -EINVAL;
1350 }
1351
1352 ret = media_entity_init(&video->video.entity, 1, &video->pad, 0);
1353 if (ret < 0)
1354 return ret;
1355
1356 mutex_init(&video->mutex);
1357 atomic_set(&video->active, 0);
1358
1359 spin_lock_init(&video->pipe.lock);
1360 mutex_init(&video->stream_lock);
1361
1362 /* Initialize the video device. */
1363 if (video->ops == NULL)
1364 video->ops = &isp_video_dummy_ops;
1365
1366 video->video.fops = &isp_video_fops;
1367 snprintf(video->video.name, sizeof(video->video.name),
1368 "OMAP3 ISP %s %s", name, direction);
1369 video->video.vfl_type = VFL_TYPE_GRABBER;
1370 video->video.release = video_device_release_empty;
1371 video->video.ioctl_ops = &isp_video_ioctl_ops;
1372 video->pipe.stream_state = ISP_PIPELINE_STREAM_STOPPED;
1373
1374 video_set_drvdata(&video->video, video);
1375
1376 return 0;
1377 }
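/*
 * Illustrative sketch (not part of this file): an ISP module embeds a
 * struct isp_video, fills in its type, operations and DMA constraints, and
 * then calls omap3isp_video_init() followed by omap3isp_video_register().
 * The field values and the ops name below are hypothetical.
 *
 *	video->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
 *	video->ops = &my_module_video_ops;
 *	video->isp = isp;
 *	video->capture_mem = PAGE_ALIGN(4096 * 4096) * 3;
 *	video->bpl_alignment = 32;
 *
 *	ret = omap3isp_video_init(video, "MODULE");
 *	if (ret < 0)
 *		return ret;
 */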
1378
1379 void omap3isp_video_cleanup(struct isp_video *video)
1380 {
1381 media_entity_cleanup(&video->video.entity);
1382 mutex_destroy(&video->stream_lock);
1383 mutex_destroy(&video->mutex);
1384 }
1385
1386 int omap3isp_video_register(struct isp_video *video, struct v4l2_device *vdev)
1387 {
1388 int ret;
1389
1390 video->video.v4l2_dev = vdev;
1391
1392 ret = video_register_device(&video->video, VFL_TYPE_GRABBER, -1);
1393 if (ret < 0)
1394 printk(KERN_ERR "%s: could not register video device (%d)\n",
1395 __func__, ret);
1396
1397 return ret;
1398 }
1399
1400 void omap3isp_video_unregister(struct isp_video *video)
1401 {
1402 if (video_is_registered(&video->video))
1403 video_unregister_device(&video->video);
1404 }