[media] omap3isp: Prevent pipelines that contain a crashed entity from starting
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / drivers / media / video / omap3isp / ispvideo.c
1 /*
2 * ispvideo.c
3 *
4 * TI OMAP3 ISP - Generic video node
5 *
6 * Copyright (C) 2009-2010 Nokia Corporation
7 *
8 * Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
9 * Sakari Ailus <sakari.ailus@iki.fi>
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License version 2 as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
23 * 02110-1301 USA
24 */
25
26 #include <asm/cacheflush.h>
27 #include <linux/clk.h>
28 #include <linux/mm.h>
29 #include <linux/module.h>
30 #include <linux/pagemap.h>
31 #include <linux/scatterlist.h>
32 #include <linux/sched.h>
33 #include <linux/slab.h>
34 #include <linux/vmalloc.h>
35 #include <media/v4l2-dev.h>
36 #include <media/v4l2-ioctl.h>
37 #include <plat/iommu.h>
38 #include <plat/iovmm.h>
39 #include <plat/omap-pm.h>
40
41 #include "ispvideo.h"
42 #include "isp.h"
43
44
45 /* -----------------------------------------------------------------------------
46 * Helper functions
47 */
48
49 static struct isp_format_info formats[] = {
50 { V4L2_MBUS_FMT_Y8_1X8, V4L2_MBUS_FMT_Y8_1X8,
51 V4L2_MBUS_FMT_Y8_1X8, V4L2_MBUS_FMT_Y8_1X8,
52 V4L2_PIX_FMT_GREY, 8, },
53 { V4L2_MBUS_FMT_Y10_1X10, V4L2_MBUS_FMT_Y10_1X10,
54 V4L2_MBUS_FMT_Y10_1X10, V4L2_MBUS_FMT_Y8_1X8,
55 V4L2_PIX_FMT_Y10, 10, },
56 { V4L2_MBUS_FMT_Y12_1X12, V4L2_MBUS_FMT_Y10_1X10,
57 V4L2_MBUS_FMT_Y12_1X12, V4L2_MBUS_FMT_Y8_1X8,
58 V4L2_PIX_FMT_Y12, 12, },
59 { V4L2_MBUS_FMT_SBGGR8_1X8, V4L2_MBUS_FMT_SBGGR8_1X8,
60 V4L2_MBUS_FMT_SBGGR8_1X8, V4L2_MBUS_FMT_SBGGR8_1X8,
61 V4L2_PIX_FMT_SBGGR8, 8, },
62 { V4L2_MBUS_FMT_SGBRG8_1X8, V4L2_MBUS_FMT_SGBRG8_1X8,
63 V4L2_MBUS_FMT_SGBRG8_1X8, V4L2_MBUS_FMT_SGBRG8_1X8,
64 V4L2_PIX_FMT_SGBRG8, 8, },
65 { V4L2_MBUS_FMT_SGRBG8_1X8, V4L2_MBUS_FMT_SGRBG8_1X8,
66 V4L2_MBUS_FMT_SGRBG8_1X8, V4L2_MBUS_FMT_SGRBG8_1X8,
67 V4L2_PIX_FMT_SGRBG8, 8, },
68 { V4L2_MBUS_FMT_SRGGB8_1X8, V4L2_MBUS_FMT_SRGGB8_1X8,
69 V4L2_MBUS_FMT_SRGGB8_1X8, V4L2_MBUS_FMT_SRGGB8_1X8,
70 V4L2_PIX_FMT_SRGGB8, 8, },
71 { V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8, V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8,
72 V4L2_MBUS_FMT_SGRBG10_1X10, 0,
73 V4L2_PIX_FMT_SGRBG10DPCM8, 8, },
74 { V4L2_MBUS_FMT_SBGGR10_1X10, V4L2_MBUS_FMT_SBGGR10_1X10,
75 V4L2_MBUS_FMT_SBGGR10_1X10, V4L2_MBUS_FMT_SBGGR8_1X8,
76 V4L2_PIX_FMT_SBGGR10, 10, },
77 { V4L2_MBUS_FMT_SGBRG10_1X10, V4L2_MBUS_FMT_SGBRG10_1X10,
78 V4L2_MBUS_FMT_SGBRG10_1X10, V4L2_MBUS_FMT_SGBRG8_1X8,
79 V4L2_PIX_FMT_SGBRG10, 10, },
80 { V4L2_MBUS_FMT_SGRBG10_1X10, V4L2_MBUS_FMT_SGRBG10_1X10,
81 V4L2_MBUS_FMT_SGRBG10_1X10, V4L2_MBUS_FMT_SGRBG8_1X8,
82 V4L2_PIX_FMT_SGRBG10, 10, },
83 { V4L2_MBUS_FMT_SRGGB10_1X10, V4L2_MBUS_FMT_SRGGB10_1X10,
84 V4L2_MBUS_FMT_SRGGB10_1X10, V4L2_MBUS_FMT_SRGGB8_1X8,
85 V4L2_PIX_FMT_SRGGB10, 10, },
86 { V4L2_MBUS_FMT_SBGGR12_1X12, V4L2_MBUS_FMT_SBGGR10_1X10,
87 V4L2_MBUS_FMT_SBGGR12_1X12, V4L2_MBUS_FMT_SBGGR8_1X8,
88 V4L2_PIX_FMT_SBGGR12, 12, },
89 { V4L2_MBUS_FMT_SGBRG12_1X12, V4L2_MBUS_FMT_SGBRG10_1X10,
90 V4L2_MBUS_FMT_SGBRG12_1X12, V4L2_MBUS_FMT_SGBRG8_1X8,
91 V4L2_PIX_FMT_SGBRG12, 12, },
92 { V4L2_MBUS_FMT_SGRBG12_1X12, V4L2_MBUS_FMT_SGRBG10_1X10,
93 V4L2_MBUS_FMT_SGRBG12_1X12, V4L2_MBUS_FMT_SGRBG8_1X8,
94 V4L2_PIX_FMT_SGRBG12, 12, },
95 { V4L2_MBUS_FMT_SRGGB12_1X12, V4L2_MBUS_FMT_SRGGB10_1X10,
96 V4L2_MBUS_FMT_SRGGB12_1X12, V4L2_MBUS_FMT_SRGGB8_1X8,
97 V4L2_PIX_FMT_SRGGB12, 12, },
98 { V4L2_MBUS_FMT_UYVY8_1X16, V4L2_MBUS_FMT_UYVY8_1X16,
99 V4L2_MBUS_FMT_UYVY8_1X16, 0,
100 V4L2_PIX_FMT_UYVY, 16, },
101 { V4L2_MBUS_FMT_YUYV8_1X16, V4L2_MBUS_FMT_YUYV8_1X16,
102 V4L2_MBUS_FMT_YUYV8_1X16, 0,
103 V4L2_PIX_FMT_YUYV, 16, },
104 };
105
106 const struct isp_format_info *
107 omap3isp_video_format_info(enum v4l2_mbus_pixelcode code)
108 {
109 unsigned int i;
110
111 for (i = 0; i < ARRAY_SIZE(formats); ++i) {
112 if (formats[i].code == code)
113 return &formats[i];
114 }
115
116 return NULL;
117 }
118
119 /*
120 * Decide whether desired output pixel code can be obtained with
121 * the lane shifter by shifting the input pixel code.
122 * @in: input pixelcode to shifter
123 * @out: output pixelcode from shifter
124 * @additional_shift: # of bits the sensor's LSB is offset from CAMEXT[0]
125 *
126 * return true if the combination is possible
127 * return false otherwise
128 */
129 static bool isp_video_is_shiftable(enum v4l2_mbus_pixelcode in,
130 enum v4l2_mbus_pixelcode out,
131 unsigned int additional_shift)
132 {
133 const struct isp_format_info *in_info, *out_info;
134
135 if (in == out)
136 return true;
137
138 in_info = omap3isp_video_format_info(in);
139 out_info = omap3isp_video_format_info(out);
140
141 if ((in_info->flavor == 0) || (out_info->flavor == 0))
142 return false;
143
144 if (in_info->flavor != out_info->flavor)
145 return false;
146
147 return in_info->bpp - out_info->bpp + additional_shift <= 6;
148 }
149
150 /*
151 * isp_video_mbus_to_pix - Convert v4l2_mbus_framefmt to v4l2_pix_format
152 * @video: ISP video instance
153 * @mbus: v4l2_mbus_framefmt format (input)
154 * @pix: v4l2_pix_format format (output)
155 *
156 * Fill the output pix structure with information from the input mbus format.
157 * The bytesperline and sizeimage fields are computed from the requested bytes
158 * per line value in the pix format and information from the video instance.
159 *
160 * Return the number of padding bytes at end of line.
161 */
static unsigned int isp_video_mbus_to_pix(const struct isp_video *video,
					  const struct v4l2_mbus_framefmt *mbus,
					  struct v4l2_pix_format *pix)
{
	/* Save the requested bytes per line before pix is cleared below. */
	unsigned int bpl = pix->bytesperline;
	unsigned int min_bpl;
	unsigned int i;

	memset(pix, 0, sizeof(*pix));
	pix->width = mbus->width;
	pix->height = mbus->height;

	/* Locate the formats table entry matching the media bus code. */
	for (i = 0; i < ARRAY_SIZE(formats); ++i) {
		if (formats[i].code == mbus->code)
			break;
	}

	/* Unknown code: nothing sensible to compute, report zero padding. */
	if (WARN_ON(i == ARRAY_SIZE(formats)))
		return 0;

	/* Minimum line size in bytes: width times the per-sample size, with
	 * bpp rounded up to a whole number of bytes.
	 */
	min_bpl = pix->width * ALIGN(formats[i].bpp, 8) / 8;

	/* Clamp the requested bytes per line value. If the maximum bytes per
	 * line value is zero, the module doesn't support user configurable line
	 * sizes. Override the requested value with the minimum in that case.
	 */
	if (video->bpl_max)
		bpl = clamp(bpl, min_bpl, video->bpl_max);
	else
		bpl = min_bpl;

	/* Round the line size up to the hardware alignment, except when the
	 * module supports zero padding and the line is exactly minimum size.
	 */
	if (!video->bpl_zero_padding || bpl != min_bpl)
		bpl = ALIGN(bpl, video->bpl_alignment);

	pix->pixelformat = formats[i].pixelformat;
	pix->bytesperline = bpl;
	pix->sizeimage = pix->bytesperline * pix->height;
	pix->colorspace = mbus->colorspace;
	pix->field = mbus->field;

	/* Number of padding bytes at the end of each line. */
	return bpl - min_bpl;
}
204
205 static void isp_video_pix_to_mbus(const struct v4l2_pix_format *pix,
206 struct v4l2_mbus_framefmt *mbus)
207 {
208 unsigned int i;
209
210 memset(mbus, 0, sizeof(*mbus));
211 mbus->width = pix->width;
212 mbus->height = pix->height;
213
214 /* Skip the last format in the loop so that it will be selected if no
215 * match is found.
216 */
217 for (i = 0; i < ARRAY_SIZE(formats) - 1; ++i) {
218 if (formats[i].pixelformat == pix->pixelformat)
219 break;
220 }
221
222 mbus->code = formats[i].code;
223 mbus->colorspace = pix->colorspace;
224 mbus->field = pix->field;
225 }
226
227 static struct v4l2_subdev *
228 isp_video_remote_subdev(struct isp_video *video, u32 *pad)
229 {
230 struct media_pad *remote;
231
232 remote = media_entity_remote_source(&video->pad);
233
234 if (remote == NULL ||
235 media_entity_type(remote->entity) != MEDIA_ENT_T_V4L2_SUBDEV)
236 return NULL;
237
238 if (pad)
239 *pad = remote->index;
240
241 return media_entity_to_v4l2_subdev(remote->entity);
242 }
243
/*
 * isp_video_far_end - Return the ISP video node at the far end of the pipeline
 * @video: ISP video node at the near end
 *
 * Walk the media graph starting at @video and return the first video device
 * node whose type (capture vs. output) differs from @video's. Return NULL if
 * no such node is part of the pipeline.
 *
 * The graph walk is serialized with the media device's graph_mutex.
 */
static struct isp_video *
isp_video_far_end(struct isp_video *video)
{
	struct media_entity_graph graph;
	struct media_entity *entity = &video->video.entity;
	struct media_device *mdev = entity->parent;
	struct isp_video *far_end = NULL;

	mutex_lock(&mdev->graph_mutex);
	media_entity_graph_walk_start(&graph, entity);

	while ((entity = media_entity_graph_walk_next(&graph))) {
		/* Skip the node the walk started from. */
		if (entity == &video->video.entity)
			continue;

		/* Only device nodes can be pipeline endpoints. */
		if (media_entity_type(entity) != MEDIA_ENT_T_DEVNODE)
			continue;

		far_end = to_isp_video(media_entity_to_video_device(entity));
		if (far_end->type != video->type)
			break;

		/* Same direction as the near end: keep searching. */
		far_end = NULL;
	}

	mutex_unlock(&mdev->graph_mutex);
	return far_end;
}
273
/*
 * isp_video_validate_pipeline - Validate formats across a whole pipeline
 * @pipe: the pipeline to validate
 *
 * Validate a pipeline by checking both ends of all links for format
 * discrepancies.
 *
 * Compute the minimum time per frame value as the maximum of time per frame
 * limits reported by every block in the pipeline.
 *
 * As a side effect, rebuild pipe->entities with one bit set per entity
 * traversed by the walk.
 *
 * Return 0 if all formats match, or -EPIPE if at least one link is found with
 * different formats on its two ends or if the pipeline doesn't start with a
 * video source (either a subdev with no input pad, or a non-subdev entity).
 * Return -ENOSPC if the sensor pixel clock exceeds the CCDC maximum rate.
 */
static int isp_video_validate_pipeline(struct isp_pipeline *pipe)
{
	struct isp_device *isp = pipe->output->isp;
	struct v4l2_subdev_format fmt_source;
	struct v4l2_subdev_format fmt_sink;
	struct media_pad *pad;
	struct v4l2_subdev *subdev;
	int ret;

	/* Start from the L3 interconnect clock rate as the pipeline limit. */
	pipe->max_rate = pipe->l3_ick;
	pipe->entities = 0;

	subdev = isp_video_remote_subdev(pipe->output, NULL);
	if (subdev == NULL)
		return -EPIPE;

	/* Walk the pipeline backwards, from the output video node towards
	 * the source, one link per iteration.
	 */
	while (1) {
		unsigned int shifter_link;

		/* Record the traversed entity in the pipeline mask. */
		pipe->entities |= 1U << subdev->entity.id;

		/* Retrieve the sink format */
		pad = &subdev->entity.pads[0];
		if (!(pad->flags & MEDIA_PAD_FL_SINK))
			break;

		fmt_sink.pad = pad->index;
		fmt_sink.which = V4L2_SUBDEV_FORMAT_ACTIVE;
		ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt_sink);
		if (ret < 0 && ret != -ENOIOCTLCMD)
			return -EPIPE;

		/* Update the maximum frame rate */
		if (subdev == &isp->isp_res.subdev)
			omap3isp_resizer_max_rate(&isp->isp_res,
						  &pipe->max_rate);

		/* Check ccdc maximum data rate when data comes from sensor
		 * TODO: Include ccdc rate in pipe->max_rate and compare the
		 * total pipe rate with the input data rate from sensor.
		 */
		if (subdev == &isp->isp_ccdc.subdev && pipe->input == NULL) {
			unsigned int rate = UINT_MAX;

			omap3isp_ccdc_max_rate(&isp->isp_ccdc, &rate);
			if (isp->isp_ccdc.vpcfg.pixelclk > rate)
				return -ENOSPC;
		}

		/* If sink pad is on CCDC, the link has the lane shifter
		 * in the middle of it. */
		shifter_link = subdev == &isp->isp_ccdc.subdev;

		/* Retrieve the source format. Return an error if no source
		 * entity can be found, and stop checking the pipeline if the
		 * source entity isn't a subdev.
		 */
		pad = media_entity_remote_source(pad);
		if (pad == NULL)
			return -EPIPE;

		if (media_entity_type(pad->entity) != MEDIA_ENT_T_V4L2_SUBDEV)
			break;

		subdev = media_entity_to_v4l2_subdev(pad->entity);

		fmt_source.pad = pad->index;
		fmt_source.which = V4L2_SUBDEV_FORMAT_ACTIVE;
		ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt_source);
		if (ret < 0 && ret != -ENOIOCTLCMD)
			return -EPIPE;

		/* Check if the two ends match */
		if (fmt_source.format.width != fmt_sink.format.width ||
		    fmt_source.format.height != fmt_sink.format.height)
			return -EPIPE;

		/* Across a shifter link the codes may differ, as long as the
		 * lane shifter can convert between them.
		 */
		if (shifter_link) {
			unsigned int parallel_shift = 0;
			if (isp->isp_ccdc.input == CCDC_INPUT_PARALLEL) {
				struct isp_parallel_platform_data *pdata =
					&((struct isp_v4l2_subdevs_group *)
					  subdev->host_priv)->bus.parallel;
				parallel_shift = pdata->data_lane_shift * 2;
			}
			if (!isp_video_is_shiftable(fmt_source.format.code,
						    fmt_sink.format.code,
						    parallel_shift))
				return -EPIPE;
		} else if (fmt_source.format.code != fmt_sink.format.code)
			return -EPIPE;
	}

	return 0;
}
380
381 static int
382 __isp_video_get_format(struct isp_video *video, struct v4l2_format *format)
383 {
384 struct v4l2_subdev_format fmt;
385 struct v4l2_subdev *subdev;
386 u32 pad;
387 int ret;
388
389 subdev = isp_video_remote_subdev(video, &pad);
390 if (subdev == NULL)
391 return -EINVAL;
392
393 mutex_lock(&video->mutex);
394
395 fmt.pad = pad;
396 fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
397 ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
398 if (ret == -ENOIOCTLCMD)
399 ret = -EINVAL;
400
401 mutex_unlock(&video->mutex);
402
403 if (ret)
404 return ret;
405
406 format->type = video->type;
407 return isp_video_mbus_to_pix(video, &fmt.format, &format->fmt.pix);
408 }
409
410 static int
411 isp_video_check_format(struct isp_video *video, struct isp_video_fh *vfh)
412 {
413 struct v4l2_format format;
414 int ret;
415
416 memcpy(&format, &vfh->format, sizeof(format));
417 ret = __isp_video_get_format(video, &format);
418 if (ret < 0)
419 return ret;
420
421 if (vfh->format.fmt.pix.pixelformat != format.fmt.pix.pixelformat ||
422 vfh->format.fmt.pix.height != format.fmt.pix.height ||
423 vfh->format.fmt.pix.width != format.fmt.pix.width ||
424 vfh->format.fmt.pix.bytesperline != format.fmt.pix.bytesperline ||
425 vfh->format.fmt.pix.sizeimage != format.fmt.pix.sizeimage)
426 return -EINVAL;
427
428 return ret;
429 }
430
431 /* -----------------------------------------------------------------------------
432 * IOMMU management
433 */
434
435 #define IOMMU_FLAG (IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_8)
436
/*
 * ispmmu_vmap - Wrapper for virtual memory mapping of a scatter gather list
 * @isp: Pointer to the OMAP3 ISP device
 * @sglist: Pointer to source scatter gather list to map
 * @sglen: Number of elements of the scatter-gather list
 *
 * Returns a resulting mapped device address by the ISP MMU, or -ENOMEM if
 * we ran out of memory. Failures must be detected with IS_ERR_VALUE() since
 * the error is encoded in the returned dma_addr_t.
 *
 * The sg_table allocated here is owned by the mapping; ispmmu_vunmap()
 * retrieves and frees it when the mapping is torn down.
 */
static dma_addr_t
ispmmu_vmap(struct isp_device *isp, const struct scatterlist *sglist, int sglen)
{
	struct sg_table *sgt;
	u32 da;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (sgt == NULL)
		return -ENOMEM;

	/* Cast away const: omap_iommu_vmap() takes a non-const table.
	 * NOTE(review): assumes the IOMMU layer doesn't modify the caller's
	 * scatterlist -- confirm against the iovmm implementation.
	 */
	sgt->sgl = (struct scatterlist *)sglist;
	sgt->nents = sglen;
	sgt->orig_nents = sglen;

	da = omap_iommu_vmap(isp->domain, isp->dev, 0, sgt, IOMMU_FLAG);
	if (IS_ERR_VALUE(da))
		kfree(sgt);	/* Mapping failed, nothing owns sgt anymore. */

	return da;
}
466
467 /*
468 * ispmmu_vunmap - Unmap a device address from the ISP MMU
469 * @dev: Device pointer specific to the OMAP3 ISP.
470 * @da: Device address generated from a ispmmu_vmap call.
471 */
472 static void ispmmu_vunmap(struct isp_device *isp, dma_addr_t da)
473 {
474 struct sg_table *sgt;
475
476 sgt = omap_iommu_vunmap(isp->domain, isp->dev, (u32)da);
477 kfree(sgt);
478 }
479
480 /* -----------------------------------------------------------------------------
481 * Video queue operations
482 */
483
484 static void isp_video_queue_prepare(struct isp_video_queue *queue,
485 unsigned int *nbuffers, unsigned int *size)
486 {
487 struct isp_video_fh *vfh =
488 container_of(queue, struct isp_video_fh, queue);
489 struct isp_video *video = vfh->video;
490
491 *size = vfh->format.fmt.pix.sizeimage;
492 if (*size == 0)
493 return;
494
495 *nbuffers = min(*nbuffers, video->capture_mem / PAGE_ALIGN(*size));
496 }
497
498 static void isp_video_buffer_cleanup(struct isp_video_buffer *buf)
499 {
500 struct isp_video_fh *vfh = isp_video_queue_to_isp_video_fh(buf->queue);
501 struct isp_buffer *buffer = to_isp_buffer(buf);
502 struct isp_video *video = vfh->video;
503
504 if (buffer->isp_addr) {
505 ispmmu_vunmap(video->isp, buffer->isp_addr);
506 buffer->isp_addr = 0;
507 }
508 }
509
510 static int isp_video_buffer_prepare(struct isp_video_buffer *buf)
511 {
512 struct isp_video_fh *vfh = isp_video_queue_to_isp_video_fh(buf->queue);
513 struct isp_buffer *buffer = to_isp_buffer(buf);
514 struct isp_video *video = vfh->video;
515 unsigned long addr;
516
517 addr = ispmmu_vmap(video->isp, buf->sglist, buf->sglen);
518 if (IS_ERR_VALUE(addr))
519 return -EIO;
520
521 if (!IS_ALIGNED(addr, 32)) {
522 dev_dbg(video->isp->dev, "Buffer address must be "
523 "aligned to 32 bytes boundary.\n");
524 ispmmu_vunmap(video->isp, buffer->isp_addr);
525 return -EINVAL;
526 }
527
528 buf->vbuf.bytesused = vfh->format.fmt.pix.sizeimage;
529 buffer->isp_addr = addr;
530 return 0;
531 }
532
/*
 * isp_video_buffer_queue - Add buffer to streaming queue
 * @buf: Video buffer
 *
 * In memory-to-memory mode, start streaming on the pipeline if buffers are
 * queued on both the input and the output, if the pipeline isn't already busy.
 * If the pipeline is busy, it will be restarted in the output module interrupt
 * handler.
 */
static void isp_video_buffer_queue(struct isp_video_buffer *buf)
{
	struct isp_video_fh *vfh = isp_video_queue_to_isp_video_fh(buf->queue);
	struct isp_buffer *buffer = to_isp_buffer(buf);
	struct isp_video *video = vfh->video;
	struct isp_pipeline *pipe = to_isp_pipeline(&video->video.entity);
	enum isp_pipeline_state state;
	unsigned long flags;
	unsigned int empty;
	unsigned int start;

	/* Only the first buffer added to an empty DMA queue needs to be
	 * handed to the hardware and recorded in the pipeline state.
	 */
	empty = list_empty(&video->dmaqueue);
	list_add_tail(&buffer->buffer.irqlist, &video->dmaqueue);

	if (empty) {
		/* A queued buffer satisfies the pipeline's output requirement
		 * on capture nodes and its input requirement on output nodes.
		 */
		if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
			state = ISP_PIPELINE_QUEUE_OUTPUT;
		else
			state = ISP_PIPELINE_QUEUE_INPUT;

		spin_lock_irqsave(&pipe->lock, flags);
		pipe->state |= state;
		video->ops->queue(video, buffer);
		video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_QUEUED;

		/* Decide whether to start under the lock, but perform the
		 * actual start after releasing it.
		 */
		start = isp_pipeline_ready(pipe);
		if (start)
			pipe->state |= ISP_PIPELINE_STREAM;
		spin_unlock_irqrestore(&pipe->lock, flags);

		if (start)
			omap3isp_pipeline_set_stream(pipe,
						ISP_PIPELINE_STREAM_SINGLESHOT);
	}
}
577
578 static const struct isp_video_queue_operations isp_video_queue_ops = {
579 .queue_prepare = &isp_video_queue_prepare,
580 .buffer_prepare = &isp_video_buffer_prepare,
581 .buffer_queue = &isp_video_buffer_queue,
582 .buffer_cleanup = &isp_video_buffer_cleanup,
583 };
584
/*
 * omap3isp_video_buffer_next - Complete the current buffer and return the next
 * @video: ISP video object
 *
 * Remove the current video buffer from the DMA queue and fill its timestamp,
 * field count and state fields before waking up its completion handler.
 *
 * For capture video nodes the buffer state is set to ISP_BUF_STATE_DONE if no
 * error has been flagged in the pipeline, or to ISP_BUF_STATE_ERROR otherwise.
 * For video output nodes the buffer state is always set to ISP_BUF_STATE_DONE.
 *
 * The DMA queue is expected to contain at least one buffer.
 *
 * Return a pointer to the next buffer in the DMA queue, or NULL if the queue is
 * empty.
 */
struct isp_buffer *omap3isp_video_buffer_next(struct isp_video *video)
{
	struct isp_pipeline *pipe = to_isp_pipeline(&video->video.entity);
	struct isp_video_queue *queue = video->queue;
	enum isp_pipeline_state state;
	struct isp_video_buffer *buf;
	unsigned long flags;
	struct timespec ts;

	/* Pop the completed buffer off the DMA queue under the queue lock. */
	spin_lock_irqsave(&queue->irqlock, flags);
	if (WARN_ON(list_empty(&video->dmaqueue))) {
		spin_unlock_irqrestore(&queue->irqlock, flags);
		return NULL;
	}

	buf = list_first_entry(&video->dmaqueue, struct isp_video_buffer,
			       irqlist);
	list_del(&buf->irqlist);
	spin_unlock_irqrestore(&queue->irqlock, flags);

	/* Timestamp with the monotonic clock. */
	ktime_get_ts(&ts);
	buf->vbuf.timestamp.tv_sec = ts.tv_sec;
	buf->vbuf.timestamp.tv_usec = ts.tv_nsec / NSEC_PER_USEC;

	/* Do frame number propagation only if this is the output video node.
	 * Frame number either comes from the CSI receivers or it gets
	 * incremented here if H3A is not active.
	 * Note: There is no guarantee that the output buffer will finish
	 * first, so the input number might lag behind by 1 in some cases.
	 */
	if (video == pipe->output && !pipe->do_propagation)
		buf->vbuf.sequence = atomic_inc_return(&pipe->frame_number);
	else
		buf->vbuf.sequence = atomic_read(&pipe->frame_number);

	/* Report pipeline errors to userspace on the capture device side. */
	if (queue->type == V4L2_BUF_TYPE_VIDEO_CAPTURE && pipe->error) {
		buf->state = ISP_BUF_STATE_ERROR;
		pipe->error = false;	/* Error is consumed by one buffer. */
	} else {
		buf->state = ISP_BUF_STATE_DONE;
	}

	/* Wake up any waiter blocked on this buffer's completion. */
	wake_up(&buf->wait);

	if (list_empty(&video->dmaqueue)) {
		/* No more buffers: clear the queue and stream bits for this
		 * direction and flag an underrun in continuous mode.
		 */
		if (queue->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
			state = ISP_PIPELINE_QUEUE_OUTPUT
			      | ISP_PIPELINE_STREAM;
		else
			state = ISP_PIPELINE_QUEUE_INPUT
			      | ISP_PIPELINE_STREAM;

		spin_lock_irqsave(&pipe->lock, flags);
		pipe->state &= ~state;
		if (video->pipe.stream_state == ISP_PIPELINE_STREAM_CONTINUOUS)
			video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_UNDERRUN;
		spin_unlock_irqrestore(&pipe->lock, flags);
		return NULL;
	}

	/* In memory-to-memory capture, the stream bit is cleared so the
	 * pipeline is restarted one frame at a time (single shot).
	 */
	if (queue->type == V4L2_BUF_TYPE_VIDEO_CAPTURE && pipe->input != NULL) {
		spin_lock_irqsave(&pipe->lock, flags);
		pipe->state &= ~ISP_PIPELINE_STREAM;
		spin_unlock_irqrestore(&pipe->lock, flags);
	}

	buf = list_first_entry(&video->dmaqueue, struct isp_video_buffer,
			       irqlist);
	buf->state = ISP_BUF_STATE_ACTIVE;
	return to_isp_buffer(buf);
}
673
674 /*
675 * omap3isp_video_resume - Perform resume operation on the buffers
676 * @video: ISP video object
677 * @continuous: Pipeline is in single shot mode if 0 or continuous mode otherwise
678 *
679 * This function is intended to be used on suspend/resume scenario. It
680 * requests video queue layer to discard buffers marked as DONE if it's in
681 * continuous mode and requests ISP modules to queue again the ACTIVE buffer
682 * if there's any.
683 */
684 void omap3isp_video_resume(struct isp_video *video, int continuous)
685 {
686 struct isp_buffer *buf = NULL;
687
688 if (continuous && video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
689 omap3isp_video_queue_discard_done(video->queue);
690
691 if (!list_empty(&video->dmaqueue)) {
692 buf = list_first_entry(&video->dmaqueue,
693 struct isp_buffer, buffer.irqlist);
694 video->ops->queue(video, buf);
695 video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_QUEUED;
696 } else {
697 if (continuous)
698 video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_UNDERRUN;
699 }
700 }
701
702 /* -----------------------------------------------------------------------------
703 * V4L2 ioctls
704 */
705
706 static int
707 isp_video_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
708 {
709 struct isp_video *video = video_drvdata(file);
710
711 strlcpy(cap->driver, ISP_VIDEO_DRIVER_NAME, sizeof(cap->driver));
712 strlcpy(cap->card, video->video.name, sizeof(cap->card));
713 strlcpy(cap->bus_info, "media", sizeof(cap->bus_info));
714
715 if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
716 cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
717 else
718 cap->capabilities = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING;
719
720 return 0;
721 }
722
723 static int
724 isp_video_get_format(struct file *file, void *fh, struct v4l2_format *format)
725 {
726 struct isp_video_fh *vfh = to_isp_video_fh(fh);
727 struct isp_video *video = video_drvdata(file);
728
729 if (format->type != video->type)
730 return -EINVAL;
731
732 mutex_lock(&video->mutex);
733 *format = vfh->format;
734 mutex_unlock(&video->mutex);
735
736 return 0;
737 }
738
739 static int
740 isp_video_set_format(struct file *file, void *fh, struct v4l2_format *format)
741 {
742 struct isp_video_fh *vfh = to_isp_video_fh(fh);
743 struct isp_video *video = video_drvdata(file);
744 struct v4l2_mbus_framefmt fmt;
745
746 if (format->type != video->type)
747 return -EINVAL;
748
749 mutex_lock(&video->mutex);
750
751 /* Fill the bytesperline and sizeimage fields by converting to media bus
752 * format and back to pixel format.
753 */
754 isp_video_pix_to_mbus(&format->fmt.pix, &fmt);
755 isp_video_mbus_to_pix(video, &fmt, &format->fmt.pix);
756
757 vfh->format = *format;
758
759 mutex_unlock(&video->mutex);
760 return 0;
761 }
762
763 static int
764 isp_video_try_format(struct file *file, void *fh, struct v4l2_format *format)
765 {
766 struct isp_video *video = video_drvdata(file);
767 struct v4l2_subdev_format fmt;
768 struct v4l2_subdev *subdev;
769 u32 pad;
770 int ret;
771
772 if (format->type != video->type)
773 return -EINVAL;
774
775 subdev = isp_video_remote_subdev(video, &pad);
776 if (subdev == NULL)
777 return -EINVAL;
778
779 isp_video_pix_to_mbus(&format->fmt.pix, &fmt.format);
780
781 fmt.pad = pad;
782 fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
783 ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
784 if (ret)
785 return ret == -ENOIOCTLCMD ? -EINVAL : ret;
786
787 isp_video_mbus_to_pix(video, &fmt.format, &format->fmt.pix);
788 return 0;
789 }
790
791 static int
792 isp_video_cropcap(struct file *file, void *fh, struct v4l2_cropcap *cropcap)
793 {
794 struct isp_video *video = video_drvdata(file);
795 struct v4l2_subdev *subdev;
796 int ret;
797
798 subdev = isp_video_remote_subdev(video, NULL);
799 if (subdev == NULL)
800 return -EINVAL;
801
802 mutex_lock(&video->mutex);
803 ret = v4l2_subdev_call(subdev, video, cropcap, cropcap);
804 mutex_unlock(&video->mutex);
805
806 return ret == -ENOIOCTLCMD ? -EINVAL : ret;
807 }
808
809 static int
810 isp_video_get_crop(struct file *file, void *fh, struct v4l2_crop *crop)
811 {
812 struct isp_video *video = video_drvdata(file);
813 struct v4l2_subdev_format format;
814 struct v4l2_subdev *subdev;
815 u32 pad;
816 int ret;
817
818 subdev = isp_video_remote_subdev(video, &pad);
819 if (subdev == NULL)
820 return -EINVAL;
821
822 /* Try the get crop operation first and fallback to get format if not
823 * implemented.
824 */
825 ret = v4l2_subdev_call(subdev, video, g_crop, crop);
826 if (ret != -ENOIOCTLCMD)
827 return ret;
828
829 format.pad = pad;
830 format.which = V4L2_SUBDEV_FORMAT_ACTIVE;
831 ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &format);
832 if (ret < 0)
833 return ret == -ENOIOCTLCMD ? -EINVAL : ret;
834
835 crop->c.left = 0;
836 crop->c.top = 0;
837 crop->c.width = format.format.width;
838 crop->c.height = format.format.height;
839
840 return 0;
841 }
842
843 static int
844 isp_video_set_crop(struct file *file, void *fh, struct v4l2_crop *crop)
845 {
846 struct isp_video *video = video_drvdata(file);
847 struct v4l2_subdev *subdev;
848 int ret;
849
850 subdev = isp_video_remote_subdev(video, NULL);
851 if (subdev == NULL)
852 return -EINVAL;
853
854 mutex_lock(&video->mutex);
855 ret = v4l2_subdev_call(subdev, video, s_crop, crop);
856 mutex_unlock(&video->mutex);
857
858 return ret == -ENOIOCTLCMD ? -EINVAL : ret;
859 }
860
861 static int
862 isp_video_get_param(struct file *file, void *fh, struct v4l2_streamparm *a)
863 {
864 struct isp_video_fh *vfh = to_isp_video_fh(fh);
865 struct isp_video *video = video_drvdata(file);
866
867 if (video->type != V4L2_BUF_TYPE_VIDEO_OUTPUT ||
868 video->type != a->type)
869 return -EINVAL;
870
871 memset(a, 0, sizeof(*a));
872 a->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
873 a->parm.output.capability = V4L2_CAP_TIMEPERFRAME;
874 a->parm.output.timeperframe = vfh->timeperframe;
875
876 return 0;
877 }
878
879 static int
880 isp_video_set_param(struct file *file, void *fh, struct v4l2_streamparm *a)
881 {
882 struct isp_video_fh *vfh = to_isp_video_fh(fh);
883 struct isp_video *video = video_drvdata(file);
884
885 if (video->type != V4L2_BUF_TYPE_VIDEO_OUTPUT ||
886 video->type != a->type)
887 return -EINVAL;
888
889 if (a->parm.output.timeperframe.denominator == 0)
890 a->parm.output.timeperframe.denominator = 1;
891
892 vfh->timeperframe = a->parm.output.timeperframe;
893
894 return 0;
895 }
896
897 static int
898 isp_video_reqbufs(struct file *file, void *fh, struct v4l2_requestbuffers *rb)
899 {
900 struct isp_video_fh *vfh = to_isp_video_fh(fh);
901
902 return omap3isp_video_queue_reqbufs(&vfh->queue, rb);
903 }
904
905 static int
906 isp_video_querybuf(struct file *file, void *fh, struct v4l2_buffer *b)
907 {
908 struct isp_video_fh *vfh = to_isp_video_fh(fh);
909
910 return omap3isp_video_queue_querybuf(&vfh->queue, b);
911 }
912
913 static int
914 isp_video_qbuf(struct file *file, void *fh, struct v4l2_buffer *b)
915 {
916 struct isp_video_fh *vfh = to_isp_video_fh(fh);
917
918 return omap3isp_video_queue_qbuf(&vfh->queue, b);
919 }
920
921 static int
922 isp_video_dqbuf(struct file *file, void *fh, struct v4l2_buffer *b)
923 {
924 struct isp_video_fh *vfh = to_isp_video_fh(fh);
925
926 return omap3isp_video_queue_dqbuf(&vfh->queue, b,
927 file->f_flags & O_NONBLOCK);
928 }
929
930 /*
931 * Stream management
932 *
933 * Every ISP pipeline has a single input and a single output. The input can be
934 * either a sensor or a video node. The output is always a video node.
935 *
936 * As every pipeline has an output video node, the ISP video objects at the
937 * pipeline output stores the pipeline state. It tracks the streaming state of
938 * both the input and output, as well as the availability of buffers.
939 *
940 * In sensor-to-memory mode, frames are always available at the pipeline input.
941 * Starting the sensor usually requires I2C transfers and must be done in
942 * interruptible context. The pipeline is started and stopped synchronously
943 * to the stream on/off commands. All modules in the pipeline will get their
944 * subdev set stream handler called. The module at the end of the pipeline must
945 * delay starting the hardware until buffers are available at its output.
946 *
947 * In memory-to-memory mode, starting/stopping the stream requires
948 * synchronization between the input and output. ISP modules can't be stopped
949 * in the middle of a frame, and at least some of the modules seem to become
950 * busy as soon as they're started, even if they don't receive a frame start
951 * event. For that reason frames need to be processed in single-shot mode. The
952 * driver needs to wait until a frame is completely processed and written to
953 * memory before restarting the pipeline for the next frame. Pipelined
954 * processing might be possible but requires more testing.
955 *
956 * Stream start must be delayed until buffers are available at both the input
957 * and output. The pipeline must be started in the videobuf queue callback with
958 * the buffers queue spinlock held. The modules subdev set stream operation must
959 * not sleep.
960 */
static int
isp_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
{
	struct isp_video_fh *vfh = to_isp_video_fh(fh);
	struct isp_video *video = video_drvdata(file);
	enum isp_pipeline_state state;
	struct isp_pipeline *pipe;
	struct isp_video *far_end;
	unsigned long flags;
	int ret;

	/* The buffer type must match the direction of this video node. */
	if (type != video->type)
		return -EINVAL;

	mutex_lock(&video->stream_lock);

	/* Only one stream per video node at a time. */
	if (video->streaming) {
		mutex_unlock(&video->stream_lock);
		return -EBUSY;
	}

	/* Start streaming on the pipeline. No link touching an entity in the
	 * pipeline can be activated or deactivated once streaming is started.
	 *
	 * Reuse an already-running pipeline object if the entity is part of
	 * one (e.g. the far-end node started first), otherwise use this
	 * node's embedded pipeline.
	 */
	pipe = video->video.entity.pipe
	     ? to_isp_pipeline(&video->video.entity) : &video->pipe;
	media_entity_pipeline_start(&video->video.entity, &pipe->pipe);

	/* Verify that the currently configured format matches the output of
	 * the connected subdev.
	 */
	ret = isp_video_check_format(video, vfh);
	if (ret < 0)
		goto error;

	/* On success isp_video_check_format() returns the bytesperline
	 * padding; cache it along with the full line length.
	 */
	video->bpl_padding = ret;
	video->bpl_value = vfh->format.fmt.pix.bytesperline;

	/* Find the ISP video node connected at the far end of the pipeline and
	 * update the pipeline.
	 */
	far_end = isp_video_far_end(video);

	if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
		/* Capture node: this node is the pipeline output. A NULL far
		 * end is allowed (sensor-to-memory mode, no memory input).
		 */
		state = ISP_PIPELINE_STREAM_OUTPUT | ISP_PIPELINE_IDLE_OUTPUT;
		pipe->input = far_end;
		pipe->output = video;
	} else {
		/* Output node: memory-to-memory mode requires a capture node
		 * at the far end of the pipeline.
		 */
		if (far_end == NULL) {
			ret = -EPIPE;
			goto error;
		}

		state = ISP_PIPELINE_STREAM_INPUT | ISP_PIPELINE_IDLE_INPUT;
		pipe->input = video;
		pipe->output = far_end;
	}

	/* Apply platform-specific constraints for the duration of the
	 * stream, if the platform provides a hook for it.
	 */
	if (video->isp->pdata->set_constraints)
		video->isp->pdata->set_constraints(video->isp, true);
	pipe->l3_ick = clk_get_rate(video->isp->clock[ISP_CLK_L3_ICK]);

	/* Validate the pipeline and update its state. */
	ret = isp_video_validate_pipeline(pipe);
	if (ret < 0)
		goto error;

	/* Clear any error left over from a previous run so a past failure
	 * doesn't prevent the pipeline from being restarted.
	 */
	pipe->error = false;

	spin_lock_irqsave(&pipe->lock, flags);
	pipe->state &= ~ISP_PIPELINE_STREAM;
	pipe->state |= state;
	spin_unlock_irqrestore(&pipe->lock, flags);

	/* Set the maximum time per frame as the value requested by userspace.
	 * This is a soft limit that can be overridden if the hardware doesn't
	 * support the request limit.
	 */
	if (video->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
		pipe->max_timeperframe = vfh->timeperframe;

	video->queue = &vfh->queue;
	INIT_LIST_HEAD(&video->dmaqueue);
	atomic_set(&pipe->frame_number, -1);

	ret = omap3isp_video_queue_streamon(&vfh->queue);
	if (ret < 0)
		goto error;

	/* In sensor-to-memory mode, the stream can be started synchronously
	 * to the stream on command. In memory-to-memory mode, it will be
	 * started when buffers are queued on both the input and output.
	 */
	if (pipe->input == NULL) {
		ret = omap3isp_pipeline_set_stream(pipe,
					ISP_PIPELINE_STREAM_CONTINUOUS);
		if (ret < 0)
			goto error;
		/* Flag an underrun if no buffer has been queued yet, so the
		 * first queued buffer kicks the DMA engine.
		 */
		spin_lock_irqsave(&video->queue->irqlock, flags);
		if (list_empty(&video->dmaqueue))
			video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_UNDERRUN;
		spin_unlock_irqrestore(&video->queue->irqlock, flags);
	}

error:
	/* This label is also reached on success, with ret >= 0. */
	if (ret < 0) {
		omap3isp_video_queue_streamoff(&vfh->queue);
		if (video->isp->pdata->set_constraints)
			video->isp->pdata->set_constraints(video->isp, false);
		media_entity_pipeline_stop(&video->video.entity);
		/* The DMA queue must be emptied here, otherwise CCDC interrupts
		 * that will get triggered the next time the CCDC is powered up
		 * will try to access buffers that might have been freed but
		 * still present in the DMA queue. This can easily get triggered
		 * if the above omap3isp_pipeline_set_stream() call fails on a
		 * system with a free-running sensor.
		 */
		INIT_LIST_HEAD(&video->dmaqueue);
		video->queue = NULL;
	}

	if (!ret)
		video->streaming = 1;

	mutex_unlock(&video->stream_lock);
	return ret;
}
1088
/*
 * Stop streaming on the video node. Returns -EINVAL on a buffer type
 * mismatch and 0 otherwise; stopping a node that isn't streaming is a
 * no-op.
 */
static int
isp_video_streamoff(struct file *file, void *fh, enum v4l2_buf_type type)
{
	struct isp_video_fh *vfh = to_isp_video_fh(fh);
	struct isp_video *video = video_drvdata(file);
	struct isp_pipeline *pipe = to_isp_pipeline(&video->video.entity);
	enum isp_pipeline_state state;
	unsigned int streaming;
	unsigned long flags;

	if (type != video->type)
		return -EINVAL;

	mutex_lock(&video->stream_lock);

	/* If the queue isn't streaming there is nothing to stop. Sample the
	 * flag under the queue lock to get a consistent value.
	 */
	mutex_lock(&vfh->queue.lock);
	streaming = vfh->queue.streaming;
	mutex_unlock(&vfh->queue.lock);

	if (!streaming)
		goto done;

	/* Update the pipeline state: clear the stream and buffer-queue state
	 * bits corresponding to this node's direction.
	 */
	if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
		state = ISP_PIPELINE_STREAM_OUTPUT
		      | ISP_PIPELINE_QUEUE_OUTPUT;
	else
		state = ISP_PIPELINE_STREAM_INPUT
		      | ISP_PIPELINE_QUEUE_INPUT;

	spin_lock_irqsave(&pipe->lock, flags);
	pipe->state &= ~state;
	spin_unlock_irqrestore(&pipe->lock, flags);

	/* Stop the stream. */
	omap3isp_pipeline_set_stream(pipe, ISP_PIPELINE_STREAM_STOPPED);
	omap3isp_video_queue_streamoff(&vfh->queue);
	video->queue = NULL;
	video->streaming = 0;

	/* Lift the platform constraints and release the media pipeline. */
	if (video->isp->pdata->set_constraints)
		video->isp->pdata->set_constraints(video->isp, false);
	media_entity_pipeline_stop(&video->video.entity);

done:
	mutex_unlock(&video->stream_lock);
	return 0;
}
1138
1139 static int
1140 isp_video_enum_input(struct file *file, void *fh, struct v4l2_input *input)
1141 {
1142 if (input->index > 0)
1143 return -EINVAL;
1144
1145 strlcpy(input->name, "camera", sizeof(input->name));
1146 input->type = V4L2_INPUT_TYPE_CAMERA;
1147
1148 return 0;
1149 }
1150
static int
isp_video_g_input(struct file *file, void *fh, unsigned int *input)
{
	/* Input 0 is the only input, report it unconditionally. */
	*input = 0;
	return 0;
}
1158
static int
isp_video_s_input(struct file *file, void *fh, unsigned int input)
{
	/* Only input 0 can be selected. */
	if (input != 0)
		return -EINVAL;

	return 0;
}
1164
/* V4L2 ioctl operations shared by all ISP video nodes. The capture and
 * output variants of the format ioctls map to the same handlers.
 */
static const struct v4l2_ioctl_ops isp_video_ioctl_ops = {
	.vidioc_querycap = isp_video_querycap,
	.vidioc_g_fmt_vid_cap = isp_video_get_format,
	.vidioc_s_fmt_vid_cap = isp_video_set_format,
	.vidioc_try_fmt_vid_cap = isp_video_try_format,
	.vidioc_g_fmt_vid_out = isp_video_get_format,
	.vidioc_s_fmt_vid_out = isp_video_set_format,
	.vidioc_try_fmt_vid_out = isp_video_try_format,
	.vidioc_cropcap = isp_video_cropcap,
	.vidioc_g_crop = isp_video_get_crop,
	.vidioc_s_crop = isp_video_set_crop,
	.vidioc_g_parm = isp_video_get_param,
	.vidioc_s_parm = isp_video_set_param,
	.vidioc_reqbufs = isp_video_reqbufs,
	.vidioc_querybuf = isp_video_querybuf,
	.vidioc_qbuf = isp_video_qbuf,
	.vidioc_dqbuf = isp_video_dqbuf,
	.vidioc_streamon = isp_video_streamon,
	.vidioc_streamoff = isp_video_streamoff,
	.vidioc_enum_input = isp_video_enum_input,
	.vidioc_g_input = isp_video_g_input,
	.vidioc_s_input = isp_video_s_input,
};
1188
1189 /* -----------------------------------------------------------------------------
1190 * V4L2 file operations
1191 */
1192
/*
 * Open a video device node. Allocates and registers a per-file handle,
 * acquires the ISP device and a pipeline power management reference, and
 * initializes the handle's buffer queue and default format.
 */
static int isp_video_open(struct file *file)
{
	struct isp_video *video = video_drvdata(file);
	struct isp_video_fh *handle;
	int ret = 0;

	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
	if (handle == NULL)
		return -ENOMEM;

	v4l2_fh_init(&handle->vfh, &video->video);
	v4l2_fh_add(&handle->vfh);

	/* If this is the first user, initialise the pipeline.
	 * omap3isp_get() returns NULL on failure.
	 */
	if (omap3isp_get(video->isp) == NULL) {
		ret = -EBUSY;
		goto done;
	}

	/* Mark the pipeline as in use for power management purposes. */
	ret = omap3isp_pipeline_pm_use(&video->video.entity, 1);
	if (ret < 0) {
		omap3isp_put(video->isp);
		goto done;
	}

	omap3isp_video_queue_init(&handle->queue, video->type,
				  &isp_video_queue_ops, video->isp->dev,
				  sizeof(struct isp_buffer));

	/* Start from a zeroed format of this node's type. The non-zero
	 * timeperframe denominator keeps the fraction valid.
	 */
	memset(&handle->format, 0, sizeof(handle->format));
	handle->format.type = video->type;
	handle->timeperframe.denominator = 1;

	handle->video = video;
	file->private_data = &handle->vfh;

done:
	if (ret < 0) {
		/* NOTE(review): v4l2_fh_init() is not balanced by a
		 * v4l2_fh_exit() call on this error path - confirm whether
		 * one is required by the v4l2_fh framework.
		 */
		v4l2_fh_del(&handle->vfh);
		kfree(handle);
	}

	return ret;
}
1237
/*
 * Release a video device node: stop streaming, clean up the buffer queue,
 * drop the pipeline power management and ISP references, and free the
 * per-file handle.
 */
static int isp_video_release(struct file *file)
{
	struct isp_video *video = video_drvdata(file);
	struct v4l2_fh *vfh = file->private_data;
	struct isp_video_fh *handle = to_isp_video_fh(vfh);

	/* Disable streaming and free the buffers queue resources. */
	isp_video_streamoff(file, vfh, video->type);

	mutex_lock(&handle->queue.lock);
	omap3isp_video_queue_cleanup(&handle->queue);
	mutex_unlock(&handle->queue.lock);

	/* Drop the power management reference taken in isp_video_open(). */
	omap3isp_pipeline_pm_use(&video->video.entity, 0);

	/* Release the file handle. */
	v4l2_fh_del(vfh);
	kfree(handle);
	file->private_data = NULL;

	omap3isp_put(video->isp);

	return 0;
}
1262
1263 static unsigned int isp_video_poll(struct file *file, poll_table *wait)
1264 {
1265 struct isp_video_fh *vfh = to_isp_video_fh(file->private_data);
1266 struct isp_video_queue *queue = &vfh->queue;
1267
1268 return omap3isp_video_queue_poll(queue, file, wait);
1269 }
1270
1271 static int isp_video_mmap(struct file *file, struct vm_area_struct *vma)
1272 {
1273 struct isp_video_fh *vfh = to_isp_video_fh(file->private_data);
1274
1275 return omap3isp_video_queue_mmap(&vfh->queue, vma);
1276 }
1277
1278 static struct v4l2_file_operations isp_video_fops = {
1279 .owner = THIS_MODULE,
1280 .unlocked_ioctl = video_ioctl2,
1281 .open = isp_video_open,
1282 .release = isp_video_release,
1283 .poll = isp_video_poll,
1284 .mmap = isp_video_mmap,
1285 };
1286
1287 /* -----------------------------------------------------------------------------
1288 * ISP video core
1289 */
1290
/* Empty operations table used as a fallback when a video node registers no
 * isp_video_operations of its own (see omap3isp_video_init()).
 */
static const struct isp_video_operations isp_video_dummy_ops = {
};
1293
/*
 * omap3isp_video_init - Initialize an ISP video node
 * @video: ISP video node; video->type (and optionally video->ops) must be
 *         set by the caller before this function is called
 * @name: name used to build the video device name
 *
 * Initialize the media pad, locks and embedded video device of @video.
 * Returns 0 on success or a negative error code on failure.
 */
int omap3isp_video_init(struct isp_video *video, const char *name)
{
	const char *direction;
	int ret;

	switch (video->type) {
	case V4L2_BUF_TYPE_VIDEO_CAPTURE:
		/* A capture node receives data from the ISP, so its single
		 * pad is a sink in the media graph.
		 */
		direction = "output";
		video->pad.flags = MEDIA_PAD_FL_SINK;
		break;
	case V4L2_BUF_TYPE_VIDEO_OUTPUT:
		/* An output node feeds data into the ISP: source pad. */
		direction = "input";
		video->pad.flags = MEDIA_PAD_FL_SOURCE;
		break;

	default:
		return -EINVAL;
	}

	ret = media_entity_init(&video->video.entity, 1, &video->pad, 0);
	if (ret < 0)
		return ret;

	mutex_init(&video->mutex);
	atomic_set(&video->active, 0);

	spin_lock_init(&video->pipe.lock);
	mutex_init(&video->stream_lock);

	/* Initialize the video device. */
	if (video->ops == NULL)
		video->ops = &isp_video_dummy_ops;

	video->video.fops = &isp_video_fops;
	snprintf(video->video.name, sizeof(video->video.name),
		 "OMAP3 ISP %s %s", name, direction);
	video->video.vfl_type = VFL_TYPE_GRABBER;
	video->video.release = video_device_release_empty;
	video->video.ioctl_ops = &isp_video_ioctl_ops;
	video->pipe.stream_state = ISP_PIPELINE_STREAM_STOPPED;

	video_set_drvdata(&video->video, video);

	return 0;
}
1339
/*
 * omap3isp_video_cleanup - Release resources allocated by omap3isp_video_init()
 * @video: ISP video node
 */
void omap3isp_video_cleanup(struct isp_video *video)
{
	media_entity_cleanup(&video->video.entity);
	mutex_destroy(&video->stream_lock);
	mutex_destroy(&video->mutex);
}
1346
1347 int omap3isp_video_register(struct isp_video *video, struct v4l2_device *vdev)
1348 {
1349 int ret;
1350
1351 video->video.v4l2_dev = vdev;
1352
1353 ret = video_register_device(&video->video, VFL_TYPE_GRABBER, -1);
1354 if (ret < 0)
1355 printk(KERN_ERR "%s: could not register video device (%d)\n",
1356 __func__, ret);
1357
1358 return ret;
1359 }
1360
1361 void omap3isp_video_unregister(struct isp_video *video)
1362 {
1363 if (video_is_registered(&video->video))
1364 video_unregister_device(&video->video);
1365 }