/*
 * ispvideo.c
 *
 * TI OMAP3 ISP - Generic video node
 *
 * Copyright (C) 2009-2010 Nokia Corporation
 *
 * Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
 *	     Sakari Ailus <sakari.ailus@iki.fi>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 */
25
26 #include <asm/cacheflush.h>
27 #include <linux/clk.h>
28 #include <linux/mm.h>
29 #include <linux/module.h>
30 #include <linux/omap-iommu.h>
31 #include <linux/pagemap.h>
32 #include <linux/scatterlist.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <linux/vmalloc.h>
36 #include <media/v4l2-dev.h>
37 #include <media/v4l2-ioctl.h>
38
39 #include "ispvideo.h"
40 #include "isp.h"
41
42
/* -----------------------------------------------------------------------------
 * Helper functions
 */

/*
 * NOTE: When adding a new media bus code, always add the corresponding
 * in-memory format to the table below.
 */
static struct isp_format_info formats[] = {
	{ V4L2_MBUS_FMT_Y8_1X8, V4L2_MBUS_FMT_Y8_1X8,
	  V4L2_MBUS_FMT_Y8_1X8, V4L2_MBUS_FMT_Y8_1X8,
	  V4L2_PIX_FMT_GREY, 8, 1, },
	{ V4L2_MBUS_FMT_Y10_1X10, V4L2_MBUS_FMT_Y10_1X10,
	  V4L2_MBUS_FMT_Y10_1X10, V4L2_MBUS_FMT_Y8_1X8,
	  V4L2_PIX_FMT_Y10, 10, 2, },
	{ V4L2_MBUS_FMT_Y12_1X12, V4L2_MBUS_FMT_Y10_1X10,
	  V4L2_MBUS_FMT_Y12_1X12, V4L2_MBUS_FMT_Y8_1X8,
	  V4L2_PIX_FMT_Y12, 12, 2, },
	{ V4L2_MBUS_FMT_SBGGR8_1X8, V4L2_MBUS_FMT_SBGGR8_1X8,
	  V4L2_MBUS_FMT_SBGGR8_1X8, V4L2_MBUS_FMT_SBGGR8_1X8,
	  V4L2_PIX_FMT_SBGGR8, 8, 1, },
	{ V4L2_MBUS_FMT_SGBRG8_1X8, V4L2_MBUS_FMT_SGBRG8_1X8,
	  V4L2_MBUS_FMT_SGBRG8_1X8, V4L2_MBUS_FMT_SGBRG8_1X8,
	  V4L2_PIX_FMT_SGBRG8, 8, 1, },
	{ V4L2_MBUS_FMT_SGRBG8_1X8, V4L2_MBUS_FMT_SGRBG8_1X8,
	  V4L2_MBUS_FMT_SGRBG8_1X8, V4L2_MBUS_FMT_SGRBG8_1X8,
	  V4L2_PIX_FMT_SGRBG8, 8, 1, },
	{ V4L2_MBUS_FMT_SRGGB8_1X8, V4L2_MBUS_FMT_SRGGB8_1X8,
	  V4L2_MBUS_FMT_SRGGB8_1X8, V4L2_MBUS_FMT_SRGGB8_1X8,
	  V4L2_PIX_FMT_SRGGB8, 8, 1, },
	{ V4L2_MBUS_FMT_SBGGR10_DPCM8_1X8, V4L2_MBUS_FMT_SBGGR10_DPCM8_1X8,
	  V4L2_MBUS_FMT_SBGGR10_1X10, 0,
	  V4L2_PIX_FMT_SBGGR10DPCM8, 8, 1, },
	{ V4L2_MBUS_FMT_SGBRG10_DPCM8_1X8, V4L2_MBUS_FMT_SGBRG10_DPCM8_1X8,
	  V4L2_MBUS_FMT_SGBRG10_1X10, 0,
	  V4L2_PIX_FMT_SGBRG10DPCM8, 8, 1, },
	{ V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8, V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8,
	  V4L2_MBUS_FMT_SGRBG10_1X10, 0,
	  V4L2_PIX_FMT_SGRBG10DPCM8, 8, 1, },
	{ V4L2_MBUS_FMT_SRGGB10_DPCM8_1X8, V4L2_MBUS_FMT_SRGGB10_DPCM8_1X8,
	  V4L2_MBUS_FMT_SRGGB10_1X10, 0,
	  V4L2_PIX_FMT_SRGGB10DPCM8, 8, 1, },
	{ V4L2_MBUS_FMT_SBGGR10_1X10, V4L2_MBUS_FMT_SBGGR10_1X10,
	  V4L2_MBUS_FMT_SBGGR10_1X10, V4L2_MBUS_FMT_SBGGR8_1X8,
	  V4L2_PIX_FMT_SBGGR10, 10, 2, },
	{ V4L2_MBUS_FMT_SGBRG10_1X10, V4L2_MBUS_FMT_SGBRG10_1X10,
	  V4L2_MBUS_FMT_SGBRG10_1X10, V4L2_MBUS_FMT_SGBRG8_1X8,
	  V4L2_PIX_FMT_SGBRG10, 10, 2, },
	{ V4L2_MBUS_FMT_SGRBG10_1X10, V4L2_MBUS_FMT_SGRBG10_1X10,
	  V4L2_MBUS_FMT_SGRBG10_1X10, V4L2_MBUS_FMT_SGRBG8_1X8,
	  V4L2_PIX_FMT_SGRBG10, 10, 2, },
	{ V4L2_MBUS_FMT_SRGGB10_1X10, V4L2_MBUS_FMT_SRGGB10_1X10,
	  V4L2_MBUS_FMT_SRGGB10_1X10, V4L2_MBUS_FMT_SRGGB8_1X8,
	  V4L2_PIX_FMT_SRGGB10, 10, 2, },
	{ V4L2_MBUS_FMT_SBGGR12_1X12, V4L2_MBUS_FMT_SBGGR10_1X10,
	  V4L2_MBUS_FMT_SBGGR12_1X12, V4L2_MBUS_FMT_SBGGR8_1X8,
	  V4L2_PIX_FMT_SBGGR12, 12, 2, },
	{ V4L2_MBUS_FMT_SGBRG12_1X12, V4L2_MBUS_FMT_SGBRG10_1X10,
	  V4L2_MBUS_FMT_SGBRG12_1X12, V4L2_MBUS_FMT_SGBRG8_1X8,
	  V4L2_PIX_FMT_SGBRG12, 12, 2, },
	{ V4L2_MBUS_FMT_SGRBG12_1X12, V4L2_MBUS_FMT_SGRBG10_1X10,
	  V4L2_MBUS_FMT_SGRBG12_1X12, V4L2_MBUS_FMT_SGRBG8_1X8,
	  V4L2_PIX_FMT_SGRBG12, 12, 2, },
	{ V4L2_MBUS_FMT_SRGGB12_1X12, V4L2_MBUS_FMT_SRGGB10_1X10,
	  V4L2_MBUS_FMT_SRGGB12_1X12, V4L2_MBUS_FMT_SRGGB8_1X8,
	  V4L2_PIX_FMT_SRGGB12, 12, 2, },
	{ V4L2_MBUS_FMT_UYVY8_1X16, V4L2_MBUS_FMT_UYVY8_1X16,
	  V4L2_MBUS_FMT_UYVY8_1X16, 0,
	  V4L2_PIX_FMT_UYVY, 16, 2, },
	{ V4L2_MBUS_FMT_YUYV8_1X16, V4L2_MBUS_FMT_YUYV8_1X16,
	  V4L2_MBUS_FMT_YUYV8_1X16, 0,
	  V4L2_PIX_FMT_YUYV, 16, 2, },
	{ V4L2_MBUS_FMT_UYVY8_2X8, V4L2_MBUS_FMT_UYVY8_2X8,
	  V4L2_MBUS_FMT_UYVY8_2X8, 0,
	  V4L2_PIX_FMT_UYVY, 8, 2, },
	{ V4L2_MBUS_FMT_YUYV8_2X8, V4L2_MBUS_FMT_YUYV8_2X8,
	  V4L2_MBUS_FMT_YUYV8_2X8, 0,
	  V4L2_PIX_FMT_YUYV, 8, 2, },
	/* Empty entry to catch the unsupported pixel code (0) used by the CCDC
	 * module and avoid NULL pointer dereferences.
	 */
	{ 0, }
};

const struct isp_format_info *
omap3isp_video_format_info(enum v4l2_mbus_pixelcode code)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(formats); ++i) {
		if (formats[i].code == code)
			return &formats[i];
	}

	return NULL;
}
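
/*
 * Illustrative note: each formats[] entry ties a media bus code to the
 * in-memory pixel format it is stored as, along with the sample width in bits
 * and the number of bytes one pixel occupies in memory. The
 * V4L2_MBUS_FMT_SGRBG10_1X10 entry, for instance, maps to V4L2_PIX_FMT_SGRBG10
 * with 10-bit samples stored in 2 bytes per pixel. A caller would typically
 * look an entry up as in the sketch below, where mbus_code is a hypothetical
 * local variable and the error handling is only an example:
 *
 *	const struct isp_format_info *info;
 *
 *	info = omap3isp_video_format_info(mbus_code);
 *	if (info == NULL)
 *		return -EINVAL;
 */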

/*
 * isp_video_mbus_to_pix - Convert v4l2_mbus_framefmt to v4l2_pix_format
 * @video: ISP video instance
 * @mbus: v4l2_mbus_framefmt format (input)
 * @pix: v4l2_pix_format format (output)
 *
 * Fill the output pix structure with information from the input mbus format.
 * The bytesperline and sizeimage fields are computed from the requested bytes
 * per line value in the pix format and information from the video instance.
 *
 * Return the number of padding bytes at end of line.
 */
static unsigned int isp_video_mbus_to_pix(const struct isp_video *video,
					  const struct v4l2_mbus_framefmt *mbus,
					  struct v4l2_pix_format *pix)
{
	unsigned int bpl = pix->bytesperline;
	unsigned int min_bpl;
	unsigned int i;

	memset(pix, 0, sizeof(*pix));
	pix->width = mbus->width;
	pix->height = mbus->height;

	for (i = 0; i < ARRAY_SIZE(formats); ++i) {
		if (formats[i].code == mbus->code)
			break;
	}

	if (WARN_ON(i == ARRAY_SIZE(formats)))
		return 0;

	min_bpl = pix->width * formats[i].bpp;

	/* Clamp the requested bytes per line value. If the maximum bytes per
	 * line value is zero, the module doesn't support user configurable
	 * line sizes. Override the requested value with the minimum in that
	 * case.
	 */
	if (video->bpl_max)
		bpl = clamp(bpl, min_bpl, video->bpl_max);
	else
		bpl = min_bpl;

	if (!video->bpl_zero_padding || bpl != min_bpl)
		bpl = ALIGN(bpl, video->bpl_alignment);

	pix->pixelformat = formats[i].pixelformat;
	pix->bytesperline = bpl;
	pix->sizeimage = pix->bytesperline * pix->height;
	pix->colorspace = mbus->colorspace;
	pix->field = mbus->field;

	return bpl - min_bpl;
}
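
/*
 * Worked example (hypothetical numbers): for a 2048x1536 frame in
 * V4L2_MBUS_FMT_SGRBG10_1X10, bpp is 2, so min_bpl is 2048 * 2 = 4096 bytes.
 * A requested bytesperline of 4000 is raised to 4096; a request of 4200 is
 * kept (assuming it is below bpl_max) and then rounded up to the video node's
 * bpl_alignment, and sizeimage becomes bytesperline * 1536. The value returned
 * to the caller is the per-line padding, bytesperline - min_bpl.
 */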

static void isp_video_pix_to_mbus(const struct v4l2_pix_format *pix,
				  struct v4l2_mbus_framefmt *mbus)
{
	unsigned int i;

	memset(mbus, 0, sizeof(*mbus));
	mbus->width = pix->width;
	mbus->height = pix->height;

	/* Skip the last format in the loop so that it will be selected if no
	 * match is found.
	 */
	for (i = 0; i < ARRAY_SIZE(formats) - 1; ++i) {
		if (formats[i].pixelformat == pix->pixelformat)
			break;
	}

	mbus->code = formats[i].code;
	mbus->colorspace = pix->colorspace;
	mbus->field = pix->field;
}

static struct v4l2_subdev *
isp_video_remote_subdev(struct isp_video *video, u32 *pad)
{
	struct media_pad *remote;

	remote = media_entity_remote_pad(&video->pad);

	if (remote == NULL ||
	    media_entity_type(remote->entity) != MEDIA_ENT_T_V4L2_SUBDEV)
		return NULL;

	if (pad)
		*pad = remote->index;

	return media_entity_to_v4l2_subdev(remote->entity);
}

/*
 * Walk the media graph starting at the video node, record every entity that
 * belongs to the pipeline and locate the ISP video node at the far end, if
 * any. Fill the pipeline input and output accordingly.
 */
static int isp_video_get_graph_data(struct isp_video *video,
				    struct isp_pipeline *pipe)
{
	struct media_entity_graph graph;
	struct media_entity *entity = &video->video.entity;
	struct media_device *mdev = entity->parent;
	struct isp_video *far_end = NULL;

	mutex_lock(&mdev->graph_mutex);
	media_entity_graph_walk_start(&graph, entity);

	while ((entity = media_entity_graph_walk_next(&graph))) {
		struct isp_video *__video;

		pipe->entities |= 1 << entity->id;

		if (far_end != NULL)
			continue;

		if (entity == &video->video.entity)
			continue;

		if (media_entity_type(entity) != MEDIA_ENT_T_DEVNODE)
			continue;

		__video = to_isp_video(media_entity_to_video_device(entity));
		if (__video->type != video->type)
			far_end = __video;
	}

	mutex_unlock(&mdev->graph_mutex);

	if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
		pipe->input = far_end;
		pipe->output = video;
	} else {
		if (far_end == NULL)
			return -EPIPE;

		pipe->input = video;
		pipe->output = far_end;
	}

	return 0;
}
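
/*
 * For illustration: on a capture node the far end, if one exists, is the
 * video output node feeding the pipeline, so pipe->input points to it in
 * memory-to-memory mode and is NULL in sensor-to-memory mode, while
 * pipe->output is the capture node itself. On a video output node the far end
 * must be a capture node; the walk fails with -EPIPE when none is connected.
 */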

/*
 * Validate a pipeline by walking it from the output video node towards its
 * video source.
 *
 * Update the pipeline maximum rate with the limits reported by the blocks in
 * the pipeline (currently the resizer).
 *
 * Return 0 on success, or -EPIPE if no subdev is connected to the output
 * video node or if a sink pad in the pipeline isn't connected to a source,
 * i.e. if the pipeline doesn't start with a video source.
 */
static int isp_video_validate_pipeline(struct isp_pipeline *pipe)
{
	struct isp_device *isp = pipe->output->isp;
	struct media_pad *pad;
	struct v4l2_subdev *subdev;

	subdev = isp_video_remote_subdev(pipe->output, NULL);
	if (subdev == NULL)
		return -EPIPE;

	while (1) {
		/* Retrieve the sink pad */
		pad = &subdev->entity.pads[0];
		if (!(pad->flags & MEDIA_PAD_FL_SINK))
			break;

		/* Update the maximum frame rate */
		if (subdev == &isp->isp_res.subdev)
			omap3isp_resizer_max_rate(&isp->isp_res,
						  &pipe->max_rate);

		/* Retrieve the source pad connected to the sink pad. Return
		 * an error if no source entity can be found, and stop walking
		 * the pipeline if the source entity isn't a subdev.
		 */
		pad = media_entity_remote_pad(pad);
		if (pad == NULL)
			return -EPIPE;

		if (media_entity_type(pad->entity) != MEDIA_ENT_T_V4L2_SUBDEV)
			break;

		subdev = media_entity_to_v4l2_subdev(pad->entity);
	}

	return 0;
}

static int
__isp_video_get_format(struct isp_video *video, struct v4l2_format *format)
{
	struct v4l2_subdev_format fmt;
	struct v4l2_subdev *subdev;
	u32 pad;
	int ret;

	subdev = isp_video_remote_subdev(video, &pad);
	if (subdev == NULL)
		return -EINVAL;

	fmt.pad = pad;
	fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;

	mutex_lock(&video->mutex);
	ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
	mutex_unlock(&video->mutex);

	if (ret)
		return ret;

	format->type = video->type;
	return isp_video_mbus_to_pix(video, &fmt.format, &format->fmt.pix);
}

static int
isp_video_check_format(struct isp_video *video, struct isp_video_fh *vfh)
{
	struct v4l2_format format;
	int ret;

	memcpy(&format, &vfh->format, sizeof(format));
	ret = __isp_video_get_format(video, &format);
	if (ret < 0)
		return ret;

	if (vfh->format.fmt.pix.pixelformat != format.fmt.pix.pixelformat ||
	    vfh->format.fmt.pix.height != format.fmt.pix.height ||
	    vfh->format.fmt.pix.width != format.fmt.pix.width ||
	    vfh->format.fmt.pix.bytesperline != format.fmt.pix.bytesperline ||
	    vfh->format.fmt.pix.sizeimage != format.fmt.pix.sizeimage)
		return -EINVAL;

	return ret;
}

/* -----------------------------------------------------------------------------
 * IOMMU management
 */

#define IOMMU_FLAG	(IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_8)

/*
 * ispmmu_vmap - Wrapper for virtual memory mapping of a scatter-gather list
 * @isp: Device pointer specific to the OMAP3 ISP.
 * @sglist: Pointer to the source scatter-gather list to map.
 * @sglen: Number of elements in the scatter-gather list.
 *
 * Returns the device address produced by the ISP MMU mapping, or -ENOMEM if
 * we ran out of memory.
 */
static dma_addr_t
ispmmu_vmap(struct isp_device *isp, const struct scatterlist *sglist, int sglen)
{
	struct sg_table *sgt;
	u32 da;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (sgt == NULL)
		return -ENOMEM;

	sgt->sgl = (struct scatterlist *)sglist;
	sgt->nents = sglen;
	sgt->orig_nents = sglen;

	da = omap_iommu_vmap(isp->domain, isp->dev, 0, sgt, IOMMU_FLAG);
	if (IS_ERR_VALUE(da))
		kfree(sgt);

	return da;
}

/*
 * ispmmu_vunmap - Unmap a device address from the ISP MMU
 * @isp: Device pointer specific to the OMAP3 ISP.
 * @da: Device address generated by an ispmmu_vmap call.
 */
static void ispmmu_vunmap(struct isp_device *isp, dma_addr_t da)
{
	struct sg_table *sgt;

	sgt = omap_iommu_vunmap(isp->domain, isp->dev, (u32)da);
	kfree(sgt);
}
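
/*
 * Usage sketch: ispmmu_vmap() returns the ISP virtual address in-band in a
 * dma_addr_t, so callers check for failure with IS_ERR_VALUE() rather than
 * comparing against NULL, and every successful mapping must be balanced by an
 * ispmmu_vunmap() call, as isp_video_buffer_prepare() and
 * isp_video_buffer_cleanup() below do.
 */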

/* -----------------------------------------------------------------------------
 * Video queue operations
 */

static void isp_video_queue_prepare(struct isp_video_queue *queue,
				    unsigned int *nbuffers, unsigned int *size)
{
	struct isp_video_fh *vfh =
		container_of(queue, struct isp_video_fh, queue);
	struct isp_video *video = vfh->video;

	*size = vfh->format.fmt.pix.sizeimage;
	if (*size == 0)
		return;

	*nbuffers = min(*nbuffers, video->capture_mem / PAGE_ALIGN(*size));
}

static void isp_video_buffer_cleanup(struct isp_video_buffer *buf)
{
	struct isp_video_fh *vfh = isp_video_queue_to_isp_video_fh(buf->queue);
	struct isp_buffer *buffer = to_isp_buffer(buf);
	struct isp_video *video = vfh->video;

	if (buffer->isp_addr) {
		ispmmu_vunmap(video->isp, buffer->isp_addr);
		buffer->isp_addr = 0;
	}
}

static int isp_video_buffer_prepare(struct isp_video_buffer *buf)
{
	struct isp_video_fh *vfh = isp_video_queue_to_isp_video_fh(buf->queue);
	struct isp_buffer *buffer = to_isp_buffer(buf);
	struct isp_video *video = vfh->video;
	unsigned long addr;

	addr = ispmmu_vmap(video->isp, buf->sglist, buf->sglen);
	if (IS_ERR_VALUE(addr))
		return -EIO;

	if (!IS_ALIGNED(addr, 32)) {
		dev_dbg(video->isp->dev,
			"Buffer address must be aligned to a 32 byte boundary.\n");
		/* Unmap the mapping that was just created for this buffer. */
		ispmmu_vunmap(video->isp, addr);
		return -EINVAL;
	}

	buf->vbuf.bytesused = vfh->format.fmt.pix.sizeimage;
	buffer->isp_addr = addr;
	return 0;
}

/*
 * isp_video_buffer_queue - Add a buffer to the streaming queue
 * @buf: Video buffer
 *
 * In memory-to-memory mode, start streaming on the pipeline if buffers are
 * queued on both the input and the output and the pipeline isn't already busy.
 * If the pipeline is busy, it will be restarted in the output module interrupt
 * handler.
 */
static void isp_video_buffer_queue(struct isp_video_buffer *buf)
{
	struct isp_video_fh *vfh = isp_video_queue_to_isp_video_fh(buf->queue);
	struct isp_buffer *buffer = to_isp_buffer(buf);
	struct isp_video *video = vfh->video;
	struct isp_pipeline *pipe = to_isp_pipeline(&video->video.entity);
	enum isp_pipeline_state state;
	unsigned long flags;
	unsigned int empty;
	unsigned int start;

	empty = list_empty(&video->dmaqueue);
	list_add_tail(&buffer->buffer.irqlist, &video->dmaqueue);

	if (empty) {
		if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
			state = ISP_PIPELINE_QUEUE_OUTPUT;
		else
			state = ISP_PIPELINE_QUEUE_INPUT;

		spin_lock_irqsave(&pipe->lock, flags);
		pipe->state |= state;
		video->ops->queue(video, buffer);
		video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_QUEUED;

		start = isp_pipeline_ready(pipe);
		if (start)
			pipe->state |= ISP_PIPELINE_STREAM;
		spin_unlock_irqrestore(&pipe->lock, flags);

		if (start)
			omap3isp_pipeline_set_stream(pipe,
					ISP_PIPELINE_STREAM_SINGLESHOT);
	}
}

static const struct isp_video_queue_operations isp_video_queue_ops = {
	.queue_prepare = &isp_video_queue_prepare,
	.buffer_prepare = &isp_video_buffer_prepare,
	.buffer_queue = &isp_video_buffer_queue,
	.buffer_cleanup = &isp_video_buffer_cleanup,
};

/*
 * omap3isp_video_buffer_next - Complete the current buffer and return the next
 * @video: ISP video object
 *
 * Remove the current video buffer from the DMA queue and fill its timestamp,
 * field count and state fields before waking up its completion handler.
 *
 * For capture video nodes the buffer state is set to ISP_BUF_STATE_DONE if no
 * error has been flagged in the pipeline, or to ISP_BUF_STATE_ERROR otherwise.
 * For video output nodes the buffer state is always set to ISP_BUF_STATE_DONE.
 *
 * The DMA queue is expected to contain at least one buffer.
 *
 * Return a pointer to the next buffer in the DMA queue, or NULL if the queue
 * is empty.
 */
struct isp_buffer *omap3isp_video_buffer_next(struct isp_video *video)
{
	struct isp_pipeline *pipe = to_isp_pipeline(&video->video.entity);
	struct isp_video_queue *queue = video->queue;
	enum isp_pipeline_state state;
	struct isp_video_buffer *buf;
	unsigned long flags;
	struct timespec ts;

	spin_lock_irqsave(&queue->irqlock, flags);
	if (WARN_ON(list_empty(&video->dmaqueue))) {
		spin_unlock_irqrestore(&queue->irqlock, flags);
		return NULL;
	}

	buf = list_first_entry(&video->dmaqueue, struct isp_video_buffer,
			       irqlist);
	list_del(&buf->irqlist);
	spin_unlock_irqrestore(&queue->irqlock, flags);

	ktime_get_ts(&ts);
	buf->vbuf.timestamp.tv_sec = ts.tv_sec;
	buf->vbuf.timestamp.tv_usec = ts.tv_nsec / NSEC_PER_USEC;

	/* Do frame number propagation only if this is the output video node.
	 * Frame number either comes from the CSI receivers or it gets
	 * incremented here if H3A is not active.
	 * Note: There is no guarantee that the output buffer will finish
	 * first, so the input number might lag behind by 1 in some cases.
	 */
	if (video == pipe->output && !pipe->do_propagation)
		buf->vbuf.sequence = atomic_inc_return(&pipe->frame_number);
	else
		buf->vbuf.sequence = atomic_read(&pipe->frame_number);

	/* Report pipeline errors to userspace on the capture device side. */
	if (queue->type == V4L2_BUF_TYPE_VIDEO_CAPTURE && pipe->error) {
		buf->state = ISP_BUF_STATE_ERROR;
		pipe->error = false;
	} else {
		buf->state = ISP_BUF_STATE_DONE;
	}

	wake_up(&buf->wait);

	if (list_empty(&video->dmaqueue)) {
		if (queue->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
			state = ISP_PIPELINE_QUEUE_OUTPUT
			      | ISP_PIPELINE_STREAM;
		else
			state = ISP_PIPELINE_QUEUE_INPUT
			      | ISP_PIPELINE_STREAM;

		spin_lock_irqsave(&pipe->lock, flags);
		pipe->state &= ~state;
		if (video->pipe.stream_state == ISP_PIPELINE_STREAM_CONTINUOUS)
			video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_UNDERRUN;
		spin_unlock_irqrestore(&pipe->lock, flags);
		return NULL;
	}

	if (queue->type == V4L2_BUF_TYPE_VIDEO_CAPTURE && pipe->input != NULL) {
		spin_lock_irqsave(&pipe->lock, flags);
		pipe->state &= ~ISP_PIPELINE_STREAM;
		spin_unlock_irqrestore(&pipe->lock, flags);
	}

	buf = list_first_entry(&video->dmaqueue, struct isp_video_buffer,
			       irqlist);
	buf->state = ISP_BUF_STATE_ACTIVE;
	return to_isp_buffer(buf);
}

/*
 * omap3isp_video_resume - Perform a resume operation on the buffers
 * @video: ISP video object
 * @continuous: Pipeline is in single shot mode if 0, or continuous mode otherwise
 *
 * This function is intended for use in suspend/resume scenarios. It requests
 * the video queue layer to discard buffers marked as DONE if the pipeline is
 * in continuous mode, and requests the ISP modules to requeue the ACTIVE
 * buffer if there is one.
 */
void omap3isp_video_resume(struct isp_video *video, int continuous)
{
	struct isp_buffer *buf = NULL;

	if (continuous && video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
		omap3isp_video_queue_discard_done(video->queue);

	if (!list_empty(&video->dmaqueue)) {
		buf = list_first_entry(&video->dmaqueue,
				       struct isp_buffer, buffer.irqlist);
		video->ops->queue(video, buf);
		video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_QUEUED;
	} else {
		if (continuous)
			video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_UNDERRUN;
	}
}

/* -----------------------------------------------------------------------------
 * V4L2 ioctls
 */

static int
isp_video_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
{
	struct isp_video *video = video_drvdata(file);

	strlcpy(cap->driver, ISP_VIDEO_DRIVER_NAME, sizeof(cap->driver));
	strlcpy(cap->card, video->video.name, sizeof(cap->card));
	strlcpy(cap->bus_info, "media", sizeof(cap->bus_info));

	if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
		cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
	else
		cap->capabilities = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING;

	return 0;
}

static int
isp_video_get_format(struct file *file, void *fh, struct v4l2_format *format)
{
	struct isp_video_fh *vfh = to_isp_video_fh(fh);
	struct isp_video *video = video_drvdata(file);

	if (format->type != video->type)
		return -EINVAL;

	mutex_lock(&video->mutex);
	*format = vfh->format;
	mutex_unlock(&video->mutex);

	return 0;
}

static int
isp_video_set_format(struct file *file, void *fh, struct v4l2_format *format)
{
	struct isp_video_fh *vfh = to_isp_video_fh(fh);
	struct isp_video *video = video_drvdata(file);
	struct v4l2_mbus_framefmt fmt;

	if (format->type != video->type)
		return -EINVAL;

	mutex_lock(&video->mutex);

	/* Fill the bytesperline and sizeimage fields by converting to media
	 * bus format and back to pixel format.
	 */
	isp_video_pix_to_mbus(&format->fmt.pix, &fmt);
	isp_video_mbus_to_pix(video, &fmt, &format->fmt.pix);

	vfh->format = *format;

	mutex_unlock(&video->mutex);
	return 0;
}

static int
isp_video_try_format(struct file *file, void *fh, struct v4l2_format *format)
{
	struct isp_video *video = video_drvdata(file);
	struct v4l2_subdev_format fmt;
	struct v4l2_subdev *subdev;
	u32 pad;
	int ret;

	if (format->type != video->type)
		return -EINVAL;

	subdev = isp_video_remote_subdev(video, &pad);
	if (subdev == NULL)
		return -EINVAL;

	isp_video_pix_to_mbus(&format->fmt.pix, &fmt.format);

	fmt.pad = pad;
	fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
	ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
	if (ret)
		return ret == -ENOIOCTLCMD ? -ENOTTY : ret;

	isp_video_mbus_to_pix(video, &fmt.format, &format->fmt.pix);
	return 0;
}

static int
isp_video_cropcap(struct file *file, void *fh, struct v4l2_cropcap *cropcap)
{
	struct isp_video *video = video_drvdata(file);
	struct v4l2_subdev *subdev;
	int ret;

	subdev = isp_video_remote_subdev(video, NULL);
	if (subdev == NULL)
		return -EINVAL;

	mutex_lock(&video->mutex);
	ret = v4l2_subdev_call(subdev, video, cropcap, cropcap);
	mutex_unlock(&video->mutex);

	return ret == -ENOIOCTLCMD ? -ENOTTY : ret;
}

static int
isp_video_get_crop(struct file *file, void *fh, struct v4l2_crop *crop)
{
	struct isp_video *video = video_drvdata(file);
	struct v4l2_subdev_format format;
	struct v4l2_subdev *subdev;
	u32 pad;
	int ret;

	subdev = isp_video_remote_subdev(video, &pad);
	if (subdev == NULL)
		return -EINVAL;

	/* Try the get crop operation first and fall back to get format if not
	 * implemented.
	 */
	ret = v4l2_subdev_call(subdev, video, g_crop, crop);
	if (ret != -ENOIOCTLCMD)
		return ret;

	format.pad = pad;
	format.which = V4L2_SUBDEV_FORMAT_ACTIVE;
	ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &format);
	if (ret < 0)
		return ret == -ENOIOCTLCMD ? -ENOTTY : ret;

	crop->c.left = 0;
	crop->c.top = 0;
	crop->c.width = format.format.width;
	crop->c.height = format.format.height;

	return 0;
}

static int
isp_video_set_crop(struct file *file, void *fh, const struct v4l2_crop *crop)
{
	struct isp_video *video = video_drvdata(file);
	struct v4l2_subdev *subdev;
	int ret;

	subdev = isp_video_remote_subdev(video, NULL);
	if (subdev == NULL)
		return -EINVAL;

	mutex_lock(&video->mutex);
	ret = v4l2_subdev_call(subdev, video, s_crop, crop);
	mutex_unlock(&video->mutex);

	return ret == -ENOIOCTLCMD ? -ENOTTY : ret;
}

static int
isp_video_get_param(struct file *file, void *fh, struct v4l2_streamparm *a)
{
	struct isp_video_fh *vfh = to_isp_video_fh(fh);
	struct isp_video *video = video_drvdata(file);

	if (video->type != V4L2_BUF_TYPE_VIDEO_OUTPUT ||
	    video->type != a->type)
		return -EINVAL;

	memset(a, 0, sizeof(*a));
	a->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
	a->parm.output.capability = V4L2_CAP_TIMEPERFRAME;
	a->parm.output.timeperframe = vfh->timeperframe;

	return 0;
}

static int
isp_video_set_param(struct file *file, void *fh, struct v4l2_streamparm *a)
{
	struct isp_video_fh *vfh = to_isp_video_fh(fh);
	struct isp_video *video = video_drvdata(file);

	if (video->type != V4L2_BUF_TYPE_VIDEO_OUTPUT ||
	    video->type != a->type)
		return -EINVAL;

	if (a->parm.output.timeperframe.denominator == 0)
		a->parm.output.timeperframe.denominator = 1;

	vfh->timeperframe = a->parm.output.timeperframe;

	return 0;
}

static int
isp_video_reqbufs(struct file *file, void *fh, struct v4l2_requestbuffers *rb)
{
	struct isp_video_fh *vfh = to_isp_video_fh(fh);

	return omap3isp_video_queue_reqbufs(&vfh->queue, rb);
}

static int
isp_video_querybuf(struct file *file, void *fh, struct v4l2_buffer *b)
{
	struct isp_video_fh *vfh = to_isp_video_fh(fh);

	return omap3isp_video_queue_querybuf(&vfh->queue, b);
}

static int
isp_video_qbuf(struct file *file, void *fh, struct v4l2_buffer *b)
{
	struct isp_video_fh *vfh = to_isp_video_fh(fh);

	return omap3isp_video_queue_qbuf(&vfh->queue, b);
}

static int
isp_video_dqbuf(struct file *file, void *fh, struct v4l2_buffer *b)
{
	struct isp_video_fh *vfh = to_isp_video_fh(fh);

	return omap3isp_video_queue_dqbuf(&vfh->queue, b,
					  file->f_flags & O_NONBLOCK);
}

static int isp_video_check_external_subdevs(struct isp_video *video,
					    struct isp_pipeline *pipe)
{
	struct isp_device *isp = video->isp;
	struct media_entity *ents[] = {
		&isp->isp_csi2a.subdev.entity,
		&isp->isp_csi2c.subdev.entity,
		&isp->isp_ccp2.subdev.entity,
		&isp->isp_ccdc.subdev.entity
	};
	struct media_pad *source_pad;
	struct media_entity *source = NULL;
	struct media_entity *sink;
	struct v4l2_subdev_format fmt;
	struct v4l2_ext_controls ctrls;
	struct v4l2_ext_control ctrl;
	unsigned int i;
	int ret = 0;

	for (i = 0; i < ARRAY_SIZE(ents); i++) {
		/* Is the entity part of the pipeline? */
		if (!(pipe->entities & (1 << ents[i]->id)))
			continue;

		/* ISP entities always have their sink pad at index 0. Find
		 * the source pad connected to it.
		 */
		source_pad = media_entity_remote_pad(&ents[i]->pads[0]);
		if (source_pad == NULL)
			continue;

		source = source_pad->entity;
		sink = ents[i];
		break;
	}

	if (!source) {
		dev_warn(isp->dev, "can't find source, failing now\n");
		return ret;
	}

	if (media_entity_type(source) != MEDIA_ENT_T_V4L2_SUBDEV)
		return 0;

	pipe->external = media_entity_to_v4l2_subdev(source);

	fmt.pad = source_pad->index;
	fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
	ret = v4l2_subdev_call(media_entity_to_v4l2_subdev(sink),
			       pad, get_fmt, NULL, &fmt);
	if (unlikely(ret < 0)) {
		dev_warn(isp->dev, "get_fmt returned null!\n");
		return ret;
	}

	pipe->external_width =
		omap3isp_video_format_info(fmt.format.code)->width;

	memset(&ctrls, 0, sizeof(ctrls));
	memset(&ctrl, 0, sizeof(ctrl));

	ctrl.id = V4L2_CID_PIXEL_RATE;

	ctrls.count = 1;
	ctrls.controls = &ctrl;

	ret = v4l2_g_ext_ctrls(pipe->external->ctrl_handler, &ctrls);
	if (ret < 0) {
		dev_warn(isp->dev, "no pixel rate control in subdev %s\n",
			 pipe->external->name);
		return ret;
	}

	pipe->external_rate = ctrl.value64;

	if (pipe->entities & (1 << isp->isp_ccdc.subdev.entity.id)) {
		unsigned int rate = UINT_MAX;
		/*
		 * Check that the maximum allowed CCDC pixel rate isn't
		 * exceeded by the external pixel rate.
		 */
		omap3isp_ccdc_max_rate(&isp->isp_ccdc, &rate);
		if (pipe->external_rate > rate)
			return -ENOSPC;
	}

	return 0;
}
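
/*
 * Illustrative check (hypothetical numbers): if the external subdev reports a
 * V4L2_CID_PIXEL_RATE of 200 MHz while omap3isp_ccdc_max_rate() computes a
 * 100 MHz limit for the current CCDC configuration, pipe->external_rate
 * exceeds the limit and streaming is refused with -ENOSPC, so userspace has
 * to lower the sensor pixel rate or relax the CCDC configuration.
 */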

/*
 * Stream management
 *
 * Every ISP pipeline has a single input and a single output. The input can be
 * either a sensor or a video node. The output is always a video node.
 *
 * As every pipeline has an output video node, the ISP video object at the
 * pipeline output stores the pipeline state. It tracks the streaming state of
 * both the input and output, as well as the availability of buffers.
 *
 * In sensor-to-memory mode, frames are always available at the pipeline input.
 * Starting the sensor usually requires I2C transfers and must be done in
 * interruptible context. The pipeline is started and stopped synchronously
 * with the stream on/off commands. All modules in the pipeline will get their
 * subdev set stream handler called. The module at the end of the pipeline must
 * delay starting the hardware until buffers are available at its output.
 *
 * In memory-to-memory mode, starting/stopping the stream requires
 * synchronization between the input and output. ISP modules can't be stopped
 * in the middle of a frame, and at least some of the modules seem to become
 * busy as soon as they're started, even if they don't receive a frame start
 * event. For that reason frames need to be processed in single-shot mode. The
 * driver needs to wait until a frame is completely processed and written to
 * memory before restarting the pipeline for the next frame. Pipelined
 * processing might be possible but requires more testing.
 *
 * Stream start must be delayed until buffers are available at both the input
 * and output. The pipeline must be started in the videobuf queue callback with
 * the buffers queue spinlock held. The modules' subdev set stream operation
 * must not sleep.
 */
static int
isp_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
{
	struct isp_video_fh *vfh = to_isp_video_fh(fh);
	struct isp_video *video = video_drvdata(file);
	enum isp_pipeline_state state;
	struct isp_pipeline *pipe;
	unsigned long flags;
	int ret;

	if (type != video->type)
		return -EINVAL;

	mutex_lock(&video->stream_lock);

	if (video->streaming) {
		mutex_unlock(&video->stream_lock);
		return -EBUSY;
	}

	/* Start streaming on the pipeline. No link touching an entity in the
	 * pipeline can be activated or deactivated once streaming is started.
	 */
	pipe = video->video.entity.pipe
	     ? to_isp_pipeline(&video->video.entity) : &video->pipe;

	pipe->entities = 0;

	if (video->isp->pdata->set_constraints)
		video->isp->pdata->set_constraints(video->isp, true);
	pipe->l3_ick = clk_get_rate(video->isp->clock[ISP_CLK_L3_ICK]);
	pipe->max_rate = pipe->l3_ick;

	ret = media_entity_pipeline_start(&video->video.entity, &pipe->pipe);
	if (ret < 0)
		goto err_pipeline_start;

	/* Verify that the currently configured format matches the output of
	 * the connected subdev.
	 */
	ret = isp_video_check_format(video, vfh);
	if (ret < 0)
		goto err_check_format;

	video->bpl_padding = ret;
	video->bpl_value = vfh->format.fmt.pix.bytesperline;

	ret = isp_video_get_graph_data(video, pipe);
	if (ret < 0)
		goto err_check_format;

	if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
		state = ISP_PIPELINE_STREAM_OUTPUT | ISP_PIPELINE_IDLE_OUTPUT;
	else
		state = ISP_PIPELINE_STREAM_INPUT | ISP_PIPELINE_IDLE_INPUT;

	ret = isp_video_check_external_subdevs(video, pipe);
	if (ret < 0)
		goto err_check_format;

	/* Validate the pipeline and update its state. */
	ret = isp_video_validate_pipeline(pipe);
	if (ret < 0)
		goto err_check_format;

	pipe->error = false;

	spin_lock_irqsave(&pipe->lock, flags);
	pipe->state &= ~ISP_PIPELINE_STREAM;
	pipe->state |= state;
	spin_unlock_irqrestore(&pipe->lock, flags);

	/* Set the maximum time per frame as the value requested by userspace.
	 * This is a soft limit that can be overridden if the hardware doesn't
	 * support the requested limit.
	 */
	if (video->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
		pipe->max_timeperframe = vfh->timeperframe;

	video->queue = &vfh->queue;
	INIT_LIST_HEAD(&video->dmaqueue);
	atomic_set(&pipe->frame_number, -1);

	ret = omap3isp_video_queue_streamon(&vfh->queue);
	if (ret < 0)
		goto err_check_format;

	/* In sensor-to-memory mode, the stream can be started synchronously
	 * with the stream on command. In memory-to-memory mode, it will be
	 * started when buffers are queued on both the input and output.
	 */
	if (pipe->input == NULL) {
		ret = omap3isp_pipeline_set_stream(pipe,
					ISP_PIPELINE_STREAM_CONTINUOUS);
		if (ret < 0)
			goto err_set_stream;
		spin_lock_irqsave(&video->queue->irqlock, flags);
		if (list_empty(&video->dmaqueue))
			video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_UNDERRUN;
		spin_unlock_irqrestore(&video->queue->irqlock, flags);
	}

	video->streaming = 1;

	mutex_unlock(&video->stream_lock);
	return 0;

err_set_stream:
	omap3isp_video_queue_streamoff(&vfh->queue);
err_check_format:
	media_entity_pipeline_stop(&video->video.entity);
err_pipeline_start:
	if (video->isp->pdata->set_constraints)
		video->isp->pdata->set_constraints(video->isp, false);
	/* The DMA queue must be emptied here, otherwise CCDC interrupts that
	 * will get triggered the next time the CCDC is powered up will try to
	 * access buffers that might have been freed but are still present in
	 * the DMA queue. This can easily get triggered if the above
	 * omap3isp_pipeline_set_stream() call fails on a system with a
	 * free-running sensor.
	 */
	INIT_LIST_HEAD(&video->dmaqueue);
	video->queue = NULL;

	mutex_unlock(&video->stream_lock);
	return ret;
}

static int
isp_video_streamoff(struct file *file, void *fh, enum v4l2_buf_type type)
{
	struct isp_video_fh *vfh = to_isp_video_fh(fh);
	struct isp_video *video = video_drvdata(file);
	struct isp_pipeline *pipe = to_isp_pipeline(&video->video.entity);
	enum isp_pipeline_state state;
	unsigned int streaming;
	unsigned long flags;

	if (type != video->type)
		return -EINVAL;

	mutex_lock(&video->stream_lock);

	/* Bail out if the file handle isn't streaming. */
	mutex_lock(&vfh->queue.lock);
	streaming = vfh->queue.streaming;
	mutex_unlock(&vfh->queue.lock);

	if (!streaming)
		goto done;

	/* Update the pipeline state. */
	if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
		state = ISP_PIPELINE_STREAM_OUTPUT
		      | ISP_PIPELINE_QUEUE_OUTPUT;
	else
		state = ISP_PIPELINE_STREAM_INPUT
		      | ISP_PIPELINE_QUEUE_INPUT;

	spin_lock_irqsave(&pipe->lock, flags);
	pipe->state &= ~state;
	spin_unlock_irqrestore(&pipe->lock, flags);

	/* Stop the stream. */
	omap3isp_pipeline_set_stream(pipe, ISP_PIPELINE_STREAM_STOPPED);
	omap3isp_video_queue_streamoff(&vfh->queue);
	video->queue = NULL;
	video->streaming = 0;

	if (video->isp->pdata->set_constraints)
		video->isp->pdata->set_constraints(video->isp, false);
	media_entity_pipeline_stop(&video->video.entity);

done:
	mutex_unlock(&video->stream_lock);
	return 0;
}

static int
isp_video_enum_input(struct file *file, void *fh, struct v4l2_input *input)
{
	if (input->index > 0)
		return -EINVAL;

	strlcpy(input->name, "camera", sizeof(input->name));
	input->type = V4L2_INPUT_TYPE_CAMERA;

	return 0;
}

static int
isp_video_g_input(struct file *file, void *fh, unsigned int *input)
{
	*input = 0;

	return 0;
}

static int
isp_video_s_input(struct file *file, void *fh, unsigned int input)
{
	return input == 0 ? 0 : -EINVAL;
}

static const struct v4l2_ioctl_ops isp_video_ioctl_ops = {
	.vidioc_querycap = isp_video_querycap,
	.vidioc_g_fmt_vid_cap = isp_video_get_format,
	.vidioc_s_fmt_vid_cap = isp_video_set_format,
	.vidioc_try_fmt_vid_cap = isp_video_try_format,
	.vidioc_g_fmt_vid_out = isp_video_get_format,
	.vidioc_s_fmt_vid_out = isp_video_set_format,
	.vidioc_try_fmt_vid_out = isp_video_try_format,
	.vidioc_cropcap = isp_video_cropcap,
	.vidioc_g_crop = isp_video_get_crop,
	.vidioc_s_crop = isp_video_set_crop,
	.vidioc_g_parm = isp_video_get_param,
	.vidioc_s_parm = isp_video_set_param,
	.vidioc_reqbufs = isp_video_reqbufs,
	.vidioc_querybuf = isp_video_querybuf,
	.vidioc_qbuf = isp_video_qbuf,
	.vidioc_dqbuf = isp_video_dqbuf,
	.vidioc_streamon = isp_video_streamon,
	.vidioc_streamoff = isp_video_streamoff,
	.vidioc_enum_input = isp_video_enum_input,
	.vidioc_g_input = isp_video_g_input,
	.vidioc_s_input = isp_video_s_input,
};
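
/*
 * Typical userspace sequence against a capture node exposed by this driver
 * (a sketch only; fd refers to a hypothetical open /dev/videoN handle and the
 * resolution and pixel format are example values):
 *
 *	struct v4l2_format fmt = {
 *		.type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
 *		.fmt.pix = {
 *			.width = 2048,
 *			.height = 1536,
 *			.pixelformat = V4L2_PIX_FMT_SGRBG10,
 *		},
 *	};
 *	struct v4l2_requestbuffers rb = {
 *		.count = 4,
 *		.type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
 *		.memory = V4L2_MEMORY_MMAP,
 *	};
 *
 *	ioctl(fd, VIDIOC_S_FMT, &fmt);
 *	ioctl(fd, VIDIOC_REQBUFS, &rb);
 *	(queue buffers with VIDIOC_QBUF, then)
 *	ioctl(fd, VIDIOC_STREAMON, &fmt.type);
 *	(dequeue filled buffers with VIDIOC_DQBUF)
 *
 * VIDIOC_STREAMON fails with -EINVAL if the format set here doesn't match the
 * format on the connected subdev pad (see isp_video_check_format()).
 */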

/* -----------------------------------------------------------------------------
 * V4L2 file operations
 */

static int isp_video_open(struct file *file)
{
	struct isp_video *video = video_drvdata(file);
	struct isp_video_fh *handle;
	int ret = 0;

	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
	if (handle == NULL)
		return -ENOMEM;

	v4l2_fh_init(&handle->vfh, &video->video);
	v4l2_fh_add(&handle->vfh);

	/* If this is the first user, initialise the pipeline. */
	if (omap3isp_get(video->isp) == NULL) {
		ret = -EBUSY;
		goto done;
	}

	ret = omap3isp_pipeline_pm_use(&video->video.entity, 1);
	if (ret < 0) {
		omap3isp_put(video->isp);
		goto done;
	}

	omap3isp_video_queue_init(&handle->queue, video->type,
				  &isp_video_queue_ops, video->isp->dev,
				  sizeof(struct isp_buffer));

	memset(&handle->format, 0, sizeof(handle->format));
	handle->format.type = video->type;
	handle->timeperframe.denominator = 1;

	handle->video = video;
	file->private_data = &handle->vfh;

done:
	if (ret < 0) {
		v4l2_fh_del(&handle->vfh);
		kfree(handle);
	}

	return ret;
}

static int isp_video_release(struct file *file)
{
	struct isp_video *video = video_drvdata(file);
	struct v4l2_fh *vfh = file->private_data;
	struct isp_video_fh *handle = to_isp_video_fh(vfh);

	/* Disable streaming and free the buffers queue resources. */
	isp_video_streamoff(file, vfh, video->type);

	mutex_lock(&handle->queue.lock);
	omap3isp_video_queue_cleanup(&handle->queue);
	mutex_unlock(&handle->queue.lock);

	omap3isp_pipeline_pm_use(&video->video.entity, 0);

	/* Release the file handle. */
	v4l2_fh_del(vfh);
	kfree(handle);
	file->private_data = NULL;

	omap3isp_put(video->isp);

	return 0;
}

static unsigned int isp_video_poll(struct file *file, poll_table *wait)
{
	struct isp_video_fh *vfh = to_isp_video_fh(file->private_data);
	struct isp_video_queue *queue = &vfh->queue;

	return omap3isp_video_queue_poll(queue, file, wait);
}

static int isp_video_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct isp_video_fh *vfh = to_isp_video_fh(file->private_data);

	return omap3isp_video_queue_mmap(&vfh->queue, vma);
}

static struct v4l2_file_operations isp_video_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = video_ioctl2,
	.open = isp_video_open,
	.release = isp_video_release,
	.poll = isp_video_poll,
	.mmap = isp_video_mmap,
};

/* -----------------------------------------------------------------------------
 * ISP video core
 */

static const struct isp_video_operations isp_video_dummy_ops = {
};

int omap3isp_video_init(struct isp_video *video, const char *name)
{
	const char *direction;
	int ret;

	switch (video->type) {
	case V4L2_BUF_TYPE_VIDEO_CAPTURE:
		direction = "output";
		video->pad.flags = MEDIA_PAD_FL_SINK;
		break;
	case V4L2_BUF_TYPE_VIDEO_OUTPUT:
		direction = "input";
		video->pad.flags = MEDIA_PAD_FL_SOURCE;
		video->video.vfl_dir = VFL_DIR_TX;
		break;

	default:
		return -EINVAL;
	}

	ret = media_entity_init(&video->video.entity, 1, &video->pad, 0);
	if (ret < 0)
		return ret;

	mutex_init(&video->mutex);
	atomic_set(&video->active, 0);

	spin_lock_init(&video->pipe.lock);
	mutex_init(&video->stream_lock);

	/* Initialize the video device. */
	if (video->ops == NULL)
		video->ops = &isp_video_dummy_ops;

	video->video.fops = &isp_video_fops;
	snprintf(video->video.name, sizeof(video->video.name),
		 "OMAP3 ISP %s %s", name, direction);
	video->video.vfl_type = VFL_TYPE_GRABBER;
	video->video.release = video_device_release_empty;
	video->video.ioctl_ops = &isp_video_ioctl_ops;
	video->pipe.stream_state = ISP_PIPELINE_STREAM_STOPPED;

	video_set_drvdata(&video->video, video);

	return 0;
}

void omap3isp_video_cleanup(struct isp_video *video)
{
	media_entity_cleanup(&video->video.entity);
	mutex_destroy(&video->stream_lock);
	mutex_destroy(&video->mutex);
}

int omap3isp_video_register(struct isp_video *video, struct v4l2_device *vdev)
{
	int ret;

	video->video.v4l2_dev = vdev;

	ret = video_register_device(&video->video, VFL_TYPE_GRABBER, -1);
	if (ret < 0)
		dev_err(video->isp->dev,
			"%s: could not register video device (%d)\n",
			__func__, ret);

	return ret;
}
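
/*
 * Registration sketch (hypothetical caller): an ISP submodule that owns a
 * video node typically sets video->type, video->ops, video->isp and the
 * bytes-per-line constraints, then calls omap3isp_video_init() followed by
 * omap3isp_video_register() at probe time, for example:
 *
 *	ccdc->video_out.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
 *	ccdc->video_out.ops = &ccdc_video_ops;
 *	ccdc->video_out.isp = isp;
 *	ret = omap3isp_video_init(&ccdc->video_out, "CCDC");
 *	...
 *	ret = omap3isp_video_register(&ccdc->video_out, &isp->v4l2_dev);
 *
 * The identifiers above follow the naming used elsewhere in the driver but are
 * shown only as an illustration; teardown uses omap3isp_video_unregister()
 * and omap3isp_video_cleanup().
 */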

void omap3isp_video_unregister(struct isp_video *video)
{
	if (video_is_registered(&video->video))
		video_unregister_device(&video->video);
}