]> git.proxmox.com Git - mirror_ubuntu-focal-kernel.git/blame - drivers/media/platform/vivid/vivid-vid-cap.c
[media] am437x-vpfe: add support for xfer_func
[mirror_ubuntu-focal-kernel.git] / drivers / media / platform / vivid / vivid-vid-cap.c
CommitLineData
ef834f78
HV
1/*
2 * vivid-vid-cap.c - video capture support functions.
3 *
4 * Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
5 *
6 * This program is free software; you may redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; version 2 of the License.
9 *
10 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
11 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
12 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
13 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
14 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
15 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
16 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
17 * SOFTWARE.
18 */
19
20#include <linux/errno.h>
21#include <linux/kernel.h>
22#include <linux/sched.h>
5754d0d5 23#include <linux/vmalloc.h>
ef834f78
HV
24#include <linux/videodev2.h>
25#include <linux/v4l2-dv-timings.h>
26#include <media/v4l2-common.h>
27#include <media/v4l2-event.h>
28#include <media/v4l2-dv-timings.h>
29
30#include "vivid-core.h"
31#include "vivid-vid-common.h"
32#include "vivid-kthread-cap.h"
33#include "vivid-vid-cap.h"
34
35/* timeperframe: min/max and default */
36static const struct v4l2_fract
37 tpf_min = {.numerator = 1, .denominator = FPS_MAX},
38 tpf_max = {.numerator = FPS_MAX, .denominator = 1},
39 tpf_default = {.numerator = 1, .denominator = 30};
40
41static const struct vivid_fmt formats_ovl[] = {
42 {
ef834f78 43 .fourcc = V4L2_PIX_FMT_RGB565, /* gggbbbbb rrrrrggg */
96c76efa
HV
44 .vdownsampling = { 1 },
45 .bit_depth = { 16 },
ef834f78 46 .planes = 1,
96c76efa 47 .buffers = 1,
ef834f78
HV
48 },
49 {
ef834f78 50 .fourcc = V4L2_PIX_FMT_XRGB555, /* gggbbbbb arrrrrgg */
96c76efa
HV
51 .vdownsampling = { 1 },
52 .bit_depth = { 16 },
ef834f78 53 .planes = 1,
96c76efa 54 .buffers = 1,
ef834f78
HV
55 },
56 {
ef834f78 57 .fourcc = V4L2_PIX_FMT_ARGB555, /* gggbbbbb arrrrrgg */
96c76efa
HV
58 .vdownsampling = { 1 },
59 .bit_depth = { 16 },
ef834f78 60 .planes = 1,
96c76efa 61 .buffers = 1,
ef834f78
HV
62 },
63};
64
65/* The number of discrete webcam framesizes */
1381fb3e 66#define VIVID_WEBCAM_SIZES 4
ef834f78
HV
67/* The number of discrete webcam frameintervals */
68#define VIVID_WEBCAM_IVALS (VIVID_WEBCAM_SIZES * 2)
69
70/* Sizes must be in increasing order */
71static const struct v4l2_frmsize_discrete webcam_sizes[VIVID_WEBCAM_SIZES] = {
72 { 320, 180 },
73 { 640, 360 },
74 { 1280, 720 },
1381fb3e 75 { 1920, 1080 },
ef834f78
HV
76};
77
78/*
79 * Intervals must be in increasing order and there must be twice as many
80 * elements in this array as there are in webcam_sizes.
81 */
82static const struct v4l2_fract webcam_intervals[VIVID_WEBCAM_IVALS] = {
1381fb3e
PZ
83 { 1, 2 },
84 { 1, 5 },
ef834f78
HV
85 { 1, 10 },
86 { 1, 15 },
87 { 1, 25 },
88 { 1, 30 },
89 { 1, 50 },
90 { 1, 60 },
91};
92
93static const struct v4l2_discrete_probe webcam_probe = {
94 webcam_sizes,
95 VIVID_WEBCAM_SIZES
96};
97
98static int vid_cap_queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
99 unsigned *nbuffers, unsigned *nplanes,
100 unsigned sizes[], void *alloc_ctxs[])
101{
102 struct vivid_dev *dev = vb2_get_drv_priv(vq);
ddcaee9d 103 unsigned buffers = tpg_g_buffers(&dev->tpg);
ef834f78
HV
104 unsigned h = dev->fmt_cap_rect.height;
105 unsigned p;
106
107 if (dev->field_cap == V4L2_FIELD_ALTERNATE) {
108 /*
109 * You cannot use read() with FIELD_ALTERNATE since the field
110 * information (TOP/BOTTOM) cannot be passed back to the user.
111 */
112 if (vb2_fileio_is_active(vq))
113 return -EINVAL;
114 }
115
116 if (dev->queue_setup_error) {
117 /*
118 * Error injection: test what happens if queue_setup() returns
119 * an error.
120 */
121 dev->queue_setup_error = false;
122 return -EINVAL;
123 }
124 if (fmt) {
125 const struct v4l2_pix_format_mplane *mp;
126 struct v4l2_format mp_fmt;
127 const struct vivid_fmt *vfmt;
128
129 if (!V4L2_TYPE_IS_MULTIPLANAR(fmt->type)) {
130 fmt_sp2mp(fmt, &mp_fmt);
131 fmt = &mp_fmt;
132 }
133 mp = &fmt->fmt.pix_mp;
134 /*
135 * Check if the number of planes in the specified format match
ddcaee9d 136 * the number of buffers in the current format. You can't mix that.
ef834f78 137 */
ddcaee9d 138 if (mp->num_planes != buffers)
ef834f78 139 return -EINVAL;
1fc78bc9 140 vfmt = vivid_get_format(dev, mp->pixelformat);
ddcaee9d 141 for (p = 0; p < buffers; p++) {
ef834f78 142 sizes[p] = mp->plane_fmt[p].sizeimage;
ddcaee9d 143 if (sizes[p] < tpg_g_line_width(&dev->tpg, p) * h +
ef834f78
HV
144 vfmt->data_offset[p])
145 return -EINVAL;
146 }
147 } else {
ddcaee9d
HV
148 for (p = 0; p < buffers; p++)
149 sizes[p] = tpg_g_line_width(&dev->tpg, p) * h +
ef834f78
HV
150 dev->fmt_cap->data_offset[p];
151 }
152
153 if (vq->num_buffers + *nbuffers < 2)
154 *nbuffers = 2 - vq->num_buffers;
155
ddcaee9d 156 *nplanes = buffers;
ef834f78
HV
157
158 /*
159 * videobuf2-vmalloc allocator is context-less so no need to set
160 * alloc_ctxs array.
161 */
162
ddcaee9d
HV
163 dprintk(dev, 1, "%s: count=%d\n", __func__, *nbuffers);
164 for (p = 0; p < buffers; p++)
165 dprintk(dev, 1, "%s: size[%u]=%u\n", __func__, p, sizes[p]);
ef834f78
HV
166
167 return 0;
168}
169
170static int vid_cap_buf_prepare(struct vb2_buffer *vb)
171{
172 struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
173 unsigned long size;
ddcaee9d 174 unsigned buffers = tpg_g_buffers(&dev->tpg);
ef834f78
HV
175 unsigned p;
176
177 dprintk(dev, 1, "%s\n", __func__);
178
179 if (WARN_ON(NULL == dev->fmt_cap))
180 return -EINVAL;
181
182 if (dev->buf_prepare_error) {
183 /*
184 * Error injection: test what happens if buf_prepare() returns
185 * an error.
186 */
187 dev->buf_prepare_error = false;
188 return -EINVAL;
189 }
ddcaee9d
HV
190 for (p = 0; p < buffers; p++) {
191 size = tpg_g_line_width(&dev->tpg, p) * dev->fmt_cap_rect.height +
ef834f78
HV
192 dev->fmt_cap->data_offset[p];
193
360565fc 194 if (vb2_plane_size(vb, p) < size) {
ef834f78 195 dprintk(dev, 1, "%s data will not fit into plane %u (%lu < %lu)\n",
360565fc 196 __func__, p, vb2_plane_size(vb, p), size);
ef834f78
HV
197 return -EINVAL;
198 }
199
200 vb2_set_plane_payload(vb, p, size);
201 vb->v4l2_planes[p].data_offset = dev->fmt_cap->data_offset[p];
202 }
203
204 return 0;
205}
206
207static void vid_cap_buf_finish(struct vb2_buffer *vb)
208{
209 struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
210 struct v4l2_timecode *tc = &vb->v4l2_buf.timecode;
211 unsigned fps = 25;
212 unsigned seq = vb->v4l2_buf.sequence;
213
214 if (!vivid_is_sdtv_cap(dev))
215 return;
216
217 /*
218 * Set the timecode. Rarely used, so it is interesting to
219 * test this.
220 */
221 vb->v4l2_buf.flags |= V4L2_BUF_FLAG_TIMECODE;
222 if (dev->std_cap & V4L2_STD_525_60)
223 fps = 30;
224 tc->type = (fps == 30) ? V4L2_TC_TYPE_30FPS : V4L2_TC_TYPE_25FPS;
225 tc->flags = 0;
226 tc->frames = seq % fps;
227 tc->seconds = (seq / fps) % 60;
228 tc->minutes = (seq / (60 * fps)) % 60;
229 tc->hours = (seq / (60 * 60 * fps)) % 24;
230}
231
232static void vid_cap_buf_queue(struct vb2_buffer *vb)
233{
234 struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
235 struct vivid_buffer *buf = container_of(vb, struct vivid_buffer, vb);
236
237 dprintk(dev, 1, "%s\n", __func__);
238
239 spin_lock(&dev->slock);
240 list_add_tail(&buf->list, &dev->vid_cap_active);
241 spin_unlock(&dev->slock);
242}
243
244static int vid_cap_start_streaming(struct vb2_queue *vq, unsigned count)
245{
246 struct vivid_dev *dev = vb2_get_drv_priv(vq);
247 unsigned i;
248 int err;
249
250 if (vb2_is_streaming(&dev->vb_vid_out_q))
251 dev->can_loop_video = vivid_vid_can_loop(dev);
252
253 if (dev->kthread_vid_cap)
254 return 0;
255
256 dev->vid_cap_seq_count = 0;
257 dprintk(dev, 1, "%s\n", __func__);
258 for (i = 0; i < VIDEO_MAX_FRAME; i++)
259 dev->must_blank[i] = tpg_g_perc_fill(&dev->tpg) < 100;
260 if (dev->start_streaming_error) {
261 dev->start_streaming_error = false;
262 err = -EINVAL;
263 } else {
264 err = vivid_start_generating_vid_cap(dev, &dev->vid_cap_streaming);
265 }
266 if (err) {
267 struct vivid_buffer *buf, *tmp;
268
269 list_for_each_entry_safe(buf, tmp, &dev->vid_cap_active, list) {
270 list_del(&buf->list);
271 vb2_buffer_done(&buf->vb, VB2_BUF_STATE_QUEUED);
272 }
273 }
274 return err;
275}
276
277/* abort streaming and wait for last buffer */
278static void vid_cap_stop_streaming(struct vb2_queue *vq)
279{
280 struct vivid_dev *dev = vb2_get_drv_priv(vq);
281
282 dprintk(dev, 1, "%s\n", __func__);
283 vivid_stop_generating_vid_cap(dev, &dev->vid_cap_streaming);
284 dev->can_loop_video = false;
285}
286
287const struct vb2_ops vivid_vid_cap_qops = {
288 .queue_setup = vid_cap_queue_setup,
289 .buf_prepare = vid_cap_buf_prepare,
290 .buf_finish = vid_cap_buf_finish,
291 .buf_queue = vid_cap_buf_queue,
292 .start_streaming = vid_cap_start_streaming,
293 .stop_streaming = vid_cap_stop_streaming,
6dfa5131
PL
294 .wait_prepare = vb2_ops_wait_prepare,
295 .wait_finish = vb2_ops_wait_finish,
ef834f78
HV
296};
297
298/*
299 * Determine the 'picture' quality based on the current TV frequency: either
300 * COLOR for a good 'signal', GRAY (grayscale picture) for a slightly off
301 * signal or NOISE for no signal.
302 */
303void vivid_update_quality(struct vivid_dev *dev)
304{
305 unsigned freq_modulus;
306
307 if (dev->loop_video && (vivid_is_svid_cap(dev) || vivid_is_hdmi_cap(dev))) {
308 /*
309 * The 'noise' will only be replaced by the actual video
310 * if the output video matches the input video settings.
311 */
312 tpg_s_quality(&dev->tpg, TPG_QUAL_NOISE, 0);
313 return;
314 }
315 if (vivid_is_hdmi_cap(dev) && VIVID_INVALID_SIGNAL(dev->dv_timings_signal_mode)) {
316 tpg_s_quality(&dev->tpg, TPG_QUAL_NOISE, 0);
317 return;
318 }
319 if (vivid_is_sdtv_cap(dev) && VIVID_INVALID_SIGNAL(dev->std_signal_mode)) {
320 tpg_s_quality(&dev->tpg, TPG_QUAL_NOISE, 0);
321 return;
322 }
323 if (!vivid_is_tv_cap(dev)) {
324 tpg_s_quality(&dev->tpg, TPG_QUAL_COLOR, 0);
325 return;
326 }
327
328 /*
329 * There is a fake channel every 6 MHz at 49.25, 55.25, etc.
330 * From +/- 0.25 MHz around the channel there is color, and from
331 * +/- 1 MHz there is grayscale (chroma is lost).
332 * Everywhere else it is just noise.
333 */
334 freq_modulus = (dev->tv_freq - 676 /* (43.25-1) * 16 */) % (6 * 16);
335 if (freq_modulus > 2 * 16) {
336 tpg_s_quality(&dev->tpg, TPG_QUAL_NOISE,
337 next_pseudo_random32(dev->tv_freq ^ 0x55) & 0x3f);
338 return;
339 }
340 if (freq_modulus < 12 /*0.75 * 16*/ || freq_modulus > 20 /*1.25 * 16*/)
341 tpg_s_quality(&dev->tpg, TPG_QUAL_GRAY, 0);
342 else
343 tpg_s_quality(&dev->tpg, TPG_QUAL_COLOR, 0);
344}
345
346/*
347 * Get the current picture quality and the associated afc value.
348 */
349static enum tpg_quality vivid_get_quality(struct vivid_dev *dev, s32 *afc)
350{
351 unsigned freq_modulus;
352
353 if (afc)
354 *afc = 0;
355 if (tpg_g_quality(&dev->tpg) == TPG_QUAL_COLOR ||
356 tpg_g_quality(&dev->tpg) == TPG_QUAL_NOISE)
357 return tpg_g_quality(&dev->tpg);
358
359 /*
360 * There is a fake channel every 6 MHz at 49.25, 55.25, etc.
361 * From +/- 0.25 MHz around the channel there is color, and from
362 * +/- 1 MHz there is grayscale (chroma is lost).
363 * Everywhere else it is just gray.
364 */
365 freq_modulus = (dev->tv_freq - 676 /* (43.25-1) * 16 */) % (6 * 16);
366 if (afc)
367 *afc = freq_modulus - 1 * 16;
368 return TPG_QUAL_GRAY;
369}
370
371enum tpg_video_aspect vivid_get_video_aspect(const struct vivid_dev *dev)
372{
373 if (vivid_is_sdtv_cap(dev))
374 return dev->std_aspect_ratio;
375
376 if (vivid_is_hdmi_cap(dev))
377 return dev->dv_timings_aspect_ratio;
378
379 return TPG_VIDEO_ASPECT_IMAGE;
380}
381
382static enum tpg_pixel_aspect vivid_get_pixel_aspect(const struct vivid_dev *dev)
383{
384 if (vivid_is_sdtv_cap(dev))
385 return (dev->std_cap & V4L2_STD_525_60) ?
386 TPG_PIXEL_ASPECT_NTSC : TPG_PIXEL_ASPECT_PAL;
387
388 if (vivid_is_hdmi_cap(dev) &&
389 dev->src_rect.width == 720 && dev->src_rect.height <= 576)
390 return dev->src_rect.height == 480 ?
391 TPG_PIXEL_ASPECT_NTSC : TPG_PIXEL_ASPECT_PAL;
392
393 return TPG_PIXEL_ASPECT_SQUARE;
394}
395
396/*
397 * Called whenever the format has to be reset which can occur when
398 * changing inputs, standard, timings, etc.
399 */
400void vivid_update_format_cap(struct vivid_dev *dev, bool keep_controls)
401{
402 struct v4l2_bt_timings *bt = &dev->dv_timings_cap.bt;
403 unsigned size;
404
405 switch (dev->input_type[dev->input]) {
406 case WEBCAM:
407 default:
408 dev->src_rect.width = webcam_sizes[dev->webcam_size_idx].width;
409 dev->src_rect.height = webcam_sizes[dev->webcam_size_idx].height;
410 dev->timeperframe_vid_cap = webcam_intervals[dev->webcam_ival_idx];
411 dev->field_cap = V4L2_FIELD_NONE;
412 tpg_s_rgb_range(&dev->tpg, V4L2_DV_RGB_RANGE_AUTO);
413 break;
414 case TV:
415 case SVID:
416 dev->field_cap = dev->tv_field_cap;
417 dev->src_rect.width = 720;
418 if (dev->std_cap & V4L2_STD_525_60) {
419 dev->src_rect.height = 480;
420 dev->timeperframe_vid_cap = (struct v4l2_fract) { 1001, 30000 };
421 dev->service_set_cap = V4L2_SLICED_CAPTION_525;
422 } else {
423 dev->src_rect.height = 576;
424 dev->timeperframe_vid_cap = (struct v4l2_fract) { 1000, 25000 };
62f28725 425 dev->service_set_cap = V4L2_SLICED_WSS_625 | V4L2_SLICED_TELETEXT_B;
ef834f78
HV
426 }
427 tpg_s_rgb_range(&dev->tpg, V4L2_DV_RGB_RANGE_AUTO);
428 break;
429 case HDMI:
430 dev->src_rect.width = bt->width;
431 dev->src_rect.height = bt->height;
432 size = V4L2_DV_BT_FRAME_WIDTH(bt) * V4L2_DV_BT_FRAME_HEIGHT(bt);
433 dev->timeperframe_vid_cap = (struct v4l2_fract) {
434 size / 100, (u32)bt->pixelclock / 100
435 };
436 if (bt->interlaced)
437 dev->field_cap = V4L2_FIELD_ALTERNATE;
438 else
439 dev->field_cap = V4L2_FIELD_NONE;
440
441 /*
442 * We can be called from within s_ctrl, in that case we can't
443 * set/get controls. Luckily we don't need to in that case.
444 */
445 if (keep_controls || !dev->colorspace)
446 break;
4a203349 447 if (bt->flags & V4L2_DV_FL_IS_CE_VIDEO) {
ef834f78 448 if (bt->width == 720 && bt->height <= 576)
cd8adbe7 449 v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_170M);
ef834f78 450 else
cd8adbe7 451 v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_709);
ef834f78
HV
452 v4l2_ctrl_s_ctrl(dev->real_rgb_range_cap, 1);
453 } else {
cd8adbe7 454 v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_SRGB);
ef834f78
HV
455 v4l2_ctrl_s_ctrl(dev->real_rgb_range_cap, 0);
456 }
457 tpg_s_rgb_range(&dev->tpg, v4l2_ctrl_g_ctrl(dev->rgb_range_cap));
458 break;
459 }
460 vivid_update_quality(dev);
461 tpg_reset_source(&dev->tpg, dev->src_rect.width, dev->src_rect.height, dev->field_cap);
462 dev->crop_cap = dev->src_rect;
463 dev->crop_bounds_cap = dev->src_rect;
464 dev->compose_cap = dev->crop_cap;
465 if (V4L2_FIELD_HAS_T_OR_B(dev->field_cap))
466 dev->compose_cap.height /= 2;
467 dev->fmt_cap_rect = dev->compose_cap;
468 tpg_s_video_aspect(&dev->tpg, vivid_get_video_aspect(dev));
469 tpg_s_pixel_aspect(&dev->tpg, vivid_get_pixel_aspect(dev));
470 tpg_update_mv_step(&dev->tpg);
471}
472
473/* Map the field to something that is valid for the current input */
474static enum v4l2_field vivid_field_cap(struct vivid_dev *dev, enum v4l2_field field)
475{
476 if (vivid_is_sdtv_cap(dev)) {
477 switch (field) {
478 case V4L2_FIELD_INTERLACED_TB:
479 case V4L2_FIELD_INTERLACED_BT:
480 case V4L2_FIELD_SEQ_TB:
481 case V4L2_FIELD_SEQ_BT:
482 case V4L2_FIELD_TOP:
483 case V4L2_FIELD_BOTTOM:
484 case V4L2_FIELD_ALTERNATE:
485 return field;
486 case V4L2_FIELD_INTERLACED:
487 default:
488 return V4L2_FIELD_INTERLACED;
489 }
490 }
491 if (vivid_is_hdmi_cap(dev))
492 return dev->dv_timings_cap.bt.interlaced ? V4L2_FIELD_ALTERNATE :
493 V4L2_FIELD_NONE;
494 return V4L2_FIELD_NONE;
495}
496
497static unsigned vivid_colorspace_cap(struct vivid_dev *dev)
498{
499 if (!dev->loop_video || vivid_is_webcam(dev) || vivid_is_tv_cap(dev))
500 return tpg_g_colorspace(&dev->tpg);
501 return dev->colorspace_out;
502}
503
3e8a78d1
HV
504static unsigned vivid_ycbcr_enc_cap(struct vivid_dev *dev)
505{
506 if (!dev->loop_video || vivid_is_webcam(dev) || vivid_is_tv_cap(dev))
507 return tpg_g_ycbcr_enc(&dev->tpg);
508 return dev->ycbcr_enc_out;
509}
510
511static unsigned vivid_quantization_cap(struct vivid_dev *dev)
512{
513 if (!dev->loop_video || vivid_is_webcam(dev) || vivid_is_tv_cap(dev))
514 return tpg_g_quantization(&dev->tpg);
515 return dev->quantization_out;
516}
517
ef834f78
HV
518int vivid_g_fmt_vid_cap(struct file *file, void *priv,
519 struct v4l2_format *f)
520{
521 struct vivid_dev *dev = video_drvdata(file);
522 struct v4l2_pix_format_mplane *mp = &f->fmt.pix_mp;
523 unsigned p;
524
525 mp->width = dev->fmt_cap_rect.width;
526 mp->height = dev->fmt_cap_rect.height;
527 mp->field = dev->field_cap;
528 mp->pixelformat = dev->fmt_cap->fourcc;
529 mp->colorspace = vivid_colorspace_cap(dev);
3e8a78d1
HV
530 mp->ycbcr_enc = vivid_ycbcr_enc_cap(dev);
531 mp->quantization = vivid_quantization_cap(dev);
ddcaee9d 532 mp->num_planes = dev->fmt_cap->buffers;
ef834f78
HV
533 for (p = 0; p < mp->num_planes; p++) {
534 mp->plane_fmt[p].bytesperline = tpg_g_bytesperline(&dev->tpg, p);
535 mp->plane_fmt[p].sizeimage =
ddcaee9d 536 tpg_g_line_width(&dev->tpg, p) * mp->height +
ef834f78
HV
537 dev->fmt_cap->data_offset[p];
538 }
539 return 0;
540}
541
542int vivid_try_fmt_vid_cap(struct file *file, void *priv,
543 struct v4l2_format *f)
544{
545 struct v4l2_pix_format_mplane *mp = &f->fmt.pix_mp;
546 struct v4l2_plane_pix_format *pfmt = mp->plane_fmt;
547 struct vivid_dev *dev = video_drvdata(file);
548 const struct vivid_fmt *fmt;
549 unsigned bytesperline, max_bpl;
550 unsigned factor = 1;
551 unsigned w, h;
552 unsigned p;
553
1fc78bc9 554 fmt = vivid_get_format(dev, mp->pixelformat);
ef834f78
HV
555 if (!fmt) {
556 dprintk(dev, 1, "Fourcc format (0x%08x) unknown.\n",
557 mp->pixelformat);
558 mp->pixelformat = V4L2_PIX_FMT_YUYV;
1fc78bc9 559 fmt = vivid_get_format(dev, mp->pixelformat);
ef834f78
HV
560 }
561
562 mp->field = vivid_field_cap(dev, mp->field);
563 if (vivid_is_webcam(dev)) {
564 const struct v4l2_frmsize_discrete *sz =
565 v4l2_find_nearest_format(&webcam_probe, mp->width, mp->height);
566
567 w = sz->width;
568 h = sz->height;
569 } else if (vivid_is_sdtv_cap(dev)) {
570 w = 720;
571 h = (dev->std_cap & V4L2_STD_525_60) ? 480 : 576;
572 } else {
573 w = dev->src_rect.width;
574 h = dev->src_rect.height;
575 }
576 if (V4L2_FIELD_HAS_T_OR_B(mp->field))
577 factor = 2;
578 if (vivid_is_webcam(dev) ||
579 (!dev->has_scaler_cap && !dev->has_crop_cap && !dev->has_compose_cap)) {
580 mp->width = w;
581 mp->height = h / factor;
582 } else {
583 struct v4l2_rect r = { 0, 0, mp->width, mp->height * factor };
584
585 rect_set_min_size(&r, &vivid_min_rect);
586 rect_set_max_size(&r, &vivid_max_rect);
587 if (dev->has_scaler_cap && !dev->has_compose_cap) {
588 struct v4l2_rect max_r = { 0, 0, MAX_ZOOM * w, MAX_ZOOM * h };
589
590 rect_set_max_size(&r, &max_r);
591 } else if (!dev->has_scaler_cap && dev->has_crop_cap && !dev->has_compose_cap) {
592 rect_set_max_size(&r, &dev->src_rect);
593 } else if (!dev->has_scaler_cap && !dev->has_crop_cap) {
594 rect_set_min_size(&r, &dev->src_rect);
595 }
596 mp->width = r.width;
597 mp->height = r.height / factor;
598 }
599
600 /* This driver supports custom bytesperline values */
601
ddcaee9d 602 mp->num_planes = fmt->buffers;
ef834f78 603 for (p = 0; p < mp->num_planes; p++) {
ddcaee9d
HV
604 /* Calculate the minimum supported bytesperline value */
605 bytesperline = (mp->width * fmt->bit_depth[p]) >> 3;
606 /* Calculate the maximum supported bytesperline value */
607 max_bpl = (MAX_ZOOM * MAX_WIDTH * fmt->bit_depth[p]) >> 3;
608
ef834f78
HV
609 if (pfmt[p].bytesperline > max_bpl)
610 pfmt[p].bytesperline = max_bpl;
611 if (pfmt[p].bytesperline < bytesperline)
612 pfmt[p].bytesperline = bytesperline;
ddcaee9d
HV
613 pfmt[p].sizeimage = tpg_calc_line_width(&dev->tpg, p, pfmt[p].bytesperline) *
614 mp->height + fmt->data_offset[p];
ef834f78
HV
615 memset(pfmt[p].reserved, 0, sizeof(pfmt[p].reserved));
616 }
617 mp->colorspace = vivid_colorspace_cap(dev);
3e8a78d1
HV
618 mp->ycbcr_enc = vivid_ycbcr_enc_cap(dev);
619 mp->quantization = vivid_quantization_cap(dev);
ef834f78
HV
620 memset(mp->reserved, 0, sizeof(mp->reserved));
621 return 0;
622}
623
624int vivid_s_fmt_vid_cap(struct file *file, void *priv,
625 struct v4l2_format *f)
626{
627 struct v4l2_pix_format_mplane *mp = &f->fmt.pix_mp;
628 struct vivid_dev *dev = video_drvdata(file);
629 struct v4l2_rect *crop = &dev->crop_cap;
630 struct v4l2_rect *compose = &dev->compose_cap;
631 struct vb2_queue *q = &dev->vb_vid_cap_q;
632 int ret = vivid_try_fmt_vid_cap(file, priv, f);
633 unsigned factor = 1;
ddcaee9d 634 unsigned p;
ef834f78
HV
635 unsigned i;
636
637 if (ret < 0)
638 return ret;
639
640 if (vb2_is_busy(q)) {
641 dprintk(dev, 1, "%s device busy\n", __func__);
642 return -EBUSY;
643 }
644
645 if (dev->overlay_cap_owner && dev->fb_cap.fmt.pixelformat != mp->pixelformat) {
646 dprintk(dev, 1, "overlay is active, can't change pixelformat\n");
647 return -EBUSY;
648 }
649
1fc78bc9 650 dev->fmt_cap = vivid_get_format(dev, mp->pixelformat);
ef834f78
HV
651 if (V4L2_FIELD_HAS_T_OR_B(mp->field))
652 factor = 2;
653
654 /* Note: the webcam input doesn't support scaling, cropping or composing */
655
656 if (!vivid_is_webcam(dev) &&
657 (dev->has_scaler_cap || dev->has_crop_cap || dev->has_compose_cap)) {
658 struct v4l2_rect r = { 0, 0, mp->width, mp->height };
659
660 if (dev->has_scaler_cap) {
661 if (dev->has_compose_cap)
662 rect_map_inside(compose, &r);
663 else
664 *compose = r;
665 if (dev->has_crop_cap && !dev->has_compose_cap) {
666 struct v4l2_rect min_r = {
667 0, 0,
668 r.width / MAX_ZOOM,
669 factor * r.height / MAX_ZOOM
670 };
671 struct v4l2_rect max_r = {
672 0, 0,
673 r.width * MAX_ZOOM,
674 factor * r.height * MAX_ZOOM
675 };
676
677 rect_set_min_size(crop, &min_r);
678 rect_set_max_size(crop, &max_r);
679 rect_map_inside(crop, &dev->crop_bounds_cap);
680 } else if (dev->has_crop_cap) {
681 struct v4l2_rect min_r = {
682 0, 0,
683 compose->width / MAX_ZOOM,
684 factor * compose->height / MAX_ZOOM
685 };
686 struct v4l2_rect max_r = {
687 0, 0,
688 compose->width * MAX_ZOOM,
689 factor * compose->height * MAX_ZOOM
690 };
691
692 rect_set_min_size(crop, &min_r);
693 rect_set_max_size(crop, &max_r);
694 rect_map_inside(crop, &dev->crop_bounds_cap);
695 }
696 } else if (dev->has_crop_cap && !dev->has_compose_cap) {
697 r.height *= factor;
698 rect_set_size_to(crop, &r);
699 rect_map_inside(crop, &dev->crop_bounds_cap);
700 r = *crop;
701 r.height /= factor;
702 rect_set_size_to(compose, &r);
703 } else if (!dev->has_crop_cap) {
704 rect_map_inside(compose, &r);
705 } else {
706 r.height *= factor;
707 rect_set_max_size(crop, &r);
708 rect_map_inside(crop, &dev->crop_bounds_cap);
709 compose->top *= factor;
710 compose->height *= factor;
711 rect_set_size_to(compose, crop);
712 rect_map_inside(compose, &r);
713 compose->top /= factor;
714 compose->height /= factor;
715 }
716 } else if (vivid_is_webcam(dev)) {
717 /* Guaranteed to be a match */
718 for (i = 0; i < ARRAY_SIZE(webcam_sizes); i++)
719 if (webcam_sizes[i].width == mp->width &&
720 webcam_sizes[i].height == mp->height)
721 break;
722 dev->webcam_size_idx = i;
1381fb3e
PZ
723 if (dev->webcam_ival_idx >= 2 * (VIVID_WEBCAM_SIZES - i))
724 dev->webcam_ival_idx = 2 * (VIVID_WEBCAM_SIZES - i) - 1;
ef834f78
HV
725 vivid_update_format_cap(dev, false);
726 } else {
727 struct v4l2_rect r = { 0, 0, mp->width, mp->height };
728
729 rect_set_size_to(compose, &r);
730 r.height *= factor;
731 rect_set_size_to(crop, &r);
732 }
733
734 dev->fmt_cap_rect.width = mp->width;
735 dev->fmt_cap_rect.height = mp->height;
736 tpg_s_buf_height(&dev->tpg, mp->height);
ddcaee9d
HV
737 tpg_s_fourcc(&dev->tpg, dev->fmt_cap->fourcc);
738 for (p = 0; p < tpg_g_buffers(&dev->tpg); p++)
739 tpg_s_bytesperline(&dev->tpg, p, mp->plane_fmt[p].bytesperline);
ef834f78 740 dev->field_cap = mp->field;
43047f6b
HV
741 if (dev->field_cap == V4L2_FIELD_ALTERNATE)
742 tpg_s_field(&dev->tpg, V4L2_FIELD_TOP, true);
743 else
744 tpg_s_field(&dev->tpg, dev->field_cap, false);
ef834f78 745 tpg_s_crop_compose(&dev->tpg, &dev->crop_cap, &dev->compose_cap);
ef834f78
HV
746 if (vivid_is_sdtv_cap(dev))
747 dev->tv_field_cap = mp->field;
748 tpg_update_mv_step(&dev->tpg);
749 return 0;
750}
751
752int vidioc_g_fmt_vid_cap_mplane(struct file *file, void *priv,
753 struct v4l2_format *f)
754{
755 struct vivid_dev *dev = video_drvdata(file);
756
757 if (!dev->multiplanar)
758 return -ENOTTY;
759 return vivid_g_fmt_vid_cap(file, priv, f);
760}
761
762int vidioc_try_fmt_vid_cap_mplane(struct file *file, void *priv,
763 struct v4l2_format *f)
764{
765 struct vivid_dev *dev = video_drvdata(file);
766
767 if (!dev->multiplanar)
768 return -ENOTTY;
769 return vivid_try_fmt_vid_cap(file, priv, f);
770}
771
772int vidioc_s_fmt_vid_cap_mplane(struct file *file, void *priv,
773 struct v4l2_format *f)
774{
775 struct vivid_dev *dev = video_drvdata(file);
776
777 if (!dev->multiplanar)
778 return -ENOTTY;
779 return vivid_s_fmt_vid_cap(file, priv, f);
780}
781
782int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
783 struct v4l2_format *f)
784{
785 struct vivid_dev *dev = video_drvdata(file);
786
787 if (dev->multiplanar)
788 return -ENOTTY;
789 return fmt_sp2mp_func(file, priv, f, vivid_g_fmt_vid_cap);
790}
791
792int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
793 struct v4l2_format *f)
794{
795 struct vivid_dev *dev = video_drvdata(file);
796
797 if (dev->multiplanar)
798 return -ENOTTY;
799 return fmt_sp2mp_func(file, priv, f, vivid_try_fmt_vid_cap);
800}
801
802int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
803 struct v4l2_format *f)
804{
805 struct vivid_dev *dev = video_drvdata(file);
806
807 if (dev->multiplanar)
808 return -ENOTTY;
809 return fmt_sp2mp_func(file, priv, f, vivid_s_fmt_vid_cap);
810}
811
812int vivid_vid_cap_g_selection(struct file *file, void *priv,
813 struct v4l2_selection *sel)
814{
815 struct vivid_dev *dev = video_drvdata(file);
816
817 if (!dev->has_crop_cap && !dev->has_compose_cap)
818 return -ENOTTY;
819 if (sel->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
820 return -EINVAL;
821 if (vivid_is_webcam(dev))
822 return -EINVAL;
823
824 sel->r.left = sel->r.top = 0;
825 switch (sel->target) {
826 case V4L2_SEL_TGT_CROP:
827 if (!dev->has_crop_cap)
828 return -EINVAL;
829 sel->r = dev->crop_cap;
830 break;
831 case V4L2_SEL_TGT_CROP_DEFAULT:
832 case V4L2_SEL_TGT_CROP_BOUNDS:
833 if (!dev->has_crop_cap)
834 return -EINVAL;
835 sel->r = dev->src_rect;
836 break;
837 case V4L2_SEL_TGT_COMPOSE_BOUNDS:
838 if (!dev->has_compose_cap)
839 return -EINVAL;
840 sel->r = vivid_max_rect;
841 break;
842 case V4L2_SEL_TGT_COMPOSE:
843 if (!dev->has_compose_cap)
844 return -EINVAL;
845 sel->r = dev->compose_cap;
846 break;
847 case V4L2_SEL_TGT_COMPOSE_DEFAULT:
848 if (!dev->has_compose_cap)
849 return -EINVAL;
850 sel->r = dev->fmt_cap_rect;
851 break;
852 default:
853 return -EINVAL;
854 }
855 return 0;
856}
857
858int vivid_vid_cap_s_selection(struct file *file, void *fh, struct v4l2_selection *s)
859{
860 struct vivid_dev *dev = video_drvdata(file);
861 struct v4l2_rect *crop = &dev->crop_cap;
862 struct v4l2_rect *compose = &dev->compose_cap;
863 unsigned factor = V4L2_FIELD_HAS_T_OR_B(dev->field_cap) ? 2 : 1;
864 int ret;
865
866 if (!dev->has_crop_cap && !dev->has_compose_cap)
867 return -ENOTTY;
868 if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
869 return -EINVAL;
870 if (vivid_is_webcam(dev))
871 return -EINVAL;
872
873 switch (s->target) {
874 case V4L2_SEL_TGT_CROP:
875 if (!dev->has_crop_cap)
876 return -EINVAL;
877 ret = vivid_vid_adjust_sel(s->flags, &s->r);
878 if (ret)
879 return ret;
880 rect_set_min_size(&s->r, &vivid_min_rect);
881 rect_set_max_size(&s->r, &dev->src_rect);
882 rect_map_inside(&s->r, &dev->crop_bounds_cap);
883 s->r.top /= factor;
884 s->r.height /= factor;
885 if (dev->has_scaler_cap) {
886 struct v4l2_rect fmt = dev->fmt_cap_rect;
887 struct v4l2_rect max_rect = {
888 0, 0,
889 s->r.width * MAX_ZOOM,
890 s->r.height * MAX_ZOOM
891 };
892 struct v4l2_rect min_rect = {
893 0, 0,
894 s->r.width / MAX_ZOOM,
895 s->r.height / MAX_ZOOM
896 };
897
898 rect_set_min_size(&fmt, &min_rect);
899 if (!dev->has_compose_cap)
900 rect_set_max_size(&fmt, &max_rect);
901 if (!rect_same_size(&dev->fmt_cap_rect, &fmt) &&
902 vb2_is_busy(&dev->vb_vid_cap_q))
903 return -EBUSY;
904 if (dev->has_compose_cap) {
905 rect_set_min_size(compose, &min_rect);
906 rect_set_max_size(compose, &max_rect);
907 }
908 dev->fmt_cap_rect = fmt;
909 tpg_s_buf_height(&dev->tpg, fmt.height);
910 } else if (dev->has_compose_cap) {
911 struct v4l2_rect fmt = dev->fmt_cap_rect;
912
913 rect_set_min_size(&fmt, &s->r);
914 if (!rect_same_size(&dev->fmt_cap_rect, &fmt) &&
915 vb2_is_busy(&dev->vb_vid_cap_q))
916 return -EBUSY;
917 dev->fmt_cap_rect = fmt;
918 tpg_s_buf_height(&dev->tpg, fmt.height);
919 rect_set_size_to(compose, &s->r);
920 rect_map_inside(compose, &dev->fmt_cap_rect);
921 } else {
922 if (!rect_same_size(&s->r, &dev->fmt_cap_rect) &&
923 vb2_is_busy(&dev->vb_vid_cap_q))
924 return -EBUSY;
925 rect_set_size_to(&dev->fmt_cap_rect, &s->r);
926 rect_set_size_to(compose, &s->r);
927 rect_map_inside(compose, &dev->fmt_cap_rect);
928 tpg_s_buf_height(&dev->tpg, dev->fmt_cap_rect.height);
929 }
930 s->r.top *= factor;
931 s->r.height *= factor;
932 *crop = s->r;
933 break;
934 case V4L2_SEL_TGT_COMPOSE:
935 if (!dev->has_compose_cap)
936 return -EINVAL;
937 ret = vivid_vid_adjust_sel(s->flags, &s->r);
938 if (ret)
939 return ret;
940 rect_set_min_size(&s->r, &vivid_min_rect);
941 rect_set_max_size(&s->r, &dev->fmt_cap_rect);
942 if (dev->has_scaler_cap) {
943 struct v4l2_rect max_rect = {
944 0, 0,
945 dev->src_rect.width * MAX_ZOOM,
946 (dev->src_rect.height / factor) * MAX_ZOOM
947 };
948
949 rect_set_max_size(&s->r, &max_rect);
950 if (dev->has_crop_cap) {
951 struct v4l2_rect min_rect = {
952 0, 0,
953 s->r.width / MAX_ZOOM,
954 (s->r.height * factor) / MAX_ZOOM
955 };
956 struct v4l2_rect max_rect = {
957 0, 0,
958 s->r.width * MAX_ZOOM,
959 (s->r.height * factor) * MAX_ZOOM
960 };
961
962 rect_set_min_size(crop, &min_rect);
963 rect_set_max_size(crop, &max_rect);
964 rect_map_inside(crop, &dev->crop_bounds_cap);
965 }
966 } else if (dev->has_crop_cap) {
967 s->r.top *= factor;
968 s->r.height *= factor;
969 rect_set_max_size(&s->r, &dev->src_rect);
970 rect_set_size_to(crop, &s->r);
971 rect_map_inside(crop, &dev->crop_bounds_cap);
972 s->r.top /= factor;
973 s->r.height /= factor;
974 } else {
975 rect_set_size_to(&s->r, &dev->src_rect);
976 s->r.height /= factor;
977 }
978 rect_map_inside(&s->r, &dev->fmt_cap_rect);
979 if (dev->bitmap_cap && (compose->width != s->r.width ||
980 compose->height != s->r.height)) {
981 kfree(dev->bitmap_cap);
982 dev->bitmap_cap = NULL;
983 }
984 *compose = s->r;
985 break;
986 default:
987 return -EINVAL;
988 }
989
990 tpg_s_crop_compose(&dev->tpg, crop, compose);
991 return 0;
992}
993
994int vivid_vid_cap_cropcap(struct file *file, void *priv,
995 struct v4l2_cropcap *cap)
996{
997 struct vivid_dev *dev = video_drvdata(file);
998
999 if (cap->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
1000 return -EINVAL;
1001
1002 switch (vivid_get_pixel_aspect(dev)) {
1003 case TPG_PIXEL_ASPECT_NTSC:
1004 cap->pixelaspect.numerator = 11;
1005 cap->pixelaspect.denominator = 10;
1006 break;
1007 case TPG_PIXEL_ASPECT_PAL:
1008 cap->pixelaspect.numerator = 54;
1009 cap->pixelaspect.denominator = 59;
1010 break;
1011 case TPG_PIXEL_ASPECT_SQUARE:
1012 cap->pixelaspect.numerator = 1;
1013 cap->pixelaspect.denominator = 1;
1014 break;
1015 }
1016 return 0;
1017}
1018
1019int vidioc_enum_fmt_vid_overlay(struct file *file, void *priv,
1020 struct v4l2_fmtdesc *f)
1021{
9832e0e0 1022 struct vivid_dev *dev = video_drvdata(file);
ef834f78
HV
1023 const struct vivid_fmt *fmt;
1024
9832e0e0
HV
1025 if (dev->multiplanar)
1026 return -ENOTTY;
1027
ef834f78
HV
1028 if (f->index >= ARRAY_SIZE(formats_ovl))
1029 return -EINVAL;
1030
1031 fmt = &formats_ovl[f->index];
1032
ef834f78
HV
1033 f->pixelformat = fmt->fourcc;
1034 return 0;
1035}
1036
1037int vidioc_g_fmt_vid_overlay(struct file *file, void *priv,
1038 struct v4l2_format *f)
1039{
1040 struct vivid_dev *dev = video_drvdata(file);
1041 const struct v4l2_rect *compose = &dev->compose_cap;
1042 struct v4l2_window *win = &f->fmt.win;
1043 unsigned clipcount = win->clipcount;
1044
9832e0e0
HV
1045 if (dev->multiplanar)
1046 return -ENOTTY;
1047
ef834f78
HV
1048 win->w.top = dev->overlay_cap_top;
1049 win->w.left = dev->overlay_cap_left;
1050 win->w.width = compose->width;
1051 win->w.height = compose->height;
1052 win->field = dev->overlay_cap_field;
1053 win->clipcount = dev->clipcount_cap;
1054 if (clipcount > dev->clipcount_cap)
1055 clipcount = dev->clipcount_cap;
1056 if (dev->bitmap_cap == NULL)
1057 win->bitmap = NULL;
1058 else if (win->bitmap) {
1059 if (copy_to_user(win->bitmap, dev->bitmap_cap,
1060 ((compose->width + 7) / 8) * compose->height))
1061 return -EFAULT;
1062 }
1063 if (clipcount && win->clips) {
1064 if (copy_to_user(win->clips, dev->clips_cap,
1065 clipcount * sizeof(dev->clips_cap[0])))
1066 return -EFAULT;
1067 }
1068 return 0;
1069}
1070
1071int vidioc_try_fmt_vid_overlay(struct file *file, void *priv,
1072 struct v4l2_format *f)
1073{
1074 struct vivid_dev *dev = video_drvdata(file);
1075 const struct v4l2_rect *compose = &dev->compose_cap;
1076 struct v4l2_window *win = &f->fmt.win;
1077 int i, j;
1078
9832e0e0
HV
1079 if (dev->multiplanar)
1080 return -ENOTTY;
1081
ef834f78
HV
1082 win->w.left = clamp_t(int, win->w.left,
1083 -dev->fb_cap.fmt.width, dev->fb_cap.fmt.width);
1084 win->w.top = clamp_t(int, win->w.top,
1085 -dev->fb_cap.fmt.height, dev->fb_cap.fmt.height);
1086 win->w.width = compose->width;
1087 win->w.height = compose->height;
1088 if (win->field != V4L2_FIELD_BOTTOM && win->field != V4L2_FIELD_TOP)
1089 win->field = V4L2_FIELD_ANY;
1090 win->chromakey = 0;
1091 win->global_alpha = 0;
1092 if (win->clipcount && !win->clips)
1093 win->clipcount = 0;
1094 if (win->clipcount > MAX_CLIPS)
1095 win->clipcount = MAX_CLIPS;
1096 if (win->clipcount) {
1097 if (copy_from_user(dev->try_clips_cap, win->clips,
1098 win->clipcount * sizeof(dev->clips_cap[0])))
1099 return -EFAULT;
1100 for (i = 0; i < win->clipcount; i++) {
1101 struct v4l2_rect *r = &dev->try_clips_cap[i].c;
1102
1103 r->top = clamp_t(s32, r->top, 0, dev->fb_cap.fmt.height - 1);
1104 r->height = clamp_t(s32, r->height, 1, dev->fb_cap.fmt.height - r->top);
1105 r->left = clamp_t(u32, r->left, 0, dev->fb_cap.fmt.width - 1);
1106 r->width = clamp_t(u32, r->width, 1, dev->fb_cap.fmt.width - r->left);
1107 }
1108 /*
1109 * Yeah, so sue me, it's an O(n^2) algorithm. But n is a small
1110 * number and it's typically a one-time deal.
1111 */
1112 for (i = 0; i < win->clipcount - 1; i++) {
1113 struct v4l2_rect *r1 = &dev->try_clips_cap[i].c;
1114
1115 for (j = i + 1; j < win->clipcount; j++) {
1116 struct v4l2_rect *r2 = &dev->try_clips_cap[j].c;
1117
1118 if (rect_overlap(r1, r2))
1119 return -EINVAL;
1120 }
1121 }
1122 if (copy_to_user(win->clips, dev->try_clips_cap,
1123 win->clipcount * sizeof(dev->clips_cap[0])))
1124 return -EFAULT;
1125 }
1126 return 0;
1127}
1128
1129int vidioc_s_fmt_vid_overlay(struct file *file, void *priv,
1130 struct v4l2_format *f)
1131{
1132 struct vivid_dev *dev = video_drvdata(file);
1133 const struct v4l2_rect *compose = &dev->compose_cap;
1134 struct v4l2_window *win = &f->fmt.win;
1135 int ret = vidioc_try_fmt_vid_overlay(file, priv, f);
1136 unsigned bitmap_size = ((compose->width + 7) / 8) * compose->height;
1137 unsigned clips_size = win->clipcount * sizeof(dev->clips_cap[0]);
1138 void *new_bitmap = NULL;
1139
1140 if (ret)
1141 return ret;
1142
1143 if (win->bitmap) {
1144 new_bitmap = vzalloc(bitmap_size);
1145
1146 if (new_bitmap == NULL)
1147 return -ENOMEM;
1148 if (copy_from_user(new_bitmap, win->bitmap, bitmap_size)) {
1149 vfree(new_bitmap);
1150 return -EFAULT;
1151 }
1152 }
1153
1154 dev->overlay_cap_top = win->w.top;
1155 dev->overlay_cap_left = win->w.left;
1156 dev->overlay_cap_field = win->field;
1157 vfree(dev->bitmap_cap);
1158 dev->bitmap_cap = new_bitmap;
1159 dev->clipcount_cap = win->clipcount;
1160 if (dev->clipcount_cap)
1161 memcpy(dev->clips_cap, dev->try_clips_cap, clips_size);
1162 return 0;
1163}
1164
1165int vivid_vid_cap_overlay(struct file *file, void *fh, unsigned i)
1166{
1167 struct vivid_dev *dev = video_drvdata(file);
1168
9832e0e0
HV
1169 if (dev->multiplanar)
1170 return -ENOTTY;
1171
ef834f78
HV
1172 if (i && dev->fb_vbase_cap == NULL)
1173 return -EINVAL;
1174
1175 if (i && dev->fb_cap.fmt.pixelformat != dev->fmt_cap->fourcc) {
1176 dprintk(dev, 1, "mismatch between overlay and video capture pixelformats\n");
1177 return -EINVAL;
1178 }
1179
1180 if (dev->overlay_cap_owner && dev->overlay_cap_owner != fh)
1181 return -EBUSY;
1182 dev->overlay_cap_owner = i ? fh : NULL;
1183 return 0;
1184}
1185
1186int vivid_vid_cap_g_fbuf(struct file *file, void *fh,
1187 struct v4l2_framebuffer *a)
1188{
1189 struct vivid_dev *dev = video_drvdata(file);
1190
9832e0e0
HV
1191 if (dev->multiplanar)
1192 return -ENOTTY;
1193
ef834f78
HV
1194 *a = dev->fb_cap;
1195 a->capability = V4L2_FBUF_CAP_BITMAP_CLIPPING |
1196 V4L2_FBUF_CAP_LIST_CLIPPING;
1197 a->flags = V4L2_FBUF_FLAG_PRIMARY;
1198 a->fmt.field = V4L2_FIELD_NONE;
1199 a->fmt.colorspace = V4L2_COLORSPACE_SRGB;
1200 a->fmt.priv = 0;
1201 return 0;
1202}
1203
1204int vivid_vid_cap_s_fbuf(struct file *file, void *fh,
1205 const struct v4l2_framebuffer *a)
1206{
1207 struct vivid_dev *dev = video_drvdata(file);
1208 const struct vivid_fmt *fmt;
1209
9832e0e0
HV
1210 if (dev->multiplanar)
1211 return -ENOTTY;
1212
ef834f78
HV
1213 if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RAWIO))
1214 return -EPERM;
1215
1216 if (dev->overlay_cap_owner)
1217 return -EBUSY;
1218
1219 if (a->base == NULL) {
1220 dev->fb_cap.base = NULL;
1221 dev->fb_vbase_cap = NULL;
1222 return 0;
1223 }
1224
1225 if (a->fmt.width < 48 || a->fmt.height < 32)
1226 return -EINVAL;
1fc78bc9 1227 fmt = vivid_get_format(dev, a->fmt.pixelformat);
ef834f78
HV
1228 if (!fmt || !fmt->can_do_overlay)
1229 return -EINVAL;
96c76efa 1230 if (a->fmt.bytesperline < (a->fmt.width * fmt->bit_depth[0]) / 8)
ef834f78
HV
1231 return -EINVAL;
1232 if (a->fmt.height * a->fmt.bytesperline < a->fmt.sizeimage)
1233 return -EINVAL;
1234
1235 dev->fb_vbase_cap = phys_to_virt((unsigned long)a->base);
1236 dev->fb_cap = *a;
1237 dev->overlay_cap_left = clamp_t(int, dev->overlay_cap_left,
1238 -dev->fb_cap.fmt.width, dev->fb_cap.fmt.width);
1239 dev->overlay_cap_top = clamp_t(int, dev->overlay_cap_top,
1240 -dev->fb_cap.fmt.height, dev->fb_cap.fmt.height);
1241 return 0;
1242}
1243
1244static const struct v4l2_audio vivid_audio_inputs[] = {
1245 { 0, "TV", V4L2_AUDCAP_STEREO },
1246 { 1, "Line-In", V4L2_AUDCAP_STEREO },
1247};
1248
1249int vidioc_enum_input(struct file *file, void *priv,
1250 struct v4l2_input *inp)
1251{
1252 struct vivid_dev *dev = video_drvdata(file);
1253
1254 if (inp->index >= dev->num_inputs)
1255 return -EINVAL;
1256
1257 inp->type = V4L2_INPUT_TYPE_CAMERA;
1258 switch (dev->input_type[inp->index]) {
1259 case WEBCAM:
1260 snprintf(inp->name, sizeof(inp->name), "Webcam %u",
1261 dev->input_name_counter[inp->index]);
1262 inp->capabilities = 0;
1263 break;
1264 case TV:
1265 snprintf(inp->name, sizeof(inp->name), "TV %u",
1266 dev->input_name_counter[inp->index]);
1267 inp->type = V4L2_INPUT_TYPE_TUNER;
1268 inp->std = V4L2_STD_ALL;
1269 if (dev->has_audio_inputs)
1270 inp->audioset = (1 << ARRAY_SIZE(vivid_audio_inputs)) - 1;
1271 inp->capabilities = V4L2_IN_CAP_STD;
1272 break;
1273 case SVID:
1274 snprintf(inp->name, sizeof(inp->name), "S-Video %u",
1275 dev->input_name_counter[inp->index]);
1276 inp->std = V4L2_STD_ALL;
1277 if (dev->has_audio_inputs)
1278 inp->audioset = (1 << ARRAY_SIZE(vivid_audio_inputs)) - 1;
1279 inp->capabilities = V4L2_IN_CAP_STD;
1280 break;
1281 case HDMI:
1282 snprintf(inp->name, sizeof(inp->name), "HDMI %u",
1283 dev->input_name_counter[inp->index]);
1284 inp->capabilities = V4L2_IN_CAP_DV_TIMINGS;
1285 if (dev->edid_blocks == 0 ||
1286 dev->dv_timings_signal_mode == NO_SIGNAL)
1287 inp->status |= V4L2_IN_ST_NO_SIGNAL;
1288 else if (dev->dv_timings_signal_mode == NO_LOCK ||
1289 dev->dv_timings_signal_mode == OUT_OF_RANGE)
1290 inp->status |= V4L2_IN_ST_NO_H_LOCK;
1291 break;
1292 }
1293 if (dev->sensor_hflip)
1294 inp->status |= V4L2_IN_ST_HFLIP;
1295 if (dev->sensor_vflip)
1296 inp->status |= V4L2_IN_ST_VFLIP;
1297 if (dev->input == inp->index && vivid_is_sdtv_cap(dev)) {
1298 if (dev->std_signal_mode == NO_SIGNAL) {
1299 inp->status |= V4L2_IN_ST_NO_SIGNAL;
1300 } else if (dev->std_signal_mode == NO_LOCK) {
1301 inp->status |= V4L2_IN_ST_NO_H_LOCK;
1302 } else if (vivid_is_tv_cap(dev)) {
1303 switch (tpg_g_quality(&dev->tpg)) {
1304 case TPG_QUAL_GRAY:
1305 inp->status |= V4L2_IN_ST_COLOR_KILL;
1306 break;
1307 case TPG_QUAL_NOISE:
1308 inp->status |= V4L2_IN_ST_NO_H_LOCK;
1309 break;
1310 default:
1311 break;
1312 }
1313 }
1314 }
1315 return 0;
1316}
1317
1318int vidioc_g_input(struct file *file, void *priv, unsigned *i)
1319{
1320 struct vivid_dev *dev = video_drvdata(file);
1321
1322 *i = dev->input;
1323 return 0;
1324}
1325
1326int vidioc_s_input(struct file *file, void *priv, unsigned i)
1327{
1328 struct vivid_dev *dev = video_drvdata(file);
1329 struct v4l2_bt_timings *bt = &dev->dv_timings_cap.bt;
1330 unsigned brightness;
1331
1332 if (i >= dev->num_inputs)
1333 return -EINVAL;
1334
1335 if (i == dev->input)
1336 return 0;
1337
1338 if (vb2_is_busy(&dev->vb_vid_cap_q) || vb2_is_busy(&dev->vb_vbi_cap_q))
1339 return -EBUSY;
1340
1341 dev->input = i;
1342 dev->vid_cap_dev.tvnorms = 0;
1343 if (dev->input_type[i] == TV || dev->input_type[i] == SVID) {
1344 dev->tv_audio_input = (dev->input_type[i] == TV) ? 0 : 1;
1345 dev->vid_cap_dev.tvnorms = V4L2_STD_ALL;
1346 }
1347 dev->vbi_cap_dev.tvnorms = dev->vid_cap_dev.tvnorms;
1348 vivid_update_format_cap(dev, false);
1349
1350 if (dev->colorspace) {
1351 switch (dev->input_type[i]) {
1352 case WEBCAM:
cd8adbe7 1353 v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_SRGB);
ef834f78
HV
1354 break;
1355 case TV:
1356 case SVID:
cd8adbe7 1357 v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_170M);
ef834f78
HV
1358 break;
1359 case HDMI:
4a203349 1360 if (bt->flags & V4L2_DV_FL_IS_CE_VIDEO) {
ef834f78 1361 if (dev->src_rect.width == 720 && dev->src_rect.height <= 576)
cd8adbe7 1362 v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_170M);
ef834f78 1363 else
cd8adbe7 1364 v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_709);
ef834f78 1365 } else {
cd8adbe7 1366 v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_SRGB);
ef834f78
HV
1367 }
1368 break;
1369 }
1370 }
1371
1372 /*
1373 * Modify the brightness range depending on the input.
1374 * This makes it easy to use vivid to test if applications can
1375 * handle control range modifications and is also how this is
1376 * typically used in practice as different inputs may be hooked
1377 * up to different receivers with different control ranges.
1378 */
1379 brightness = 128 * i + dev->input_brightness[i];
1380 v4l2_ctrl_modify_range(dev->brightness,
1381 128 * i, 255 + 128 * i, 1, 128 + 128 * i);
1382 v4l2_ctrl_s_ctrl(dev->brightness, brightness);
1383 return 0;
1384}
1385
1386int vidioc_enumaudio(struct file *file, void *fh, struct v4l2_audio *vin)
1387{
1388 if (vin->index >= ARRAY_SIZE(vivid_audio_inputs))
1389 return -EINVAL;
1390 *vin = vivid_audio_inputs[vin->index];
1391 return 0;
1392}
1393
1394int vidioc_g_audio(struct file *file, void *fh, struct v4l2_audio *vin)
1395{
1396 struct vivid_dev *dev = video_drvdata(file);
1397
1398 if (!vivid_is_sdtv_cap(dev))
1399 return -EINVAL;
1400 *vin = vivid_audio_inputs[dev->tv_audio_input];
1401 return 0;
1402}
1403
1404int vidioc_s_audio(struct file *file, void *fh, const struct v4l2_audio *vin)
1405{
1406 struct vivid_dev *dev = video_drvdata(file);
1407
1408 if (!vivid_is_sdtv_cap(dev))
1409 return -EINVAL;
1410 if (vin->index >= ARRAY_SIZE(vivid_audio_inputs))
1411 return -EINVAL;
1412 dev->tv_audio_input = vin->index;
1413 return 0;
1414}
1415
1416int vivid_video_g_frequency(struct file *file, void *fh, struct v4l2_frequency *vf)
1417{
1418 struct vivid_dev *dev = video_drvdata(file);
1419
1420 if (vf->tuner != 0)
1421 return -EINVAL;
1422 vf->frequency = dev->tv_freq;
1423 return 0;
1424}
1425
1426int vivid_video_s_frequency(struct file *file, void *fh, const struct v4l2_frequency *vf)
1427{
1428 struct vivid_dev *dev = video_drvdata(file);
1429
1430 if (vf->tuner != 0)
1431 return -EINVAL;
1432 dev->tv_freq = clamp_t(unsigned, vf->frequency, MIN_TV_FREQ, MAX_TV_FREQ);
1433 if (vivid_is_tv_cap(dev))
1434 vivid_update_quality(dev);
1435 return 0;
1436}
1437
1438int vivid_video_s_tuner(struct file *file, void *fh, const struct v4l2_tuner *vt)
1439{
1440 struct vivid_dev *dev = video_drvdata(file);
1441
1442 if (vt->index != 0)
1443 return -EINVAL;
1444 if (vt->audmode > V4L2_TUNER_MODE_LANG1_LANG2)
1445 return -EINVAL;
1446 dev->tv_audmode = vt->audmode;
1447 return 0;
1448}
1449
1450int vivid_video_g_tuner(struct file *file, void *fh, struct v4l2_tuner *vt)
1451{
1452 struct vivid_dev *dev = video_drvdata(file);
1453 enum tpg_quality qual;
1454
1455 if (vt->index != 0)
1456 return -EINVAL;
1457
1458 vt->capability = V4L2_TUNER_CAP_NORM | V4L2_TUNER_CAP_STEREO |
1459 V4L2_TUNER_CAP_LANG1 | V4L2_TUNER_CAP_LANG2;
1460 vt->audmode = dev->tv_audmode;
1461 vt->rangelow = MIN_TV_FREQ;
1462 vt->rangehigh = MAX_TV_FREQ;
1463 qual = vivid_get_quality(dev, &vt->afc);
1464 if (qual == TPG_QUAL_COLOR)
1465 vt->signal = 0xffff;
1466 else if (qual == TPG_QUAL_GRAY)
1467 vt->signal = 0x8000;
1468 else
1469 vt->signal = 0;
1470 if (qual == TPG_QUAL_NOISE) {
1471 vt->rxsubchans = 0;
1472 } else if (qual == TPG_QUAL_GRAY) {
1473 vt->rxsubchans = V4L2_TUNER_SUB_MONO;
1474 } else {
1475 unsigned channel_nr = dev->tv_freq / (6 * 16);
1476 unsigned options = (dev->std_cap & V4L2_STD_NTSC_M) ? 4 : 3;
1477
1478 switch (channel_nr % options) {
1479 case 0:
1480 vt->rxsubchans = V4L2_TUNER_SUB_MONO;
1481 break;
1482 case 1:
1483 vt->rxsubchans = V4L2_TUNER_SUB_STEREO;
1484 break;
1485 case 2:
1486 if (dev->std_cap & V4L2_STD_NTSC_M)
1487 vt->rxsubchans = V4L2_TUNER_SUB_MONO | V4L2_TUNER_SUB_SAP;
1488 else
1489 vt->rxsubchans = V4L2_TUNER_SUB_LANG1 | V4L2_TUNER_SUB_LANG2;
1490 break;
1491 case 3:
1492 vt->rxsubchans = V4L2_TUNER_SUB_STEREO | V4L2_TUNER_SUB_SAP;
1493 break;
1494 }
1495 }
1496 strlcpy(vt->name, "TV Tuner", sizeof(vt->name));
1497 return 0;
1498}
1499
1500/* Must remain in sync with the vivid_ctrl_standard_strings array */
1501const v4l2_std_id vivid_standard[] = {
1502 V4L2_STD_NTSC_M,
1503 V4L2_STD_NTSC_M_JP,
1504 V4L2_STD_NTSC_M_KR,
1505 V4L2_STD_NTSC_443,
1506 V4L2_STD_PAL_BG | V4L2_STD_PAL_H,
1507 V4L2_STD_PAL_I,
1508 V4L2_STD_PAL_DK,
1509 V4L2_STD_PAL_M,
1510 V4L2_STD_PAL_N,
1511 V4L2_STD_PAL_Nc,
1512 V4L2_STD_PAL_60,
1513 V4L2_STD_SECAM_B | V4L2_STD_SECAM_G | V4L2_STD_SECAM_H,
1514 V4L2_STD_SECAM_DK,
1515 V4L2_STD_SECAM_L,
1516 V4L2_STD_SECAM_LC,
1517 V4L2_STD_UNKNOWN
1518};
1519
1520/* Must remain in sync with the vivid_standard array */
1521const char * const vivid_ctrl_standard_strings[] = {
1522 "NTSC-M",
1523 "NTSC-M-JP",
1524 "NTSC-M-KR",
1525 "NTSC-443",
1526 "PAL-BGH",
1527 "PAL-I",
1528 "PAL-DK",
1529 "PAL-M",
1530 "PAL-N",
1531 "PAL-Nc",
1532 "PAL-60",
1533 "SECAM-BGH",
1534 "SECAM-DK",
1535 "SECAM-L",
1536 "SECAM-Lc",
1537 NULL,
1538};
1539
1540int vidioc_querystd(struct file *file, void *priv, v4l2_std_id *id)
1541{
1542 struct vivid_dev *dev = video_drvdata(file);
1543
1544 if (!vivid_is_sdtv_cap(dev))
1545 return -ENODATA;
1546 if (dev->std_signal_mode == NO_SIGNAL ||
1547 dev->std_signal_mode == NO_LOCK) {
1548 *id = V4L2_STD_UNKNOWN;
1549 return 0;
1550 }
1551 if (vivid_is_tv_cap(dev) && tpg_g_quality(&dev->tpg) == TPG_QUAL_NOISE) {
1552 *id = V4L2_STD_UNKNOWN;
1553 } else if (dev->std_signal_mode == CURRENT_STD) {
1554 *id = dev->std_cap;
1555 } else if (dev->std_signal_mode == SELECTED_STD) {
1556 *id = dev->query_std;
1557 } else {
1558 *id = vivid_standard[dev->query_std_last];
1559 dev->query_std_last = (dev->query_std_last + 1) % ARRAY_SIZE(vivid_standard);
1560 }
1561
1562 return 0;
1563}
1564
1565int vivid_vid_cap_s_std(struct file *file, void *priv, v4l2_std_id id)
1566{
1567 struct vivid_dev *dev = video_drvdata(file);
1568
1569 if (!vivid_is_sdtv_cap(dev))
1570 return -ENODATA;
1571 if (dev->std_cap == id)
1572 return 0;
1573 if (vb2_is_busy(&dev->vb_vid_cap_q) || vb2_is_busy(&dev->vb_vbi_cap_q))
1574 return -EBUSY;
1575 dev->std_cap = id;
1576 vivid_update_format_cap(dev, false);
1577 return 0;
1578}
1579
5190931d
PL
1580static void find_aspect_ratio(u32 width, u32 height,
1581 u32 *num, u32 *denom)
1582{
1583 if (!(height % 3) && ((height * 4 / 3) == width)) {
1584 *num = 4;
1585 *denom = 3;
1586 } else if (!(height % 9) && ((height * 16 / 9) == width)) {
1587 *num = 16;
1588 *denom = 9;
1589 } else if (!(height % 10) && ((height * 16 / 10) == width)) {
1590 *num = 16;
1591 *denom = 10;
1592 } else if (!(height % 4) && ((height * 5 / 4) == width)) {
1593 *num = 5;
1594 *denom = 4;
1595 } else if (!(height % 9) && ((height * 15 / 9) == width)) {
1596 *num = 15;
1597 *denom = 9;
1598 } else { /* default to 16:9 */
1599 *num = 16;
1600 *denom = 9;
1601 }
1602}
1603
1604static bool valid_cvt_gtf_timings(struct v4l2_dv_timings *timings)
1605{
1606 struct v4l2_bt_timings *bt = &timings->bt;
1607 u32 total_h_pixel;
1608 u32 total_v_lines;
1609 u32 h_freq;
1610
1611 if (!v4l2_valid_dv_timings(timings, &vivid_dv_timings_cap,
1612 NULL, NULL))
1613 return false;
1614
1615 total_h_pixel = V4L2_DV_BT_FRAME_WIDTH(bt);
1616 total_v_lines = V4L2_DV_BT_FRAME_HEIGHT(bt);
1617
1618 h_freq = (u32)bt->pixelclock / total_h_pixel;
1619
1620 if (bt->standards == 0 || (bt->standards & V4L2_DV_BT_STD_CVT)) {
1621 if (v4l2_detect_cvt(total_v_lines, h_freq, bt->vsync,
1622 bt->polarities, timings))
1623 return true;
1624 }
1625
1626 if (bt->standards == 0 || (bt->standards & V4L2_DV_BT_STD_GTF)) {
1627 struct v4l2_fract aspect_ratio;
1628
1629 find_aspect_ratio(bt->width, bt->height,
1630 &aspect_ratio.numerator,
1631 &aspect_ratio.denominator);
1632 if (v4l2_detect_gtf(total_v_lines, h_freq, bt->vsync,
1633 bt->polarities, aspect_ratio, timings))
1634 return true;
1635 }
1636 return false;
1637}
1638
ef834f78
HV
1639int vivid_vid_cap_s_dv_timings(struct file *file, void *_fh,
1640 struct v4l2_dv_timings *timings)
1641{
1642 struct vivid_dev *dev = video_drvdata(file);
1643
1644 if (!vivid_is_hdmi_cap(dev))
1645 return -ENODATA;
ef834f78 1646 if (!v4l2_find_dv_timings_cap(timings, &vivid_dv_timings_cap,
5190931d
PL
1647 0, NULL, NULL) &&
1648 !valid_cvt_gtf_timings(timings))
ef834f78 1649 return -EINVAL;
5190931d 1650
ef834f78
HV
1651 if (v4l2_match_dv_timings(timings, &dev->dv_timings_cap, 0))
1652 return 0;
1bd0835a
HV
1653 if (vb2_is_busy(&dev->vb_vid_cap_q))
1654 return -EBUSY;
5190931d 1655
ef834f78
HV
1656 dev->dv_timings_cap = *timings;
1657 vivid_update_format_cap(dev, false);
1658 return 0;
1659}
1660
1661int vidioc_query_dv_timings(struct file *file, void *_fh,
1662 struct v4l2_dv_timings *timings)
1663{
1664 struct vivid_dev *dev = video_drvdata(file);
1665
1666 if (!vivid_is_hdmi_cap(dev))
1667 return -ENODATA;
1668 if (dev->dv_timings_signal_mode == NO_SIGNAL ||
1669 dev->edid_blocks == 0)
1670 return -ENOLINK;
1671 if (dev->dv_timings_signal_mode == NO_LOCK)
1672 return -ENOLCK;
1673 if (dev->dv_timings_signal_mode == OUT_OF_RANGE) {
1674 timings->bt.pixelclock = vivid_dv_timings_cap.bt.max_pixelclock * 2;
1675 return -ERANGE;
1676 }
1677 if (dev->dv_timings_signal_mode == CURRENT_DV_TIMINGS) {
1678 *timings = dev->dv_timings_cap;
1679 } else if (dev->dv_timings_signal_mode == SELECTED_DV_TIMINGS) {
1680 *timings = v4l2_dv_timings_presets[dev->query_dv_timings];
1681 } else {
1682 *timings = v4l2_dv_timings_presets[dev->query_dv_timings_last];
1683 dev->query_dv_timings_last = (dev->query_dv_timings_last + 1) %
1684 dev->query_dv_timings_size;
1685 }
1686 return 0;
1687}
1688
1689int vidioc_s_edid(struct file *file, void *_fh,
1690 struct v4l2_edid *edid)
1691{
1692 struct vivid_dev *dev = video_drvdata(file);
1693
1694 memset(edid->reserved, 0, sizeof(edid->reserved));
1695 if (edid->pad >= dev->num_inputs)
1696 return -EINVAL;
1697 if (dev->input_type[edid->pad] != HDMI || edid->start_block)
1698 return -EINVAL;
1699 if (edid->blocks == 0) {
1700 dev->edid_blocks = 0;
1701 return 0;
1702 }
1703 if (edid->blocks > dev->edid_max_blocks) {
1704 edid->blocks = dev->edid_max_blocks;
1705 return -E2BIG;
1706 }
1707 dev->edid_blocks = edid->blocks;
1708 memcpy(dev->edid, edid->edid, edid->blocks * 128);
1709 return 0;
1710}
1711
1712int vidioc_enum_framesizes(struct file *file, void *fh,
1713 struct v4l2_frmsizeenum *fsize)
1714{
1715 struct vivid_dev *dev = video_drvdata(file);
1716
1717 if (!vivid_is_webcam(dev) && !dev->has_scaler_cap)
1718 return -EINVAL;
1fc78bc9 1719 if (vivid_get_format(dev, fsize->pixel_format) == NULL)
ef834f78
HV
1720 return -EINVAL;
1721 if (vivid_is_webcam(dev)) {
1722 if (fsize->index >= ARRAY_SIZE(webcam_sizes))
1723 return -EINVAL;
1724 fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
1725 fsize->discrete = webcam_sizes[fsize->index];
1726 return 0;
1727 }
1728 if (fsize->index)
1729 return -EINVAL;
1730 fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE;
1731 fsize->stepwise.min_width = MIN_WIDTH;
1732 fsize->stepwise.max_width = MAX_WIDTH * MAX_ZOOM;
1733 fsize->stepwise.step_width = 2;
1734 fsize->stepwise.min_height = MIN_HEIGHT;
1735 fsize->stepwise.max_height = MAX_HEIGHT * MAX_ZOOM;
1736 fsize->stepwise.step_height = 2;
1737 return 0;
1738}
1739
1740/* timeperframe is arbitrary and continuous */
1741int vidioc_enum_frameintervals(struct file *file, void *priv,
1742 struct v4l2_frmivalenum *fival)
1743{
1744 struct vivid_dev *dev = video_drvdata(file);
1745 const struct vivid_fmt *fmt;
1746 int i;
1747
1fc78bc9 1748 fmt = vivid_get_format(dev, fival->pixel_format);
ef834f78
HV
1749 if (!fmt)
1750 return -EINVAL;
1751
1752 if (!vivid_is_webcam(dev)) {
ef834f78
HV
1753 if (fival->index)
1754 return -EINVAL;
1755 if (fival->width < MIN_WIDTH || fival->width > MAX_WIDTH * MAX_ZOOM)
1756 return -EINVAL;
1757 if (fival->height < MIN_HEIGHT || fival->height > MAX_HEIGHT * MAX_ZOOM)
1758 return -EINVAL;
29813a6f
HV
1759 fival->type = V4L2_FRMIVAL_TYPE_DISCRETE;
1760 fival->discrete = dev->timeperframe_vid_cap;
ef834f78
HV
1761 return 0;
1762 }
1763
1764 for (i = 0; i < ARRAY_SIZE(webcam_sizes); i++)
1765 if (fival->width == webcam_sizes[i].width &&
1766 fival->height == webcam_sizes[i].height)
1767 break;
1768 if (i == ARRAY_SIZE(webcam_sizes))
1769 return -EINVAL;
1381fb3e 1770 if (fival->index >= 2 * (VIVID_WEBCAM_SIZES - i))
ef834f78
HV
1771 return -EINVAL;
1772 fival->type = V4L2_FRMIVAL_TYPE_DISCRETE;
1773 fival->discrete = webcam_intervals[fival->index];
1774 return 0;
1775}
1776
1777int vivid_vid_cap_g_parm(struct file *file, void *priv,
1778 struct v4l2_streamparm *parm)
1779{
1780 struct vivid_dev *dev = video_drvdata(file);
1781
1782 if (parm->type != (dev->multiplanar ?
1783 V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE :
1784 V4L2_BUF_TYPE_VIDEO_CAPTURE))
1785 return -EINVAL;
1786
1787 parm->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
1788 parm->parm.capture.timeperframe = dev->timeperframe_vid_cap;
1789 parm->parm.capture.readbuffers = 1;
1790 return 0;
1791}
1792
/* Compare two v4l2_fract values: FRACT_CMP(a, <, b) is true iff a < b. */
#define FRACT_CMP(a, OP, b)	\
	((u64)(a).numerator * (b).denominator OP \
	 (u64)(b).numerator * (a).denominator)
1795
1796int vivid_vid_cap_s_parm(struct file *file, void *priv,
1797 struct v4l2_streamparm *parm)
1798{
1799 struct vivid_dev *dev = video_drvdata(file);
1381fb3e 1800 unsigned ival_sz = 2 * (VIVID_WEBCAM_SIZES - dev->webcam_size_idx);
ef834f78
HV
1801 struct v4l2_fract tpf;
1802 unsigned i;
1803
1804 if (parm->type != (dev->multiplanar ?
1805 V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE :
1806 V4L2_BUF_TYPE_VIDEO_CAPTURE))
1807 return -EINVAL;
1808 if (!vivid_is_webcam(dev))
1809 return vivid_vid_cap_g_parm(file, priv, parm);
1810
1811 tpf = parm->parm.capture.timeperframe;
1812
1813 if (tpf.denominator == 0)
1814 tpf = webcam_intervals[ival_sz - 1];
1815 for (i = 0; i < ival_sz; i++)
1816 if (FRACT_CMP(tpf, >=, webcam_intervals[i]))
1817 break;
1818 if (i == ival_sz)
1819 i = ival_sz - 1;
1820 dev->webcam_ival_idx = i;
1821 tpf = webcam_intervals[dev->webcam_ival_idx];
1822 tpf = FRACT_CMP(tpf, <, tpf_min) ? tpf_min : tpf;
1823 tpf = FRACT_CMP(tpf, >, tpf_max) ? tpf_max : tpf;
1824
1825 /* resync the thread's timings */
1826 dev->cap_seq_resync = true;
1827 dev->timeperframe_vid_cap = tpf;
1828 parm->parm.capture.timeperframe = tpf;
1829 parm->parm.capture.readbuffers = 1;
1830 return 0;
1831}