1 /*
2 * vivid-vid-cap.c - video capture support functions.
3 *
4 * Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
5 *
6 * This program is free software; you may redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; version 2 of the License.
9 *
10 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
11 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
12 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
13 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
14 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
15 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
16 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
17 * SOFTWARE.
18 */
19
20 #include <linux/errno.h>
21 #include <linux/kernel.h>
22 #include <linux/sched.h>
23 #include <linux/vmalloc.h>
24 #include <linux/videodev2.h>
25 #include <linux/v4l2-dv-timings.h>
26 #include <media/v4l2-common.h>
27 #include <media/v4l2-event.h>
28 #include <media/v4l2-dv-timings.h>
29 #include <media/v4l2-rect.h>
30
31 #include "vivid-core.h"
32 #include "vivid-vid-common.h"
33 #include "vivid-kthread-cap.h"
34 #include "vivid-vid-cap.h"
35
36 /* timeperframe: min/max */
37 static const struct v4l2_fract
38 tpf_min = {.numerator = 1, .denominator = FPS_MAX},
39 tpf_max = {.numerator = FPS_MAX, .denominator = 1};
40
41 static const struct vivid_fmt formats_ovl[] = {
42 {
43 .fourcc = V4L2_PIX_FMT_RGB565, /* gggbbbbb rrrrrggg */
44 .vdownsampling = { 1 },
45 .bit_depth = { 16 },
46 .planes = 1,
47 .buffers = 1,
48 },
49 {
50 .fourcc = V4L2_PIX_FMT_XRGB555, /* gggbbbbb arrrrrgg */
51 .vdownsampling = { 1 },
52 .bit_depth = { 16 },
53 .planes = 1,
54 .buffers = 1,
55 },
56 {
57 .fourcc = V4L2_PIX_FMT_ARGB555, /* gggbbbbb arrrrrgg */
58 .vdownsampling = { 1 },
59 .bit_depth = { 16 },
60 .planes = 1,
61 .buffers = 1,
62 },
63 };
64
65 /* The number of discrete webcam framesizes */
66 #define VIVID_WEBCAM_SIZES 5
67 /* The number of discrete webcam frameintervals */
68 #define VIVID_WEBCAM_IVALS (VIVID_WEBCAM_SIZES * 2)
69
70 /* Sizes must be in increasing order */
71 static const struct v4l2_frmsize_discrete webcam_sizes[VIVID_WEBCAM_SIZES] = {
72 { 320, 180 },
73 { 640, 360 },
74 { 1280, 720 },
75 { 1920, 1080 },
76 { 3840, 2160 },
77 };
78
79 /*
80  * Intervals must be listed from longest to shortest (increasing frame
81  * rate) and there must be twice as many elements as in webcam_sizes.
82 */
83 static const struct v4l2_fract webcam_intervals[VIVID_WEBCAM_IVALS] = {
84 { 1, 1 },
85 { 1, 2 },
86 { 1, 4 },
87 { 1, 5 },
88 { 1, 10 },
89 { 1, 15 },
90 { 1, 25 },
91 { 1, 30 },
92 { 1, 50 },
93 { 1, 60 },
94 };
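/*
 * For illustration, given the 2 * (VIVID_WEBCAM_SIZES - idx) rule used by
 * vivid_s_fmt_vid_cap(), vidioc_enum_frameintervals() and
 * vivid_vid_cap_s_parm() below: 320x180 (index 0) exposes all ten intervals
 * (up to 60 fps), 1920x1080 (index 3) only the first four (at most 5 fps)
 * and 3840x2160 (index 4) only the first two (at most 2 fps).
 */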
95
96 static const struct v4l2_discrete_probe webcam_probe = {
97 webcam_sizes,
98 VIVID_WEBCAM_SIZES
99 };
100
101 static int vid_cap_queue_setup(struct vb2_queue *vq,
102 unsigned *nbuffers, unsigned *nplanes,
103 unsigned sizes[], struct device *alloc_devs[])
104 {
105 struct vivid_dev *dev = vb2_get_drv_priv(vq);
106 unsigned buffers = tpg_g_buffers(&dev->tpg);
107 unsigned h = dev->fmt_cap_rect.height;
108 unsigned p;
109
110 if (dev->field_cap == V4L2_FIELD_ALTERNATE) {
111 /*
112 * You cannot use read() with FIELD_ALTERNATE since the field
113 * information (TOP/BOTTOM) cannot be passed back to the user.
114 */
115 if (vb2_fileio_is_active(vq))
116 return -EINVAL;
117 }
118
119 if (dev->queue_setup_error) {
120 /*
121 * Error injection: test what happens if queue_setup() returns
122 * an error.
123 */
124 dev->queue_setup_error = false;
125 return -EINVAL;
126 }
127 if (*nplanes) {
128 /*
129  * Check that the number of requested planes matches the number of
130  * buffers in the current format. You can't mix the two.
131 */
132 if (*nplanes != buffers)
133 return -EINVAL;
134 for (p = 0; p < buffers; p++) {
135 if (sizes[p] < tpg_g_line_width(&dev->tpg, p) * h +
136 dev->fmt_cap->data_offset[p])
137 return -EINVAL;
138 }
139 } else {
140 for (p = 0; p < buffers; p++)
141 sizes[p] = tpg_g_line_width(&dev->tpg, p) * h +
142 dev->fmt_cap->data_offset[p];
143 }
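	/*
	 * For illustration, assuming single-planar YUYV (2 bytes per pixel)
	 * at 1280x720 with the minimum bytesperline of 2560: the one plane
	 * then needs at least 2560 * 720 = 1843200 bytes, plus whatever
	 * data_offset the selected format defines.
	 */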
144
145 if (vq->num_buffers + *nbuffers < 2)
146 *nbuffers = 2 - vq->num_buffers;
147
148 *nplanes = buffers;
149
150 dprintk(dev, 1, "%s: count=%d\n", __func__, *nbuffers);
151 for (p = 0; p < buffers; p++)
152 dprintk(dev, 1, "%s: size[%u]=%u\n", __func__, p, sizes[p]);
153
154 return 0;
155 }
156
157 static int vid_cap_buf_prepare(struct vb2_buffer *vb)
158 {
159 struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
160 unsigned long size;
161 unsigned buffers = tpg_g_buffers(&dev->tpg);
162 unsigned p;
163
164 dprintk(dev, 1, "%s\n", __func__);
165
166 if (WARN_ON(NULL == dev->fmt_cap))
167 return -EINVAL;
168
169 if (dev->buf_prepare_error) {
170 /*
171 * Error injection: test what happens if buf_prepare() returns
172 * an error.
173 */
174 dev->buf_prepare_error = false;
175 return -EINVAL;
176 }
177 for (p = 0; p < buffers; p++) {
178 size = tpg_g_line_width(&dev->tpg, p) * dev->fmt_cap_rect.height +
179 dev->fmt_cap->data_offset[p];
180
181 if (vb2_plane_size(vb, p) < size) {
182 dprintk(dev, 1, "%s data will not fit into plane %u (%lu < %lu)\n",
183 __func__, p, vb2_plane_size(vb, p), size);
184 return -EINVAL;
185 }
186
187 vb2_set_plane_payload(vb, p, size);
188 vb->planes[p].data_offset = dev->fmt_cap->data_offset[p];
189 }
190
191 return 0;
192 }
193
194 static void vid_cap_buf_finish(struct vb2_buffer *vb)
195 {
196 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
197 struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
198 struct v4l2_timecode *tc = &vbuf->timecode;
199 unsigned fps = 25;
200 unsigned seq = vbuf->sequence;
201
202 if (!vivid_is_sdtv_cap(dev))
203 return;
204
205 /*
206 * Set the timecode. Rarely used, so it is interesting to
207 * test this.
208 */
209 vbuf->flags |= V4L2_BUF_FLAG_TIMECODE;
210 if (dev->std_cap & V4L2_STD_525_60)
211 fps = 30;
212 tc->type = (fps == 30) ? V4L2_TC_TYPE_30FPS : V4L2_TC_TYPE_25FPS;
213 tc->flags = 0;
214 tc->frames = seq % fps;
215 tc->seconds = (seq / fps) % 60;
216 tc->minutes = (seq / (60 * fps)) % 60;
217 tc->hours = (seq / (60 * 60 * fps)) % 24;
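	/*
	 * For illustration: at 30 fps, sequence number 3661 becomes
	 * frames = 3661 % 30 = 1, seconds = (3661 / 30) % 60 = 2,
	 * minutes = (3661 / 1800) % 60 = 2, hours = 0, i.e. 00:02:02:01.
	 */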
218 }
219
220 static void vid_cap_buf_queue(struct vb2_buffer *vb)
221 {
222 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
223 struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
224 struct vivid_buffer *buf = container_of(vbuf, struct vivid_buffer, vb);
225
226 dprintk(dev, 1, "%s\n", __func__);
227
228 spin_lock(&dev->slock);
229 list_add_tail(&buf->list, &dev->vid_cap_active);
230 spin_unlock(&dev->slock);
231 }
232
233 static int vid_cap_start_streaming(struct vb2_queue *vq, unsigned count)
234 {
235 struct vivid_dev *dev = vb2_get_drv_priv(vq);
236 unsigned i;
237 int err;
238
239 if (vb2_is_streaming(&dev->vb_vid_out_q))
240 dev->can_loop_video = vivid_vid_can_loop(dev);
241
242 if (dev->kthread_vid_cap)
243 return 0;
244
245 dev->vid_cap_seq_count = 0;
246 dprintk(dev, 1, "%s\n", __func__);
247 for (i = 0; i < VIDEO_MAX_FRAME; i++)
248 dev->must_blank[i] = tpg_g_perc_fill(&dev->tpg) < 100;
249 if (dev->start_streaming_error) {
250 dev->start_streaming_error = false;
251 err = -EINVAL;
252 } else {
253 err = vivid_start_generating_vid_cap(dev, &dev->vid_cap_streaming);
254 }
255 if (err) {
256 struct vivid_buffer *buf, *tmp;
257
258 list_for_each_entry_safe(buf, tmp, &dev->vid_cap_active, list) {
259 list_del(&buf->list);
260 vb2_buffer_done(&buf->vb.vb2_buf,
261 VB2_BUF_STATE_QUEUED);
262 }
263 }
264 return err;
265 }
266
267 /* abort streaming and wait for last buffer */
268 static void vid_cap_stop_streaming(struct vb2_queue *vq)
269 {
270 struct vivid_dev *dev = vb2_get_drv_priv(vq);
271
272 dprintk(dev, 1, "%s\n", __func__);
273 vivid_stop_generating_vid_cap(dev, &dev->vid_cap_streaming);
274 dev->can_loop_video = false;
275 }
276
277 const struct vb2_ops vivid_vid_cap_qops = {
278 .queue_setup = vid_cap_queue_setup,
279 .buf_prepare = vid_cap_buf_prepare,
280 .buf_finish = vid_cap_buf_finish,
281 .buf_queue = vid_cap_buf_queue,
282 .start_streaming = vid_cap_start_streaming,
283 .stop_streaming = vid_cap_stop_streaming,
284 .wait_prepare = vb2_ops_wait_prepare,
285 .wait_finish = vb2_ops_wait_finish,
286 };
287
288 /*
289 * Determine the 'picture' quality based on the current TV frequency: either
290 * COLOR for a good 'signal', GRAY (grayscale picture) for a slightly off
291 * signal or NOISE for no signal.
292 */
293 void vivid_update_quality(struct vivid_dev *dev)
294 {
295 unsigned freq_modulus;
296
297 if (dev->loop_video && (vivid_is_svid_cap(dev) || vivid_is_hdmi_cap(dev))) {
298 /*
299 * The 'noise' will only be replaced by the actual video
300 * if the output video matches the input video settings.
301 */
302 tpg_s_quality(&dev->tpg, TPG_QUAL_NOISE, 0);
303 return;
304 }
305 if (vivid_is_hdmi_cap(dev) && VIVID_INVALID_SIGNAL(dev->dv_timings_signal_mode)) {
306 tpg_s_quality(&dev->tpg, TPG_QUAL_NOISE, 0);
307 return;
308 }
309 if (vivid_is_sdtv_cap(dev) && VIVID_INVALID_SIGNAL(dev->std_signal_mode)) {
310 tpg_s_quality(&dev->tpg, TPG_QUAL_NOISE, 0);
311 return;
312 }
313 if (!vivid_is_tv_cap(dev)) {
314 tpg_s_quality(&dev->tpg, TPG_QUAL_COLOR, 0);
315 return;
316 }
317
318 /*
319 * There is a fake channel every 6 MHz at 49.25, 55.25, etc.
320  * Within +/- 0.25 MHz of the channel there is color, out to +/- 1 MHz
321  * the picture turns grayscale (chroma is lost), and everywhere else
322  * there is just noise.
323 */
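	/*
	 * For illustration: tv_freq is in units of 1/16 MHz, so the channel
	 * at 55.25 MHz is tv_freq = 884 and freq_modulus = (884 - 676) % 96
	 * = 16, which lands in the [12, 20] color window. Tuning 0.5 MHz
	 * high (892) gives 24 -> grayscale; 2 MHz high (916) gives 48 -> noise.
	 */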
324 freq_modulus = (dev->tv_freq - 676 /* (43.25-1) * 16 */) % (6 * 16);
325 if (freq_modulus > 2 * 16) {
326 tpg_s_quality(&dev->tpg, TPG_QUAL_NOISE,
327 next_pseudo_random32(dev->tv_freq ^ 0x55) & 0x3f);
328 return;
329 }
330 if (freq_modulus < 12 /*0.75 * 16*/ || freq_modulus > 20 /*1.25 * 16*/)
331 tpg_s_quality(&dev->tpg, TPG_QUAL_GRAY, 0);
332 else
333 tpg_s_quality(&dev->tpg, TPG_QUAL_COLOR, 0);
334 }
335
336 /*
337 * Get the current picture quality and the associated afc value.
338 */
339 static enum tpg_quality vivid_get_quality(struct vivid_dev *dev, s32 *afc)
340 {
341 unsigned freq_modulus;
342
343 if (afc)
344 *afc = 0;
345 if (tpg_g_quality(&dev->tpg) == TPG_QUAL_COLOR ||
346 tpg_g_quality(&dev->tpg) == TPG_QUAL_NOISE)
347 return tpg_g_quality(&dev->tpg);
348
349 /*
350 * There is a fake channel every 6 MHz at 49.25, 55.25, etc.
351  * Within +/- 0.25 MHz of the channel there is color, out to +/- 1 MHz
352  * the picture turns grayscale (chroma is lost), and everywhere else
353  * it is just gray.
354 */
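	/*
	 * For illustration: afc is 0 at the channel centre (freq_modulus 16),
	 * negative when tuned below the channel and positive when tuned above,
	 * e.g. tuning 0.25 MHz (4 units of 1/16 MHz) high gives afc = +4.
	 */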
355 freq_modulus = (dev->tv_freq - 676 /* (43.25-1) * 16 */) % (6 * 16);
356 if (afc)
357 *afc = freq_modulus - 1 * 16;
358 return TPG_QUAL_GRAY;
359 }
360
361 enum tpg_video_aspect vivid_get_video_aspect(const struct vivid_dev *dev)
362 {
363 if (vivid_is_sdtv_cap(dev))
364 return dev->std_aspect_ratio;
365
366 if (vivid_is_hdmi_cap(dev))
367 return dev->dv_timings_aspect_ratio;
368
369 return TPG_VIDEO_ASPECT_IMAGE;
370 }
371
372 static enum tpg_pixel_aspect vivid_get_pixel_aspect(const struct vivid_dev *dev)
373 {
374 if (vivid_is_sdtv_cap(dev))
375 return (dev->std_cap & V4L2_STD_525_60) ?
376 TPG_PIXEL_ASPECT_NTSC : TPG_PIXEL_ASPECT_PAL;
377
378 if (vivid_is_hdmi_cap(dev) &&
379 dev->src_rect.width == 720 && dev->src_rect.height <= 576)
380 return dev->src_rect.height == 480 ?
381 TPG_PIXEL_ASPECT_NTSC : TPG_PIXEL_ASPECT_PAL;
382
383 return TPG_PIXEL_ASPECT_SQUARE;
384 }
385
386 /*
387  * Called whenever the format has to be reset, which can occur when
388  * changing inputs, standards, timings, etc.
389 */
390 void vivid_update_format_cap(struct vivid_dev *dev, bool keep_controls)
391 {
392 struct v4l2_bt_timings *bt = &dev->dv_timings_cap.bt;
393 unsigned size;
394 u64 pixelclock;
395
396 switch (dev->input_type[dev->input]) {
397 case WEBCAM:
398 default:
399 dev->src_rect.width = webcam_sizes[dev->webcam_size_idx].width;
400 dev->src_rect.height = webcam_sizes[dev->webcam_size_idx].height;
401 dev->timeperframe_vid_cap = webcam_intervals[dev->webcam_ival_idx];
402 dev->field_cap = V4L2_FIELD_NONE;
403 tpg_s_rgb_range(&dev->tpg, V4L2_DV_RGB_RANGE_AUTO);
404 break;
405 case TV:
406 case SVID:
407 dev->field_cap = dev->tv_field_cap;
408 dev->src_rect.width = 720;
409 if (dev->std_cap & V4L2_STD_525_60) {
410 dev->src_rect.height = 480;
411 dev->timeperframe_vid_cap = (struct v4l2_fract) { 1001, 30000 };
412 dev->service_set_cap = V4L2_SLICED_CAPTION_525;
413 } else {
414 dev->src_rect.height = 576;
415 dev->timeperframe_vid_cap = (struct v4l2_fract) { 1000, 25000 };
416 dev->service_set_cap = V4L2_SLICED_WSS_625 | V4L2_SLICED_TELETEXT_B;
417 }
418 tpg_s_rgb_range(&dev->tpg, V4L2_DV_RGB_RANGE_AUTO);
419 break;
420 case HDMI:
421 dev->src_rect.width = bt->width;
422 dev->src_rect.height = bt->height;
423 size = V4L2_DV_BT_FRAME_WIDTH(bt) * V4L2_DV_BT_FRAME_HEIGHT(bt);
424 if (dev->reduced_fps && can_reduce_fps(bt)) {
425 pixelclock = div_u64(bt->pixelclock * 1000, 1001);
426 bt->flags |= V4L2_DV_FL_REDUCED_FPS;
427 } else {
428 pixelclock = bt->pixelclock;
429 bt->flags &= ~V4L2_DV_FL_REDUCED_FPS;
430 }
431 dev->timeperframe_vid_cap = (struct v4l2_fract) {
432 size / 100, (u32)pixelclock / 100
433 };
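		/*
		 * For illustration: 1080p60 CEA timings have a total frame of
		 * 2200 x 1125 pixels and a 148.5 MHz pixel clock, so this
		 * yields 24750 / 1485000 = 1/60 s per frame; with
		 * V4L2_DV_FL_REDUCED_FPS the result is roughly 1/59.94 s.
		 */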
434 if (bt->interlaced)
435 dev->field_cap = V4L2_FIELD_ALTERNATE;
436 else
437 dev->field_cap = V4L2_FIELD_NONE;
438
439 /*
440  * We can be called from within s_ctrl, in which case we can't
441  * set/get controls. Luckily we don't need to in that case.
442 */
443 if (keep_controls || !dev->colorspace)
444 break;
445 if (bt->flags & V4L2_DV_FL_IS_CE_VIDEO) {
446 if (bt->width == 720 && bt->height <= 576)
447 v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_170M);
448 else
449 v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_709);
450 v4l2_ctrl_s_ctrl(dev->real_rgb_range_cap, 1);
451 } else {
452 v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_SRGB);
453 v4l2_ctrl_s_ctrl(dev->real_rgb_range_cap, 0);
454 }
455 tpg_s_rgb_range(&dev->tpg, v4l2_ctrl_g_ctrl(dev->rgb_range_cap));
456 break;
457 }
458 vivid_update_quality(dev);
459 tpg_reset_source(&dev->tpg, dev->src_rect.width, dev->src_rect.height, dev->field_cap);
460 dev->crop_cap = dev->src_rect;
461 dev->crop_bounds_cap = dev->src_rect;
462 dev->compose_cap = dev->crop_cap;
463 if (V4L2_FIELD_HAS_T_OR_B(dev->field_cap))
464 dev->compose_cap.height /= 2;
465 dev->fmt_cap_rect = dev->compose_cap;
466 tpg_s_video_aspect(&dev->tpg, vivid_get_video_aspect(dev));
467 tpg_s_pixel_aspect(&dev->tpg, vivid_get_pixel_aspect(dev));
468 tpg_update_mv_step(&dev->tpg);
469 }
470
471 /* Map the field to something that is valid for the current input */
472 static enum v4l2_field vivid_field_cap(struct vivid_dev *dev, enum v4l2_field field)
473 {
474 if (vivid_is_sdtv_cap(dev)) {
475 switch (field) {
476 case V4L2_FIELD_INTERLACED_TB:
477 case V4L2_FIELD_INTERLACED_BT:
478 case V4L2_FIELD_SEQ_TB:
479 case V4L2_FIELD_SEQ_BT:
480 case V4L2_FIELD_TOP:
481 case V4L2_FIELD_BOTTOM:
482 case V4L2_FIELD_ALTERNATE:
483 return field;
484 case V4L2_FIELD_INTERLACED:
485 default:
486 return V4L2_FIELD_INTERLACED;
487 }
488 }
489 if (vivid_is_hdmi_cap(dev))
490 return dev->dv_timings_cap.bt.interlaced ? V4L2_FIELD_ALTERNATE :
491 V4L2_FIELD_NONE;
492 return V4L2_FIELD_NONE;
493 }
494
495 static unsigned vivid_colorspace_cap(struct vivid_dev *dev)
496 {
497 if (!dev->loop_video || vivid_is_webcam(dev) || vivid_is_tv_cap(dev))
498 return tpg_g_colorspace(&dev->tpg);
499 return dev->colorspace_out;
500 }
501
502 static unsigned vivid_xfer_func_cap(struct vivid_dev *dev)
503 {
504 if (!dev->loop_video || vivid_is_webcam(dev) || vivid_is_tv_cap(dev))
505 return tpg_g_xfer_func(&dev->tpg);
506 return dev->xfer_func_out;
507 }
508
509 static unsigned vivid_ycbcr_enc_cap(struct vivid_dev *dev)
510 {
511 if (!dev->loop_video || vivid_is_webcam(dev) || vivid_is_tv_cap(dev))
512 return tpg_g_ycbcr_enc(&dev->tpg);
513 return dev->ycbcr_enc_out;
514 }
515
516 static unsigned int vivid_hsv_enc_cap(struct vivid_dev *dev)
517 {
518 if (!dev->loop_video || vivid_is_webcam(dev) || vivid_is_tv_cap(dev))
519 return tpg_g_hsv_enc(&dev->tpg);
520 return dev->hsv_enc_out;
521 }
522
523 static unsigned vivid_quantization_cap(struct vivid_dev *dev)
524 {
525 if (!dev->loop_video || vivid_is_webcam(dev) || vivid_is_tv_cap(dev))
526 return tpg_g_quantization(&dev->tpg);
527 return dev->quantization_out;
528 }
529
530 int vivid_g_fmt_vid_cap(struct file *file, void *priv,
531 struct v4l2_format *f)
532 {
533 struct vivid_dev *dev = video_drvdata(file);
534 struct v4l2_pix_format_mplane *mp = &f->fmt.pix_mp;
535 unsigned p;
536
537 mp->width = dev->fmt_cap_rect.width;
538 mp->height = dev->fmt_cap_rect.height;
539 mp->field = dev->field_cap;
540 mp->pixelformat = dev->fmt_cap->fourcc;
541 mp->colorspace = vivid_colorspace_cap(dev);
542 mp->xfer_func = vivid_xfer_func_cap(dev);
543 if (dev->fmt_cap->color_enc == TGP_COLOR_ENC_HSV)
544 mp->hsv_enc = vivid_hsv_enc_cap(dev);
545 else
546 mp->ycbcr_enc = vivid_ycbcr_enc_cap(dev);
547 mp->quantization = vivid_quantization_cap(dev);
548 mp->num_planes = dev->fmt_cap->buffers;
549 for (p = 0; p < mp->num_planes; p++) {
550 mp->plane_fmt[p].bytesperline = tpg_g_bytesperline(&dev->tpg, p);
551 mp->plane_fmt[p].sizeimage =
552 tpg_g_line_width(&dev->tpg, p) * mp->height +
553 dev->fmt_cap->data_offset[p];
554 }
555 return 0;
556 }
557
558 int vivid_try_fmt_vid_cap(struct file *file, void *priv,
559 struct v4l2_format *f)
560 {
561 struct v4l2_pix_format_mplane *mp = &f->fmt.pix_mp;
562 struct v4l2_plane_pix_format *pfmt = mp->plane_fmt;
563 struct vivid_dev *dev = video_drvdata(file);
564 const struct vivid_fmt *fmt;
565 unsigned bytesperline, max_bpl;
566 unsigned factor = 1;
567 unsigned w, h;
568 unsigned p;
569
570 fmt = vivid_get_format(dev, mp->pixelformat);
571 if (!fmt) {
572 dprintk(dev, 1, "Fourcc format (0x%08x) unknown.\n",
573 mp->pixelformat);
574 mp->pixelformat = V4L2_PIX_FMT_YUYV;
575 fmt = vivid_get_format(dev, mp->pixelformat);
576 }
577
578 mp->field = vivid_field_cap(dev, mp->field);
579 if (vivid_is_webcam(dev)) {
580 const struct v4l2_frmsize_discrete *sz =
581 v4l2_find_nearest_format(&webcam_probe, mp->width, mp->height);
582
583 w = sz->width;
584 h = sz->height;
585 } else if (vivid_is_sdtv_cap(dev)) {
586 w = 720;
587 h = (dev->std_cap & V4L2_STD_525_60) ? 480 : 576;
588 } else {
589 w = dev->src_rect.width;
590 h = dev->src_rect.height;
591 }
592 if (V4L2_FIELD_HAS_T_OR_B(mp->field))
593 factor = 2;
594 if (vivid_is_webcam(dev) ||
595 (!dev->has_scaler_cap && !dev->has_crop_cap && !dev->has_compose_cap)) {
596 mp->width = w;
597 mp->height = h / factor;
598 } else {
599 struct v4l2_rect r = { 0, 0, mp->width, mp->height * factor };
600
601 v4l2_rect_set_min_size(&r, &vivid_min_rect);
602 v4l2_rect_set_max_size(&r, &vivid_max_rect);
603 if (dev->has_scaler_cap && !dev->has_compose_cap) {
604 struct v4l2_rect max_r = { 0, 0, MAX_ZOOM * w, MAX_ZOOM * h };
605
606 v4l2_rect_set_max_size(&r, &max_r);
607 } else if (!dev->has_scaler_cap && dev->has_crop_cap && !dev->has_compose_cap) {
608 v4l2_rect_set_max_size(&r, &dev->src_rect);
609 } else if (!dev->has_scaler_cap && !dev->has_crop_cap) {
610 v4l2_rect_set_min_size(&r, &dev->src_rect);
611 }
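		/*
		 * For illustration (assuming MAX_ZOOM is 4, as in
		 * vivid-core.h): with a 1280x720 source and only the scaler
		 * enabled the request is clamped to at most 5120x2880, while
		 * with only cropping enabled it is clamped to the source size.
		 */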
612 mp->width = r.width;
613 mp->height = r.height / factor;
614 }
615
616 /* This driver supports custom bytesperline values */
617
618 mp->num_planes = fmt->buffers;
619 for (p = 0; p < fmt->buffers; p++) {
620 /* Calculate the minimum supported bytesperline value */
621 bytesperline = (mp->width * fmt->bit_depth[p]) >> 3;
622 /* Calculate the maximum supported bytesperline value */
623 max_bpl = (MAX_ZOOM * MAX_WIDTH * fmt->bit_depth[p]) >> 3;
624
625 if (pfmt[p].bytesperline > max_bpl)
626 pfmt[p].bytesperline = max_bpl;
627 if (pfmt[p].bytesperline < bytesperline)
628 pfmt[p].bytesperline = bytesperline;
629
630 pfmt[p].sizeimage = (pfmt[p].bytesperline * mp->height) /
631 fmt->vdownsampling[p] + fmt->data_offset[p];
632
633 memset(pfmt[p].reserved, 0, sizeof(pfmt[p].reserved));
634 }
635 for (p = fmt->buffers; p < fmt->planes; p++)
636 pfmt[0].sizeimage += (pfmt[0].bytesperline * mp->height *
637 (fmt->bit_depth[p] / fmt->vdownsampling[p])) /
638 (fmt->bit_depth[0] / fmt->vdownsampling[0]);
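	/*
	 * For illustration, assuming NV12 (2 planes, 1 buffer, bit_depth
	 * { 8, 8 }, vdownsampling { 1, 2 } in vivid-vid-common.c): at
	 * 1280x720 with bytesperline 1280 the loop above adds
	 * 1280 * 720 * (8 / 2) / (8 / 1) = 460800 bytes for the chroma
	 * plane on top of the 921600-byte luma part, i.e. 1.5 bytes/pixel.
	 */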
639
640 mp->colorspace = vivid_colorspace_cap(dev);
641 if (fmt->color_enc == TGP_COLOR_ENC_HSV)
642 mp->hsv_enc = vivid_hsv_enc_cap(dev);
643 else
644 mp->ycbcr_enc = vivid_ycbcr_enc_cap(dev);
645 mp->xfer_func = vivid_xfer_func_cap(dev);
646 mp->quantization = vivid_quantization_cap(dev);
647 memset(mp->reserved, 0, sizeof(mp->reserved));
648 return 0;
649 }
650
651 int vivid_s_fmt_vid_cap(struct file *file, void *priv,
652 struct v4l2_format *f)
653 {
654 struct v4l2_pix_format_mplane *mp = &f->fmt.pix_mp;
655 struct vivid_dev *dev = video_drvdata(file);
656 struct v4l2_rect *crop = &dev->crop_cap;
657 struct v4l2_rect *compose = &dev->compose_cap;
658 struct vb2_queue *q = &dev->vb_vid_cap_q;
659 int ret = vivid_try_fmt_vid_cap(file, priv, f);
660 unsigned factor = 1;
661 unsigned p;
662 unsigned i;
663
664 if (ret < 0)
665 return ret;
666
667 if (vb2_is_busy(q)) {
668 dprintk(dev, 1, "%s device busy\n", __func__);
669 return -EBUSY;
670 }
671
672 if (dev->overlay_cap_owner && dev->fb_cap.fmt.pixelformat != mp->pixelformat) {
673 dprintk(dev, 1, "overlay is active, can't change pixelformat\n");
674 return -EBUSY;
675 }
676
677 dev->fmt_cap = vivid_get_format(dev, mp->pixelformat);
678 if (V4L2_FIELD_HAS_T_OR_B(mp->field))
679 factor = 2;
680
681 /* Note: the webcam input doesn't support scaling, cropping or composing */
682
683 if (!vivid_is_webcam(dev) &&
684 (dev->has_scaler_cap || dev->has_crop_cap || dev->has_compose_cap)) {
685 struct v4l2_rect r = { 0, 0, mp->width, mp->height };
686
687 if (dev->has_scaler_cap) {
688 if (dev->has_compose_cap)
689 v4l2_rect_map_inside(compose, &r);
690 else
691 *compose = r;
692 if (dev->has_crop_cap && !dev->has_compose_cap) {
693 struct v4l2_rect min_r = {
694 0, 0,
695 r.width / MAX_ZOOM,
696 factor * r.height / MAX_ZOOM
697 };
698 struct v4l2_rect max_r = {
699 0, 0,
700 r.width * MAX_ZOOM,
701 factor * r.height * MAX_ZOOM
702 };
703
704 v4l2_rect_set_min_size(crop, &min_r);
705 v4l2_rect_set_max_size(crop, &max_r);
706 v4l2_rect_map_inside(crop, &dev->crop_bounds_cap);
707 } else if (dev->has_crop_cap) {
708 struct v4l2_rect min_r = {
709 0, 0,
710 compose->width / MAX_ZOOM,
711 factor * compose->height / MAX_ZOOM
712 };
713 struct v4l2_rect max_r = {
714 0, 0,
715 compose->width * MAX_ZOOM,
716 factor * compose->height * MAX_ZOOM
717 };
718
719 v4l2_rect_set_min_size(crop, &min_r);
720 v4l2_rect_set_max_size(crop, &max_r);
721 v4l2_rect_map_inside(crop, &dev->crop_bounds_cap);
722 }
723 } else if (dev->has_crop_cap && !dev->has_compose_cap) {
724 r.height *= factor;
725 v4l2_rect_set_size_to(crop, &r);
726 v4l2_rect_map_inside(crop, &dev->crop_bounds_cap);
727 r = *crop;
728 r.height /= factor;
729 v4l2_rect_set_size_to(compose, &r);
730 } else if (!dev->has_crop_cap) {
731 v4l2_rect_map_inside(compose, &r);
732 } else {
733 r.height *= factor;
734 v4l2_rect_set_max_size(crop, &r);
735 v4l2_rect_map_inside(crop, &dev->crop_bounds_cap);
736 compose->top *= factor;
737 compose->height *= factor;
738 v4l2_rect_set_size_to(compose, crop);
739 v4l2_rect_map_inside(compose, &r);
740 compose->top /= factor;
741 compose->height /= factor;
742 }
743 } else if (vivid_is_webcam(dev)) {
744 /* Guaranteed to be a match */
745 for (i = 0; i < ARRAY_SIZE(webcam_sizes); i++)
746 if (webcam_sizes[i].width == mp->width &&
747 webcam_sizes[i].height == mp->height)
748 break;
749 dev->webcam_size_idx = i;
750 if (dev->webcam_ival_idx >= 2 * (VIVID_WEBCAM_SIZES - i))
751 dev->webcam_ival_idx = 2 * (VIVID_WEBCAM_SIZES - i) - 1;
752 vivid_update_format_cap(dev, false);
753 } else {
754 struct v4l2_rect r = { 0, 0, mp->width, mp->height };
755
756 v4l2_rect_set_size_to(compose, &r);
757 r.height *= factor;
758 v4l2_rect_set_size_to(crop, &r);
759 }
760
761 dev->fmt_cap_rect.width = mp->width;
762 dev->fmt_cap_rect.height = mp->height;
763 tpg_s_buf_height(&dev->tpg, mp->height);
764 tpg_s_fourcc(&dev->tpg, dev->fmt_cap->fourcc);
765 for (p = 0; p < tpg_g_buffers(&dev->tpg); p++)
766 tpg_s_bytesperline(&dev->tpg, p, mp->plane_fmt[p].bytesperline);
767 dev->field_cap = mp->field;
768 if (dev->field_cap == V4L2_FIELD_ALTERNATE)
769 tpg_s_field(&dev->tpg, V4L2_FIELD_TOP, true);
770 else
771 tpg_s_field(&dev->tpg, dev->field_cap, false);
772 tpg_s_crop_compose(&dev->tpg, &dev->crop_cap, &dev->compose_cap);
773 if (vivid_is_sdtv_cap(dev))
774 dev->tv_field_cap = mp->field;
775 tpg_update_mv_step(&dev->tpg);
776 return 0;
777 }
778
779 int vidioc_g_fmt_vid_cap_mplane(struct file *file, void *priv,
780 struct v4l2_format *f)
781 {
782 struct vivid_dev *dev = video_drvdata(file);
783
784 if (!dev->multiplanar)
785 return -ENOTTY;
786 return vivid_g_fmt_vid_cap(file, priv, f);
787 }
788
789 int vidioc_try_fmt_vid_cap_mplane(struct file *file, void *priv,
790 struct v4l2_format *f)
791 {
792 struct vivid_dev *dev = video_drvdata(file);
793
794 if (!dev->multiplanar)
795 return -ENOTTY;
796 return vivid_try_fmt_vid_cap(file, priv, f);
797 }
798
799 int vidioc_s_fmt_vid_cap_mplane(struct file *file, void *priv,
800 struct v4l2_format *f)
801 {
802 struct vivid_dev *dev = video_drvdata(file);
803
804 if (!dev->multiplanar)
805 return -ENOTTY;
806 return vivid_s_fmt_vid_cap(file, priv, f);
807 }
808
809 int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
810 struct v4l2_format *f)
811 {
812 struct vivid_dev *dev = video_drvdata(file);
813
814 if (dev->multiplanar)
815 return -ENOTTY;
816 return fmt_sp2mp_func(file, priv, f, vivid_g_fmt_vid_cap);
817 }
818
819 int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
820 struct v4l2_format *f)
821 {
822 struct vivid_dev *dev = video_drvdata(file);
823
824 if (dev->multiplanar)
825 return -ENOTTY;
826 return fmt_sp2mp_func(file, priv, f, vivid_try_fmt_vid_cap);
827 }
828
829 int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
830 struct v4l2_format *f)
831 {
832 struct vivid_dev *dev = video_drvdata(file);
833
834 if (dev->multiplanar)
835 return -ENOTTY;
836 return fmt_sp2mp_func(file, priv, f, vivid_s_fmt_vid_cap);
837 }
838
839 int vivid_vid_cap_g_selection(struct file *file, void *priv,
840 struct v4l2_selection *sel)
841 {
842 struct vivid_dev *dev = video_drvdata(file);
843
844 if (!dev->has_crop_cap && !dev->has_compose_cap)
845 return -ENOTTY;
846 if (sel->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
847 return -EINVAL;
848 if (vivid_is_webcam(dev))
849 return -ENODATA;
850
851 sel->r.left = sel->r.top = 0;
852 switch (sel->target) {
853 case V4L2_SEL_TGT_CROP:
854 if (!dev->has_crop_cap)
855 return -EINVAL;
856 sel->r = dev->crop_cap;
857 break;
858 case V4L2_SEL_TGT_CROP_DEFAULT:
859 case V4L2_SEL_TGT_CROP_BOUNDS:
860 if (!dev->has_crop_cap)
861 return -EINVAL;
862 sel->r = dev->src_rect;
863 break;
864 case V4L2_SEL_TGT_COMPOSE_BOUNDS:
865 if (!dev->has_compose_cap)
866 return -EINVAL;
867 sel->r = vivid_max_rect;
868 break;
869 case V4L2_SEL_TGT_COMPOSE:
870 if (!dev->has_compose_cap)
871 return -EINVAL;
872 sel->r = dev->compose_cap;
873 break;
874 case V4L2_SEL_TGT_COMPOSE_DEFAULT:
875 if (!dev->has_compose_cap)
876 return -EINVAL;
877 sel->r = dev->fmt_cap_rect;
878 break;
879 default:
880 return -EINVAL;
881 }
882 return 0;
883 }
884
885 int vivid_vid_cap_s_selection(struct file *file, void *fh, struct v4l2_selection *s)
886 {
887 struct vivid_dev *dev = video_drvdata(file);
888 struct v4l2_rect *crop = &dev->crop_cap;
889 struct v4l2_rect *compose = &dev->compose_cap;
890 unsigned factor = V4L2_FIELD_HAS_T_OR_B(dev->field_cap) ? 2 : 1;
891 int ret;
892
893 if (!dev->has_crop_cap && !dev->has_compose_cap)
894 return -ENOTTY;
895 if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
896 return -EINVAL;
897 if (vivid_is_webcam(dev))
898 return -ENODATA;
899
900 switch (s->target) {
901 case V4L2_SEL_TGT_CROP:
902 if (!dev->has_crop_cap)
903 return -EINVAL;
904 ret = vivid_vid_adjust_sel(s->flags, &s->r);
905 if (ret)
906 return ret;
907 v4l2_rect_set_min_size(&s->r, &vivid_min_rect);
908 v4l2_rect_set_max_size(&s->r, &dev->src_rect);
909 v4l2_rect_map_inside(&s->r, &dev->crop_bounds_cap);
910 s->r.top /= factor;
911 s->r.height /= factor;
912 if (dev->has_scaler_cap) {
913 struct v4l2_rect fmt = dev->fmt_cap_rect;
914 struct v4l2_rect max_rect = {
915 0, 0,
916 s->r.width * MAX_ZOOM,
917 s->r.height * MAX_ZOOM
918 };
919 struct v4l2_rect min_rect = {
920 0, 0,
921 s->r.width / MAX_ZOOM,
922 s->r.height / MAX_ZOOM
923 };
924
925 v4l2_rect_set_min_size(&fmt, &min_rect);
926 if (!dev->has_compose_cap)
927 v4l2_rect_set_max_size(&fmt, &max_rect);
928 if (!v4l2_rect_same_size(&dev->fmt_cap_rect, &fmt) &&
929 vb2_is_busy(&dev->vb_vid_cap_q))
930 return -EBUSY;
931 if (dev->has_compose_cap) {
932 v4l2_rect_set_min_size(compose, &min_rect);
933 v4l2_rect_set_max_size(compose, &max_rect);
934 }
935 dev->fmt_cap_rect = fmt;
936 tpg_s_buf_height(&dev->tpg, fmt.height);
937 } else if (dev->has_compose_cap) {
938 struct v4l2_rect fmt = dev->fmt_cap_rect;
939
940 v4l2_rect_set_min_size(&fmt, &s->r);
941 if (!v4l2_rect_same_size(&dev->fmt_cap_rect, &fmt) &&
942 vb2_is_busy(&dev->vb_vid_cap_q))
943 return -EBUSY;
944 dev->fmt_cap_rect = fmt;
945 tpg_s_buf_height(&dev->tpg, fmt.height);
946 v4l2_rect_set_size_to(compose, &s->r);
947 v4l2_rect_map_inside(compose, &dev->fmt_cap_rect);
948 } else {
949 if (!v4l2_rect_same_size(&s->r, &dev->fmt_cap_rect) &&
950 vb2_is_busy(&dev->vb_vid_cap_q))
951 return -EBUSY;
952 v4l2_rect_set_size_to(&dev->fmt_cap_rect, &s->r);
953 v4l2_rect_set_size_to(compose, &s->r);
954 v4l2_rect_map_inside(compose, &dev->fmt_cap_rect);
955 tpg_s_buf_height(&dev->tpg, dev->fmt_cap_rect.height);
956 }
957 s->r.top *= factor;
958 s->r.height *= factor;
959 *crop = s->r;
960 break;
961 case V4L2_SEL_TGT_COMPOSE:
962 if (!dev->has_compose_cap)
963 return -EINVAL;
964 ret = vivid_vid_adjust_sel(s->flags, &s->r);
965 if (ret)
966 return ret;
967 v4l2_rect_set_min_size(&s->r, &vivid_min_rect);
968 v4l2_rect_set_max_size(&s->r, &dev->fmt_cap_rect);
969 if (dev->has_scaler_cap) {
970 struct v4l2_rect max_rect = {
971 0, 0,
972 dev->src_rect.width * MAX_ZOOM,
973 (dev->src_rect.height / factor) * MAX_ZOOM
974 };
975
976 v4l2_rect_set_max_size(&s->r, &max_rect);
977 if (dev->has_crop_cap) {
978 struct v4l2_rect min_rect = {
979 0, 0,
980 s->r.width / MAX_ZOOM,
981 (s->r.height * factor) / MAX_ZOOM
982 };
983 struct v4l2_rect max_rect = {
984 0, 0,
985 s->r.width * MAX_ZOOM,
986 (s->r.height * factor) * MAX_ZOOM
987 };
988
989 v4l2_rect_set_min_size(crop, &min_rect);
990 v4l2_rect_set_max_size(crop, &max_rect);
991 v4l2_rect_map_inside(crop, &dev->crop_bounds_cap);
992 }
993 } else if (dev->has_crop_cap) {
994 s->r.top *= factor;
995 s->r.height *= factor;
996 v4l2_rect_set_max_size(&s->r, &dev->src_rect);
997 v4l2_rect_set_size_to(crop, &s->r);
998 v4l2_rect_map_inside(crop, &dev->crop_bounds_cap);
999 s->r.top /= factor;
1000 s->r.height /= factor;
1001 } else {
1002 v4l2_rect_set_size_to(&s->r, &dev->src_rect);
1003 s->r.height /= factor;
1004 }
1005 v4l2_rect_map_inside(&s->r, &dev->fmt_cap_rect);
1006 if (dev->bitmap_cap && (compose->width != s->r.width ||
1007 compose->height != s->r.height)) {
1008 kfree(dev->bitmap_cap);
1009 dev->bitmap_cap = NULL;
1010 }
1011 *compose = s->r;
1012 break;
1013 default:
1014 return -EINVAL;
1015 }
1016
1017 tpg_s_crop_compose(&dev->tpg, crop, compose);
1018 return 0;
1019 }
1020
1021 int vivid_vid_cap_cropcap(struct file *file, void *priv,
1022 struct v4l2_cropcap *cap)
1023 {
1024 struct vivid_dev *dev = video_drvdata(file);
1025
1026 if (cap->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
1027 return -EINVAL;
1028
1029 switch (vivid_get_pixel_aspect(dev)) {
1030 case TPG_PIXEL_ASPECT_NTSC:
1031 cap->pixelaspect.numerator = 11;
1032 cap->pixelaspect.denominator = 10;
1033 break;
1034 case TPG_PIXEL_ASPECT_PAL:
1035 cap->pixelaspect.numerator = 54;
1036 cap->pixelaspect.denominator = 59;
1037 break;
1038 case TPG_PIXEL_ASPECT_SQUARE:
1039 cap->pixelaspect.numerator = 1;
1040 cap->pixelaspect.denominator = 1;
1041 break;
1042 }
1043 return 0;
1044 }
1045
1046 int vidioc_enum_fmt_vid_overlay(struct file *file, void *priv,
1047 struct v4l2_fmtdesc *f)
1048 {
1049 struct vivid_dev *dev = video_drvdata(file);
1050 const struct vivid_fmt *fmt;
1051
1052 if (dev->multiplanar)
1053 return -ENOTTY;
1054
1055 if (f->index >= ARRAY_SIZE(formats_ovl))
1056 return -EINVAL;
1057
1058 fmt = &formats_ovl[f->index];
1059
1060 f->pixelformat = fmt->fourcc;
1061 return 0;
1062 }
1063
1064 int vidioc_g_fmt_vid_overlay(struct file *file, void *priv,
1065 struct v4l2_format *f)
1066 {
1067 struct vivid_dev *dev = video_drvdata(file);
1068 const struct v4l2_rect *compose = &dev->compose_cap;
1069 struct v4l2_window *win = &f->fmt.win;
1070 unsigned clipcount = win->clipcount;
1071
1072 if (dev->multiplanar)
1073 return -ENOTTY;
1074
1075 win->w.top = dev->overlay_cap_top;
1076 win->w.left = dev->overlay_cap_left;
1077 win->w.width = compose->width;
1078 win->w.height = compose->height;
1079 win->field = dev->overlay_cap_field;
1080 win->clipcount = dev->clipcount_cap;
1081 if (clipcount > dev->clipcount_cap)
1082 clipcount = dev->clipcount_cap;
1083 if (dev->bitmap_cap == NULL)
1084 win->bitmap = NULL;
1085 else if (win->bitmap) {
1086 if (copy_to_user(win->bitmap, dev->bitmap_cap,
1087 ((compose->width + 7) / 8) * compose->height))
1088 return -EFAULT;
1089 }
1090 if (clipcount && win->clips) {
1091 if (copy_to_user(win->clips, dev->clips_cap,
1092 clipcount * sizeof(dev->clips_cap[0])))
1093 return -EFAULT;
1094 }
1095 return 0;
1096 }
1097
1098 int vidioc_try_fmt_vid_overlay(struct file *file, void *priv,
1099 struct v4l2_format *f)
1100 {
1101 struct vivid_dev *dev = video_drvdata(file);
1102 const struct v4l2_rect *compose = &dev->compose_cap;
1103 struct v4l2_window *win = &f->fmt.win;
1104 int i, j;
1105
1106 if (dev->multiplanar)
1107 return -ENOTTY;
1108
1109 win->w.left = clamp_t(int, win->w.left,
1110 -dev->fb_cap.fmt.width, dev->fb_cap.fmt.width);
1111 win->w.top = clamp_t(int, win->w.top,
1112 -dev->fb_cap.fmt.height, dev->fb_cap.fmt.height);
1113 win->w.width = compose->width;
1114 win->w.height = compose->height;
1115 if (win->field != V4L2_FIELD_BOTTOM && win->field != V4L2_FIELD_TOP)
1116 win->field = V4L2_FIELD_ANY;
1117 win->chromakey = 0;
1118 win->global_alpha = 0;
1119 if (win->clipcount && !win->clips)
1120 win->clipcount = 0;
1121 if (win->clipcount > MAX_CLIPS)
1122 win->clipcount = MAX_CLIPS;
1123 if (win->clipcount) {
1124 if (copy_from_user(dev->try_clips_cap, win->clips,
1125 win->clipcount * sizeof(dev->clips_cap[0])))
1126 return -EFAULT;
1127 for (i = 0; i < win->clipcount; i++) {
1128 struct v4l2_rect *r = &dev->try_clips_cap[i].c;
1129
1130 r->top = clamp_t(s32, r->top, 0, dev->fb_cap.fmt.height - 1);
1131 r->height = clamp_t(s32, r->height, 1, dev->fb_cap.fmt.height - r->top);
1132 r->left = clamp_t(u32, r->left, 0, dev->fb_cap.fmt.width - 1);
1133 r->width = clamp_t(u32, r->width, 1, dev->fb_cap.fmt.width - r->left);
1134 }
1135 /*
1136 * Yeah, so sue me, it's an O(n^2) algorithm. But n is a small
1137 * number and it's typically a one-time deal.
1138 */
1139 for (i = 0; i < win->clipcount - 1; i++) {
1140 struct v4l2_rect *r1 = &dev->try_clips_cap[i].c;
1141
1142 for (j = i + 1; j < win->clipcount; j++) {
1143 struct v4l2_rect *r2 = &dev->try_clips_cap[j].c;
1144
1145 if (v4l2_rect_overlap(r1, r2))
1146 return -EINVAL;
1147 }
1148 }
1149 if (copy_to_user(win->clips, dev->try_clips_cap,
1150 win->clipcount * sizeof(dev->clips_cap[0])))
1151 return -EFAULT;
1152 }
1153 return 0;
1154 }
1155
1156 int vidioc_s_fmt_vid_overlay(struct file *file, void *priv,
1157 struct v4l2_format *f)
1158 {
1159 struct vivid_dev *dev = video_drvdata(file);
1160 const struct v4l2_rect *compose = &dev->compose_cap;
1161 struct v4l2_window *win = &f->fmt.win;
1162 int ret = vidioc_try_fmt_vid_overlay(file, priv, f);
1163 unsigned bitmap_size = ((compose->width + 7) / 8) * compose->height;
1164 unsigned clips_size = win->clipcount * sizeof(dev->clips_cap[0]);
1165 void *new_bitmap = NULL;
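	/*
	 * For illustration: the overlay bitmap holds one bit per pixel, so a
	 * 720x576 compose rectangle needs ((720 + 7) / 8) * 576 = 51840 bytes.
	 */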
1166
1167 if (ret)
1168 return ret;
1169
1170 if (win->bitmap) {
1171 new_bitmap = vzalloc(bitmap_size);
1172
1173 if (new_bitmap == NULL)
1174 return -ENOMEM;
1175 if (copy_from_user(new_bitmap, win->bitmap, bitmap_size)) {
1176 vfree(new_bitmap);
1177 return -EFAULT;
1178 }
1179 }
1180
1181 dev->overlay_cap_top = win->w.top;
1182 dev->overlay_cap_left = win->w.left;
1183 dev->overlay_cap_field = win->field;
1184 vfree(dev->bitmap_cap);
1185 dev->bitmap_cap = new_bitmap;
1186 dev->clipcount_cap = win->clipcount;
1187 if (dev->clipcount_cap)
1188 memcpy(dev->clips_cap, dev->try_clips_cap, clips_size);
1189 return 0;
1190 }
1191
1192 int vivid_vid_cap_overlay(struct file *file, void *fh, unsigned i)
1193 {
1194 struct vivid_dev *dev = video_drvdata(file);
1195
1196 if (dev->multiplanar)
1197 return -ENOTTY;
1198
1199 if (i && dev->fb_vbase_cap == NULL)
1200 return -EINVAL;
1201
1202 if (i && dev->fb_cap.fmt.pixelformat != dev->fmt_cap->fourcc) {
1203 dprintk(dev, 1, "mismatch between overlay and video capture pixelformats\n");
1204 return -EINVAL;
1205 }
1206
1207 if (dev->overlay_cap_owner && dev->overlay_cap_owner != fh)
1208 return -EBUSY;
1209 dev->overlay_cap_owner = i ? fh : NULL;
1210 return 0;
1211 }
1212
1213 int vivid_vid_cap_g_fbuf(struct file *file, void *fh,
1214 struct v4l2_framebuffer *a)
1215 {
1216 struct vivid_dev *dev = video_drvdata(file);
1217
1218 if (dev->multiplanar)
1219 return -ENOTTY;
1220
1221 *a = dev->fb_cap;
1222 a->capability = V4L2_FBUF_CAP_BITMAP_CLIPPING |
1223 V4L2_FBUF_CAP_LIST_CLIPPING;
1224 a->flags = V4L2_FBUF_FLAG_PRIMARY;
1225 a->fmt.field = V4L2_FIELD_NONE;
1226 a->fmt.colorspace = V4L2_COLORSPACE_SRGB;
1227 a->fmt.priv = 0;
1228 return 0;
1229 }
1230
1231 int vivid_vid_cap_s_fbuf(struct file *file, void *fh,
1232 const struct v4l2_framebuffer *a)
1233 {
1234 struct vivid_dev *dev = video_drvdata(file);
1235 const struct vivid_fmt *fmt;
1236
1237 if (dev->multiplanar)
1238 return -ENOTTY;
1239
1240 if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RAWIO))
1241 return -EPERM;
1242
1243 if (dev->overlay_cap_owner)
1244 return -EBUSY;
1245
1246 if (a->base == NULL) {
1247 dev->fb_cap.base = NULL;
1248 dev->fb_vbase_cap = NULL;
1249 return 0;
1250 }
1251
1252 if (a->fmt.width < 48 || a->fmt.height < 32)
1253 return -EINVAL;
1254 fmt = vivid_get_format(dev, a->fmt.pixelformat);
1255 if (!fmt || !fmt->can_do_overlay)
1256 return -EINVAL;
1257 if (a->fmt.bytesperline < (a->fmt.width * fmt->bit_depth[0]) / 8)
1258 return -EINVAL;
1259 if (a->fmt.height * a->fmt.bytesperline < a->fmt.sizeimage)
1260 return -EINVAL;
1261
1262 dev->fb_vbase_cap = phys_to_virt((unsigned long)a->base);
1263 dev->fb_cap = *a;
1264 dev->overlay_cap_left = clamp_t(int, dev->overlay_cap_left,
1265 -dev->fb_cap.fmt.width, dev->fb_cap.fmt.width);
1266 dev->overlay_cap_top = clamp_t(int, dev->overlay_cap_top,
1267 -dev->fb_cap.fmt.height, dev->fb_cap.fmt.height);
1268 return 0;
1269 }
1270
1271 static const struct v4l2_audio vivid_audio_inputs[] = {
1272 { 0, "TV", V4L2_AUDCAP_STEREO },
1273 { 1, "Line-In", V4L2_AUDCAP_STEREO },
1274 };
1275
1276 int vidioc_enum_input(struct file *file, void *priv,
1277 struct v4l2_input *inp)
1278 {
1279 struct vivid_dev *dev = video_drvdata(file);
1280
1281 if (inp->index >= dev->num_inputs)
1282 return -EINVAL;
1283
1284 inp->type = V4L2_INPUT_TYPE_CAMERA;
1285 switch (dev->input_type[inp->index]) {
1286 case WEBCAM:
1287 snprintf(inp->name, sizeof(inp->name), "Webcam %u",
1288 dev->input_name_counter[inp->index]);
1289 inp->capabilities = 0;
1290 break;
1291 case TV:
1292 snprintf(inp->name, sizeof(inp->name), "TV %u",
1293 dev->input_name_counter[inp->index]);
1294 inp->type = V4L2_INPUT_TYPE_TUNER;
1295 inp->std = V4L2_STD_ALL;
1296 if (dev->has_audio_inputs)
1297 inp->audioset = (1 << ARRAY_SIZE(vivid_audio_inputs)) - 1;
1298 inp->capabilities = V4L2_IN_CAP_STD;
1299 break;
1300 case SVID:
1301 snprintf(inp->name, sizeof(inp->name), "S-Video %u",
1302 dev->input_name_counter[inp->index]);
1303 inp->std = V4L2_STD_ALL;
1304 if (dev->has_audio_inputs)
1305 inp->audioset = (1 << ARRAY_SIZE(vivid_audio_inputs)) - 1;
1306 inp->capabilities = V4L2_IN_CAP_STD;
1307 break;
1308 case HDMI:
1309 snprintf(inp->name, sizeof(inp->name), "HDMI %u",
1310 dev->input_name_counter[inp->index]);
1311 inp->capabilities = V4L2_IN_CAP_DV_TIMINGS;
1312 if (dev->edid_blocks == 0 ||
1313 dev->dv_timings_signal_mode == NO_SIGNAL)
1314 inp->status |= V4L2_IN_ST_NO_SIGNAL;
1315 else if (dev->dv_timings_signal_mode == NO_LOCK ||
1316 dev->dv_timings_signal_mode == OUT_OF_RANGE)
1317 inp->status |= V4L2_IN_ST_NO_H_LOCK;
1318 break;
1319 }
1320 if (dev->sensor_hflip)
1321 inp->status |= V4L2_IN_ST_HFLIP;
1322 if (dev->sensor_vflip)
1323 inp->status |= V4L2_IN_ST_VFLIP;
1324 if (dev->input == inp->index && vivid_is_sdtv_cap(dev)) {
1325 if (dev->std_signal_mode == NO_SIGNAL) {
1326 inp->status |= V4L2_IN_ST_NO_SIGNAL;
1327 } else if (dev->std_signal_mode == NO_LOCK) {
1328 inp->status |= V4L2_IN_ST_NO_H_LOCK;
1329 } else if (vivid_is_tv_cap(dev)) {
1330 switch (tpg_g_quality(&dev->tpg)) {
1331 case TPG_QUAL_GRAY:
1332 inp->status |= V4L2_IN_ST_COLOR_KILL;
1333 break;
1334 case TPG_QUAL_NOISE:
1335 inp->status |= V4L2_IN_ST_NO_H_LOCK;
1336 break;
1337 default:
1338 break;
1339 }
1340 }
1341 }
1342 return 0;
1343 }
1344
1345 int vidioc_g_input(struct file *file, void *priv, unsigned *i)
1346 {
1347 struct vivid_dev *dev = video_drvdata(file);
1348
1349 *i = dev->input;
1350 return 0;
1351 }
1352
1353 int vidioc_s_input(struct file *file, void *priv, unsigned i)
1354 {
1355 struct vivid_dev *dev = video_drvdata(file);
1356 struct v4l2_bt_timings *bt = &dev->dv_timings_cap.bt;
1357 unsigned brightness;
1358
1359 if (i >= dev->num_inputs)
1360 return -EINVAL;
1361
1362 if (i == dev->input)
1363 return 0;
1364
1365 if (vb2_is_busy(&dev->vb_vid_cap_q) || vb2_is_busy(&dev->vb_vbi_cap_q))
1366 return -EBUSY;
1367
1368 dev->input = i;
1369 dev->vid_cap_dev.tvnorms = 0;
1370 if (dev->input_type[i] == TV || dev->input_type[i] == SVID) {
1371 dev->tv_audio_input = (dev->input_type[i] == TV) ? 0 : 1;
1372 dev->vid_cap_dev.tvnorms = V4L2_STD_ALL;
1373 }
1374 dev->vbi_cap_dev.tvnorms = dev->vid_cap_dev.tvnorms;
1375 vivid_update_format_cap(dev, false);
1376
1377 if (dev->colorspace) {
1378 switch (dev->input_type[i]) {
1379 case WEBCAM:
1380 v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_SRGB);
1381 break;
1382 case TV:
1383 case SVID:
1384 v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_170M);
1385 break;
1386 case HDMI:
1387 if (bt->flags & V4L2_DV_FL_IS_CE_VIDEO) {
1388 if (dev->src_rect.width == 720 && dev->src_rect.height <= 576)
1389 v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_170M);
1390 else
1391 v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_709);
1392 } else {
1393 v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_SRGB);
1394 }
1395 break;
1396 }
1397 }
1398
1399 /*
1400 * Modify the brightness range depending on the input.
1401  * This makes it easy to use vivid to test whether applications can
1402  * handle control range modifications. It also mirrors real hardware:
1403  * different inputs may be hooked up to different receivers with
1404  * different control ranges.
1405 */
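	/*
	 * For illustration: for input 2 the brightness range becomes
	 * [256, 511] with a default of 384, and the control is restored to
	 * 256 + dev->input_brightness[2].
	 */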
1406 brightness = 128 * i + dev->input_brightness[i];
1407 v4l2_ctrl_modify_range(dev->brightness,
1408 128 * i, 255 + 128 * i, 1, 128 + 128 * i);
1409 v4l2_ctrl_s_ctrl(dev->brightness, brightness);
1410 return 0;
1411 }
1412
1413 int vidioc_enumaudio(struct file *file, void *fh, struct v4l2_audio *vin)
1414 {
1415 if (vin->index >= ARRAY_SIZE(vivid_audio_inputs))
1416 return -EINVAL;
1417 *vin = vivid_audio_inputs[vin->index];
1418 return 0;
1419 }
1420
1421 int vidioc_g_audio(struct file *file, void *fh, struct v4l2_audio *vin)
1422 {
1423 struct vivid_dev *dev = video_drvdata(file);
1424
1425 if (!vivid_is_sdtv_cap(dev))
1426 return -EINVAL;
1427 *vin = vivid_audio_inputs[dev->tv_audio_input];
1428 return 0;
1429 }
1430
1431 int vidioc_s_audio(struct file *file, void *fh, const struct v4l2_audio *vin)
1432 {
1433 struct vivid_dev *dev = video_drvdata(file);
1434
1435 if (!vivid_is_sdtv_cap(dev))
1436 return -EINVAL;
1437 if (vin->index >= ARRAY_SIZE(vivid_audio_inputs))
1438 return -EINVAL;
1439 dev->tv_audio_input = vin->index;
1440 return 0;
1441 }
1442
1443 int vivid_video_g_frequency(struct file *file, void *fh, struct v4l2_frequency *vf)
1444 {
1445 struct vivid_dev *dev = video_drvdata(file);
1446
1447 if (vf->tuner != 0)
1448 return -EINVAL;
1449 vf->frequency = dev->tv_freq;
1450 return 0;
1451 }
1452
1453 int vivid_video_s_frequency(struct file *file, void *fh, const struct v4l2_frequency *vf)
1454 {
1455 struct vivid_dev *dev = video_drvdata(file);
1456
1457 if (vf->tuner != 0)
1458 return -EINVAL;
1459 dev->tv_freq = clamp_t(unsigned, vf->frequency, MIN_TV_FREQ, MAX_TV_FREQ);
1460 if (vivid_is_tv_cap(dev))
1461 vivid_update_quality(dev);
1462 return 0;
1463 }
1464
1465 int vivid_video_s_tuner(struct file *file, void *fh, const struct v4l2_tuner *vt)
1466 {
1467 struct vivid_dev *dev = video_drvdata(file);
1468
1469 if (vt->index != 0)
1470 return -EINVAL;
1471 if (vt->audmode > V4L2_TUNER_MODE_LANG1_LANG2)
1472 return -EINVAL;
1473 dev->tv_audmode = vt->audmode;
1474 return 0;
1475 }
1476
1477 int vivid_video_g_tuner(struct file *file, void *fh, struct v4l2_tuner *vt)
1478 {
1479 struct vivid_dev *dev = video_drvdata(file);
1480 enum tpg_quality qual;
1481
1482 if (vt->index != 0)
1483 return -EINVAL;
1484
1485 vt->capability = V4L2_TUNER_CAP_NORM | V4L2_TUNER_CAP_STEREO |
1486 V4L2_TUNER_CAP_LANG1 | V4L2_TUNER_CAP_LANG2;
1487 vt->audmode = dev->tv_audmode;
1488 vt->rangelow = MIN_TV_FREQ;
1489 vt->rangehigh = MAX_TV_FREQ;
1490 qual = vivid_get_quality(dev, &vt->afc);
1491 if (qual == TPG_QUAL_COLOR)
1492 vt->signal = 0xffff;
1493 else if (qual == TPG_QUAL_GRAY)
1494 vt->signal = 0x8000;
1495 else
1496 vt->signal = 0;
1497 if (qual == TPG_QUAL_NOISE) {
1498 vt->rxsubchans = 0;
1499 } else if (qual == TPG_QUAL_GRAY) {
1500 vt->rxsubchans = V4L2_TUNER_SUB_MONO;
1501 } else {
1502 unsigned channel_nr = dev->tv_freq / (6 * 16);
1503 unsigned options = (dev->std_cap & V4L2_STD_NTSC_M) ? 4 : 3;
1504
1505 switch (channel_nr % options) {
1506 case 0:
1507 vt->rxsubchans = V4L2_TUNER_SUB_MONO;
1508 break;
1509 case 1:
1510 vt->rxsubchans = V4L2_TUNER_SUB_STEREO;
1511 break;
1512 case 2:
1513 if (dev->std_cap & V4L2_STD_NTSC_M)
1514 vt->rxsubchans = V4L2_TUNER_SUB_MONO | V4L2_TUNER_SUB_SAP;
1515 else
1516 vt->rxsubchans = V4L2_TUNER_SUB_LANG1 | V4L2_TUNER_SUB_LANG2;
1517 break;
1518 case 3:
1519 vt->rxsubchans = V4L2_TUNER_SUB_STEREO | V4L2_TUNER_SUB_SAP;
1520 break;
1521 }
1522 }
1523 strlcpy(vt->name, "TV Tuner", sizeof(vt->name));
1524 return 0;
1525 }
1526
1527 /* Must remain in sync with the vivid_ctrl_standard_strings array */
1528 const v4l2_std_id vivid_standard[] = {
1529 V4L2_STD_NTSC_M,
1530 V4L2_STD_NTSC_M_JP,
1531 V4L2_STD_NTSC_M_KR,
1532 V4L2_STD_NTSC_443,
1533 V4L2_STD_PAL_BG | V4L2_STD_PAL_H,
1534 V4L2_STD_PAL_I,
1535 V4L2_STD_PAL_DK,
1536 V4L2_STD_PAL_M,
1537 V4L2_STD_PAL_N,
1538 V4L2_STD_PAL_Nc,
1539 V4L2_STD_PAL_60,
1540 V4L2_STD_SECAM_B | V4L2_STD_SECAM_G | V4L2_STD_SECAM_H,
1541 V4L2_STD_SECAM_DK,
1542 V4L2_STD_SECAM_L,
1543 V4L2_STD_SECAM_LC,
1544 V4L2_STD_UNKNOWN
1545 };
1546
1547 /* Must remain in sync with the vivid_standard array */
1548 const char * const vivid_ctrl_standard_strings[] = {
1549 "NTSC-M",
1550 "NTSC-M-JP",
1551 "NTSC-M-KR",
1552 "NTSC-443",
1553 "PAL-BGH",
1554 "PAL-I",
1555 "PAL-DK",
1556 "PAL-M",
1557 "PAL-N",
1558 "PAL-Nc",
1559 "PAL-60",
1560 "SECAM-BGH",
1561 "SECAM-DK",
1562 "SECAM-L",
1563 "SECAM-Lc",
1564 NULL,
1565 };
1566
1567 int vidioc_querystd(struct file *file, void *priv, v4l2_std_id *id)
1568 {
1569 struct vivid_dev *dev = video_drvdata(file);
1570
1571 if (!vivid_is_sdtv_cap(dev))
1572 return -ENODATA;
1573 if (dev->std_signal_mode == NO_SIGNAL ||
1574 dev->std_signal_mode == NO_LOCK) {
1575 *id = V4L2_STD_UNKNOWN;
1576 return 0;
1577 }
1578 if (vivid_is_tv_cap(dev) && tpg_g_quality(&dev->tpg) == TPG_QUAL_NOISE) {
1579 *id = V4L2_STD_UNKNOWN;
1580 } else if (dev->std_signal_mode == CURRENT_STD) {
1581 *id = dev->std_cap;
1582 } else if (dev->std_signal_mode == SELECTED_STD) {
1583 *id = dev->query_std;
1584 } else {
1585 *id = vivid_standard[dev->query_std_last];
1586 dev->query_std_last = (dev->query_std_last + 1) % ARRAY_SIZE(vivid_standard);
1587 }
1588
1589 return 0;
1590 }
1591
1592 int vivid_vid_cap_s_std(struct file *file, void *priv, v4l2_std_id id)
1593 {
1594 struct vivid_dev *dev = video_drvdata(file);
1595
1596 if (!vivid_is_sdtv_cap(dev))
1597 return -ENODATA;
1598 if (dev->std_cap == id)
1599 return 0;
1600 if (vb2_is_busy(&dev->vb_vid_cap_q) || vb2_is_busy(&dev->vb_vbi_cap_q))
1601 return -EBUSY;
1602 dev->std_cap = id;
1603 vivid_update_format_cap(dev, false);
1604 return 0;
1605 }
1606
1607 static void find_aspect_ratio(u32 width, u32 height,
1608 u32 *num, u32 *denom)
1609 {
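	/*
	 * For illustration: 1280x720 matches the 16:9 test below
	 * (720 % 9 == 0 and 720 * 16 / 9 == 1280), while 1366x768 matches
	 * none of the exact tests and falls through to the 16:9 default.
	 */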
1610 if (!(height % 3) && ((height * 4 / 3) == width)) {
1611 *num = 4;
1612 *denom = 3;
1613 } else if (!(height % 9) && ((height * 16 / 9) == width)) {
1614 *num = 16;
1615 *denom = 9;
1616 } else if (!(height % 10) && ((height * 16 / 10) == width)) {
1617 *num = 16;
1618 *denom = 10;
1619 } else if (!(height % 4) && ((height * 5 / 4) == width)) {
1620 *num = 5;
1621 *denom = 4;
1622 } else if (!(height % 9) && ((height * 15 / 9) == width)) {
1623 *num = 15;
1624 *denom = 9;
1625 } else { /* default to 16:9 */
1626 *num = 16;
1627 *denom = 9;
1628 }
1629 }
1630
1631 static bool valid_cvt_gtf_timings(struct v4l2_dv_timings *timings)
1632 {
1633 struct v4l2_bt_timings *bt = &timings->bt;
1634 u32 total_h_pixel;
1635 u32 total_v_lines;
1636 u32 h_freq;
1637
1638 if (!v4l2_valid_dv_timings(timings, &vivid_dv_timings_cap,
1639 NULL, NULL))
1640 return false;
1641
1642 total_h_pixel = V4L2_DV_BT_FRAME_WIDTH(bt);
1643 total_v_lines = V4L2_DV_BT_FRAME_HEIGHT(bt);
1644
1645 h_freq = (u32)bt->pixelclock / total_h_pixel;
1646
1647 if (bt->standards == 0 || (bt->standards & V4L2_DV_BT_STD_CVT)) {
1648 if (v4l2_detect_cvt(total_v_lines, h_freq, bt->vsync, bt->width,
1649 bt->polarities, bt->interlaced, timings))
1650 return true;
1651 }
1652
1653 if (bt->standards == 0 || (bt->standards & V4L2_DV_BT_STD_GTF)) {
1654 struct v4l2_fract aspect_ratio;
1655
1656 find_aspect_ratio(bt->width, bt->height,
1657 &aspect_ratio.numerator,
1658 &aspect_ratio.denominator);
1659 if (v4l2_detect_gtf(total_v_lines, h_freq, bt->vsync,
1660 bt->polarities, bt->interlaced,
1661 aspect_ratio, timings))
1662 return true;
1663 }
1664 return false;
1665 }
1666
1667 int vivid_vid_cap_s_dv_timings(struct file *file, void *_fh,
1668 struct v4l2_dv_timings *timings)
1669 {
1670 struct vivid_dev *dev = video_drvdata(file);
1671
1672 if (!vivid_is_hdmi_cap(dev))
1673 return -ENODATA;
1674 if (!v4l2_find_dv_timings_cap(timings, &vivid_dv_timings_cap,
1675 0, NULL, NULL) &&
1676 !valid_cvt_gtf_timings(timings))
1677 return -EINVAL;
1678
1679 if (v4l2_match_dv_timings(timings, &dev->dv_timings_cap, 0, false))
1680 return 0;
1681 if (vb2_is_busy(&dev->vb_vid_cap_q))
1682 return -EBUSY;
1683
1684 dev->dv_timings_cap = *timings;
1685 vivid_update_format_cap(dev, false);
1686 return 0;
1687 }
1688
1689 int vidioc_query_dv_timings(struct file *file, void *_fh,
1690 struct v4l2_dv_timings *timings)
1691 {
1692 struct vivid_dev *dev = video_drvdata(file);
1693
1694 if (!vivid_is_hdmi_cap(dev))
1695 return -ENODATA;
1696 if (dev->dv_timings_signal_mode == NO_SIGNAL ||
1697 dev->edid_blocks == 0)
1698 return -ENOLINK;
1699 if (dev->dv_timings_signal_mode == NO_LOCK)
1700 return -ENOLCK;
1701 if (dev->dv_timings_signal_mode == OUT_OF_RANGE) {
1702 timings->bt.pixelclock = vivid_dv_timings_cap.bt.max_pixelclock * 2;
1703 return -ERANGE;
1704 }
1705 if (dev->dv_timings_signal_mode == CURRENT_DV_TIMINGS) {
1706 *timings = dev->dv_timings_cap;
1707 } else if (dev->dv_timings_signal_mode == SELECTED_DV_TIMINGS) {
1708 *timings = v4l2_dv_timings_presets[dev->query_dv_timings];
1709 } else {
1710 *timings = v4l2_dv_timings_presets[dev->query_dv_timings_last];
1711 dev->query_dv_timings_last = (dev->query_dv_timings_last + 1) %
1712 dev->query_dv_timings_size;
1713 }
1714 return 0;
1715 }
1716
1717 int vidioc_s_edid(struct file *file, void *_fh,
1718 struct v4l2_edid *edid)
1719 {
1720 struct vivid_dev *dev = video_drvdata(file);
1721 u16 phys_addr;
1722 unsigned int i;
1723 int ret;
1724
1725 memset(edid->reserved, 0, sizeof(edid->reserved));
1726 if (edid->pad >= dev->num_inputs)
1727 return -EINVAL;
1728 if (dev->input_type[edid->pad] != HDMI || edid->start_block)
1729 return -EINVAL;
1730 if (edid->blocks == 0) {
1731 dev->edid_blocks = 0;
1732 phys_addr = CEC_PHYS_ADDR_INVALID;
1733 goto set_phys_addr;
1734 }
1735 if (edid->blocks > dev->edid_max_blocks) {
1736 edid->blocks = dev->edid_max_blocks;
1737 return -E2BIG;
1738 }
1739 phys_addr = cec_get_edid_phys_addr(edid->edid, edid->blocks * 128, NULL);
1740 ret = cec_phys_addr_validate(phys_addr, &phys_addr, NULL);
1741 if (ret)
1742 return ret;
1743
1744 if (vb2_is_busy(&dev->vb_vid_cap_q))
1745 return -EBUSY;
1746
1747 dev->edid_blocks = edid->blocks;
1748 memcpy(dev->edid, edid->edid, edid->blocks * 128);
1749
1750 set_phys_addr:
1751 /* TODO: a proper hotplug detect cycle should be emulated here */
1752 cec_s_phys_addr(dev->cec_rx_adap, phys_addr, false);
1753
1754 for (i = 0; i < MAX_OUTPUTS && dev->cec_tx_adap[i]; i++)
1755 cec_s_phys_addr(dev->cec_tx_adap[i],
1756 cec_phys_addr_for_input(phys_addr, i + 1),
1757 false);
1758 return 0;
1759 }
1760
1761 int vidioc_enum_framesizes(struct file *file, void *fh,
1762 struct v4l2_frmsizeenum *fsize)
1763 {
1764 struct vivid_dev *dev = video_drvdata(file);
1765
1766 if (!vivid_is_webcam(dev) && !dev->has_scaler_cap)
1767 return -EINVAL;
1768 if (vivid_get_format(dev, fsize->pixel_format) == NULL)
1769 return -EINVAL;
1770 if (vivid_is_webcam(dev)) {
1771 if (fsize->index >= ARRAY_SIZE(webcam_sizes))
1772 return -EINVAL;
1773 fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
1774 fsize->discrete = webcam_sizes[fsize->index];
1775 return 0;
1776 }
1777 if (fsize->index)
1778 return -EINVAL;
1779 fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE;
1780 fsize->stepwise.min_width = MIN_WIDTH;
1781 fsize->stepwise.max_width = MAX_WIDTH * MAX_ZOOM;
1782 fsize->stepwise.step_width = 2;
1783 fsize->stepwise.min_height = MIN_HEIGHT;
1784 fsize->stepwise.max_height = MAX_HEIGHT * MAX_ZOOM;
1785 fsize->stepwise.step_height = 2;
1786 return 0;
1787 }
1788
1789 /* timeperframe is arbitrary and continuous */
1790 int vidioc_enum_frameintervals(struct file *file, void *priv,
1791 struct v4l2_frmivalenum *fival)
1792 {
1793 struct vivid_dev *dev = video_drvdata(file);
1794 const struct vivid_fmt *fmt;
1795 int i;
1796
1797 fmt = vivid_get_format(dev, fival->pixel_format);
1798 if (!fmt)
1799 return -EINVAL;
1800
1801 if (!vivid_is_webcam(dev)) {
1802 if (fival->index)
1803 return -EINVAL;
1804 if (fival->width < MIN_WIDTH || fival->width > MAX_WIDTH * MAX_ZOOM)
1805 return -EINVAL;
1806 if (fival->height < MIN_HEIGHT || fival->height > MAX_HEIGHT * MAX_ZOOM)
1807 return -EINVAL;
1808 fival->type = V4L2_FRMIVAL_TYPE_DISCRETE;
1809 fival->discrete = dev->timeperframe_vid_cap;
1810 return 0;
1811 }
1812
1813 for (i = 0; i < ARRAY_SIZE(webcam_sizes); i++)
1814 if (fival->width == webcam_sizes[i].width &&
1815 fival->height == webcam_sizes[i].height)
1816 break;
1817 if (i == ARRAY_SIZE(webcam_sizes))
1818 return -EINVAL;
1819 if (fival->index >= 2 * (VIVID_WEBCAM_SIZES - i))
1820 return -EINVAL;
1821 fival->type = V4L2_FRMIVAL_TYPE_DISCRETE;
1822 fival->discrete = webcam_intervals[fival->index];
1823 return 0;
1824 }
1825
1826 int vivid_vid_cap_g_parm(struct file *file, void *priv,
1827 struct v4l2_streamparm *parm)
1828 {
1829 struct vivid_dev *dev = video_drvdata(file);
1830
1831 if (parm->type != (dev->multiplanar ?
1832 V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE :
1833 V4L2_BUF_TYPE_VIDEO_CAPTURE))
1834 return -EINVAL;
1835
1836 parm->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
1837 parm->parm.capture.timeperframe = dev->timeperframe_vid_cap;
1838 parm->parm.capture.readbuffers = 1;
1839 return 0;
1840 }
1841
1842 #define FRACT_CMP(a, OP, b) \
1843 ((u64)(a).numerator * (b).denominator OP (u64)(b).numerator * (a).denominator)
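/*
 * For illustration: FRACT_CMP cross-multiplies in 64 bits, so for
 * a = 1/30 s and b = 1/25 s, FRACT_CMP(a, <, b) evaluates 1 * 25 < 1 * 30,
 * which is true: 1/30 s is a shorter frame interval than 1/25 s.
 */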
1844
1845 int vivid_vid_cap_s_parm(struct file *file, void *priv,
1846 struct v4l2_streamparm *parm)
1847 {
1848 struct vivid_dev *dev = video_drvdata(file);
1849 unsigned ival_sz = 2 * (VIVID_WEBCAM_SIZES - dev->webcam_size_idx);
1850 struct v4l2_fract tpf;
1851 unsigned i;
1852
1853 if (parm->type != (dev->multiplanar ?
1854 V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE :
1855 V4L2_BUF_TYPE_VIDEO_CAPTURE))
1856 return -EINVAL;
1857 if (!vivid_is_webcam(dev))
1858 return vivid_vid_cap_g_parm(file, priv, parm);
1859
1860 tpf = parm->parm.capture.timeperframe;
1861
1862 if (tpf.denominator == 0)
1863 tpf = webcam_intervals[ival_sz - 1];
1864 for (i = 0; i < ival_sz; i++)
1865 if (FRACT_CMP(tpf, >=, webcam_intervals[i]))
1866 break;
1867 if (i == ival_sz)
1868 i = ival_sz - 1;
1869 dev->webcam_ival_idx = i;
1870 tpf = webcam_intervals[dev->webcam_ival_idx];
1871 tpf = FRACT_CMP(tpf, <, tpf_min) ? tpf_min : tpf;
1872 tpf = FRACT_CMP(tpf, >, tpf_max) ? tpf_max : tpf;
1873
1874 /* resync the thread's timings */
1875 dev->cap_seq_resync = true;
1876 dev->timeperframe_vid_cap = tpf;
1877 parm->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
1878 parm->parm.capture.timeperframe = tpf;
1879 parm->parm.capture.readbuffers = 1;
1880 return 0;
1881 }