drivers/media/pci/intel/ipu-isys-queue.c (UBUNTU: SAUCE: IPU driver release WW52)
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2013 - 2020 Intel Corporation

#include <linux/completion.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/string.h>

#include <media/media-entity.h>
#include <media/videobuf2-dma-contig.h>
#include <media/v4l2-ioctl.h>

#include "ipu.h"
#include "ipu-bus.h"
#include "ipu-buttress.h"
#include "ipu-isys.h"
#include "ipu-isys-csi2.h"
#include "ipu-isys-video.h"

static bool wall_clock_ts_on;
module_param(wall_clock_ts_on, bool, 0660);
MODULE_PARM_DESC(wall_clock_ts_on, "Timestamp based on REALTIME clock");

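/*
 * vb2 .queue_setup operation: report the number of planes and per-plane
 * buffer sizes. When called through VIDIOC_REQBUFS (*num_planes == 0),
 * both are taken from the currently configured multiplanar format.
 */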
static int queue_setup(struct vb2_queue *q,
		       unsigned int *num_buffers, unsigned int *num_planes,
		       unsigned int sizes[],
		       struct device *alloc_devs[])
{
	struct ipu_isys_queue *aq = vb2_queue_to_ipu_isys_queue(q);
	struct ipu_isys_video *av = ipu_isys_queue_to_video(aq);
	bool use_fmt = false;
	unsigned int i;

	/* num_planes == 0: we're being called through VIDIOC_REQBUFS */
	if (!*num_planes) {
		use_fmt = true;
		*num_planes = av->mpix.num_planes;
	}

	for (i = 0; i < *num_planes; i++) {
		if (use_fmt)
			sizes[i] = av->mpix.plane_fmt[i].sizeimage;
		alloc_devs[i] = aq->dev;
		dev_dbg(&av->isys->adev->dev,
			"%s: queue setup: plane %d size %u\n",
			av->vdev.name, i, sizes[i]);
	}

	return 0;
}

static void ipu_isys_queue_lock(struct vb2_queue *q)
{
	struct ipu_isys_queue *aq = vb2_queue_to_ipu_isys_queue(q);
	struct ipu_isys_video *av = ipu_isys_queue_to_video(aq);

	dev_dbg(&av->isys->adev->dev, "%s: queue lock\n", av->vdev.name);
	mutex_lock(&av->mutex);
}

static void ipu_isys_queue_unlock(struct vb2_queue *q)
{
	struct ipu_isys_queue *aq = vb2_queue_to_ipu_isys_queue(q);
	struct ipu_isys_video *av = ipu_isys_queue_to_video(aq);

	dev_dbg(&av->isys->adev->dev, "%s: queue unlock\n", av->vdev.name);
	mutex_unlock(&av->mutex);
}

static int buf_init(struct vb2_buffer *vb)
{
	struct ipu_isys_queue *aq = vb2_queue_to_ipu_isys_queue(vb->vb2_queue);
	struct ipu_isys_video *av = ipu_isys_queue_to_video(aq);

	dev_dbg(&av->isys->adev->dev, "buffer: %s: %s\n", av->vdev.name,
		__func__);

	if (aq->buf_init)
		return aq->buf_init(vb);

	return 0;
}

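/*
 * Default .buf_prepare implementation: reject buffers smaller than the
 * configured image size, set the payload to bytesperline * height and
 * point data_offset past the line header (line_header_length is in bits).
 */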
int ipu_isys_buf_prepare(struct vb2_buffer *vb)
{
	struct ipu_isys_queue *aq = vb2_queue_to_ipu_isys_queue(vb->vb2_queue);
	struct ipu_isys_video *av = ipu_isys_queue_to_video(aq);

	dev_dbg(&av->isys->adev->dev,
		"buffer: %s: configured size %u, buffer size %lu\n",
		av->vdev.name,
		av->mpix.plane_fmt[0].sizeimage, vb2_plane_size(vb, 0));

	if (av->mpix.plane_fmt[0].sizeimage > vb2_plane_size(vb, 0))
		return -EINVAL;

	vb2_set_plane_payload(vb, 0, av->mpix.plane_fmt[0].bytesperline *
			      av->mpix.height);
	vb->planes[0].data_offset = av->line_header_length / BITS_PER_BYTE;

	return 0;
}

static int buf_prepare(struct vb2_buffer *vb)
{
	struct ipu_isys_queue *aq = vb2_queue_to_ipu_isys_queue(vb->vb2_queue);
	struct ipu_isys_video *av = ipu_isys_queue_to_video(aq);
	int rval;

	if (av->isys->adev->isp->flr_done)
		return -EIO;

	rval = aq->buf_prepare(vb);
	return rval;
}

static void buf_finish(struct vb2_buffer *vb)
{
	struct ipu_isys_queue *aq = vb2_queue_to_ipu_isys_queue(vb->vb2_queue);
	struct ipu_isys_video *av = ipu_isys_queue_to_video(aq);

	dev_dbg(&av->isys->adev->dev, "buffer: %s: %s\n", av->vdev.name,
		__func__);
}

static void buf_cleanup(struct vb2_buffer *vb)
{
	struct ipu_isys_queue *aq = vb2_queue_to_ipu_isys_queue(vb->vb2_queue);
	struct ipu_isys_video *av = ipu_isys_queue_to_video(aq);

	dev_dbg(&av->isys->adev->dev, "buffer: %s: %s\n", av->vdev.name,
		__func__);

	if (aq->buf_cleanup)
		return aq->buf_cleanup(vb);
}

/*
 * Queue a buffer list back to incoming or active queues. The buffers
 * are removed from the buffer list.
 */
void ipu_isys_buffer_list_queue(struct ipu_isys_buffer_list *bl,
				unsigned long op_flags,
				enum vb2_buffer_state state)
{
	struct ipu_isys_buffer *ib, *ib_safe;
	unsigned long flags;
	bool first = true;

	if (!bl)
		return;

	WARN_ON(!bl->nbufs);
	WARN_ON(op_flags & IPU_ISYS_BUFFER_LIST_FL_ACTIVE &&
		op_flags & IPU_ISYS_BUFFER_LIST_FL_INCOMING);

	list_for_each_entry_safe(ib, ib_safe, &bl->head, head) {
		struct ipu_isys_video *av;

		if (ib->type == IPU_ISYS_VIDEO_BUFFER) {
			struct vb2_buffer *vb =
				ipu_isys_buffer_to_vb2_buffer(ib);
			struct ipu_isys_queue *aq =
				vb2_queue_to_ipu_isys_queue(vb->vb2_queue);

			av = ipu_isys_queue_to_video(aq);
			spin_lock_irqsave(&aq->lock, flags);
			list_del(&ib->head);
			if (op_flags & IPU_ISYS_BUFFER_LIST_FL_ACTIVE)
				list_add(&ib->head, &aq->active);
			else if (op_flags & IPU_ISYS_BUFFER_LIST_FL_INCOMING)
				list_add_tail(&ib->head, &aq->incoming);
			spin_unlock_irqrestore(&aq->lock, flags);

			if (op_flags & IPU_ISYS_BUFFER_LIST_FL_SET_STATE)
				vb2_buffer_done(vb, state);
		} else if (ib->type == IPU_ISYS_SHORT_PACKET_BUFFER) {
			struct ipu_isys_private_buffer *pb =
				ipu_isys_buffer_to_private_buffer(ib);
			struct ipu_isys_pipeline *ip = pb->ip;

			av = container_of(ip, struct ipu_isys_video, ip);
			spin_lock_irqsave(&ip->short_packet_queue_lock, flags);
			list_del(&ib->head);
			if (op_flags & IPU_ISYS_BUFFER_LIST_FL_ACTIVE)
				list_add(&ib->head, &ip->short_packet_active);
			else if (op_flags & IPU_ISYS_BUFFER_LIST_FL_INCOMING)
				list_add(&ib->head, &ip->short_packet_incoming);
			spin_unlock_irqrestore(&ip->short_packet_queue_lock,
					       flags);
		} else {
			WARN_ON(1);
			return;
		}

		if (first) {
			dev_dbg(&av->isys->adev->dev,
				"queue buf list %p flags %lx, s %d, %d bufs\n",
				bl, op_flags, state, bl->nbufs);
			first = false;
		}

		bl->nbufs--;
	}

	WARN_ON(bl->nbufs);
}

/*
 * flush_firmware_streamon_fail() - Flush in cases where requests may
 * have been queued to firmware and the firmware streamon fails for one
 * reason or another.
 */
static void flush_firmware_streamon_fail(struct ipu_isys_pipeline *ip)
{
	struct ipu_isys_video *pipe_av =
		container_of(ip, struct ipu_isys_video, ip);
	struct ipu_isys_queue *aq;
	unsigned long flags;

	lockdep_assert_held(&pipe_av->mutex);

	list_for_each_entry(aq, &ip->queues, node) {
		struct ipu_isys_video *av = ipu_isys_queue_to_video(aq);
		struct ipu_isys_buffer *ib, *ib_safe;

		spin_lock_irqsave(&aq->lock, flags);
		list_for_each_entry_safe(ib, ib_safe, &aq->active, head) {
			struct vb2_buffer *vb =
				ipu_isys_buffer_to_vb2_buffer(ib);

			list_del(&ib->head);
			if (av->streaming) {
				dev_dbg(&av->isys->adev->dev,
					"%s: queue buffer %u back to incoming\n",
					av->vdev.name,
					vb->index);
				/* Queue already streaming, return to driver. */
				list_add(&ib->head, &aq->incoming);
				continue;
			}
			/* Queue not yet streaming, return to user. */
			dev_dbg(&av->isys->adev->dev,
				"%s: return %u back to videobuf2\n",
				av->vdev.name,
				vb->index);
			vb2_buffer_done(ipu_isys_buffer_to_vb2_buffer(ib),
					VB2_BUF_STATE_QUEUED);
		}
		spin_unlock_irqrestore(&aq->lock, flags);
	}
}

/*
 * Attempt to obtain a buffer list from the incoming queues, i.e. a list
 * with one entry from each video buffer queue. If any queue has no
 * buffer available, the buffers that were already dequeued are returned
 * to their queues.
 */
static int buffer_list_get(struct ipu_isys_pipeline *ip,
			   struct ipu_isys_buffer_list *bl)
{
	struct ipu_isys_queue *aq;
	struct ipu_isys_buffer *ib;
	unsigned long flags;
	int ret = 0;

	bl->nbufs = 0;
	INIT_LIST_HEAD(&bl->head);

	list_for_each_entry(aq, &ip->queues, node) {
		struct ipu_isys_buffer *ib;

		spin_lock_irqsave(&aq->lock, flags);
		if (list_empty(&aq->incoming)) {
			spin_unlock_irqrestore(&aq->lock, flags);
			ret = -ENODATA;
			goto error;
		}

		ib = list_last_entry(&aq->incoming,
				     struct ipu_isys_buffer, head);
		if (ib->req) {
			spin_unlock_irqrestore(&aq->lock, flags);
			ret = -ENODATA;
			goto error;
		}

		dev_dbg(&ip->isys->adev->dev, "buffer: %s: buffer %u\n",
			ipu_isys_queue_to_video(aq)->vdev.name,
			ipu_isys_buffer_to_vb2_buffer(ib)->index);
		list_del(&ib->head);
		list_add(&ib->head, &bl->head);
		spin_unlock_irqrestore(&aq->lock, flags);

		bl->nbufs++;
	}

	list_for_each_entry(ib, &bl->head, head) {
		struct vb2_buffer *vb = ipu_isys_buffer_to_vb2_buffer(ib);

		aq = vb2_queue_to_ipu_isys_queue(vb->vb2_queue);
		if (aq->prepare_frame_buff_set)
			aq->prepare_frame_buff_set(vb);
	}

	/* Get short packet buffer. */
	if (ip->interlaced && ip->isys->short_packet_source ==
	    IPU_ISYS_SHORT_PACKET_FROM_RECEIVER) {
		ib = ipu_isys_csi2_get_short_packet_buffer(ip, bl);
		if (!ib) {
			ret = -ENODATA;
			dev_err(&ip->isys->adev->dev,
				"No more short packet buffers. Driver bug?");
			WARN_ON(1);
			goto error;
		}
		bl->nbufs++;
	}

	dev_dbg(&ip->isys->adev->dev, "get buffer list %p, %u buffers\n", bl,
		bl->nbufs);
	return ret;

error:
	if (!list_empty(&bl->head))
		ipu_isys_buffer_list_queue(bl,
					   IPU_ISYS_BUFFER_LIST_FL_INCOMING, 0);
	return ret;
}

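/*
 * Fill the firmware output pin for one vb2 buffer: the DMA address of
 * plane 0, a 1-based output buffer id and, when compression is enabled
 * on this video node, the compress flag.
 */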
void
ipu_isys_buffer_to_fw_frame_buff_pin(struct vb2_buffer *vb,
				     struct ipu_fw_isys_frame_buff_set_abi *set)
{
	struct ipu_isys_queue *aq = vb2_queue_to_ipu_isys_queue(vb->vb2_queue);
	struct ipu_isys_video *av = container_of(aq, struct ipu_isys_video, aq);

	if (av->compression)
		set->output_pins[aq->fw_output].compress = 1;

	set->output_pins[aq->fw_output].addr =
		vb2_dma_contig_plane_dma_addr(vb, 0);
	set->output_pins[aq->fw_output].out_buf_id =
		vb->index + 1;
}

/*
 * Convert a buffer list to an isys firmware ABI frame buffer set. The
 * buffer list is not modified.
 */
#define IPU_ISYS_FRAME_NUM_THRESHOLD (30)
void
ipu_isys_buffer_to_fw_frame_buff(struct ipu_fw_isys_frame_buff_set_abi *set,
				 struct ipu_isys_pipeline *ip,
				 struct ipu_isys_buffer_list *bl)
{
	struct ipu_isys_buffer *ib;

	WARN_ON(!bl->nbufs);

	set->send_irq_sof = 1;
	set->send_resp_sof = 1;
	set->send_irq_eof = 0;
	set->send_resp_eof = 0;

	if (ip->streaming)
		set->send_irq_capture_ack = 0;
	else
		set->send_irq_capture_ack = 1;
	set->send_irq_capture_done = 0;

	set->send_resp_capture_ack = 1;
	set->send_resp_capture_done = 1;
	if (!ip->interlaced &&
	    atomic_read(&ip->sequence) >= IPU_ISYS_FRAME_NUM_THRESHOLD) {
		set->send_resp_capture_ack = 0;
		set->send_resp_capture_done = 0;
	}

	list_for_each_entry(ib, &bl->head, head) {
		if (ib->type == IPU_ISYS_VIDEO_BUFFER) {
			struct vb2_buffer *vb =
				ipu_isys_buffer_to_vb2_buffer(ib);
			struct ipu_isys_queue *aq =
				vb2_queue_to_ipu_isys_queue(vb->vb2_queue);

			if (aq->fill_frame_buff_set_pin)
				aq->fill_frame_buff_set_pin(vb, set);
		} else if (ib->type == IPU_ISYS_SHORT_PACKET_BUFFER) {
			struct ipu_isys_private_buffer *pb =
				ipu_isys_buffer_to_private_buffer(ib);
			struct ipu_fw_isys_output_pin_payload_abi *output_pin =
				&set->output_pins[ip->short_packet_output_pin];

			output_pin->addr = pb->dma_addr;
			output_pin->out_buf_id = pb->index + 1;
		} else {
			WARN_ON(1);
		}
	}
}

/* Start streaming for real. The buffer list must be available. */
static int ipu_isys_stream_start(struct ipu_isys_pipeline *ip,
				 struct ipu_isys_buffer_list *bl, bool error)
{
	struct ipu_isys_video *pipe_av =
		container_of(ip, struct ipu_isys_video, ip);
	struct ipu_isys_buffer_list __bl;
	int rval;

	mutex_lock(&pipe_av->isys->stream_mutex);

	rval = ipu_isys_video_set_streaming(pipe_av, 1, bl);
	if (rval) {
		mutex_unlock(&pipe_av->isys->stream_mutex);
		goto out_requeue;
	}

	ip->streaming = 1;

	mutex_unlock(&pipe_av->isys->stream_mutex);

	bl = &__bl;

	do {
		struct ipu_fw_isys_frame_buff_set_abi *buf = NULL;
		struct isys_fw_msgs *msg;
		enum ipu_fw_isys_send_type send_type =
			IPU_FW_ISYS_SEND_TYPE_STREAM_CAPTURE;

		rval = buffer_list_get(ip, bl);
		if (rval == -EINVAL)
			goto out_requeue;
		else if (rval < 0)
			break;

		msg = ipu_get_fw_msg_buf(ip);
		if (!msg)
			return -ENOMEM;

		buf = to_frame_msg_buf(msg);

		ipu_isys_buffer_to_fw_frame_buff(buf, ip, bl);

		ipu_fw_isys_dump_frame_buff_set(&pipe_av->isys->adev->dev, buf,
						ip->nr_output_pins);

		ipu_isys_buffer_list_queue(bl,
					   IPU_ISYS_BUFFER_LIST_FL_ACTIVE, 0);

		rval = ipu_fw_isys_complex_cmd(pipe_av->isys,
					       ip->stream_handle,
					       buf, to_dma_addr(msg),
					       sizeof(*buf),
					       send_type);
		ipu_put_fw_mgs_buf(pipe_av->isys, (uintptr_t)buf);
	} while (!WARN_ON(rval));

	return 0;

out_requeue:
	if (bl && bl->nbufs)
		ipu_isys_buffer_list_queue(bl,
					   IPU_ISYS_BUFFER_LIST_FL_INCOMING |
					   (error ?
					    IPU_ISYS_BUFFER_LIST_FL_SET_STATE :
					    0),
					   error ? VB2_BUF_STATE_ERROR :
					   VB2_BUF_STATE_QUEUED);
	flush_firmware_streamon_fail(ip);

	return rval;
}

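/*
 * .buf_queue path: add the buffer to the queue's incoming list. If the
 * pipeline is streaming and every queue in it can now contribute a
 * buffer, build a frame buffer set and send it to the firmware; the
 * first complete set also triggers the deferred stream start.
 */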
static void __buf_queue(struct vb2_buffer *vb, bool force)
{
	struct ipu_isys_queue *aq = vb2_queue_to_ipu_isys_queue(vb->vb2_queue);
	struct ipu_isys_video *av = ipu_isys_queue_to_video(aq);
	struct ipu_isys_buffer *ib = vb2_buffer_to_ipu_isys_buffer(vb);
	struct ipu_isys_pipeline *ip =
		to_ipu_isys_pipeline(av->vdev.entity.pipe);
	struct ipu_isys_buffer_list bl;

	struct ipu_fw_isys_frame_buff_set_abi *buf = NULL;
	struct isys_fw_msgs *msg;

	struct ipu_isys_video *pipe_av =
		container_of(ip, struct ipu_isys_video, ip);
	unsigned long flags;
	unsigned int i;
	int rval;

	dev_dbg(&av->isys->adev->dev, "buffer: %s: buf_queue %u\n",
		av->vdev.name,
		vb->index);

	for (i = 0; i < vb->num_planes; i++)
		dev_dbg(&av->isys->adev->dev, "iova: plane %u iova 0x%x\n", i,
			(u32)vb2_dma_contig_plane_dma_addr(vb, i));

	spin_lock_irqsave(&aq->lock, flags);
	list_add(&ib->head, &aq->incoming);
	spin_unlock_irqrestore(&aq->lock, flags);

	if (ib->req)
		return;

	if (!pipe_av || !vb->vb2_queue->streaming) {
		dev_dbg(&av->isys->adev->dev,
			"not pipe_av set, adding to incoming\n");
		return;
	}

	mutex_unlock(&av->mutex);
	mutex_lock(&pipe_av->mutex);

	if (!force && ip->nr_streaming != ip->nr_queues) {
		dev_dbg(&av->isys->adev->dev,
			"not streaming yet, adding to incoming\n");
		goto out;
	}

	/*
	 * We just added one buffer to the incoming list of this queue
	 * (above). Check whether all queues in the pipeline now have a
	 * buffer available.
	 */
	rval = buffer_list_get(ip, &bl);
	if (rval < 0) {
		if (rval == -EINVAL) {
			dev_err(&av->isys->adev->dev,
				"error: should not happen\n");
			WARN_ON(1);
		} else {
			dev_dbg(&av->isys->adev->dev,
				"not enough buffers available\n");
		}
		goto out;
	}

	msg = ipu_get_fw_msg_buf(ip);
	if (!msg) {
		rval = -ENOMEM;
		goto out;
	}
	buf = to_frame_msg_buf(msg);

	ipu_isys_buffer_to_fw_frame_buff(buf, ip, &bl);

	ipu_fw_isys_dump_frame_buff_set(&pipe_av->isys->adev->dev, buf,
					ip->nr_output_pins);

	if (!ip->streaming) {
		dev_dbg(&av->isys->adev->dev,
			"got a buffer to start streaming!\n");
		rval = ipu_isys_stream_start(ip, &bl, true);
		if (rval)
			dev_err(&av->isys->adev->dev,
				"stream start failed.\n");
		goto out;
	}

	/*
	 * We must queue the buffers in the buffer list to the
	 * appropriate video buffer queues BEFORE passing them to the
	 * firmware since we could get a buffer event back before we
	 * have queued them ourselves to the active queue.
	 */
	ipu_isys_buffer_list_queue(&bl, IPU_ISYS_BUFFER_LIST_FL_ACTIVE, 0);

	rval = ipu_fw_isys_complex_cmd(pipe_av->isys,
				       ip->stream_handle,
				       buf, to_dma_addr(msg),
				       sizeof(*buf),
				       IPU_FW_ISYS_SEND_TYPE_STREAM_CAPTURE);
	ipu_put_fw_mgs_buf(pipe_av->isys, (uintptr_t)buf);
	if (!WARN_ON(rval < 0))
		dev_dbg(&av->isys->adev->dev, "queued buffer\n");

out:
	mutex_unlock(&pipe_av->mutex);
	mutex_lock(&av->mutex);
}

static void buf_queue(struct vb2_buffer *vb)
{
	__buf_queue(vb, false);
}

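/*
 * Validate that the active format on the connected source subdev pad
 * matches the video node format: width, height, field and media bus
 * code must all agree.
 */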
int ipu_isys_link_fmt_validate(struct ipu_isys_queue *aq)
{
	struct ipu_isys_video *av = ipu_isys_queue_to_video(aq);
	struct v4l2_subdev_format fmt = { 0 };
	struct media_pad *pad = media_entity_remote_pad(av->vdev.entity.pads);
	struct v4l2_subdev *sd;
	int rval;

	if (!pad) {
		dev_dbg(&av->isys->adev->dev,
			"video node %s pad not connected\n", av->vdev.name);
		return -ENOTCONN;
	}

	sd = media_entity_to_v4l2_subdev(pad->entity);

	fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
	fmt.pad = pad->index;
	rval = v4l2_subdev_call(sd, pad, get_fmt, NULL, &fmt);
	if (rval)
		return rval;

	if (fmt.format.width != av->mpix.width ||
	    fmt.format.height != av->mpix.height) {
		dev_dbg(&av->isys->adev->dev,
			"wrong width or height %ux%u (%ux%u expected)\n",
			av->mpix.width, av->mpix.height,
			fmt.format.width, fmt.format.height);
		return -EINVAL;
	}

	if (fmt.format.field != av->mpix.field) {
		dev_dbg(&av->isys->adev->dev,
			"wrong field value 0x%8.8x (0x%8.8x expected)\n",
			av->mpix.field, fmt.format.field);
		return -EINVAL;
	}

	if (fmt.format.code != av->pfmt->code) {
		dev_dbg(&av->isys->adev->dev,
			"wrong media bus code 0x%8.8x (0x%8.8x expected)\n",
			av->pfmt->code, fmt.format.code);
		return -EINVAL;
	}

	return 0;
}

/* Return buffers back to videobuf2. */
static void return_buffers(struct ipu_isys_queue *aq,
			   enum vb2_buffer_state state)
{
	struct ipu_isys_video *av = ipu_isys_queue_to_video(aq);
	int reset_needed = 0;
	unsigned long flags;

	spin_lock_irqsave(&aq->lock, flags);
	while (!list_empty(&aq->incoming)) {
		struct ipu_isys_buffer *ib = list_first_entry(&aq->incoming,
							      struct ipu_isys_buffer,
							      head);
		struct vb2_buffer *vb = ipu_isys_buffer_to_vb2_buffer(ib);

		list_del(&ib->head);
		spin_unlock_irqrestore(&aq->lock, flags);

		vb2_buffer_done(vb, state);

		dev_dbg(&av->isys->adev->dev,
			"%s: stop_streaming incoming %u\n",
			ipu_isys_queue_to_video(vb2_queue_to_ipu_isys_queue
						(vb->vb2_queue))->vdev.name,
			vb->index);

		spin_lock_irqsave(&aq->lock, flags);
	}

	/*
	 * Something went wrong (FW crash / HW hang / not all buffers
	 * returned from isys) if there are still buffers queued in the
	 * active queue. Clean up a bit and flag that the isys needs a
	 * reset.
	 */
	while (!list_empty(&aq->active)) {
		struct ipu_isys_buffer *ib = list_first_entry(&aq->active,
							      struct ipu_isys_buffer,
							      head);
		struct vb2_buffer *vb = ipu_isys_buffer_to_vb2_buffer(ib);

		list_del(&ib->head);
		spin_unlock_irqrestore(&aq->lock, flags);

		vb2_buffer_done(vb, state);

		dev_warn(&av->isys->adev->dev, "%s: cleaning active queue %u\n",
			 ipu_isys_queue_to_video(vb2_queue_to_ipu_isys_queue
						 (vb->vb2_queue))->vdev.name,
			 vb->index);

		spin_lock_irqsave(&aq->lock, flags);
		reset_needed = 1;
	}

	spin_unlock_irqrestore(&aq->lock, flags);

	if (reset_needed) {
		mutex_lock(&av->isys->mutex);
		av->isys->reset_needed = true;
		mutex_unlock(&av->isys->mutex);
	}
}

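/*
 * vb2 .start_streaming operation: prepare the pipeline when this is the
 * first queue to start, validate the link format and join the pipeline.
 * Once every queue in the pipeline is streaming, try to collect an
 * initial buffer list and start the firmware stream; otherwise the
 * actual stream start is postponed until the remaining queues arrive.
 */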
static int start_streaming(struct vb2_queue *q, unsigned int count)
{
	struct ipu_isys_queue *aq = vb2_queue_to_ipu_isys_queue(q);
	struct ipu_isys_video *av = ipu_isys_queue_to_video(aq);
	struct ipu_isys_video *pipe_av;
	struct ipu_isys_pipeline *ip;
	struct ipu_isys_buffer_list __bl, *bl = NULL;
	bool first;
	int rval;

	dev_dbg(&av->isys->adev->dev,
		"stream: %s: width %u, height %u, css pixelformat %u\n",
		av->vdev.name, av->mpix.width, av->mpix.height,
		av->pfmt->css_pixelformat);

	mutex_lock(&av->isys->stream_mutex);

	first = !av->vdev.entity.pipe;

	if (first) {
		rval = ipu_isys_video_prepare_streaming(av, 1);
		if (rval)
			goto out_return_buffers;
	}

	mutex_unlock(&av->isys->stream_mutex);

	rval = aq->link_fmt_validate(aq);
	if (rval) {
		dev_dbg(&av->isys->adev->dev,
			"%s: link format validation failed (%d)\n",
			av->vdev.name, rval);
		goto out_unprepare_streaming;
	}

	ip = to_ipu_isys_pipeline(av->vdev.entity.pipe);
	pipe_av = container_of(ip, struct ipu_isys_video, ip);
	mutex_unlock(&av->mutex);

	mutex_lock(&pipe_av->mutex);
	ip->nr_streaming++;
	dev_dbg(&av->isys->adev->dev, "queue %u of %u\n", ip->nr_streaming,
		ip->nr_queues);
	list_add(&aq->node, &ip->queues);
	if (ip->nr_streaming != ip->nr_queues)
		goto out;

	if (list_empty(&av->isys->requests)) {
		bl = &__bl;
		rval = buffer_list_get(ip, bl);
		if (rval == -EINVAL) {
			goto out_stream_start;
		} else if (rval < 0) {
			dev_dbg(&av->isys->adev->dev,
				"no request available, postponing streamon\n");
			goto out;
		}
	}

	rval = ipu_isys_stream_start(ip, bl, false);
	if (rval)
		goto out_stream_start;

out:
	mutex_unlock(&pipe_av->mutex);
	mutex_lock(&av->mutex);

	return 0;

out_stream_start:
	list_del(&aq->node);
	ip->nr_streaming--;
	mutex_unlock(&pipe_av->mutex);
	mutex_lock(&av->mutex);

out_unprepare_streaming:
	mutex_lock(&av->isys->stream_mutex);
	if (first)
		ipu_isys_video_prepare_streaming(av, 0);

out_return_buffers:
	mutex_unlock(&av->isys->stream_mutex);
	return_buffers(aq, VB2_BUF_STATE_QUEUED);

	return rval;
}

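/*
 * vb2 .stop_streaming operation: stop the firmware stream if the whole
 * pipeline was streaming, tear the pipeline down when the last queue
 * leaves, and return any remaining buffers to videobuf2 as errored.
 */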
static void stop_streaming(struct vb2_queue *q)
{
	struct ipu_isys_queue *aq = vb2_queue_to_ipu_isys_queue(q);
	struct ipu_isys_video *av = ipu_isys_queue_to_video(aq);
	struct ipu_isys_pipeline *ip =
		to_ipu_isys_pipeline(av->vdev.entity.pipe);
	struct ipu_isys_video *pipe_av =
		container_of(ip, struct ipu_isys_video, ip);

	if (pipe_av != av) {
		mutex_unlock(&av->mutex);
		mutex_lock(&pipe_av->mutex);
	}

	mutex_lock(&av->isys->stream_mutex);
	if (ip->nr_streaming == ip->nr_queues && ip->streaming)
		ipu_isys_video_set_streaming(av, 0, NULL);
	if (ip->nr_streaming == 1)
		ipu_isys_video_prepare_streaming(av, 0);
	mutex_unlock(&av->isys->stream_mutex);

	ip->nr_streaming--;
	list_del(&aq->node);
	ip->streaming = 0;

	if (pipe_av != av) {
		mutex_unlock(&pipe_av->mutex);
		mutex_lock(&av->mutex);
	}

	return_buffers(aq, VB2_BUF_STATE_ERROR);
}

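/*
 * Map a firmware response timestamp to a frame sequence number by
 * searching the SOF events recorded for the pipeline. A zero timestamp
 * (no TSC available) falls back to the pipeline sequence counter.
 */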
static unsigned int
get_sof_sequence_by_timestamp(struct ipu_isys_pipeline *ip,
			      struct ipu_fw_isys_resp_info_abi *info)
{
	struct ipu_isys *isys =
		container_of(ip, struct ipu_isys_video, ip)->isys;
	u64 time = (u64)info->timestamp[1] << 32 | info->timestamp[0];
	unsigned int i;

	/*
	 * The timestamp can be invalid on some FPGA platforms that have
	 * no TSC; in that case, take the sequence directly from the
	 * pipeline.
	 */
	if (time == 0)
		return atomic_read(&ip->sequence) - 1;
	for (i = 0; i < IPU_ISYS_MAX_PARALLEL_SOF; i++)
		if (time == ip->seq[i].timestamp) {
			dev_dbg(&isys->adev->dev,
				"sof: using seq nr %u for ts 0x%16.16llx\n",
				ip->seq[i].sequence, time);
			return ip->seq[i].sequence;
		}

	dev_dbg(&isys->adev->dev, "SOF: looking for 0x%16.16llx\n", time);
	for (i = 0; i < IPU_ISYS_MAX_PARALLEL_SOF; i++)
		dev_dbg(&isys->adev->dev,
			"SOF: sequence %u, timestamp value 0x%16.16llx\n",
			ip->seq[i].sequence, ip->seq[i].timestamp);
	dev_dbg(&isys->adev->dev, "SOF sequence number not found\n");

	return 0;
}

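/*
 * How long ago did the SOF happen? Read the current TSC and convert the
 * tick delta to nanoseconds; a failed TSC read yields a zero delta.
 */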
static u64 get_sof_ns_delta(struct ipu_isys_video *av,
			    struct ipu_fw_isys_resp_info_abi *info)
{
	struct ipu_bus_device *adev = to_ipu_bus_device(&av->isys->adev->dev);
	struct ipu_device *isp = adev->isp;
	u64 delta, tsc_now;

	if (!ipu_buttress_tsc_read(isp, &tsc_now))
		delta = tsc_now -
			((u64)info->timestamp[1] << 32 | info->timestamp[0]);
	else
		delta = 0;

	return ipu_buttress_tsc_ticks_to_ns(delta);
}

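/*
 * Fill in the vb2 timestamp and sequence number. With SOF events the
 * timestamp is the current CPU time (REALTIME when wall_clock_ts_on is
 * set, MONOTONIC otherwise) minus the time elapsed since the SOF, and
 * the sequence comes from the matching SOF event; without SOF events
 * the current time and a per-pipeline counter are used instead.
 */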
void
ipu_isys_buf_calc_sequence_time(struct ipu_isys_buffer *ib,
				struct ipu_fw_isys_resp_info_abi *info)
{
	struct vb2_buffer *vb = ipu_isys_buffer_to_vb2_buffer(ib);
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct ipu_isys_queue *aq = vb2_queue_to_ipu_isys_queue(vb->vb2_queue);
	struct ipu_isys_video *av = ipu_isys_queue_to_video(aq);
	struct device *dev = &av->isys->adev->dev;
	struct ipu_isys_pipeline *ip =
		to_ipu_isys_pipeline(av->vdev.entity.pipe);
	u64 ns;
	u32 sequence;

	if (ip->has_sof) {
		ns = (wall_clock_ts_on) ? ktime_get_real_ns() : ktime_get_ns();
		ns -= get_sof_ns_delta(av, info);
		sequence = get_sof_sequence_by_timestamp(ip, info);
	} else {
		ns = ((wall_clock_ts_on) ? ktime_get_real_ns() :
		      ktime_get_ns());
		sequence = (atomic_inc_return(&ip->sequence) - 1)
			/ ip->nr_queues;
	}

	vbuf->vb2_buf.timestamp = ns;
	vbuf->sequence = sequence;

	dev_dbg(dev, "buf: %s: buffer done, CPU-timestamp:%lld, sequence:%d\n",
		av->vdev.name, ktime_get_ns(), sequence);
	dev_dbg(dev, "index:%d, vbuf timestamp:%lld, endl\n",
		vb->index, vbuf->vb2_buf.timestamp);
}

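/*
 * Complete a vb2 buffer: report it as errored if the str2mmio error flag
 * was set when the firmware response arrived, otherwise as done.
 */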
void ipu_isys_queue_buf_done(struct ipu_isys_buffer *ib)
{
	struct vb2_buffer *vb = ipu_isys_buffer_to_vb2_buffer(ib);

	if (atomic_read(&ib->str2mmio_flag)) {
		vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
		/*
		 * The operation on the buffer ended with an error; this
		 * will be reported to user space when the buffer is
		 * dequeued.
		 */
		atomic_set(&ib->str2mmio_flag, 0);
	} else {
		vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
	}
}

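/*
 * Handle a pin-data-ready response from the firmware: look up the active
 * buffer whose DMA address matches the reported pin address, set its
 * timestamp and sequence, and complete it (for interlaced streams the
 * completion is deferred until capture_done, when the field is known).
 */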
void ipu_isys_queue_buf_ready(struct ipu_isys_pipeline *ip,
			      struct ipu_fw_isys_resp_info_abi *info)
{
	struct ipu_isys *isys =
		container_of(ip, struct ipu_isys_video, ip)->isys;
	struct ipu_isys_queue *aq = ip->output_pins[info->pin_id].aq;
	struct ipu_isys_buffer *ib;
	struct vb2_buffer *vb;
	unsigned long flags;
	bool first = true;
	struct vb2_v4l2_buffer *buf;

	dev_dbg(&isys->adev->dev, "buffer: %s: received buffer %8.8x\n",
		ipu_isys_queue_to_video(aq)->vdev.name, info->pin.addr);

	spin_lock_irqsave(&aq->lock, flags);
	if (list_empty(&aq->active)) {
		spin_unlock_irqrestore(&aq->lock, flags);
		dev_err(&isys->adev->dev, "active queue empty\n");
		return;
	}

	list_for_each_entry_reverse(ib, &aq->active, head) {
		dma_addr_t addr;

		vb = ipu_isys_buffer_to_vb2_buffer(ib);
		addr = vb2_dma_contig_plane_dma_addr(vb, 0);

		if (info->pin.addr != addr) {
			if (first)
				dev_err(&isys->adev->dev,
					"WARN: buffer address %pad expected!\n",
					&addr);
			first = false;
			continue;
		}

		if (info->error_info.error ==
		    IPU_FW_ISYS_ERROR_HW_REPORTED_STR2MMIO) {
			/*
			 * Check for error message:
			 * 'IPU_FW_ISYS_ERROR_HW_REPORTED_STR2MMIO'
			 */
			atomic_set(&ib->str2mmio_flag, 1);
		}
		dev_dbg(&isys->adev->dev, "buffer: found buffer %pad\n", &addr);

		buf = to_vb2_v4l2_buffer(vb);
		buf->field = V4L2_FIELD_NONE;

		list_del(&ib->head);
		spin_unlock_irqrestore(&aq->lock, flags);

		ipu_isys_buf_calc_sequence_time(ib, info);

		/*
		 * For interlaced buffers, the notification to user space
		 * is postponed to capture_done event since the field
		 * information is available only at that time.
		 */
		if (ip->interlaced) {
			spin_lock_irqsave(&ip->short_packet_queue_lock, flags);
			list_add(&ib->head, &ip->pending_interlaced_bufs);
			spin_unlock_irqrestore(&ip->short_packet_queue_lock,
					       flags);
		} else {
			ipu_isys_queue_buf_done(ib);
		}

		return;
	}

	dev_err(&isys->adev->dev,
		"WARNING: cannot find a matching video buffer!\n");

	spin_unlock_irqrestore(&aq->lock, flags);
}

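/*
 * Short packet response: derive the current field for interlaced
 * capture from the short packet timestamp.
 */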
void
ipu_isys_queue_short_packet_ready(struct ipu_isys_pipeline *ip,
				  struct ipu_fw_isys_resp_info_abi *info)
{
	struct ipu_isys *isys =
		container_of(ip, struct ipu_isys_video, ip)->isys;
	unsigned long flags;

	dev_dbg(&isys->adev->dev, "receive short packet buffer %8.8x\n",
		info->pin.addr);
	spin_lock_irqsave(&ip->short_packet_queue_lock, flags);
	ip->cur_field = ipu_isys_csi2_get_current_field(ip, info->timestamp);
	spin_unlock_irqrestore(&ip->short_packet_queue_lock, flags);
}

struct vb2_ops ipu_isys_queue_ops = {
	.queue_setup = queue_setup,
	.wait_prepare = ipu_isys_queue_unlock,
	.wait_finish = ipu_isys_queue_lock,
	.buf_init = buf_init,
	.buf_prepare = buf_prepare,
	.buf_finish = buf_finish,
	.buf_cleanup = buf_cleanup,
	.start_streaming = start_streaming,
	.stop_streaming = stop_streaming,
	.buf_queue = buf_queue,
};

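/*
 * Initialise the vb2 queue with the driver's ops and the dma-contig
 * allocator, and set up the driver-side buffer lists. The caller is
 * expected to have set vbq.type and vbq.buf_struct_size beforehand
 * (presumably in the video node setup code); io_modes is only filled in
 * here if the caller left it at zero.
 */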
int ipu_isys_queue_init(struct ipu_isys_queue *aq)
{
	struct ipu_isys *isys = ipu_isys_queue_to_video(aq)->isys;
	int rval;

	if (!aq->vbq.io_modes)
		aq->vbq.io_modes = VB2_USERPTR | VB2_MMAP | VB2_DMABUF;
	aq->vbq.drv_priv = aq;
	aq->vbq.ops = &ipu_isys_queue_ops;
	aq->vbq.mem_ops = &vb2_dma_contig_memops;
	aq->vbq.timestamp_flags = (wall_clock_ts_on) ?
		V4L2_BUF_FLAG_TIMESTAMP_UNKNOWN : V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;

	rval = vb2_queue_init(&aq->vbq);
	if (rval)
		return rval;

	aq->dev = &isys->adev->dev;
	aq->vbq.dev = &isys->adev->dev;
	spin_lock_init(&aq->lock);
	INIT_LIST_HEAD(&aq->active);
	INIT_LIST_HEAD(&aq->incoming);

	return 0;
}

void ipu_isys_queue_cleanup(struct ipu_isys_queue *aq)
{
	vb2_queue_release(&aq->vbq);
}