1 // SPDX-License-Identifier: GPL-2.0
2 // Copyright (C) 2013 - 2020 Intel Corporation
4 #include <linux/completion.h>
5 #include <linux/device.h>
6 #include <linux/module.h>
7 #include <linux/string.h>
9 #include <media/media-entity.h>
10 #include <media/videobuf2-dma-contig.h>
11 #include <media/v4l2-ioctl.h>
15 #include "ipu-buttress.h"
17 #include "ipu-isys-csi2.h"
18 #include "ipu-isys-video.h"
/*
 * Module parameter: when true, buffer timestamps are derived from the
 * REALTIME (wall) clock instead of the monotonic clock (see
 * ipu_isys_buf_calc_sequence_time and ipu_isys_queue_init below).
 */
20 static bool wall_clock_ts_on
;
21 module_param(wall_clock_ts_on
, bool, 0660);
22 MODULE_PARM_DESC(wall_clock_ts_on
, "Timestamp based on REALTIME clock");
/*
 * vb2 .queue_setup callback: report the number of planes and the per-plane
 * sizes from the video node's current multiplanar format (av->mpix), and
 * point each plane's allocator at the queue's DMA device.
 * NOTE(review): extraction appears to have dropped some lines here
 * (declarations of i/sizes, closing braces) — confirm against the full file.
 */
24 static int queue_setup(struct vb2_queue
*q
,
25 unsigned int *num_buffers
, unsigned int *num_planes
,
27 struct device
*alloc_devs
[])
29 struct ipu_isys_queue
*aq
= vb2_queue_to_ipu_isys_queue(q
);
30 struct ipu_isys_video
*av
= ipu_isys_queue_to_video(aq
);
34 /* num_planes == 0: we're being called through VIDIOC_REQBUFS */
37 *num_planes
= av
->mpix
.num_planes
;
40 for (i
= 0; i
< *num_planes
; i
++) {
42 sizes
[i
] = av
->mpix
.plane_fmt
[i
].sizeimage
;
43 alloc_devs
[i
] = aq
->dev
;
44 dev_dbg(&av
->isys
->adev
->dev
,
45 "%s: queue setup: plane %d size %u\n",
46 av
->vdev
.name
, i
, sizes
[i
]);
/*
 * vb2 .wait_finish callback: re-acquire the video node's mutex after a
 * blocking wait in videobuf2.
 */
52 static void ipu_isys_queue_lock(struct vb2_queue
*q
)
54 struct ipu_isys_queue
*aq
= vb2_queue_to_ipu_isys_queue(q
);
55 struct ipu_isys_video
*av
= ipu_isys_queue_to_video(aq
);
57 dev_dbg(&av
->isys
->adev
->dev
, "%s: queue lock\n", av
->vdev
.name
);
58 mutex_lock(&av
->mutex
);
/*
 * vb2 .wait_prepare callback: drop the video node's mutex before videobuf2
 * sleeps waiting for a buffer.
 */
61 static void ipu_isys_queue_unlock(struct vb2_queue
*q
)
63 struct ipu_isys_queue
*aq
= vb2_queue_to_ipu_isys_queue(q
);
64 struct ipu_isys_video
*av
= ipu_isys_queue_to_video(aq
);
66 dev_dbg(&av
->isys
->adev
->dev
, "%s: queue unlock\n", av
->vdev
.name
);
67 mutex_unlock(&av
->mutex
);
/*
 * vb2 .buf_init callback: delegate per-buffer initialization to the
 * queue-specific aq->buf_init hook.
 * NOTE(review): likely guarded by "if (aq->buf_init)" in the full source —
 * extraction dropped intermediate lines; confirm.
 */
70 static int buf_init(struct vb2_buffer
*vb
)
72 struct ipu_isys_queue
*aq
= vb2_queue_to_ipu_isys_queue(vb
->vb2_queue
);
73 struct ipu_isys_video
*av
= ipu_isys_queue_to_video(aq
);
75 dev_dbg(&av
->isys
->adev
->dev
, "buffer: %s: %s\n", av
->vdev
.name
,
79 return aq
->buf_init(vb
);
/*
 * Common buffer-prepare helper: reject buffers smaller than the configured
 * image size, set the plane payload from bytesperline, and set the plane
 * data_offset to skip the per-line header (line_header_length is in bits,
 * hence the division by BITS_PER_BYTE).
 */
84 int ipu_isys_buf_prepare(struct vb2_buffer
*vb
)
86 struct ipu_isys_queue
*aq
= vb2_queue_to_ipu_isys_queue(vb
->vb2_queue
);
87 struct ipu_isys_video
*av
= ipu_isys_queue_to_video(aq
);
89 dev_dbg(&av
->isys
->adev
->dev
,
90 "buffer: %s: configured size %u, buffer size %lu\n",
92 av
->mpix
.plane_fmt
[0].sizeimage
, vb2_plane_size(vb
, 0));
94 if (av
->mpix
.plane_fmt
[0].sizeimage
> vb2_plane_size(vb
, 0))
97 vb2_set_plane_payload(vb
, 0, av
->mpix
.plane_fmt
[0].bytesperline
*
99 vb
->planes
[0].data_offset
= av
->line_header_length
/ BITS_PER_BYTE
;
/*
 * vb2 .buf_prepare callback: bail out early if the ISP has gone through a
 * function-level reset (flr_done), otherwise delegate to the queue-specific
 * aq->buf_prepare hook.
 */
104 static int buf_prepare(struct vb2_buffer
*vb
)
106 struct ipu_isys_queue
*aq
= vb2_queue_to_ipu_isys_queue(vb
->vb2_queue
);
107 struct ipu_isys_video
*av
= ipu_isys_queue_to_video(aq
);
110 if (av
->isys
->adev
->isp
->flr_done
)
113 rval
= aq
->buf_prepare(vb
);
/*
 * vb2 .buf_finish callback: currently only emits a debug trace for the
 * buffer being returned to userspace.
 */
117 static void buf_finish(struct vb2_buffer
*vb
)
119 struct ipu_isys_queue
*aq
= vb2_queue_to_ipu_isys_queue(vb
->vb2_queue
);
120 struct ipu_isys_video
*av
= ipu_isys_queue_to_video(aq
);
122 dev_dbg(&av
->isys
->adev
->dev
, "buffer: %s: %s\n", av
->vdev
.name
,
/*
 * vb2 .buf_cleanup callback: delegate teardown to the queue-specific
 * aq->buf_cleanup hook (mirrors buf_init above).
 */
127 static void buf_cleanup(struct vb2_buffer
*vb
)
129 struct ipu_isys_queue
*aq
= vb2_queue_to_ipu_isys_queue(vb
->vb2_queue
);
130 struct ipu_isys_video
*av
= ipu_isys_queue_to_video(aq
);
132 dev_dbg(&av
->isys
->adev
->dev
, "buffer: %s: %s\n", av
->vdev
.name
,
136 return aq
->buf_cleanup(vb
);
/*
 * Requeue every buffer on @bl to either the per-queue "active" or
 * "incoming" list, selected by @op_flags (the two flags are mutually
 * exclusive — see the WARN_ON). Video buffers may additionally be
 * completed toward videobuf2 with @state when FL_SET_STATE is set;
 * short-packet (CSI-2 metadata) buffers go onto the pipeline's own
 * short-packet lists under short_packet_queue_lock instead.
 */
140 * Queue a buffer list back to incoming or active queues. The buffers
141 * are removed from the buffer list.
143 void ipu_isys_buffer_list_queue(struct ipu_isys_buffer_list
*bl
,
144 unsigned long op_flags
,
145 enum vb2_buffer_state state
)
147 struct ipu_isys_buffer
*ib
, *ib_safe
;
155 WARN_ON(op_flags
& IPU_ISYS_BUFFER_LIST_FL_ACTIVE
&&
156 op_flags
& IPU_ISYS_BUFFER_LIST_FL_INCOMING
);
158 list_for_each_entry_safe(ib
, ib_safe
, &bl
->head
, head
) {
159 struct ipu_isys_video
*av
;
161 if (ib
->type
== IPU_ISYS_VIDEO_BUFFER
) {
162 struct vb2_buffer
*vb
=
163 ipu_isys_buffer_to_vb2_buffer(ib
);
164 struct ipu_isys_queue
*aq
=
165 vb2_queue_to_ipu_isys_queue(vb
->vb2_queue
);
167 av
= ipu_isys_queue_to_video(aq
);
168 spin_lock_irqsave(&aq
->lock
, flags
);
170 if (op_flags
& IPU_ISYS_BUFFER_LIST_FL_ACTIVE
)
171 list_add(&ib
->head
, &aq
->active
);
172 else if (op_flags
& IPU_ISYS_BUFFER_LIST_FL_INCOMING
)
173 list_add_tail(&ib
->head
, &aq
->incoming
);
174 spin_unlock_irqrestore(&aq
->lock
, flags
);
176 if (op_flags
& IPU_ISYS_BUFFER_LIST_FL_SET_STATE
)
177 vb2_buffer_done(vb
, state
);
178 } else if (ib
->type
== IPU_ISYS_SHORT_PACKET_BUFFER
) {
179 struct ipu_isys_private_buffer
*pb
=
180 ipu_isys_buffer_to_private_buffer(ib
);
181 struct ipu_isys_pipeline
*ip
= pb
->ip
;
183 av
= container_of(ip
, struct ipu_isys_video
, ip
);
184 spin_lock_irqsave(&ip
->short_packet_queue_lock
, flags
);
186 if (op_flags
& IPU_ISYS_BUFFER_LIST_FL_ACTIVE
)
187 list_add(&ib
->head
, &ip
->short_packet_active
);
188 else if (op_flags
& IPU_ISYS_BUFFER_LIST_FL_INCOMING
)
189 list_add(&ib
->head
, &ip
->short_packet_incoming
);
190 spin_unlock_irqrestore(&ip
->short_packet_queue_lock
,
198 dev_dbg(&av
->isys
->adev
->dev
,
199 "queue buf list %p flags %lx, s %d, %d bufs\n",
200 bl
, op_flags
, state
, bl
->nbufs
);
/*
 * Cleanup path when firmware stream-on fails after buffers were already
 * moved to the active lists: walk every queue in the pipeline and either
 * push active buffers back to the driver's incoming list (queue already
 * streaming) or complete them back to videobuf2 as QUEUED (queue not yet
 * streaming). Caller must hold the pipeline video node's mutex
 * (lockdep_assert_held below).
 */
211 * flush_firmware_streamon_fail() - Flush in cases where requests may
212 * have been queued to firmware and the *firmware streamon fails for a
215 static void flush_firmware_streamon_fail(struct ipu_isys_pipeline
*ip
)
217 struct ipu_isys_video
*pipe_av
=
218 container_of(ip
, struct ipu_isys_video
, ip
);
219 struct ipu_isys_queue
*aq
;
222 lockdep_assert_held(&pipe_av
->mutex
);
224 list_for_each_entry(aq
, &ip
->queues
, node
) {
225 struct ipu_isys_video
*av
= ipu_isys_queue_to_video(aq
);
226 struct ipu_isys_buffer
*ib
, *ib_safe
;
228 spin_lock_irqsave(&aq
->lock
, flags
);
229 list_for_each_entry_safe(ib
, ib_safe
, &aq
->active
, head
) {
230 struct vb2_buffer
*vb
=
231 ipu_isys_buffer_to_vb2_buffer(ib
);
235 dev_dbg(&av
->isys
->adev
->dev
,
236 "%s: queue buffer %u back to incoming\n",
239 /* Queue already streaming, return to driver. */
240 list_add(&ib
->head
, &aq
->incoming
);
243 /* Queue not yet streaming, return to user. */
244 dev_dbg(&av
->isys
->adev
->dev
,
245 "%s: return %u back to videobuf2\n",
248 vb2_buffer_done(ipu_isys_buffer_to_vb2_buffer(ib
),
249 VB2_BUF_STATE_QUEUED
);
251 spin_unlock_irqrestore(&aq
->lock
, flags
);
/*
 * Build a frame's worth of buffers into @bl: take the oldest incoming
 * buffer from every queue in the pipeline, run each queue's optional
 * prepare_frame_buff_set hook, and — for interlaced capture with the
 * short-packet source set to the CSI-2 receiver — append a short-packet
 * buffer as well. On failure the already-collected buffers are pushed back
 * to their incoming lists via ipu_isys_buffer_list_queue.
 */
256 * Attempt obtaining a buffer list from the incoming queues, a list of
257 * buffers that contains one entry from each video buffer queue. If
258 * all queues have no buffers, the buffers that were already dequeued
259 * are returned to their queues.
261 static int buffer_list_get(struct ipu_isys_pipeline
*ip
,
262 struct ipu_isys_buffer_list
*bl
)
264 struct ipu_isys_queue
*aq
;
265 struct ipu_isys_buffer
*ib
;
270 INIT_LIST_HEAD(&bl
->head
);
272 list_for_each_entry(aq
, &ip
->queues
, node
) {
273 struct ipu_isys_buffer
*ib
;
275 spin_lock_irqsave(&aq
->lock
, flags
);
276 if (list_empty(&aq
->incoming
)) {
277 spin_unlock_irqrestore(&aq
->lock
, flags
);
282 ib
= list_last_entry(&aq
->incoming
,
283 struct ipu_isys_buffer
, head
);
285 spin_unlock_irqrestore(&aq
->lock
, flags
);
290 dev_dbg(&ip
->isys
->adev
->dev
, "buffer: %s: buffer %u\n",
291 ipu_isys_queue_to_video(aq
)->vdev
.name
,
292 ipu_isys_buffer_to_vb2_buffer(ib
)->index
295 list_add(&ib
->head
, &bl
->head
);
296 spin_unlock_irqrestore(&aq
->lock
, flags
);
301 list_for_each_entry(ib
, &bl
->head
, head
) {
302 struct vb2_buffer
*vb
= ipu_isys_buffer_to_vb2_buffer(ib
);
304 aq
= vb2_queue_to_ipu_isys_queue(vb
->vb2_queue
);
305 if (aq
->prepare_frame_buff_set
)
306 aq
->prepare_frame_buff_set(vb
);
309 /* Get short packet buffer. */
310 if (ip
->interlaced
&& ip
->isys
->short_packet_source
==
311 IPU_ISYS_SHORT_PACKET_FROM_RECEIVER
) {
312 ib
= ipu_isys_csi2_get_short_packet_buffer(ip
, bl
);
315 dev_err(&ip
->isys
->adev
->dev
,
316 "No more short packet buffers. Driver bug?");
323 dev_dbg(&ip
->isys
->adev
->dev
, "get buffer list %p, %u buffers\n", bl
,
328 if (!list_empty(&bl
->head
))
329 ipu_isys_buffer_list_queue(bl
,
330 IPU_ISYS_BUFFER_LIST_FL_INCOMING
, 0);
/*
 * Fill one firmware output pin in @set from @vb: the DMA address of plane
 * 0 and the output buffer id, using the queue's fw_output pin index.
 * The .compress field is also set — presumably gated on a compression
 * condition lost to extraction; confirm against the full file.
 */
335 ipu_isys_buffer_to_fw_frame_buff_pin(struct vb2_buffer
*vb
,
336 struct ipu_fw_isys_frame_buff_set_abi
*set
)
338 struct ipu_isys_queue
*aq
= vb2_queue_to_ipu_isys_queue(vb
->vb2_queue
);
339 struct ipu_isys_video
*av
= container_of(aq
, struct ipu_isys_video
, aq
);
342 set
->output_pins
[aq
->fw_output
].compress
= 1;
344 set
->output_pins
[aq
->fw_output
].addr
=
345 vb2_dma_contig_plane_dma_addr(vb
, 0);
346 set
->output_pins
[aq
->fw_output
].out_buf_id
=
/*
 * Translate @bl into the firmware ABI frame-buffer set @set: configure the
 * IRQ/response flags (SOF on, EOF off; capture ack/done responses are
 * suppressed for progressive streams once the sequence counter passes
 * IPU_ISYS_FRAME_NUM_THRESHOLD to cut interrupt load), then fill one
 * output pin per video buffer via the queue's fill_frame_buff_set_pin
 * hook, and the pipeline's short-packet pin for metadata buffers.
 * The buffer list itself is not modified.
 */
351 * Convert a buffer list to a isys fw ABI framebuffer set. The
352 * buffer list is not modified.
354 #define IPU_ISYS_FRAME_NUM_THRESHOLD (30)
356 ipu_isys_buffer_to_fw_frame_buff(struct ipu_fw_isys_frame_buff_set_abi
*set
,
357 struct ipu_isys_pipeline
*ip
,
358 struct ipu_isys_buffer_list
*bl
)
360 struct ipu_isys_buffer
*ib
;
364 set
->send_irq_sof
= 1;
365 set
->send_resp_sof
= 1;
366 set
->send_irq_eof
= 0;
367 set
->send_resp_eof
= 0;
370 set
->send_irq_capture_ack
= 0;
372 set
->send_irq_capture_ack
= 1;
373 set
->send_irq_capture_done
= 0;
375 set
->send_resp_capture_ack
= 1;
376 set
->send_resp_capture_done
= 1;
377 if (!ip
->interlaced
&&
378 atomic_read(&ip
->sequence
) >= IPU_ISYS_FRAME_NUM_THRESHOLD
) {
379 set
->send_resp_capture_ack
= 0;
380 set
->send_resp_capture_done
= 0;
383 list_for_each_entry(ib
, &bl
->head
, head
) {
384 if (ib
->type
== IPU_ISYS_VIDEO_BUFFER
) {
385 struct vb2_buffer
*vb
=
386 ipu_isys_buffer_to_vb2_buffer(ib
);
387 struct ipu_isys_queue
*aq
=
388 vb2_queue_to_ipu_isys_queue(vb
->vb2_queue
);
390 if (aq
->fill_frame_buff_set_pin
)
391 aq
->fill_frame_buff_set_pin(vb
, set
);
392 } else if (ib
->type
== IPU_ISYS_SHORT_PACKET_BUFFER
) {
393 struct ipu_isys_private_buffer
*pb
=
394 ipu_isys_buffer_to_private_buffer(ib
);
395 struct ipu_fw_isys_output_pin_payload_abi
*output_pin
=
396 &set
->output_pins
[ip
->short_packet_output_pin
];
398 output_pin
->addr
= pb
->dma_addr
;
399 output_pin
->out_buf_id
= pb
->index
+ 1;
/*
 * Kick off actual streaming under the isys stream_mutex, then drain any
 * further complete buffer lists to firmware as STREAM_CAPTURE commands
 * (buffers are moved to the active lists BEFORE the command is sent). On
 * the error path the collected buffers are returned as ERROR/QUEUED
 * (depending on @error) and flush_firmware_streamon_fail() cleans up what
 * may already have reached firmware.
 */
406 /* Start streaming for real. The buffer list must be available. */
407 static int ipu_isys_stream_start(struct ipu_isys_pipeline
*ip
,
408 struct ipu_isys_buffer_list
*bl
, bool error
)
410 struct ipu_isys_video
*pipe_av
=
411 container_of(ip
, struct ipu_isys_video
, ip
);
412 struct ipu_isys_buffer_list __bl
;
415 mutex_lock(&pipe_av
->isys
->stream_mutex
);
417 rval
= ipu_isys_video_set_streaming(pipe_av
, 1, bl
);
419 mutex_unlock(&pipe_av
->isys
->stream_mutex
);
425 mutex_unlock(&pipe_av
->isys
->stream_mutex
);
430 struct ipu_fw_isys_frame_buff_set_abi
*buf
= NULL
;
431 struct isys_fw_msgs
*msg
;
432 enum ipu_fw_isys_send_type send_type
=
433 IPU_FW_ISYS_SEND_TYPE_STREAM_CAPTURE
;
435 rval
= buffer_list_get(ip
, bl
);
441 msg
= ipu_get_fw_msg_buf(ip
);
445 buf
= to_frame_msg_buf(msg
);
447 ipu_isys_buffer_to_fw_frame_buff(buf
, ip
, bl
);
449 ipu_fw_isys_dump_frame_buff_set(&pipe_av
->isys
->adev
->dev
, buf
,
452 ipu_isys_buffer_list_queue(bl
,
453 IPU_ISYS_BUFFER_LIST_FL_ACTIVE
, 0);
455 rval
= ipu_fw_isys_complex_cmd(pipe_av
->isys
,
457 buf
, to_dma_addr(msg
),
460 ipu_put_fw_mgs_buf(pipe_av
->isys
, (uintptr_t)buf
);
461 } while (!WARN_ON(rval
));
467 ipu_isys_buffer_list_queue(bl
,
468 IPU_ISYS_BUFFER_LIST_FL_INCOMING
|
470 IPU_ISYS_BUFFER_LIST_FL_SET_STATE
:
472 error
? VB2_BUF_STATE_ERROR
:
473 VB2_BUF_STATE_QUEUED
);
474 flush_firmware_streamon_fail(ip
);
/*
 * Core buffer-queue path. Adds @vb to this queue's incoming list; if the
 * whole pipeline is not yet streaming (or @force is not set and not all
 * queues are streaming) the buffer simply waits there. Otherwise a
 * complete buffer list is gathered (buffer_list_get), converted to a
 * firmware frame-buffer set, moved to the active lists, and handed to
 * firmware as a STREAM_CAPTURE command — or used to start the stream via
 * ipu_isys_stream_start when this is the first complete set.
 * Lock ordering: drops av->mutex and takes pipe_av->mutex for the
 * pipeline work, restoring av->mutex on exit.
 */
479 static void __buf_queue(struct vb2_buffer
*vb
, bool force
)
481 struct ipu_isys_queue
*aq
= vb2_queue_to_ipu_isys_queue(vb
->vb2_queue
);
482 struct ipu_isys_video
*av
= ipu_isys_queue_to_video(aq
);
483 struct ipu_isys_buffer
*ib
= vb2_buffer_to_ipu_isys_buffer(vb
);
484 struct ipu_isys_pipeline
*ip
=
485 to_ipu_isys_pipeline(av
->vdev
.entity
.pipe
);
486 struct ipu_isys_buffer_list bl
;
488 struct ipu_fw_isys_frame_buff_set_abi
*buf
= NULL
;
489 struct isys_fw_msgs
*msg
;
491 struct ipu_isys_video
*pipe_av
=
492 container_of(ip
, struct ipu_isys_video
, ip
);
497 dev_dbg(&av
->isys
->adev
->dev
, "buffer: %s: buf_queue %u\n",
502 for (i
= 0; i
< vb
->num_planes
; i
++)
503 dev_dbg(&av
->isys
->adev
->dev
, "iova: plane %u iova 0x%x\n", i
,
504 (u32
)vb2_dma_contig_plane_dma_addr(vb
, i
));
506 spin_lock_irqsave(&aq
->lock
, flags
);
507 list_add(&ib
->head
, &aq
->incoming
);
508 spin_unlock_irqrestore(&aq
->lock
, flags
);
513 if (!pipe_av
|| !vb
->vb2_queue
->streaming
) {
514 dev_dbg(&av
->isys
->adev
->dev
,
515 "not pipe_av set, adding to incoming\n");
519 mutex_unlock(&av
->mutex
);
520 mutex_lock(&pipe_av
->mutex
);
522 if (!force
&& ip
->nr_streaming
!= ip
->nr_queues
) {
523 dev_dbg(&av
->isys
->adev
->dev
,
524 "not streaming yet, adding to incoming\n");
529 * We just put one buffer to the incoming list of this queue
530 * (above). Let's see whether all queues in the pipeline would
533 rval
= buffer_list_get(ip
, &bl
);
535 if (rval
== -EINVAL
) {
536 dev_err(&av
->isys
->adev
->dev
,
537 "error: should not happen\n");
540 dev_dbg(&av
->isys
->adev
->dev
,
541 "not enough buffers available\n");
546 msg
= ipu_get_fw_msg_buf(ip
);
551 buf
= to_frame_msg_buf(msg
);
553 ipu_isys_buffer_to_fw_frame_buff(buf
, ip
, &bl
);
555 ipu_fw_isys_dump_frame_buff_set(&pipe_av
->isys
->adev
->dev
, buf
,
558 if (!ip
->streaming
) {
559 dev_dbg(&av
->isys
->adev
->dev
,
560 "got a buffer to start streaming!\n");
561 rval
= ipu_isys_stream_start(ip
, &bl
, true);
563 dev_err(&av
->isys
->adev
->dev
,
564 "stream start failed.\n");
569 * We must queue the buffers in the buffer list to the
570 * appropriate video buffer queues BEFORE passing them to the
571 * firmware since we could get a buffer event back before we
572 * have queued them ourselves to the active queue.
574 ipu_isys_buffer_list_queue(&bl
, IPU_ISYS_BUFFER_LIST_FL_ACTIVE
, 0);
576 rval
= ipu_fw_isys_complex_cmd(pipe_av
->isys
,
578 buf
, to_dma_addr(msg
),
580 IPU_FW_ISYS_SEND_TYPE_STREAM_CAPTURE
);
581 ipu_put_fw_mgs_buf(pipe_av
->isys
, (uintptr_t)buf
);
582 if (!WARN_ON(rval
< 0))
583 dev_dbg(&av
->isys
->adev
->dev
, "queued buffer\n");
586 mutex_unlock(&pipe_av
->mutex
);
587 mutex_lock(&av
->mutex
);
/* vb2 .buf_queue callback: thin wrapper over __buf_queue without force. */
590 static void buf_queue(struct vb2_buffer
*vb
)
592 __buf_queue(vb
, false);
/*
 * Validate that the format on the remote subdev pad feeding this video
 * node matches the node's configured format: width/height, field order,
 * and media bus code must all agree, otherwise a debug message is logged
 * and the mismatch is reported. Returns 0 on a matching link.
 */
595 int ipu_isys_link_fmt_validate(struct ipu_isys_queue
*aq
)
597 struct ipu_isys_video
*av
= ipu_isys_queue_to_video(aq
);
598 struct v4l2_subdev_format fmt
= { 0 };
599 struct media_pad
*pad
= media_entity_remote_pad(av
->vdev
.entity
.pads
);
600 struct v4l2_subdev
*sd
;
604 dev_dbg(&av
->isys
->adev
->dev
,
605 "video node %s pad not connected\n", av
->vdev
.name
);
609 sd
= media_entity_to_v4l2_subdev(pad
->entity
);
611 fmt
.which
= V4L2_SUBDEV_FORMAT_ACTIVE
;
612 fmt
.pad
= pad
->index
;
613 rval
= v4l2_subdev_call(sd
, pad
, get_fmt
, NULL
, &fmt
);
617 if (fmt
.format
.width
!= av
->mpix
.width
||
618 fmt
.format
.height
!= av
->mpix
.height
) {
619 dev_dbg(&av
->isys
->adev
->dev
,
620 "wrong width or height %ux%u (%ux%u expected)\n",
621 av
->mpix
.width
, av
->mpix
.height
,
622 fmt
.format
.width
, fmt
.format
.height
);
626 if (fmt
.format
.field
!= av
->mpix
.field
) {
627 dev_dbg(&av
->isys
->adev
->dev
,
628 "wrong field value 0x%8.8x (0x%8.8x expected)\n",
629 av
->mpix
.field
, fmt
.format
.field
);
633 if (fmt
.format
.code
!= av
->pfmt
->code
) {
634 dev_dbg(&av
->isys
->adev
->dev
,
635 "wrong media bus code 0x%8.8x (0x%8.8x expected)\n",
636 av
->pfmt
->code
, fmt
.format
.code
);
/*
 * Complete all buffers still on the incoming and active lists back to
 * videobuf2 with @state. Buffers left on the active list indicate firmware
 * or hardware failed to return them, so reset_needed is flagged on the
 * isys device (under isys->mutex). The spinlock is dropped around each
 * vb2_buffer_done call.
 */
643 /* Return buffers back to videobuf2. */
644 static void return_buffers(struct ipu_isys_queue
*aq
,
645 enum vb2_buffer_state state
)
647 struct ipu_isys_video
*av
= ipu_isys_queue_to_video(aq
);
648 int reset_needed
= 0;
651 spin_lock_irqsave(&aq
->lock
, flags
);
652 while (!list_empty(&aq
->incoming
)) {
653 struct ipu_isys_buffer
*ib
= list_first_entry(&aq
->incoming
,
657 struct vb2_buffer
*vb
= ipu_isys_buffer_to_vb2_buffer(ib
);
660 spin_unlock_irqrestore(&aq
->lock
, flags
);
662 vb2_buffer_done(vb
, state
);
664 dev_dbg(&av
->isys
->adev
->dev
,
665 "%s: stop_streaming incoming %u\n",
666 ipu_isys_queue_to_video(vb2_queue_to_ipu_isys_queue
667 (vb
->vb2_queue
))->vdev
.name
,
670 spin_lock_irqsave(&aq
->lock
, flags
);
674 * Something went wrong (FW crash / HW hang / not all buffers
675 * returned from isys) if there are still buffers queued in active
676 * queue. We have to clean up places a bit.
678 while (!list_empty(&aq
->active
)) {
679 struct ipu_isys_buffer
*ib
= list_first_entry(&aq
->active
,
683 struct vb2_buffer
*vb
= ipu_isys_buffer_to_vb2_buffer(ib
);
686 spin_unlock_irqrestore(&aq
->lock
, flags
);
688 vb2_buffer_done(vb
, state
);
690 dev_warn(&av
->isys
->adev
->dev
, "%s: cleaning active queue %u\n",
691 ipu_isys_queue_to_video(vb2_queue_to_ipu_isys_queue
692 (vb
->vb2_queue
))->vdev
.name
,
695 spin_lock_irqsave(&aq
->lock
, flags
);
699 spin_unlock_irqrestore(&aq
->lock
, flags
);
702 mutex_lock(&av
->isys
->mutex
);
703 av
->isys
->reset_needed
= true;
704 mutex_unlock(&av
->isys
->mutex
);
/*
 * vb2 .start_streaming callback: prepare the pipeline on first use
 * (ipu_isys_video_prepare_streaming under stream_mutex), validate the
 * media link format, register this queue with the pipeline, and — once
 * all queues in the pipeline are streaming — gather a buffer list and
 * start the stream via ipu_isys_stream_start. Error paths unwind through
 * out_stream_start / out_unprepare_streaming and return all queued
 * buffers to videobuf2 as QUEUED.
 */
708 static int start_streaming(struct vb2_queue
*q
, unsigned int count
)
710 struct ipu_isys_queue
*aq
= vb2_queue_to_ipu_isys_queue(q
);
711 struct ipu_isys_video
*av
= ipu_isys_queue_to_video(aq
);
712 struct ipu_isys_video
*pipe_av
;
713 struct ipu_isys_pipeline
*ip
;
714 struct ipu_isys_buffer_list __bl
, *bl
= NULL
;
718 dev_dbg(&av
->isys
->adev
->dev
,
719 "stream: %s: width %u, height %u, css pixelformat %u\n",
720 av
->vdev
.name
, av
->mpix
.width
, av
->mpix
.height
,
721 av
->pfmt
->css_pixelformat
);
723 mutex_lock(&av
->isys
->stream_mutex
);
725 first
= !av
->vdev
.entity
.pipe
;
728 rval
= ipu_isys_video_prepare_streaming(av
, 1);
730 goto out_return_buffers
;
733 mutex_unlock(&av
->isys
->stream_mutex
);
735 rval
= aq
->link_fmt_validate(aq
);
737 dev_dbg(&av
->isys
->adev
->dev
,
738 "%s: link format validation failed (%d)\n",
739 av
->vdev
.name
, rval
);
740 goto out_unprepare_streaming
;
743 ip
= to_ipu_isys_pipeline(av
->vdev
.entity
.pipe
);
744 pipe_av
= container_of(ip
, struct ipu_isys_video
, ip
);
745 mutex_unlock(&av
->mutex
);
747 mutex_lock(&pipe_av
->mutex
);
749 dev_dbg(&av
->isys
->adev
->dev
, "queue %u of %u\n", ip
->nr_streaming
,
751 list_add(&aq
->node
, &ip
->queues
);
752 if (ip
->nr_streaming
!= ip
->nr_queues
)
755 if (list_empty(&av
->isys
->requests
)) {
757 rval
= buffer_list_get(ip
, bl
);
758 if (rval
== -EINVAL
) {
759 goto out_stream_start
;
760 } else if (rval
< 0) {
761 dev_dbg(&av
->isys
->adev
->dev
,
762 "no request available, postponing streamon\n");
767 rval
= ipu_isys_stream_start(ip
, bl
, false);
769 goto out_stream_start
;
772 mutex_unlock(&pipe_av
->mutex
);
773 mutex_lock(&av
->mutex
);
780 mutex_unlock(&pipe_av
->mutex
);
781 mutex_lock(&av
->mutex
);
783 out_unprepare_streaming
:
784 mutex_lock(&av
->isys
->stream_mutex
);
786 ipu_isys_video_prepare_streaming(av
, 0);
789 mutex_unlock(&av
->isys
->stream_mutex
);
790 return_buffers(aq
, VB2_BUF_STATE_QUEUED
);
/*
 * vb2 .stop_streaming callback: under the pipeline mutex and the isys
 * stream_mutex, stop the firmware stream when this is the last queue in
 * a fully streaming pipeline, unprepare the pipeline when this is the
 * last streaming queue, then return all remaining buffers as ERROR.
 */
795 static void stop_streaming(struct vb2_queue
*q
)
797 struct ipu_isys_queue
*aq
= vb2_queue_to_ipu_isys_queue(q
);
798 struct ipu_isys_video
*av
= ipu_isys_queue_to_video(aq
);
799 struct ipu_isys_pipeline
*ip
=
800 to_ipu_isys_pipeline(av
->vdev
.entity
.pipe
);
801 struct ipu_isys_video
*pipe_av
=
802 container_of(ip
, struct ipu_isys_video
, ip
);
805 mutex_unlock(&av
->mutex
);
806 mutex_lock(&pipe_av
->mutex
);
809 mutex_lock(&av
->isys
->stream_mutex
);
810 if (ip
->nr_streaming
== ip
->nr_queues
&& ip
->streaming
)
811 ipu_isys_video_set_streaming(av
, 0, NULL
);
812 if (ip
->nr_streaming
== 1)
813 ipu_isys_video_prepare_streaming(av
, 0);
814 mutex_unlock(&av
->isys
->stream_mutex
);
821 mutex_unlock(&pipe_av
->mutex
);
822 mutex_lock(&av
->mutex
);
825 return_buffers(aq
, VB2_BUF_STATE_ERROR
);
/*
 * Map a firmware response timestamp to a start-of-frame sequence number
 * by searching the pipeline's recent SOF ring (ip->seq). If the platform
 * provides no TSC (timestamp is invalid), fall back to the pipeline's
 * sequence counter. Logs the full ring when no match is found.
 */
829 get_sof_sequence_by_timestamp(struct ipu_isys_pipeline
*ip
,
830 struct ipu_fw_isys_resp_info_abi
*info
)
832 struct ipu_isys
*isys
=
833 container_of(ip
, struct ipu_isys_video
, ip
)->isys
;
834 u64 time
= (u64
)info
->timestamp
[1] << 32 | info
->timestamp
[0];
838 * The timestamp is invalid as no TSC in some FPGA platform,
839 * so get the sequence from pipeline directly in this case.
842 return atomic_read(&ip
->sequence
) - 1;
843 for (i
= 0; i
< IPU_ISYS_MAX_PARALLEL_SOF
; i
++)
844 if (time
== ip
->seq
[i
].timestamp
) {
845 dev_dbg(&isys
->adev
->dev
,
846 "sof: using seq nr %u for ts 0x%16.16llx\n",
847 ip
->seq
[i
].sequence
, time
);
848 return ip
->seq
[i
].sequence
;
851 dev_dbg(&isys
->adev
->dev
, "SOF: looking for 0x%16.16llx\n", time
);
852 for (i
= 0; i
< IPU_ISYS_MAX_PARALLEL_SOF
; i
++)
853 dev_dbg(&isys
->adev
->dev
,
854 "SOF: sequence %u, timestamp value 0x%16.16llx\n",
855 ip
->seq
[i
].sequence
, ip
->seq
[i
].timestamp
);
856 dev_dbg(&isys
->adev
->dev
, "SOF sequence number not found\n");
/*
 * Compute, in nanoseconds, how long ago the SOF occurred: read the
 * buttress TSC now and subtract the firmware-reported 64-bit timestamp,
 * converting the tick delta with ipu_buttress_tsc_ticks_to_ns.
 */
861 static u64
get_sof_ns_delta(struct ipu_isys_video
*av
,
862 struct ipu_fw_isys_resp_info_abi
*info
)
864 struct ipu_bus_device
*adev
= to_ipu_bus_device(&av
->isys
->adev
->dev
);
865 struct ipu_device
*isp
= adev
->isp
;
868 if (!ipu_buttress_tsc_read(isp
, &tsc_now
))
870 ((u64
)info
->timestamp
[1] << 32 | info
->timestamp
[0]);
874 return ipu_buttress_tsc_ticks_to_ns(delta
);
/*
 * Stamp @ib's vb2 buffer with a timestamp and sequence number. The
 * timestamp is taken from the wall or monotonic clock (wall_clock_ts_on)
 * minus the SOF age from get_sof_ns_delta; the sequence comes from
 * get_sof_sequence_by_timestamp, with a fallback path (visible below)
 * that increments the pipeline's own counter.
 */
878 ipu_isys_buf_calc_sequence_time(struct ipu_isys_buffer
*ib
,
879 struct ipu_fw_isys_resp_info_abi
*info
)
881 struct vb2_buffer
*vb
= ipu_isys_buffer_to_vb2_buffer(ib
);
882 struct vb2_v4l2_buffer
*vbuf
= to_vb2_v4l2_buffer(vb
);
883 struct ipu_isys_queue
*aq
= vb2_queue_to_ipu_isys_queue(vb
->vb2_queue
);
884 struct ipu_isys_video
*av
= ipu_isys_queue_to_video(aq
);
885 struct device
*dev
= &av
->isys
->adev
->dev
;
886 struct ipu_isys_pipeline
*ip
=
887 to_ipu_isys_pipeline(av
->vdev
.entity
.pipe
);
892 ns
= (wall_clock_ts_on
) ? ktime_get_real_ns() : ktime_get_ns();
893 ns
-= get_sof_ns_delta(av
, info
);
894 sequence
= get_sof_sequence_by_timestamp(ip
, info
);
896 ns
= ((wall_clock_ts_on
) ? ktime_get_real_ns() :
898 sequence
= (atomic_inc_return(&ip
->sequence
) - 1)
902 vbuf
->vb2_buf
.timestamp
= ns
;
903 vbuf
->sequence
= sequence
;
905 dev_dbg(dev
, "buf: %s: buffer done, CPU-timestamp:%lld, sequence:%d\n",
906 av
->vdev
.name
, ktime_get_ns(), sequence
);
907 dev_dbg(dev
, "index:%d, vbuf timestamp:%lld, endl\n",
908 vb
->index
, vbuf
->vb2_buf
.timestamp
);
/*
 * Complete a buffer to videobuf2: with ERROR state if the STR2MMIO error
 * flag was set by the response handler (the flag is cleared here so the
 * error is reported exactly once at dequeue), DONE otherwise.
 */
911 void ipu_isys_queue_buf_done(struct ipu_isys_buffer
*ib
)
913 struct vb2_buffer
*vb
= ipu_isys_buffer_to_vb2_buffer(ib
);
915 if (atomic_read(&ib
->str2mmio_flag
)) {
916 vb2_buffer_done(vb
, VB2_BUF_STATE_ERROR
);
918 * Operation on buffer is ended with error and will be reported
919 * to the userspace when it is de-queued
921 atomic_set(&ib
->str2mmio_flag
, 0);
923 vb2_buffer_done(vb
, VB2_BUF_STATE_DONE
);
/*
 * Firmware "pin data ready" response handler: find the active buffer
 * whose plane-0 DMA address matches info->pin.addr (searching the active
 * list newest-first), record a STR2MMIO hardware error on it if reported,
 * stamp its timestamp/sequence, and either complete it immediately or —
 * for interlaced streams — park it on pending_interlaced_bufs until the
 * field is known at capture_done time. Warns if no buffer matches.
 */
927 void ipu_isys_queue_buf_ready(struct ipu_isys_pipeline
*ip
,
928 struct ipu_fw_isys_resp_info_abi
*info
)
930 struct ipu_isys
*isys
=
931 container_of(ip
, struct ipu_isys_video
, ip
)->isys
;
932 struct ipu_isys_queue
*aq
= ip
->output_pins
[info
->pin_id
].aq
;
933 struct ipu_isys_buffer
*ib
;
934 struct vb2_buffer
*vb
;
937 struct vb2_v4l2_buffer
*buf
;
939 dev_dbg(&isys
->adev
->dev
, "buffer: %s: received buffer %8.8x\n",
940 ipu_isys_queue_to_video(aq
)->vdev
.name
, info
->pin
.addr
);
942 spin_lock_irqsave(&aq
->lock
, flags
);
943 if (list_empty(&aq
->active
)) {
944 spin_unlock_irqrestore(&aq
->lock
, flags
);
945 dev_err(&isys
->adev
->dev
, "active queue empty\n");
949 list_for_each_entry_reverse(ib
, &aq
->active
, head
) {
952 vb
= ipu_isys_buffer_to_vb2_buffer(ib
);
953 addr
= vb2_dma_contig_plane_dma_addr(vb
, 0);
955 if (info
->pin
.addr
!= addr
) {
957 dev_err(&isys
->adev
->dev
,
958 "WARN: buffer address %pad expected!\n",
964 if (info
->error_info
.error
==
965 IPU_FW_ISYS_ERROR_HW_REPORTED_STR2MMIO
) {
967 * Check for error message:
968 * 'IPU_FW_ISYS_ERROR_HW_REPORTED_STR2MMIO'
970 atomic_set(&ib
->str2mmio_flag
, 1);
972 dev_dbg(&isys
->adev
->dev
, "buffer: found buffer %pad\n", &addr
);
974 buf
= to_vb2_v4l2_buffer(vb
);
975 buf
->field
= V4L2_FIELD_NONE
;
978 spin_unlock_irqrestore(&aq
->lock
, flags
);
980 ipu_isys_buf_calc_sequence_time(ib
, info
);
983 * For interlaced buffers, the notification to user space
984 * is postponed to capture_done event since the field
985 * information is available only at that time.
987 if (ip
->interlaced
) {
988 spin_lock_irqsave(&ip
->short_packet_queue_lock
, flags
);
989 list_add(&ib
->head
, &ip
->pending_interlaced_bufs
);
990 spin_unlock_irqrestore(&ip
->short_packet_queue_lock
,
993 ipu_isys_queue_buf_done(ib
);
999 dev_err(&isys
->adev
->dev
,
1000 "WARNING: cannot find a matching video buffer!\n");
1002 spin_unlock_irqrestore(&aq
->lock
, flags
);
/*
 * Firmware short-packet (CSI-2 metadata) ready handler: derive the
 * current interlaced field from the packet timestamp and cache it in
 * ip->cur_field under short_packet_queue_lock.
 */
1006 ipu_isys_queue_short_packet_ready(struct ipu_isys_pipeline
*ip
,
1007 struct ipu_fw_isys_resp_info_abi
*info
)
1009 struct ipu_isys
*isys
=
1010 container_of(ip
, struct ipu_isys_video
, ip
)->isys
;
1011 unsigned long flags
;
1013 dev_dbg(&isys
->adev
->dev
, "receive short packet buffer %8.8x\n",
1015 spin_lock_irqsave(&ip
->short_packet_queue_lock
, flags
);
1016 ip
->cur_field
= ipu_isys_csi2_get_current_field(ip
, info
->timestamp
);
1017 spin_unlock_irqrestore(&ip
->short_packet_queue_lock
, flags
);
/* videobuf2 operation table wiring the callbacks defined above. */
1020 struct vb2_ops ipu_isys_queue_ops
= {
1021 .queue_setup
= queue_setup
,
1022 .wait_prepare
= ipu_isys_queue_unlock
,
1023 .wait_finish
= ipu_isys_queue_lock
,
1024 .buf_init
= buf_init
,
1025 .buf_prepare
= buf_prepare
,
1026 .buf_finish
= buf_finish
,
1027 .buf_cleanup
= buf_cleanup
,
1028 .start_streaming
= start_streaming
,
1029 .stop_streaming
= stop_streaming
,
1030 .buf_queue
= buf_queue
,
/*
 * Initialize an isys vb2 queue: default io_modes, driver private data,
 * ops table, dma-contig memops, and the timestamp flag matching the
 * wall_clock_ts_on module parameter; then vb2_queue_init, DMA device
 * assignment, and the spinlock plus active/incoming list heads.
 */
1033 int ipu_isys_queue_init(struct ipu_isys_queue
*aq
)
1035 struct ipu_isys
*isys
= ipu_isys_queue_to_video(aq
)->isys
;
1038 if (!aq
->vbq
.io_modes
)
1039 aq
->vbq
.io_modes
= VB2_USERPTR
| VB2_MMAP
| VB2_DMABUF
;
1040 aq
->vbq
.drv_priv
= aq
;
1041 aq
->vbq
.ops
= &ipu_isys_queue_ops
;
1042 aq
->vbq
.mem_ops
= &vb2_dma_contig_memops
;
1043 aq
->vbq
.timestamp_flags
= (wall_clock_ts_on
) ?
1044 V4L2_BUF_FLAG_TIMESTAMP_UNKNOWN
: V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC
;
1046 rval
= vb2_queue_init(&aq
->vbq
);
1050 aq
->dev
= &isys
->adev
->dev
;
1051 aq
->vbq
.dev
= &isys
->adev
->dev
;
1052 spin_lock_init(&aq
->lock
);
1053 INIT_LIST_HEAD(&aq
->active
);
1054 INIT_LIST_HEAD(&aq
->incoming
);
/* Release the vb2 queue created by ipu_isys_queue_init. */
1059 void ipu_isys_queue_cleanup(struct ipu_isys_queue
*aq
)
1061 vb2_queue_release(&aq
->vbq
);