media: ti-vpe: cal: Set cal_dmaqueue.pending to NULL when no pending buffer
author    Laurent Pinchart <laurent.pinchart@ideasonboard.com>
          Sun, 6 Dec 2020 23:53:50 +0000 (00:53 +0100)
committer Mauro Carvalho Chehab <mchehab+huawei@kernel.org>
          Mon, 4 Jan 2021 11:13:09 +0000 (12:13 +0100)
When a pending buffer becomes active, the cal_dmaqueue.active field is
updated, but the pending field keeps the same value until a new buffer
becomes pending. This requires handling the special case of
pending == active in different places. Simplify the code by setting the
pending field to NULL when the pending buffer becomes active. Buffers
are now simply moved from queue to pending and from pending to active.

Signed-off-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
Reviewed-by: Benoit Parrot <bparrot@ti.com>
Signed-off-by: Hans Verkuil <hverkuil-cisco@xs4all.nl>
Signed-off-by: Mauro Carvalho Chehab <mchehab+huawei@kernel.org>
drivers/media/platform/ti-vpe/cal-video.c
drivers/media/platform/ti-vpe/cal.c

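For illustration, the sketch below models the buffer hand-off described in the commit message: a buffer moves from the queue to pending when it is programmed into the hardware, and from pending to active at the next frame boundary, at which point pending is reset to NULL. This is a minimal, standalone userspace sketch with hypothetical types and helpers (struct buffer, struct dmaqueue, program_next(), complete_frame()), not the driver's real cal_buffer/cal_dmaqueue structures, locking, or interrupt handlers.

/* Minimal sketch with hypothetical types; not the driver's actual code. */
#include <stdio.h>

struct buffer { int id; };

struct dmaqueue {
	struct buffer *queue[8];	/* toy stand-in for the driver's list */
	unsigned int count;		/* number of queued buffers */
	struct buffer *pending;		/* programmed into hardware, not yet capturing */
	struct buffer *active;		/* currently being written by DMA */
};

/* WDMA-start path: if no buffer is pending, program the next queued one. */
static void program_next(struct dmaqueue *dma)
{
	if (dma->count > 0 && !dma->pending)
		dma->pending = dma->queue[--dma->count];
}

/*
 * WDMA-end path: if a buffer was programmed, the old active buffer is
 * complete; the pending buffer becomes active and pending is cleared.
 * Returns the completed buffer, or NULL for the first frame.
 */
static struct buffer *complete_frame(struct dmaqueue *dma)
{
	struct buffer *done = NULL;

	if (dma->pending) {
		done = dma->active;
		dma->active = dma->pending;
		dma->pending = NULL;
	}

	return done;
}

int main(void)
{
	struct buffer b0 = { .id = 0 }, b1 = { .id = 1 };
	struct dmaqueue dma = { .queue = { &b1, &b0 }, .count = 2 };
	struct buffer *done;

	program_next(&dma);		/* b0: queue -> pending */
	complete_frame(&dma);		/* b0: pending -> active, pending = NULL */
	program_next(&dma);		/* b1: queue -> pending */
	done = complete_frame(&dma);	/* b0 completed, b1: pending -> active */

	if (done)
		printf("buffer %d completed\n", done->id);

	return 0;
}

Because pending is cleared at every hand-off, the interrupt paths no longer need the pending == active special case checked by the old code, which is exactly what the diff below removes.
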
diff --git a/drivers/media/platform/ti-vpe/cal-video.c b/drivers/media/platform/ti-vpe/cal-video.c
index 34dfe38dc9603a332e6da71e101f7daf04544dc9..438447728b46a92b11e0edd842327b3bf84397d2 100644
--- a/drivers/media/platform/ti-vpe/cal-video.c
+++ b/drivers/media/platform/ti-vpe/cal-video.c
@@ -491,12 +491,15 @@ static void cal_release_buffers(struct cal_ctx *ctx,
                vb2_buffer_done(&buf->vb.vb2_buf, state);
        }
 
-       if (ctx->dma.pending != ctx->dma.active)
+       if (ctx->dma.pending) {
                vb2_buffer_done(&ctx->dma.pending->vb.vb2_buf, state);
-       vb2_buffer_done(&ctx->dma.active->vb.vb2_buf, state);
+               ctx->dma.pending = NULL;
+       }
 
-       ctx->dma.active = NULL;
-       ctx->dma.pending = NULL;
+       if (ctx->dma.active) {
+               vb2_buffer_done(&ctx->dma.active->vb.vb2_buf, state);
+               ctx->dma.active = NULL;
+       }
 
        spin_unlock_irq(&ctx->dma.lock);
 }
@@ -510,12 +513,11 @@ static int cal_start_streaming(struct vb2_queue *vq, unsigned int count)
 
        spin_lock_irq(&ctx->dma.lock);
        buf = list_first_entry(&ctx->dma.queue, struct cal_buffer, list);
-       ctx->dma.active = buf;
        ctx->dma.pending = buf;
        list_del(&buf->list);
        spin_unlock_irq(&ctx->dma.lock);
 
-       addr = vb2_dma_contig_plane_dma_addr(&ctx->dma.active->vb.vb2_buf, 0);
+       addr = vb2_dma_contig_plane_dma_addr(&buf->vb.vb2_buf, 0);
        ctx->sequence = 0;
        ctx->dma.state = CAL_DMA_RUNNING;
 
diff --git a/drivers/media/platform/ti-vpe/cal.c b/drivers/media/platform/ti-vpe/cal.c
index 3e0a69bb7fe508b90b7ee3e41e17b445ce1c64f7..547dffcfe68fa5e71592877419c0966b2e25f856 100644
--- a/drivers/media/platform/ti-vpe/cal.c
+++ b/drivers/media/platform/ti-vpe/cal.c
@@ -485,8 +485,7 @@ static inline void cal_irq_wdma_start(struct cal_ctx *ctx)
                 */
                cal_ctx_wr_dma_disable(ctx);
                ctx->dma.state = CAL_DMA_STOP_PENDING;
-       } else if (!list_empty(&ctx->dma.queue) &&
-                  ctx->dma.active == ctx->dma.pending) {
+       } else if (!list_empty(&ctx->dma.queue) && !ctx->dma.pending) {
                /*
                 * Otherwise, if a new buffer is available, queue it to the
                 * hardware.
@@ -519,9 +518,10 @@ static inline void cal_irq_wdma_end(struct cal_ctx *ctx)
        }
 
        /* If a new buffer was queued, complete the current buffer. */
-       if (ctx->dma.active != ctx->dma.pending) {
+       if (ctx->dma.pending) {
                buf = ctx->dma.active;
                ctx->dma.active = ctx->dma.pending;
+               ctx->dma.pending = NULL;
        }
 
        spin_unlock(&ctx->dma.lock);