// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2013 - 2021 Intel Corporation

#include <linux/device.h>
#include <linux/module.h>
#include <linux/version.h>

#include <media/ipu-isys.h>
#include <media/media-entity.h>
#include <media/v4l2-device.h>
#include <media/v4l2-event.h>

#include "ipu.h"
#include "ipu-bus.h"
#include "ipu-buttress.h"
#include "ipu-isys.h"
#include "ipu-isys-subdev.h"
#include "ipu-isys-video.h"
#include "ipu-platform-regs.h"

static const u32 csi2_supported_codes_pad_sink[] = {
	MEDIA_BUS_FMT_Y10_1X10,
	MEDIA_BUS_FMT_RGB565_1X16,
	MEDIA_BUS_FMT_RGB888_1X24,
	MEDIA_BUS_FMT_UYVY8_1X16,
	MEDIA_BUS_FMT_YUYV8_1X16,
	MEDIA_BUS_FMT_YUYV10_1X20,
	MEDIA_BUS_FMT_SBGGR10_1X10,
	MEDIA_BUS_FMT_SGBRG10_1X10,
	MEDIA_BUS_FMT_SGRBG10_1X10,
	MEDIA_BUS_FMT_SRGGB10_1X10,
	MEDIA_BUS_FMT_SBGGR10_DPCM8_1X8,
	MEDIA_BUS_FMT_SGBRG10_DPCM8_1X8,
	MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8,
	MEDIA_BUS_FMT_SRGGB10_DPCM8_1X8,
	MEDIA_BUS_FMT_SBGGR12_1X12,
	MEDIA_BUS_FMT_SGBRG12_1X12,
	MEDIA_BUS_FMT_SGRBG12_1X12,
	MEDIA_BUS_FMT_SRGGB12_1X12,
	MEDIA_BUS_FMT_SBGGR8_1X8,
	MEDIA_BUS_FMT_SGBRG8_1X8,
	MEDIA_BUS_FMT_SGRBG8_1X8,
	MEDIA_BUS_FMT_SRGGB8_1X8,
	0,
};

static const u32 csi2_supported_codes_pad_source[] = {
	MEDIA_BUS_FMT_Y10_1X10,
	MEDIA_BUS_FMT_RGB565_1X16,
	MEDIA_BUS_FMT_RGB888_1X24,
	MEDIA_BUS_FMT_UYVY8_1X16,
	MEDIA_BUS_FMT_YUYV8_1X16,
	MEDIA_BUS_FMT_YUYV10_1X20,
	MEDIA_BUS_FMT_SBGGR10_1X10,
	MEDIA_BUS_FMT_SGBRG10_1X10,
	MEDIA_BUS_FMT_SGRBG10_1X10,
	MEDIA_BUS_FMT_SRGGB10_1X10,
	MEDIA_BUS_FMT_SBGGR12_1X12,
	MEDIA_BUS_FMT_SGBRG12_1X12,
	MEDIA_BUS_FMT_SGRBG12_1X12,
	MEDIA_BUS_FMT_SRGGB12_1X12,
	MEDIA_BUS_FMT_SBGGR8_1X8,
	MEDIA_BUS_FMT_SGBRG8_1X8,
	MEDIA_BUS_FMT_SGRBG8_1X8,
	MEDIA_BUS_FMT_SRGGB8_1X8,
	0,
};

static const u32 *csi2_supported_codes[NR_OF_CSI2_PADS];

static struct v4l2_subdev_internal_ops csi2_sd_internal_ops = {
	.open = ipu_isys_subdev_open,
	.close = ipu_isys_subdev_close,
};

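/*
 * Query the current link frequency from the pipeline's external (sensor)
 * subdevice: read the V4L2_CID_LINK_FREQ menu control and resolve the
 * returned menu index to a frequency in Hz with v4l2_querymenu().
 */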
int ipu_isys_csi2_get_link_freq(struct ipu_isys_csi2 *csi2, __s64 *link_freq)
{
	struct ipu_isys_pipeline *pipe = container_of(csi2->asd.sd.entity.pipe,
						      struct ipu_isys_pipeline,
						      pipe);
	struct v4l2_subdev *ext_sd =
		media_entity_to_v4l2_subdev(pipe->external->entity);
	struct v4l2_ext_control c = {.id = V4L2_CID_LINK_FREQ, };
	struct v4l2_ext_controls cs = {.count = 1,
		.controls = &c,
	};
	struct v4l2_querymenu qm = {.id = c.id, };
	int rval;

	if (!ext_sd) {
		WARN_ON(1);
		return -ENODEV;
	}
	rval = v4l2_g_ext_ctrls(ext_sd->ctrl_handler,
				ext_sd->devnode,
				ext_sd->v4l2_dev->mdev,
				&cs);
	if (rval) {
		dev_info(&csi2->isys->adev->dev, "can't get link frequency\n");
		return rval;
	}

	qm.index = c.value;

	rval = v4l2_querymenu(ext_sd->ctrl_handler, &qm);
	if (rval) {
		dev_info(&csi2->isys->adev->dev, "can't get menu item\n");
		return rval;
	}

	dev_dbg(&csi2->isys->adev->dev, "%s: link frequency %lld\n", __func__,
		qm.value);

	if (!qm.value)
		return -EINVAL;
	*link_freq = qm.value;
	return 0;
}

static int subscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
			   struct v4l2_event_subscription *sub)
{
	struct ipu_isys_csi2 *csi2 = to_ipu_isys_csi2(sd);

	dev_dbg(&csi2->isys->adev->dev, "subscribe event(type %u id %u)\n",
		sub->type, sub->id);

	switch (sub->type) {
	case V4L2_EVENT_FRAME_SYNC:
		return v4l2_event_subscribe(fh, sub, 10, NULL);
	case V4L2_EVENT_CTRL:
		return v4l2_ctrl_subscribe_event(fh, sub);
	default:
		return -EINVAL;
	}
}

static const struct v4l2_subdev_core_ops csi2_sd_core_ops = {
	.subscribe_event = subscribe_event,
	.unsubscribe_event = v4l2_event_subdev_unsubscribe,
};

/*
 * The input system CSI2+ receiver has several parameters affecting
 * the receiver timings. These depend on the MIPI bus frequency
 * F in Hz (sensor transmitter rate) as follows:
 *	register value = (A/1e9 + B * UI) / COUNT_ACC
 * where
 *	UI = 1 / (2 * F) in seconds
 *	COUNT_ACC = counter accuracy in seconds
 *	For legacy IPU, COUNT_ACC = 0.125 ns
 *
 * A and B are coefficients from the table below, depending on whether
 * the register minimum or maximum value is calculated.
 *
 *					Minimum		Maximum
 * Clock lane				A	B	A	B
 * reg_rx_csi_dly_cnt_termen_clane	0	0	38	0
 * reg_rx_csi_dly_cnt_settle_clane	95	-8	300	-16
 * Data lanes
 * reg_rx_csi_dly_cnt_termen_dlane0	0	0	35	4
 * reg_rx_csi_dly_cnt_settle_dlane0	85	-2	145	-6
 * reg_rx_csi_dly_cnt_termen_dlane1	0	0	35	4
 * reg_rx_csi_dly_cnt_settle_dlane1	85	-2	145	-6
 * reg_rx_csi_dly_cnt_termen_dlane2	0	0	35	4
 * reg_rx_csi_dly_cnt_settle_dlane2	85	-2	145	-6
 * reg_rx_csi_dly_cnt_termen_dlane3	0	0	35	4
 * reg_rx_csi_dly_cnt_settle_dlane3	85	-2	145	-6
 *
 * We use the minimum values of both A and B.
 */

#define DIV_SHIFT 8

static u32 calc_timing(s32 a, s32 b, s64 link_freq, s32 accinv)
{
	/*
	 * Both the 0.5e9 constant (from UI = 1 / (2 * F)) and the link
	 * frequency are pre-scaled by DIV_SHIFT to keep the intermediate
	 * arithmetic within 32 bits.
	 */
	return accinv * a + (accinv * b * (500000000 >> DIV_SHIFT)
			     / (s32)(link_freq >> DIV_SHIFT));
}
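
/*
 * Illustrative example (not from the original source): with an assumed link
 * frequency of 400 MHz, UI = 1 / (2 * F) = 1.25 ns. For the clock lane
 * settle minimum (A = 95, B = -8) and ACCINV = 8 counts per ns:
 *
 *	calc_timing(95, -8, 400000000, 8)
 *		= 8 * 95 + 8 * (-8) * 500000000 / 400000000
 *		= 760 - 80 = 680
 */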

static int
ipu_isys_csi2_calc_timing(struct ipu_isys_csi2 *csi2,
			  struct ipu_isys_csi2_timing *timing, u32 accinv)
{
	s64 link_freq;
	int rval;

	rval = ipu_isys_csi2_get_link_freq(csi2, &link_freq);
	if (rval)
		return rval;

	timing->ctermen = calc_timing(CSI2_CSI_RX_DLY_CNT_TERMEN_CLANE_A,
				      CSI2_CSI_RX_DLY_CNT_TERMEN_CLANE_B,
				      link_freq, accinv);
	timing->csettle = calc_timing(CSI2_CSI_RX_DLY_CNT_SETTLE_CLANE_A,
				      CSI2_CSI_RX_DLY_CNT_SETTLE_CLANE_B,
				      link_freq, accinv);
	dev_dbg(&csi2->isys->adev->dev, "ctermen %u\n", timing->ctermen);
	dev_dbg(&csi2->isys->adev->dev, "csettle %u\n", timing->csettle);

	timing->dtermen = calc_timing(CSI2_CSI_RX_DLY_CNT_TERMEN_DLANE_A,
				      CSI2_CSI_RX_DLY_CNT_TERMEN_DLANE_B,
				      link_freq, accinv);
	timing->dsettle = calc_timing(CSI2_CSI_RX_DLY_CNT_SETTLE_DLANE_A,
				      CSI2_CSI_RX_DLY_CNT_SETTLE_DLANE_B,
				      link_freq, accinv);
	dev_dbg(&csi2->isys->adev->dev, "dtermen %u\n", timing->dtermen);
	dev_dbg(&csi2->isys->adev->dev, "dsettle %u\n", timing->dsettle);

	return 0;
}

/* ACCINV = 1 / COUNT_ACC, i.e. 8 counts per ns for COUNT_ACC = 0.125 ns */
#define CSI2_ACCINV 8

static int set_stream(struct v4l2_subdev *sd, int enable)
{
	struct ipu_isys_csi2 *csi2 = to_ipu_isys_csi2(sd);
	struct ipu_isys_pipeline *ip = container_of(sd->entity.pipe,
						    struct ipu_isys_pipeline,
						    pipe);
	struct ipu_isys_csi2_config *cfg;
	struct v4l2_subdev *ext_sd;
	struct ipu_isys_csi2_timing timing = {0};
	unsigned int nlanes;
	int rval;

	dev_dbg(&csi2->isys->adev->dev, "csi2 s_stream %d\n", enable);

	if (!ip->external->entity) {
		WARN_ON(1);
		return -ENODEV;
	}
	ext_sd = media_entity_to_v4l2_subdev(ip->external->entity);
	cfg = v4l2_get_subdev_hostdata(ext_sd);

	if (!enable) {
		ipu_isys_csi2_set_stream(sd, timing, 0, enable);
		return 0;
	}

	ip->has_sof = true;

	nlanes = cfg->nlanes;

	dev_dbg(&csi2->isys->adev->dev, "lane nr %u.\n", nlanes);

	rval = ipu_isys_csi2_calc_timing(csi2, &timing, CSI2_ACCINV);
	if (rval)
		return rval;

	rval = ipu_isys_csi2_set_stream(sd, timing, nlanes, enable);

	return rval;
}

static void csi2_capture_done(struct ipu_isys_pipeline *ip,
			      struct ipu_fw_isys_resp_info_abi *info)
{
	if (ip->interlaced && ip->isys->short_packet_source ==
	    IPU_ISYS_SHORT_PACKET_FROM_RECEIVER) {
		struct ipu_isys_buffer *ib;
		unsigned long flags;

		spin_lock_irqsave(&ip->short_packet_queue_lock, flags);
		if (!list_empty(&ip->short_packet_active)) {
			ib = list_last_entry(&ip->short_packet_active,
					     struct ipu_isys_buffer, head);
			list_move(&ib->head, &ip->short_packet_incoming);
		}
		spin_unlock_irqrestore(&ip->short_packet_queue_lock, flags);
	}
}

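/*
 * Validate a link towards the CSI-2 receiver: record the receiver in the
 * pipeline, register the capture-done callback, and, if the "Store CSI-2
 * Headers" control is disabled, refuse the configuration when the source
 * pad feeds another subdevice (the CSI2 BE), which needs those headers.
 */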
static int csi2_link_validate(struct media_link *link)
{
	struct ipu_isys_csi2 *csi2;
	struct ipu_isys_pipeline *ip;
	int rval;

	if (!link->sink->entity ||
	    !link->sink->entity->pipe || !link->source->entity)
		return -EINVAL;
	csi2 =
	    to_ipu_isys_csi2(media_entity_to_v4l2_subdev(link->sink->entity));
	ip = to_ipu_isys_pipeline(link->sink->entity->pipe);
	csi2->receiver_errors = 0;
	ip->csi2 = csi2;
	ipu_isys_video_add_capture_done(to_ipu_isys_pipeline
					(link->sink->entity->pipe),
					csi2_capture_done);

	rval = v4l2_subdev_link_validate(link);
	if (rval)
		return rval;

	if (!v4l2_ctrl_g_ctrl(csi2->store_csi2_header)) {
		struct media_pad *remote_pad =
		    media_entity_remote_pad(&csi2->asd.pad[CSI2_PAD_SOURCE]);

		if (remote_pad &&
		    is_media_entity_v4l2_subdev(remote_pad->entity)) {
			dev_err(&csi2->isys->adev->dev,
				"CSI2 BE requires CSI2 headers.\n");
			return -EINVAL;
		}
	}

	return 0;
}

static const struct v4l2_subdev_video_ops csi2_sd_video_ops = {
	.s_stream = set_stream,
};

static int ipu_isys_csi2_get_fmt(struct v4l2_subdev *sd,
				 struct v4l2_subdev_state *sd_state,
				 struct v4l2_subdev_format *fmt)
{
	return ipu_isys_subdev_get_ffmt(sd, sd_state, fmt);
}

static int ipu_isys_csi2_set_fmt(struct v4l2_subdev *sd,
				 struct v4l2_subdev_state *sd_state,
				 struct v4l2_subdev_format *fmt)
{
	return ipu_isys_subdev_set_ffmt(sd, sd_state, fmt);
}

static int __subdev_link_validate(struct v4l2_subdev *sd,
				  struct media_link *link,
				  struct v4l2_subdev_format *source_fmt,
				  struct v4l2_subdev_format *sink_fmt)
{
	struct ipu_isys_pipeline *ip = container_of(sd->entity.pipe,
						    struct ipu_isys_pipeline,
						    pipe);

	if (source_fmt->format.field == V4L2_FIELD_ALTERNATE)
		ip->interlaced = true;

	return ipu_isys_subdev_link_validate(sd, link, source_fmt, sink_fmt);
}

static const struct v4l2_subdev_pad_ops csi2_sd_pad_ops = {
	.link_validate = __subdev_link_validate,
	.get_fmt = ipu_isys_csi2_get_fmt,
	.set_fmt = ipu_isys_csi2_set_fmt,
	.enum_mbus_code = ipu_isys_subdev_enum_mbus_code,
};

static struct v4l2_subdev_ops csi2_sd_ops = {
	.core = &csi2_sd_core_ops,
	.video = &csi2_sd_video_ops,
	.pad = &csi2_sd_pad_ops,
};

static struct media_entity_operations csi2_entity_ops = {
	.link_validate = csi2_link_validate,
};

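/*
 * Set the format on a receiver pad. On the sink pad the requested format is
 * stored and propagated towards the source pads; on a source pad only width,
 * height and field are taken from the request, and the media bus code is
 * replaced with the uncompressed equivalent of the requested code.
 */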
static void csi2_set_ffmt(struct v4l2_subdev *sd,
			  struct v4l2_subdev_state *sd_state,
			  struct v4l2_subdev_format *fmt)
{
	enum isys_subdev_prop_tgt tgt = IPU_ISYS_SUBDEV_PROP_TGT_SINK_FMT;
	struct v4l2_mbus_framefmt *ffmt =
		__ipu_isys_get_ffmt(sd, sd_state, fmt->pad, fmt->which);

	if (fmt->format.field != V4L2_FIELD_ALTERNATE)
		fmt->format.field = V4L2_FIELD_NONE;

	if (fmt->pad == CSI2_PAD_SINK) {
		*ffmt = fmt->format;
		ipu_isys_subdev_fmt_propagate(sd, sd_state, &fmt->format, NULL,
					      tgt, fmt->pad, fmt->which);
		return;
	}

	if (sd->entity.pads[fmt->pad].flags & MEDIA_PAD_FL_SOURCE) {
		ffmt->width = fmt->format.width;
		ffmt->height = fmt->format.height;
		ffmt->field = fmt->format.field;
		ffmt->code =
		    ipu_isys_subdev_code_to_uncompressed(fmt->format.code);
		return;
	}

	WARN_ON(1);
}

static const struct ipu_isys_pixelformat *
csi2_try_fmt(struct ipu_isys_video *av,
	     struct v4l2_pix_format_mplane *mpix)
{
	struct media_link *link = list_first_entry(&av->vdev.entity.links,
						   struct media_link, list);
	struct v4l2_subdev *sd =
		media_entity_to_v4l2_subdev(link->source->entity);
	struct ipu_isys_csi2 *csi2;

	if (!sd)
		return NULL;

	csi2 = to_ipu_isys_csi2(sd);

	return ipu_isys_video_try_fmt_vid_mplane(av, mpix,
		v4l2_ctrl_g_ctrl(csi2->store_csi2_header));
}

void ipu_isys_csi2_cleanup(struct ipu_isys_csi2 *csi2)
{
	if (!csi2->isys)
		return;

	v4l2_device_unregister_subdev(&csi2->asd.sd);
	ipu_isys_subdev_cleanup(&csi2->asd);
	csi2->isys = NULL;
}

static void csi_ctrl_init(struct v4l2_subdev *sd)
{
	struct ipu_isys_csi2 *csi2 = to_ipu_isys_csi2(sd);

	static const struct v4l2_ctrl_config cfg = {
		.id = V4L2_CID_IPU_STORE_CSI2_HEADER,
		.name = "Store CSI-2 Headers",
		.type = V4L2_CTRL_TYPE_BOOLEAN,
		.min = 0,
		.max = 1,
		.step = 1,
		.def = 1,
	};

	csi2->store_csi2_header = v4l2_ctrl_new_custom(&csi2->asd.ctrl_handler,
						       &cfg, NULL);
}

int ipu_isys_csi2_init(struct ipu_isys_csi2 *csi2,
		       struct ipu_isys *isys,
		       void __iomem *base, unsigned int index)
{
	struct v4l2_subdev_format fmt = {
		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
		.pad = CSI2_PAD_SINK,
		.format = {
			.width = 4096,
			.height = 3072,
		},
	};
	int i, rval, src;

	dev_dbg(&isys->adev->dev, "csi-%u base = 0x%lx\n", index,
		(unsigned long)base);
	csi2->isys = isys;
	csi2->base = base;
	csi2->index = index;

	csi2->asd.sd.entity.ops = &csi2_entity_ops;
	csi2->asd.ctrl_init = csi_ctrl_init;
	csi2->asd.isys = isys;
	init_completion(&csi2->eof_completion);
	rval = ipu_isys_subdev_init(&csi2->asd, &csi2_sd_ops, 0,
				    NR_OF_CSI2_PADS,
				    NR_OF_CSI2_SOURCE_PADS,
				    NR_OF_CSI2_SINK_PADS,
				    0);
	if (rval)
		goto fail;

	csi2->asd.pad[CSI2_PAD_SINK].flags = MEDIA_PAD_FL_SINK
	    | MEDIA_PAD_FL_MUST_CONNECT;
	csi2->asd.pad[CSI2_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;

	src = index;
	csi2->asd.source = IPU_FW_ISYS_STREAM_SRC_CSI2_PORT0 + src;
	csi2_supported_codes[CSI2_PAD_SINK] = csi2_supported_codes_pad_sink;

	for (i = 0; i < NR_OF_CSI2_SOURCE_PADS; i++)
		csi2_supported_codes[i + 1] = csi2_supported_codes_pad_source;
	csi2->asd.supported_codes = csi2_supported_codes;
	csi2->asd.set_ffmt = csi2_set_ffmt;

	csi2->asd.sd.flags |= V4L2_SUBDEV_FL_HAS_EVENTS;
	csi2->asd.sd.internal_ops = &csi2_sd_internal_ops;
	snprintf(csi2->asd.sd.name, sizeof(csi2->asd.sd.name),
		 IPU_ISYS_ENTITY_PREFIX " CSI-2 %u", index);
	v4l2_set_subdevdata(&csi2->asd.sd, &csi2->asd);

	rval = v4l2_device_register_subdev(&isys->v4l2_dev, &csi2->asd.sd);
	if (rval) {
		dev_info(&isys->adev->dev, "can't register v4l2 subdev\n");
		goto fail;
	}

	mutex_lock(&csi2->asd.mutex);
	__ipu_isys_subdev_set_ffmt(&csi2->asd.sd, NULL, &fmt);
	mutex_unlock(&csi2->asd.mutex);

	return 0;

fail:
	ipu_isys_csi2_cleanup(csi2);

	return rval;
}

void ipu_isys_csi2_sof_event(struct ipu_isys_csi2 *csi2)
{
	struct ipu_isys_pipeline *ip = NULL;
	struct v4l2_event ev = {
		.type = V4L2_EVENT_FRAME_SYNC,
	};
	struct video_device *vdev = csi2->asd.sd.devnode;
	unsigned long flags;
	unsigned int i;

	spin_lock_irqsave(&csi2->isys->lock, flags);
	csi2->in_frame = true;

	for (i = 0; i < IPU_ISYS_MAX_STREAMS; i++) {
		if (csi2->isys->pipes[i] &&
		    csi2->isys->pipes[i]->csi2 == csi2) {
			ip = csi2->isys->pipes[i];
			break;
		}
	}

	/* Pipe already vanished */
	if (!ip) {
		spin_unlock_irqrestore(&csi2->isys->lock, flags);
		return;
	}

	ev.u.frame_sync.frame_sequence = atomic_inc_return(&ip->sequence) - 1;
	spin_unlock_irqrestore(&csi2->isys->lock, flags);

	v4l2_event_queue(vdev, &ev);
	dev_dbg(&csi2->isys->adev->dev,
		"sof_event::csi2-%u sequence: %u\n",
		csi2->index, ev.u.frame_sync.frame_sequence);
}

void ipu_isys_csi2_eof_event(struct ipu_isys_csi2 *csi2)
{
	struct ipu_isys_pipeline *ip = NULL;
	unsigned long flags;
	unsigned int i;
	u32 frame_sequence;

	spin_lock_irqsave(&csi2->isys->lock, flags);
	csi2->in_frame = false;
	if (csi2->wait_for_sync)
		complete(&csi2->eof_completion);

	for (i = 0; i < IPU_ISYS_MAX_STREAMS; i++) {
		if (csi2->isys->pipes[i] &&
		    csi2->isys->pipes[i]->csi2 == csi2) {
			ip = csi2->isys->pipes[i];
			break;
		}
	}

	if (ip) {
		frame_sequence = atomic_read(&ip->sequence);
		spin_unlock_irqrestore(&csi2->isys->lock, flags);

		dev_dbg(&csi2->isys->adev->dev,
			"eof_event::csi2-%u sequence: %u\n",
			csi2->index, frame_sequence);
		return;
	}

	spin_unlock_irqrestore(&csi2->isys->lock, flags);
}

/* Call this function only _after_ the sensor has been stopped */
void ipu_isys_csi2_wait_last_eof(struct ipu_isys_csi2 *csi2)
{
	unsigned long flags, tout;

	spin_lock_irqsave(&csi2->isys->lock, flags);

	if (!csi2->in_frame) {
		spin_unlock_irqrestore(&csi2->isys->lock, flags);
		return;
	}

	reinit_completion(&csi2->eof_completion);
	csi2->wait_for_sync = true;
	spin_unlock_irqrestore(&csi2->isys->lock, flags);
	tout = wait_for_completion_timeout(&csi2->eof_completion,
					   IPU_EOF_TIMEOUT_JIFFIES);
	if (!tout)
		dev_err(&csi2->isys->adev->dev,
			"csi2-%u: timeout at sync to eof\n",
			csi2->index);
	csi2->wait_for_sync = false;
}

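/*
 * Take a buffer from the short packet incoming queue (used when interlaced
 * streams take their short packets from the receiver), pre-fill its MIPI
 * packet header with magic values, and move it onto the given buffer list.
 */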
struct ipu_isys_buffer *
ipu_isys_csi2_get_short_packet_buffer(struct ipu_isys_pipeline *ip,
				      struct ipu_isys_buffer_list *bl)
{
	struct ipu_isys_buffer *ib;
	struct ipu_isys_private_buffer *pb;
	struct ipu_isys_mipi_packet_header *ph;
	unsigned long flags;

	spin_lock_irqsave(&ip->short_packet_queue_lock, flags);
	if (list_empty(&ip->short_packet_incoming)) {
		spin_unlock_irqrestore(&ip->short_packet_queue_lock, flags);
		return NULL;
	}
	ib = list_last_entry(&ip->short_packet_incoming,
			     struct ipu_isys_buffer, head);
	pb = ipu_isys_buffer_to_private_buffer(ib);
	ph = (struct ipu_isys_mipi_packet_header *)pb->buffer;

	/* Fill the packet header with a magic number. */
	ph->word_count = 0xffff;
	ph->dtype = 0xff;

	dma_sync_single_for_cpu(&ip->isys->adev->dev, pb->dma_addr,
				sizeof(*ph), DMA_BIDIRECTIONAL);
	spin_unlock_irqrestore(&ip->short_packet_queue_lock, flags);
	list_move(&ib->head, &bl->head);

	return ib;
}