1 // SPDX-License-Identifier: GPL-2.0
2 // Copyright (C) 2013 - 2021 Intel Corporation
4 #include <linux/debugfs.h>
5 #include <linux/delay.h>
6 #include <linux/device.h>
7 #include <linux/dma-mapping.h>
8 #include <linux/firmware.h>
9 #include <linux/kthread.h>
10 #include <linux/module.h>
11 #include <linux/pm_runtime.h>
12 #include <linux/string.h>
13 #include <linux/sched.h>
14 #include <linux/version.h>
16 #include <media/ipu-isys.h>
17 #include <media/v4l2-mc.h>
18 #include <media/v4l2-subdev.h>
19 #include <media/v4l2-fwnode.h>
20 #include <media/v4l2-ctrls.h>
21 #include <media/v4l2-device.h>
22 #include <media/v4l2-event.h>
23 #include <media/v4l2-ioctl.h>
24 #include <media/v4l2-async.h>
31 #include "ipu-isys-csi2.h"
32 #include "ipu-isys-tpg.h"
33 #include "ipu-isys-video.h"
34 #include "ipu-platform-regs.h"
35 #include "ipu-buttress.h"
36 #include "ipu-platform.h"
37 #include "ipu-platform-buttress-regs.h"
39 #define ISYS_PM_QOS_VALUE 300
41 #define IPU_BUTTRESS_FABIC_CONTROL 0x68
42 #define GDA_ENABLE_IWAKE_INDEX 2
43 #define GDA_IWAKE_THRESHOLD_INDEX 1
44 #define GDA_IRQ_CRITICAL_THRESHOLD_INDEX 0
46 /* LTR & DID value are 10 bit at most */
47 #define LTR_DID_VAL_MAX 1023
48 #define LTR_DEFAULT_VALUE 0x70503C19
49 #define FILL_TIME_DEFAULT_VALUE 0xFFF0783C
50 #define LTR_DID_PKGC_2R 20
51 #define LTR_DID_PKGC_8 100
52 #define LTR_SCALE_DEFAULT 5
53 #define LTR_SCALE_1024NS 2
54 #define REG_PKGC_PMON_CFG 0xB00
56 #define VAL_PKGC_PMON_CFG_RESET 0x38
57 #define VAL_PKGC_PMON_CFG_START 0x7
59 #define IS_PIXEL_BUFFER_PAGES 0x80
60 /* BIOS provides the driver the LTR and threshold information in IPU,
61 * IS pixel buffer is 256KB, MaxSRAMSize is 200KB on IPU6.
63 #define IPU6_MAX_SRAM_SIZE (200 << 10)
64 /* IS pixel buffer is 128KB, MaxSRAMSize is 96KB on IPU6SE.
66 #define IPU6SE_MAX_SRAM_SIZE (96 << 10)
67 /* When iwake mode is disabled the critical threshold is statically set to 75%
68 * of the IS pixel buffer criticalThreshold = (128 * 3) / 4
70 #define CRITICAL_THRESHOLD_IWAKE_DISABLE (IS_PIXEL_BUFFER_PAGES * 3 / 4)
80 u16 keep_power_in_D0
: 1;
81 u16 keep_power_override
: 1;
95 isys_complete_ext_device_registration(struct ipu_isys
*isys
,
96 struct v4l2_subdev
*sd
,
97 struct ipu_isys_csi2_config
*csi2
)
102 v4l2_set_subdev_hostdata(sd
, csi2
);
104 for (i
= 0; i
< sd
->entity
.num_pads
; i
++) {
105 if (sd
->entity
.pads
[i
].flags
& MEDIA_PAD_FL_SOURCE
)
109 if (i
== sd
->entity
.num_pads
) {
110 dev_warn(&isys
->adev
->dev
,
111 "no source pad in external entity\n");
113 goto skip_unregister_subdev
;
116 rval
= media_create_pad_link(&sd
->entity
, i
,
117 &isys
->csi2
[csi2
->port
].asd
.sd
.entity
,
120 dev_warn(&isys
->adev
->dev
, "can't create link\n");
121 goto skip_unregister_subdev
;
124 isys
->csi2
[csi2
->port
].nlanes
= csi2
->nlanes
;
127 skip_unregister_subdev
:
128 v4l2_device_unregister_subdev(sd
);
132 static void isys_unregister_subdevices(struct ipu_isys
*isys
)
134 const struct ipu_isys_internal_tpg_pdata
*tpg
=
135 &isys
->pdata
->ipdata
->tpg
;
136 const struct ipu_isys_internal_csi2_pdata
*csi2
=
137 &isys
->pdata
->ipdata
->csi2
;
140 ipu_isys_csi2_be_cleanup(&isys
->csi2_be
);
141 for (i
= 0; i
< NR_OF_CSI2_BE_SOC_DEV
; i
++)
142 ipu_isys_csi2_be_soc_cleanup(&isys
->csi2_be_soc
[i
]);
144 for (i
= 0; i
< tpg
->ntpgs
; i
++)
145 ipu_isys_tpg_cleanup(&isys
->tpg
[i
]);
147 for (i
= 0; i
< csi2
->nports
; i
++)
148 ipu_isys_csi2_cleanup(&isys
->csi2
[i
]);
151 static int isys_register_subdevices(struct ipu_isys
*isys
)
153 const struct ipu_isys_internal_tpg_pdata
*tpg
=
154 &isys
->pdata
->ipdata
->tpg
;
155 const struct ipu_isys_internal_csi2_pdata
*csi2
=
156 &isys
->pdata
->ipdata
->csi2
;
157 struct ipu_isys_csi2_be_soc
*csi2_be_soc
;
161 isys
->csi2
= devm_kcalloc(&isys
->adev
->dev
, csi2
->nports
,
162 sizeof(*isys
->csi2
), GFP_KERNEL
);
168 for (i
= 0; i
< csi2
->nports
; i
++) {
169 rval
= ipu_isys_csi2_init(&isys
->csi2
[i
], isys
,
171 csi2
->offsets
[i
], i
);
175 isys
->isr_csi2_bits
|= IPU_ISYS_UNISPART_IRQ_CSI2(i
);
178 isys
->tpg
= devm_kcalloc(&isys
->adev
->dev
, tpg
->ntpgs
,
179 sizeof(*isys
->tpg
), GFP_KERNEL
);
185 for (i
= 0; i
< tpg
->ntpgs
; i
++) {
186 rval
= ipu_isys_tpg_init(&isys
->tpg
[i
], isys
,
189 tpg
->sels
? (isys
->pdata
->base
+
190 tpg
->sels
[i
]) : NULL
, i
);
195 for (k
= 0; k
< NR_OF_CSI2_BE_SOC_DEV
; k
++) {
196 rval
= ipu_isys_csi2_be_soc_init(&isys
->csi2_be_soc
[k
],
199 dev_info(&isys
->adev
->dev
,
200 "can't register csi2 soc be device %d\n", k
);
205 rval
= ipu_isys_csi2_be_init(&isys
->csi2_be
, isys
);
207 dev_info(&isys
->adev
->dev
,
208 "can't register raw csi2 be device\n");
212 for (i
= 0; i
< csi2
->nports
; i
++) {
213 rval
= media_create_pad_link(&isys
->csi2
[i
].asd
.sd
.entity
,
215 &isys
->csi2_be
.asd
.sd
.entity
,
216 CSI2_BE_PAD_SINK
, 0);
218 dev_info(&isys
->adev
->dev
,
219 "can't create link csi2 <=> csi2_be\n");
222 for (k
= 0; k
< NR_OF_CSI2_BE_SOC_DEV
; k
++) {
223 csi2_be_soc
= &isys
->csi2_be_soc
[k
];
225 media_create_pad_link(&isys
->csi2
[i
].asd
.sd
.entity
,
227 &csi2_be_soc
->asd
.sd
.entity
,
228 CSI2_BE_SOC_PAD_SINK
, 0);
230 dev_info(&isys
->adev
->dev
,
231 "can't create link csi2->be_soc\n");
237 for (i
= 0; i
< tpg
->ntpgs
; i
++) {
238 rval
= media_create_pad_link(&isys
->tpg
[i
].asd
.sd
.entity
,
240 &isys
->csi2_be
.asd
.sd
.entity
,
241 CSI2_BE_PAD_SINK
, 0);
243 dev_info(&isys
->adev
->dev
,
244 "can't create link between tpg and csi2_be\n");
248 for (k
= 0; k
< NR_OF_CSI2_BE_SOC_DEV
; k
++) {
249 csi2_be_soc
= &isys
->csi2_be_soc
[k
];
251 media_create_pad_link(&isys
->tpg
[i
].asd
.sd
.entity
,
253 &csi2_be_soc
->asd
.sd
.entity
,
254 CSI2_BE_SOC_PAD_SINK
, 0);
256 dev_info(&isys
->adev
->dev
,
257 "can't create link tpg->be_soc\n");
266 isys_unregister_subdevices(isys
);
270 /* read ltrdid threshold values from BIOS or system configuration */
271 static void get_lut_ltrdid(struct ipu_isys
*isys
, struct ltr_did
*pltr_did
)
273 struct isys_iwake_watermark
*iwake_watermark
= isys
->iwake_watermark
;
275 struct ltr_did ltrdid_default
;
277 ltrdid_default
.lut_ltr
.value
= LTR_DEFAULT_VALUE
;
278 ltrdid_default
.lut_fill_time
.value
= FILL_TIME_DEFAULT_VALUE
;
280 if (iwake_watermark
->ltrdid
.lut_ltr
.value
)
281 *pltr_did
= iwake_watermark
->ltrdid
;
283 *pltr_did
= ltrdid_default
;
286 static int set_iwake_register(struct ipu_isys
*isys
, u32 index
, u32 value
)
292 ret
= ipu_fw_isys_send_proxy_token(isys
, req_id
, index
, offset
, value
);
294 dev_err(&isys
->adev
->dev
, "write %d failed %d", index
, ret
);
300 * When input system is powered up and before enabling any new sensor capture,
301 * or after disabling any sensor capture the following values need to be set:
302 * LTR_value = LTR(usec) from calculation;
304 * DID_value = DID(usec) from calculation;
307 * When input system is powered down, the LTR and DID values
308 * must be returned to the default values:
314 static void set_iwake_ltrdid(struct ipu_isys
*isys
,
317 enum ltr_did_type use
)
319 /* did_scale will set to 2= 1us */
320 u16 ltr_val
, ltr_scale
, did_val
;
321 union fabric_ctrl fc
;
322 struct ipu_device
*isp
= isys
->adev
->isp
;
326 ltr_val
= min_t(u16
, ltr
, (u16
)LTR_DID_VAL_MAX
);
327 did_val
= min_t(u16
, did
, (u16
)LTR_DID_VAL_MAX
);
328 ltr_scale
= (ltr
== LTR_DID_VAL_MAX
&&
329 did
== LTR_DID_VAL_MAX
) ?
330 LTR_SCALE_DEFAULT
: LTR_SCALE_1024NS
;
334 ltr_val
= LTR_DID_PKGC_2R
;
335 did_val
= LTR_DID_PKGC_2R
;
336 ltr_scale
= LTR_SCALE_1024NS
;
339 ltr_val
= LTR_DID_VAL_MAX
;
340 did_val
= LTR_DID_VAL_MAX
;
341 ltr_scale
= LTR_SCALE_DEFAULT
;
347 fc
.value
= readl(isp
->base
+ IPU_BUTTRESS_FABIC_CONTROL
);
348 fc
.bits
.ltr_val
= ltr_val
;
349 fc
.bits
.ltr_scale
= ltr_scale
;
350 fc
.bits
.did_val
= did_val
;
351 fc
.bits
.did_scale
= 2;
352 dev_dbg(&isys
->adev
->dev
,
353 "%s ltr: %d did: %d", __func__
, ltr_val
, did_val
);
354 writel(fc
.value
, isp
->base
+ IPU_BUTTRESS_FABIC_CONTROL
);
357 /* SW driver may clear register GDA_ENABLE_IWAKE before the FW configures the
358 * stream for debug purposes. Otherwise SW should not access this register.
360 static int enable_iwake(struct ipu_isys
*isys
, bool enable
)
363 struct isys_iwake_watermark
*iwake_watermark
= isys
->iwake_watermark
;
365 mutex_lock(&iwake_watermark
->mutex
);
366 if (iwake_watermark
->iwake_enabled
== enable
) {
367 mutex_unlock(&iwake_watermark
->mutex
);
370 ret
= set_iwake_register(isys
, GDA_ENABLE_IWAKE_INDEX
, enable
);
372 iwake_watermark
->iwake_enabled
= enable
;
373 mutex_unlock(&iwake_watermark
->mutex
);
377 void update_watermark_setting(struct ipu_isys
*isys
)
379 struct isys_iwake_watermark
*iwake_watermark
= isys
->iwake_watermark
;
380 struct list_head
*stream_node
;
381 struct video_stream_watermark
*p_watermark
;
382 struct ltr_did ltrdid
;
383 u16 calc_fill_time_us
= 0;
386 u32 iwake_threshold
, iwake_critical_threshold
;
388 u64 isys_pb_datarate_mbs
= 0;
389 u16 sram_granulrity_shift
=
390 (ipu_ver
== IPU_VER_6
|| ipu_ver
== IPU_VER_6EP
) ?
391 IPU6_SRAM_GRANULRITY_SHIFT
: IPU6SE_SRAM_GRANULRITY_SHIFT
;
393 (ipu_ver
== IPU_VER_6
|| ipu_ver
== IPU_VER_6EP
) ?
394 IPU6_MAX_SRAM_SIZE
: IPU6SE_MAX_SRAM_SIZE
;
396 mutex_lock(&iwake_watermark
->mutex
);
397 if (iwake_watermark
->force_iwake_disable
) {
398 set_iwake_ltrdid(isys
, 0, 0, LTR_IWAKE_OFF
);
399 set_iwake_register(isys
, GDA_IRQ_CRITICAL_THRESHOLD_INDEX
,
400 CRITICAL_THRESHOLD_IWAKE_DISABLE
);
401 mutex_unlock(&iwake_watermark
->mutex
);
405 if (list_empty(&iwake_watermark
->video_list
)) {
406 isys_pb_datarate_mbs
= 0;
408 list_for_each(stream_node
, &iwake_watermark
->video_list
)
410 p_watermark
= list_entry(stream_node
,
411 struct video_stream_watermark
,
413 isys_pb_datarate_mbs
+= p_watermark
->stream_data_rate
;
416 mutex_unlock(&iwake_watermark
->mutex
);
418 if (!isys_pb_datarate_mbs
) {
419 enable_iwake(isys
, false);
420 set_iwake_ltrdid(isys
, 0, 0, LTR_IWAKE_OFF
);
421 mutex_lock(&iwake_watermark
->mutex
);
422 set_iwake_register(isys
, GDA_IRQ_CRITICAL_THRESHOLD_INDEX
,
423 CRITICAL_THRESHOLD_IWAKE_DISABLE
);
424 mutex_unlock(&iwake_watermark
->mutex
);
426 /* should enable iwake by default according to FW */
427 enable_iwake(isys
, true);
428 calc_fill_time_us
= (u16
)(max_sram_size
/ isys_pb_datarate_mbs
);
429 get_lut_ltrdid(isys
, <rdid
);
431 if (calc_fill_time_us
<= ltrdid
.lut_fill_time
.bits
.th0
)
433 else if (calc_fill_time_us
<= ltrdid
.lut_fill_time
.bits
.th1
)
434 ltr
= ltrdid
.lut_ltr
.bits
.val0
;
435 else if (calc_fill_time_us
<= ltrdid
.lut_fill_time
.bits
.th2
)
436 ltr
= ltrdid
.lut_ltr
.bits
.val1
;
437 else if (calc_fill_time_us
<= ltrdid
.lut_fill_time
.bits
.th3
)
438 ltr
= ltrdid
.lut_ltr
.bits
.val2
;
440 ltr
= ltrdid
.lut_ltr
.bits
.val3
;
442 did
= calc_fill_time_us
- ltr
;
444 threshold_bytes
= did
* isys_pb_datarate_mbs
;
445 /* calculate iwake threshold with 2KB granularity pages */
447 max_t(u32
, 1, threshold_bytes
>> sram_granulrity_shift
);
449 iwake_threshold
= min_t(u32
, iwake_threshold
, max_sram_size
);
451 /* set the critical threshold to halfway between
452 * iwake threshold and the full buffer.
454 iwake_critical_threshold
= iwake_threshold
+
455 (IS_PIXEL_BUFFER_PAGES
- iwake_threshold
) / 2;
457 dev_dbg(&isys
->adev
->dev
, "%s threshold: %u critical: %u",
458 __func__
, iwake_threshold
, iwake_critical_threshold
);
459 set_iwake_ltrdid(isys
, ltr
, did
, LTR_IWAKE_ON
);
460 mutex_lock(&iwake_watermark
->mutex
);
461 set_iwake_register(isys
,
462 GDA_IWAKE_THRESHOLD_INDEX
, iwake_threshold
);
464 set_iwake_register(isys
,
465 GDA_IRQ_CRITICAL_THRESHOLD_INDEX
,
466 iwake_critical_threshold
);
467 mutex_unlock(&iwake_watermark
->mutex
);
469 writel(VAL_PKGC_PMON_CFG_RESET
,
470 isys
->adev
->isp
->base
+ REG_PKGC_PMON_CFG
);
471 writel(VAL_PKGC_PMON_CFG_START
,
472 isys
->adev
->isp
->base
+ REG_PKGC_PMON_CFG
);
476 static int isys_iwake_watermark_init(struct ipu_isys
*isys
)
478 struct isys_iwake_watermark
*iwake_watermark
;
480 if (isys
->iwake_watermark
)
483 iwake_watermark
= devm_kzalloc(&isys
->adev
->dev
,
484 sizeof(*iwake_watermark
), GFP_KERNEL
);
485 if (!iwake_watermark
)
487 INIT_LIST_HEAD(&iwake_watermark
->video_list
);
488 mutex_init(&iwake_watermark
->mutex
);
490 iwake_watermark
->ltrdid
.lut_ltr
.value
= 0;
491 isys
->iwake_watermark
= iwake_watermark
;
492 iwake_watermark
->isys
= isys
;
493 iwake_watermark
->iwake_enabled
= false;
494 iwake_watermark
->force_iwake_disable
= false;
498 static int isys_iwake_watermark_cleanup(struct ipu_isys
*isys
)
500 struct isys_iwake_watermark
*iwake_watermark
= isys
->iwake_watermark
;
502 if (!iwake_watermark
)
504 mutex_lock(&iwake_watermark
->mutex
);
505 list_del(&iwake_watermark
->video_list
);
506 mutex_unlock(&iwake_watermark
->mutex
);
507 mutex_destroy(&iwake_watermark
->mutex
);
508 isys
->iwake_watermark
= NULL
;
512 /* The .bound() notifier callback when a match is found */
513 static int isys_notifier_bound(struct v4l2_async_notifier
*notifier
,
514 struct v4l2_subdev
*sd
,
515 struct v4l2_async_subdev
*asd
)
517 struct ipu_isys
*isys
= container_of(notifier
,
518 struct ipu_isys
, notifier
);
519 struct sensor_async_subdev
*s_asd
= container_of(asd
,
520 struct sensor_async_subdev
, asd
);
522 dev_info(&isys
->adev
->dev
, "bind %s nlanes is %d port is %d\n",
523 sd
->name
, s_asd
->csi2
.nlanes
, s_asd
->csi2
.port
);
524 isys_complete_ext_device_registration(isys
, sd
, &s_asd
->csi2
);
526 return v4l2_device_register_subdev_nodes(&isys
->v4l2_dev
);
529 static void isys_notifier_unbind(struct v4l2_async_notifier
*notifier
,
530 struct v4l2_subdev
*sd
,
531 struct v4l2_async_subdev
*asd
)
533 struct ipu_isys
*isys
= container_of(notifier
,
534 struct ipu_isys
, notifier
);
536 dev_info(&isys
->adev
->dev
, "unbind %s\n", sd
->name
);
539 static int isys_notifier_complete(struct v4l2_async_notifier
*notifier
)
541 struct ipu_isys
*isys
= container_of(notifier
,
542 struct ipu_isys
, notifier
);
544 dev_info(&isys
->adev
->dev
, "All sensor registration completed.\n");
546 return v4l2_device_register_subdev_nodes(&isys
->v4l2_dev
);
549 static const struct v4l2_async_notifier_operations isys_async_ops
= {
550 .bound
= isys_notifier_bound
,
551 .unbind
= isys_notifier_unbind
,
552 .complete
= isys_notifier_complete
,
555 static int isys_fwnode_parse(struct device
*dev
,
556 struct v4l2_fwnode_endpoint
*vep
,
557 struct v4l2_async_subdev
*asd
)
559 struct sensor_async_subdev
*s_asd
=
560 container_of(asd
, struct sensor_async_subdev
, asd
);
562 s_asd
->csi2
.port
= vep
->base
.port
;
563 s_asd
->csi2
.nlanes
= vep
->bus
.mipi_csi2
.num_data_lanes
;
568 static int isys_notifier_init(struct ipu_isys
*isys
)
570 struct ipu_device
*isp
= isys
->adev
->isp
;
571 size_t asd_struct_size
= sizeof(struct sensor_async_subdev
);
574 v4l2_async_notifier_init(&isys
->notifier
);
575 ret
= v4l2_async_notifier_parse_fwnode_endpoints(&isp
->pdev
->dev
,
581 dev_err(&isys
->adev
->dev
,
582 "v4l2 parse_fwnode_endpoints() failed: %d\n", ret
);
586 if (list_empty(&isys
->notifier
.asd_list
)) {
587 /* isys probe could continue with async subdevs missing */
588 dev_warn(&isys
->adev
->dev
, "no subdev found in graph\n");
592 isys
->notifier
.ops
= &isys_async_ops
;
593 ret
= v4l2_async_notifier_register(&isys
->v4l2_dev
, &isys
->notifier
);
595 dev_err(&isys
->adev
->dev
,
596 "failed to register async notifier : %d\n", ret
);
597 v4l2_async_notifier_cleanup(&isys
->notifier
);
603 static void isys_notifier_cleanup(struct ipu_isys
*isys
)
605 v4l2_async_notifier_unregister(&isys
->notifier
);
606 v4l2_async_notifier_cleanup(&isys
->notifier
);
609 static struct media_device_ops isys_mdev_ops
= {
610 .link_notify
= v4l2_pipeline_link_notify
,
613 static int isys_register_devices(struct ipu_isys
*isys
)
617 isys
->media_dev
.dev
= &isys
->adev
->dev
;
618 isys
->media_dev
.ops
= &isys_mdev_ops
;
619 strlcpy(isys
->media_dev
.model
,
620 IPU_MEDIA_DEV_MODEL_NAME
, sizeof(isys
->media_dev
.model
));
621 snprintf(isys
->media_dev
.bus_info
, sizeof(isys
->media_dev
.bus_info
),
622 "pci:%s", dev_name(isys
->adev
->dev
.parent
->parent
));
623 strlcpy(isys
->v4l2_dev
.name
, isys
->media_dev
.model
,
624 sizeof(isys
->v4l2_dev
.name
));
626 media_device_init(&isys
->media_dev
);
628 rval
= media_device_register(&isys
->media_dev
);
630 dev_info(&isys
->adev
->dev
, "can't register media device\n");
631 goto out_media_device_unregister
;
634 isys
->v4l2_dev
.mdev
= &isys
->media_dev
;
636 rval
= v4l2_device_register(&isys
->adev
->dev
, &isys
->v4l2_dev
);
638 dev_info(&isys
->adev
->dev
, "can't register v4l2 device\n");
639 goto out_media_device_unregister
;
642 rval
= isys_register_subdevices(isys
);
644 goto out_v4l2_device_unregister
;
646 rval
= isys_notifier_init(isys
);
648 goto out_isys_unregister_subdevices
;
650 rval
= v4l2_device_register_subdev_nodes(&isys
->v4l2_dev
);
652 goto out_isys_notifier_cleanup
;
656 out_isys_notifier_cleanup
:
657 isys_notifier_cleanup(isys
);
659 out_isys_unregister_subdevices
:
660 isys_unregister_subdevices(isys
);
662 out_v4l2_device_unregister
:
663 v4l2_device_unregister(&isys
->v4l2_dev
);
665 out_media_device_unregister
:
666 media_device_unregister(&isys
->media_dev
);
667 media_device_cleanup(&isys
->media_dev
);
672 static void isys_unregister_devices(struct ipu_isys
*isys
)
674 isys_unregister_subdevices(isys
);
675 v4l2_device_unregister(&isys
->v4l2_dev
);
676 media_device_unregister(&isys
->media_dev
);
677 media_device_cleanup(&isys
->media_dev
);
681 static int isys_runtime_pm_resume(struct device
*dev
)
683 struct ipu_bus_device
*adev
= to_ipu_bus_device(dev
);
684 struct ipu_device
*isp
= adev
->isp
;
685 struct ipu_isys
*isys
= ipu_bus_get_drvdata(adev
);
692 ret
= ipu_mmu_hw_init(adev
->mmu
);
696 ipu_trace_restore(dev
);
698 cpu_latency_qos_update_request(&isys
->pm_qos
, ISYS_PM_QOS_VALUE
);
700 ret
= ipu_buttress_start_tsc_sync(isp
);
704 spin_lock_irqsave(&isys
->power_lock
, flags
);
706 spin_unlock_irqrestore(&isys
->power_lock
, flags
);
708 if (isys
->short_packet_source
== IPU_ISYS_SHORT_PACKET_FROM_TUNIT
) {
709 mutex_lock(&isys
->short_packet_tracing_mutex
);
710 isys
->short_packet_tracing_count
= 0;
711 mutex_unlock(&isys
->short_packet_tracing_mutex
);
715 set_iwake_ltrdid(isys
, 0, 0, LTR_ISYS_ON
);
719 static int isys_runtime_pm_suspend(struct device
*dev
)
721 struct ipu_bus_device
*adev
= to_ipu_bus_device(dev
);
722 struct ipu_isys
*isys
= ipu_bus_get_drvdata(adev
);
728 spin_lock_irqsave(&isys
->power_lock
, flags
);
730 spin_unlock_irqrestore(&isys
->power_lock
, flags
);
733 mutex_lock(&isys
->mutex
);
734 isys
->reset_needed
= false;
735 mutex_unlock(&isys
->mutex
);
737 cpu_latency_qos_update_request(&isys
->pm_qos
, PM_QOS_DEFAULT_VALUE
);
739 ipu_mmu_hw_cleanup(adev
->mmu
);
741 set_iwake_ltrdid(isys
, 0, 0, LTR_ISYS_OFF
);
745 static int isys_suspend(struct device
*dev
)
747 struct ipu_bus_device
*adev
= to_ipu_bus_device(dev
);
748 struct ipu_isys
*isys
= ipu_bus_get_drvdata(adev
);
750 /* If stream is open, refuse to suspend */
751 if (isys
->stream_opened
)
/* System resume hook: nothing to do — runtime resume restores state. */
static int isys_resume(struct device *dev)
{
	return 0;
}
762 static const struct dev_pm_ops isys_pm_ops
= {
763 .runtime_suspend
= isys_runtime_pm_suspend
,
764 .runtime_resume
= isys_runtime_pm_resume
,
765 .suspend
= isys_suspend
,
766 .resume
= isys_resume
,
769 #define ISYS_PM_OPS (&isys_pm_ops)
771 #define ISYS_PM_OPS NULL
774 static void isys_remove(struct ipu_bus_device
*adev
)
776 struct ipu_isys
*isys
= ipu_bus_get_drvdata(adev
);
777 struct ipu_device
*isp
= adev
->isp
;
778 struct isys_fw_msgs
*fwmsg
, *safe
;
780 dev_info(&adev
->dev
, "removed\n");
781 #ifdef CONFIG_DEBUG_FS
783 debugfs_remove_recursive(isys
->debugfsdir
);
786 list_for_each_entry_safe(fwmsg
, safe
, &isys
->framebuflist
, head
) {
787 dma_free_attrs(&adev
->dev
, sizeof(struct isys_fw_msgs
),
788 fwmsg
, fwmsg
->dma_addr
,
792 list_for_each_entry_safe(fwmsg
, safe
, &isys
->framebuflist_fw
, head
) {
793 dma_free_attrs(&adev
->dev
, sizeof(struct isys_fw_msgs
),
794 fwmsg
, fwmsg
->dma_addr
,
799 isys_iwake_watermark_cleanup(isys
);
801 ipu_trace_uninit(&adev
->dev
);
802 isys_notifier_cleanup(isys
);
803 isys_unregister_devices(isys
);
805 cpu_latency_qos_remove_request(&isys
->pm_qos
);
807 if (!isp
->secure_mode
) {
808 ipu_cpd_free_pkg_dir(adev
, isys
->pkg_dir
,
809 isys
->pkg_dir_dma_addr
,
811 ipu_buttress_unmap_fw_image(adev
, &isys
->fw_sgt
);
812 release_firmware(isys
->fw
);
815 mutex_destroy(&isys
->stream_mutex
);
816 mutex_destroy(&isys
->mutex
);
818 if (isys
->short_packet_source
== IPU_ISYS_SHORT_PACKET_FROM_TUNIT
) {
819 u32 trace_size
= IPU_ISYS_SHORT_PACKET_TRACE_BUFFER_SIZE
;
821 dma_free_coherent(&adev
->dev
, trace_size
,
822 isys
->short_packet_trace_buffer
,
823 isys
->short_packet_trace_buffer_dma_addr
);
827 #ifdef CONFIG_DEBUG_FS
828 static int ipu_isys_icache_prefetch_get(void *data
, u64
*val
)
830 struct ipu_isys
*isys
= data
;
832 *val
= isys
->icache_prefetch
;
836 static int ipu_isys_icache_prefetch_set(void *data
, u64 val
)
838 struct ipu_isys
*isys
= data
;
843 isys
->icache_prefetch
= val
;
848 static int isys_iwake_control_get(void *data
, u64
*val
)
850 struct ipu_isys
*isys
= data
;
851 struct isys_iwake_watermark
*iwake_watermark
= isys
->iwake_watermark
;
853 mutex_lock(&iwake_watermark
->mutex
);
854 *val
= isys
->iwake_watermark
->force_iwake_disable
;
855 mutex_unlock(&iwake_watermark
->mutex
);
859 static int isys_iwake_control_set(void *data
, u64 val
)
861 struct ipu_isys
*isys
= data
;
862 struct isys_iwake_watermark
*iwake_watermark
;
866 /* If stream is open, refuse to set iwake */
867 if (isys
->stream_opened
)
870 iwake_watermark
= isys
->iwake_watermark
;
871 mutex_lock(&iwake_watermark
->mutex
);
872 isys
->iwake_watermark
->force_iwake_disable
= !!val
;
873 mutex_unlock(&iwake_watermark
->mutex
);
877 DEFINE_SIMPLE_ATTRIBUTE(isys_icache_prefetch_fops
,
878 ipu_isys_icache_prefetch_get
,
879 ipu_isys_icache_prefetch_set
, "%llu\n");
881 DEFINE_SIMPLE_ATTRIBUTE(isys_iwake_control_fops
,
882 isys_iwake_control_get
,
883 isys_iwake_control_set
, "%llu\n");
885 static int ipu_isys_init_debugfs(struct ipu_isys
*isys
)
893 dir
= debugfs_create_dir("isys", isys
->adev
->isp
->ipu_dir
);
897 file
= debugfs_create_file("icache_prefetch", 0600,
898 dir
, isys
, &isys_icache_prefetch_fops
);
902 file
= debugfs_create_file("iwake_disable", 0600,
903 dir
, isys
, &isys_iwake_control_fops
);
907 isys
->debugfsdir
= dir
;
910 ret
= ipu_isys_gpc_init_debugfs(isys
);
917 debugfs_remove_recursive(dir
);
922 static int alloc_fw_msg_bufs(struct ipu_isys
*isys
, int amount
)
925 struct isys_fw_msgs
*addr
;
929 for (i
= 0; i
< amount
; i
++) {
930 addr
= dma_alloc_attrs(&isys
->adev
->dev
,
931 sizeof(struct isys_fw_msgs
),
932 &dma_addr
, GFP_KERNEL
,
936 addr
->dma_addr
= dma_addr
;
938 spin_lock_irqsave(&isys
->listlock
, flags
);
939 list_add(&addr
->head
, &isys
->framebuflist
);
940 spin_unlock_irqrestore(&isys
->listlock
, flags
);
944 spin_lock_irqsave(&isys
->listlock
, flags
);
945 while (!list_empty(&isys
->framebuflist
)) {
946 addr
= list_first_entry(&isys
->framebuflist
,
947 struct isys_fw_msgs
, head
);
948 list_del(&addr
->head
);
949 spin_unlock_irqrestore(&isys
->listlock
, flags
);
950 dma_free_attrs(&isys
->adev
->dev
,
951 sizeof(struct isys_fw_msgs
),
952 addr
, addr
->dma_addr
,
954 spin_lock_irqsave(&isys
->listlock
, flags
);
956 spin_unlock_irqrestore(&isys
->listlock
, flags
);
960 struct isys_fw_msgs
*ipu_get_fw_msg_buf(struct ipu_isys_pipeline
*ip
)
962 struct ipu_isys_video
*pipe_av
=
963 container_of(ip
, struct ipu_isys_video
, ip
);
964 struct ipu_isys
*isys
;
965 struct isys_fw_msgs
*msg
;
968 isys
= pipe_av
->isys
;
970 spin_lock_irqsave(&isys
->listlock
, flags
);
971 if (list_empty(&isys
->framebuflist
)) {
972 spin_unlock_irqrestore(&isys
->listlock
, flags
);
973 dev_dbg(&isys
->adev
->dev
, "Frame list empty - Allocate more");
975 alloc_fw_msg_bufs(isys
, 5);
977 spin_lock_irqsave(&isys
->listlock
, flags
);
978 if (list_empty(&isys
->framebuflist
)) {
979 spin_unlock_irqrestore(&isys
->listlock
, flags
);
980 dev_err(&isys
->adev
->dev
, "Frame list empty");
984 msg
= list_last_entry(&isys
->framebuflist
, struct isys_fw_msgs
, head
);
985 list_move(&msg
->head
, &isys
->framebuflist_fw
);
986 spin_unlock_irqrestore(&isys
->listlock
, flags
);
987 memset(&msg
->fw_msg
, 0, sizeof(msg
->fw_msg
));
992 void ipu_cleanup_fw_msg_bufs(struct ipu_isys
*isys
)
994 struct isys_fw_msgs
*fwmsg
, *fwmsg0
;
997 spin_lock_irqsave(&isys
->listlock
, flags
);
998 list_for_each_entry_safe(fwmsg
, fwmsg0
, &isys
->framebuflist_fw
, head
)
999 list_move(&fwmsg
->head
, &isys
->framebuflist
);
1000 spin_unlock_irqrestore(&isys
->listlock
, flags
);
1003 void ipu_put_fw_mgs_buf(struct ipu_isys
*isys
, u64 data
)
1005 struct isys_fw_msgs
*msg
;
1006 unsigned long flags
;
1007 u64
*ptr
= (u64
*)(unsigned long)data
;
1012 spin_lock_irqsave(&isys
->listlock
, flags
);
1013 msg
= container_of(ptr
, struct isys_fw_msgs
, fw_msg
.dummy
);
1014 list_move(&msg
->head
, &isys
->framebuflist
);
1015 spin_unlock_irqrestore(&isys
->listlock
, flags
);
1017 EXPORT_SYMBOL_GPL(ipu_put_fw_mgs_buf
);
1019 static int isys_probe(struct ipu_bus_device
*adev
)
1021 struct ipu_isys
*isys
;
1022 struct ipu_device
*isp
= adev
->isp
;
1023 const struct firmware
*fw
;
1026 isys
= devm_kzalloc(&adev
->dev
, sizeof(*isys
), GFP_KERNEL
);
1030 rval
= ipu_mmu_hw_init(adev
->mmu
);
1034 /* By default, short packet is captured from T-Unit. */
1035 isys
->short_packet_source
= IPU_ISYS_SHORT_PACKET_FROM_RECEIVER
;
1037 isys
->pdata
= adev
->pdata
;
1039 /* initial streamID for different sensor types */
1040 if (ipu_ver
== IPU_VER_6
|| ipu_ver
== IPU_VER_6EP
) {
1041 isys
->sensor_info
.vc1_data_start
=
1042 IPU6_FW_ISYS_VC1_SENSOR_DATA_START
;
1043 isys
->sensor_info
.vc1_data_end
=
1044 IPU6_FW_ISYS_VC1_SENSOR_DATA_END
;
1045 isys
->sensor_info
.vc0_data_start
=
1046 IPU6_FW_ISYS_VC0_SENSOR_DATA_START
;
1047 isys
->sensor_info
.vc0_data_end
=
1048 IPU6_FW_ISYS_VC0_SENSOR_DATA_END
;
1049 isys
->sensor_info
.vc1_pdaf_start
=
1050 IPU6_FW_ISYS_VC1_SENSOR_PDAF_START
;
1051 isys
->sensor_info
.vc1_pdaf_end
=
1052 IPU6_FW_ISYS_VC1_SENSOR_PDAF_END
;
1053 isys
->sensor_info
.sensor_metadata
=
1054 IPU6_FW_ISYS_SENSOR_METADATA
;
1056 isys
->sensor_types
[IPU_FW_ISYS_VC1_SENSOR_DATA
] =
1057 IPU6_FW_ISYS_VC1_SENSOR_DATA_START
;
1058 isys
->sensor_types
[IPU_FW_ISYS_VC1_SENSOR_PDAF
] =
1059 IPU6_FW_ISYS_VC1_SENSOR_PDAF_START
;
1060 isys
->sensor_types
[IPU_FW_ISYS_VC0_SENSOR_DATA
] =
1061 IPU6_FW_ISYS_VC0_SENSOR_DATA_START
;
1062 } else if (ipu_ver
== IPU_VER_6SE
) {
1063 isys
->sensor_info
.vc1_data_start
=
1064 IPU6SE_FW_ISYS_VC1_SENSOR_DATA_START
;
1065 isys
->sensor_info
.vc1_data_end
=
1066 IPU6SE_FW_ISYS_VC1_SENSOR_DATA_END
;
1067 isys
->sensor_info
.vc0_data_start
=
1068 IPU6SE_FW_ISYS_VC0_SENSOR_DATA_START
;
1069 isys
->sensor_info
.vc0_data_end
=
1070 IPU6SE_FW_ISYS_VC0_SENSOR_DATA_END
;
1071 isys
->sensor_info
.vc1_pdaf_start
=
1072 IPU6SE_FW_ISYS_VC1_SENSOR_PDAF_START
;
1073 isys
->sensor_info
.vc1_pdaf_end
=
1074 IPU6SE_FW_ISYS_VC1_SENSOR_PDAF_END
;
1075 isys
->sensor_info
.sensor_metadata
=
1076 IPU6SE_FW_ISYS_SENSOR_METADATA
;
1078 isys
->sensor_types
[IPU_FW_ISYS_VC1_SENSOR_DATA
] =
1079 IPU6SE_FW_ISYS_VC1_SENSOR_DATA_START
;
1080 isys
->sensor_types
[IPU_FW_ISYS_VC1_SENSOR_PDAF
] =
1081 IPU6SE_FW_ISYS_VC1_SENSOR_PDAF_START
;
1082 isys
->sensor_types
[IPU_FW_ISYS_VC0_SENSOR_DATA
] =
1083 IPU6SE_FW_ISYS_VC0_SENSOR_DATA_START
;
1086 INIT_LIST_HEAD(&isys
->requests
);
1088 spin_lock_init(&isys
->lock
);
1089 spin_lock_init(&isys
->power_lock
);
1092 mutex_init(&isys
->mutex
);
1093 mutex_init(&isys
->stream_mutex
);
1094 mutex_init(&isys
->lib_mutex
);
1096 spin_lock_init(&isys
->listlock
);
1097 INIT_LIST_HEAD(&isys
->framebuflist
);
1098 INIT_LIST_HEAD(&isys
->framebuflist_fw
);
1100 dev_dbg(&adev
->dev
, "isys probe %p %p\n", adev
, &adev
->dev
);
1101 ipu_bus_set_drvdata(adev
, isys
);
1103 isys
->line_align
= IPU_ISYS_2600_MEM_LINE_ALIGN
;
1104 isys
->icache_prefetch
= 0;
1107 isys_setup_hw(isys
);
1110 if (!isp
->secure_mode
) {
1112 rval
= ipu_buttress_map_fw_image(adev
, fw
, &isys
->fw_sgt
);
1114 goto release_firmware
;
1117 ipu_cpd_create_pkg_dir(adev
, isp
->cpd_fw
->data
,
1118 sg_dma_address(isys
->fw_sgt
.sgl
),
1119 &isys
->pkg_dir_dma_addr
,
1120 &isys
->pkg_dir_size
);
1121 if (!isys
->pkg_dir
) {
1123 goto remove_shared_buffer
;
1127 #ifdef CONFIG_DEBUG_FS
1128 /* Debug fs failure is not fatal. */
1129 ipu_isys_init_debugfs(isys
);
1132 ipu_trace_init(adev
->isp
, isys
->pdata
->base
, &adev
->dev
,
1135 cpu_latency_qos_add_request(&isys
->pm_qos
, PM_QOS_DEFAULT_VALUE
);
1136 alloc_fw_msg_bufs(isys
, 20);
1138 rval
= isys_register_devices(isys
);
1140 goto out_remove_pkg_dir_shared_buffer
;
1141 rval
= isys_iwake_watermark_init(isys
);
1143 goto out_unregister_devices
;
1145 ipu_mmu_hw_cleanup(adev
->mmu
);
1149 out_unregister_devices
:
1150 isys_iwake_watermark_cleanup(isys
);
1151 isys_unregister_devices(isys
);
1152 out_remove_pkg_dir_shared_buffer
:
1153 if (!isp
->secure_mode
)
1154 ipu_cpd_free_pkg_dir(adev
, isys
->pkg_dir
,
1155 isys
->pkg_dir_dma_addr
,
1156 isys
->pkg_dir_size
);
1157 remove_shared_buffer
:
1158 if (!isp
->secure_mode
)
1159 ipu_buttress_unmap_fw_image(adev
, &isys
->fw_sgt
);
1161 if (!isp
->secure_mode
)
1162 release_firmware(isys
->fw
);
1163 ipu_trace_uninit(&adev
->dev
);
1165 mutex_destroy(&isys
->mutex
);
1166 mutex_destroy(&isys
->stream_mutex
);
1168 if (isys
->short_packet_source
== IPU_ISYS_SHORT_PACKET_FROM_TUNIT
)
1169 mutex_destroy(&isys
->short_packet_tracing_mutex
);
1171 ipu_mmu_hw_cleanup(adev
->mmu
);
1182 static const struct fwmsg fw_msg
[] = {
1183 {IPU_FW_ISYS_RESP_TYPE_STREAM_OPEN_DONE
, "STREAM_OPEN_DONE", 0},
1184 {IPU_FW_ISYS_RESP_TYPE_STREAM_CLOSE_ACK
, "STREAM_CLOSE_ACK", 0},
1185 {IPU_FW_ISYS_RESP_TYPE_STREAM_START_ACK
, "STREAM_START_ACK", 0},
1186 {IPU_FW_ISYS_RESP_TYPE_STREAM_START_AND_CAPTURE_ACK
,
1187 "STREAM_START_AND_CAPTURE_ACK", 0},
1188 {IPU_FW_ISYS_RESP_TYPE_STREAM_STOP_ACK
, "STREAM_STOP_ACK", 0},
1189 {IPU_FW_ISYS_RESP_TYPE_STREAM_FLUSH_ACK
, "STREAM_FLUSH_ACK", 0},
1190 {IPU_FW_ISYS_RESP_TYPE_PIN_DATA_READY
, "PIN_DATA_READY", 1},
1191 {IPU_FW_ISYS_RESP_TYPE_STREAM_CAPTURE_ACK
, "STREAM_CAPTURE_ACK", 0},
1192 {IPU_FW_ISYS_RESP_TYPE_STREAM_START_AND_CAPTURE_DONE
,
1193 "STREAM_START_AND_CAPTURE_DONE", 1},
1194 {IPU_FW_ISYS_RESP_TYPE_STREAM_CAPTURE_DONE
, "STREAM_CAPTURE_DONE", 1},
1195 {IPU_FW_ISYS_RESP_TYPE_FRAME_SOF
, "FRAME_SOF", 1},
1196 {IPU_FW_ISYS_RESP_TYPE_FRAME_EOF
, "FRAME_EOF", 1},
1197 {IPU_FW_ISYS_RESP_TYPE_STATS_DATA_READY
, "STATS_READY", 1},
1198 {-1, "UNKNOWN MESSAGE", 0},
1201 static int resp_type_to_index(int type
)
1205 for (i
= 0; i
< ARRAY_SIZE(fw_msg
); i
++)
1206 if (fw_msg
[i
].type
== type
)
/*
 * isys_isr_one() - fetch and dispatch one response message from the input
 * system firmware receive queue (IPU_BASE_MSG_RECV_QUEUES).
 *
 * Pops a response via ipu_fw_isys_get_resp(), logs it, validates the stream
 * handle, routes the message to the owning pipeline (completions, buffer
 * done handling, SOF/EOF events), and finally returns the queue slot with
 * ipu_fw_isys_put_resp().
 *
 * NOTE(review): mangled extraction — braces, some declarations, `break`
 * statements, the dev_dbg()/dev_err() call heads for several messages, and
 * the `goto`/return paths are missing from this view.  Code tokens are left
 * byte-identical; only comments were added.
 */
1212 int isys_isr_one(struct ipu_bus_device
*adev
)
1214 struct ipu_isys
*isys
= ipu_bus_get_drvdata(adev
);
1215 struct ipu_fw_isys_resp_info_abi resp_data
;
1216 struct ipu_fw_isys_resp_info_abi
*resp
;
1217 struct ipu_isys_pipeline
*pipe
;
/* Pop the next firmware response; resp_data is the local receive buffer. */
1224 resp
= ipu_fw_isys_get_resp(isys
->fwcom
, IPU_BASE_MSG_RECV_QUEUES
,
/* Assemble the 64-bit timestamp from the two 32-bit halves. */
1229 ts
= (u64
)resp
->timestamp
[1] << 32 | resp
->timestamp
[0];
/*
 * Log the response.  Three formats: stream-in-suspension error, any other
 * firmware error, or the normal (no-error) case.  The timestamp is only
 * printed for message types whose fw_msg[] entry has valid_ts set.
 */
1231 if (resp
->error_info
.error
== IPU_FW_ISYS_ERROR_STREAM_IN_SUSPENSION
)
1232 /* Suspension is kind of special case: not enough buffers */
1234 "hostlib: error resp %02d %s, stream %u, error SUSPENSION, details %d, timestamp 0x%16.16llx, pin %d\n",
1236 fw_msg
[resp_type_to_index(resp
->type
)].msg
,
1237 resp
->stream_handle
,
1238 resp
->error_info
.error_details
,
1239 fw_msg
[resp_type_to_index(resp
->type
)].valid_ts
?
1240 ts
: 0, resp
->pin_id
);
1241 else if (resp
->error_info
.error
)
1243 "hostlib: error resp %02d %s, stream %u, error %d, details %d, timestamp 0x%16.16llx, pin %d\n",
1245 fw_msg
[resp_type_to_index(resp
->type
)].msg
,
1246 resp
->stream_handle
,
1247 resp
->error_info
.error
, resp
->error_info
.error_details
,
1248 fw_msg
[resp_type_to_index(resp
->type
)].valid_ts
?
1249 ts
: 0, resp
->pin_id
);
/* Normal (no-error) response trace. */
1252 "hostlib: resp %02d %s, stream %u, timestamp 0x%16.16llx, pin %d\n",
1254 fw_msg
[resp_type_to_index(resp
->type
)].msg
,
1255 resp
->stream_handle
,
1256 fw_msg
[resp_type_to_index(resp
->type
)].valid_ts
?
1257 ts
: 0, resp
->pin_id
);
/* Validate the handle before indexing isys->pipes[] with it. */
1259 if (resp
->stream_handle
>= IPU_ISYS_MAX_STREAMS
) {
1260 dev_err(&adev
->dev
, "bad stream handle %u\n",
1261 resp
->stream_handle
);
/* Look up the pipeline owning this stream (NULL check appears dropped
 * from this extraction — presumably follows here; verify upstream). */
1265 pipe
= isys
->pipes
[resp
->stream_handle
];
1267 dev_err(&adev
->dev
, "no pipeline for stream %u\n",
1268 resp
->stream_handle
);
/* Latch the most recent firmware error on the pipeline. */
1271 pipe
->error
= resp
->error_info
.error
;
/* Dispatch on the firmware response type. */
1273 switch (resp
->type
) {
1274 case IPU_FW_ISYS_RESP_TYPE_STREAM_OPEN_DONE
:
/* Return the firmware message buffer, then wake the waiter. */
1275 ipu_put_fw_mgs_buf(ipu_bus_get_drvdata(adev
), resp
->buf_id
);
1276 complete(&pipe
->stream_open_completion
);
1278 case IPU_FW_ISYS_RESP_TYPE_STREAM_CLOSE_ACK
:
1279 complete(&pipe
->stream_close_completion
);
1281 case IPU_FW_ISYS_RESP_TYPE_STREAM_START_ACK
:
1282 complete(&pipe
->stream_start_completion
);
1284 case IPU_FW_ISYS_RESP_TYPE_STREAM_START_AND_CAPTURE_ACK
:
1285 ipu_put_fw_mgs_buf(ipu_bus_get_drvdata(adev
), resp
->buf_id
);
1286 complete(&pipe
->stream_start_completion
);
1288 case IPU_FW_ISYS_RESP_TYPE_STREAM_STOP_ACK
:
1289 complete(&pipe
->stream_stop_completion
);
/* Flush ack completes the same waiter as stop ack. */
1291 case IPU_FW_ISYS_RESP_TYPE_STREAM_FLUSH_ACK
:
1292 complete(&pipe
->stream_stop_completion
);
1294 case IPU_FW_ISYS_RESP_TYPE_PIN_DATA_READY
:
/* Hand the ready frame to the pin's registered callback, if any. */
1295 if (resp
->pin_id
< IPU_ISYS_OUTPUT_PINS
&&
1296 pipe
->output_pins
[resp
->pin_id
].pin_ready
)
1297 pipe
->output_pins
[resp
->pin_id
].pin_ready(pipe
, resp
);
1300 "%d:No data pin ready handler for pin id %d\n",
1301 resp
->stream_handle
, resp
->pin_id
);
/* Update/report CSI-2 receiver error counters for this pipeline. */
1303 ipu_isys_csi2_error(pipe
->csi2
);
1306 case IPU_FW_ISYS_RESP_TYPE_STREAM_CAPTURE_ACK
:
1308 case IPU_FW_ISYS_RESP_TYPE_STREAM_START_AND_CAPTURE_DONE
:
1309 case IPU_FW_ISYS_RESP_TYPE_STREAM_CAPTURE_DONE
:
/* Interlaced capture: resolve the field for buffers pending on the
 * short-packet queue and complete them. */
1310 if (pipe
->interlaced
) {
1311 struct ipu_isys_buffer
*ib
, *ib_safe
;
1312 struct list_head list
;
1313 unsigned long flags
;
1314 unsigned int *ts
= resp
->timestamp
;
1316 if (pipe
->isys
->short_packet_source
==
1317 IPU_ISYS_SHORT_PACKET_FROM_TUNIT
)
1319 ipu_isys_csi2_get_current_field(pipe
, ts
);
1322 * Move the pending buffers to a local temp list.
1323 * Then we do not need to handle the lock during
1326 spin_lock_irqsave(&pipe
->short_packet_queue_lock
,
1328 list_cut_position(&list
,
1329 &pipe
->pending_interlaced_bufs
,
1330 pipe
->pending_interlaced_bufs
.prev
);
1331 spin_unlock_irqrestore(&pipe
->short_packet_queue_lock
,
/* Complete each detached buffer with the resolved field. */
1334 list_for_each_entry_safe(ib
, ib_safe
, &list
, head
) {
1335 struct vb2_buffer
*vb
;
1337 vb
= ipu_isys_buffer_to_vb2_buffer(ib
);
1338 to_vb2_v4l2_buffer(vb
)->field
= pipe
->cur_field
;
1339 list_del(&ib
->head
);
1341 ipu_isys_queue_buf_done(ib
);
/* Invoke every registered capture-done callback. */
1344 for (i
= 0; i
< IPU_NUM_CAPTURE_DONE
; i
++)
1345 if (pipe
->capture_done
[i
])
1346 pipe
->capture_done
[i
] (pipe
, resp
);
1349 case IPU_FW_ISYS_RESP_TYPE_FRAME_SOF
:
/* Queue a V4L2 start-of-frame event to userspace. */
1351 ipu_isys_csi2_sof_event(pipe
->csi2
);
1353 #ifdef IPU_TPG_FRAME_SYNC
1355 ipu_isys_tpg_sof_event(pipe
->tpg
);
/* Record sequence/timestamp in the SOF ring for later EOF matching;
 * atomic_read(&pipe->sequence) - 1 is the just-signalled frame. */
1357 pipe
->seq
[pipe
->seq_index
].sequence
=
1358 atomic_read(&pipe
->sequence
) - 1;
1359 pipe
->seq
[pipe
->seq_index
].timestamp
= ts
;
1361 "sof: handle %d: (index %u), timestamp 0x%16.16llx\n",
1362 resp
->stream_handle
,
1363 pipe
->seq
[pipe
->seq_index
].sequence
, ts
);
/* Advance the ring index, wrapping at IPU_ISYS_MAX_PARALLEL_SOF. */
1364 pipe
->seq_index
= (pipe
->seq_index
+ 1)
1365 % IPU_ISYS_MAX_PARALLEL_SOF
;
1367 case IPU_FW_ISYS_RESP_TYPE_FRAME_EOF
:
/* Queue a V4L2 end-of-frame event to userspace. */
1369 ipu_isys_csi2_eof_event(pipe
->csi2
);
1371 #ifdef IPU_TPG_FRAME_SYNC
1373 ipu_isys_tpg_eof_event(pipe
->tpg
);
1377 "eof: handle %d: (index %u), timestamp 0x%16.16llx\n",
1378 resp
->stream_handle
,
1379 pipe
->seq
[pipe
->seq_index
].sequence
, ts
);
1381 case IPU_FW_ISYS_RESP_TYPE_STATS_DATA_READY
:
/* Unknown/unhandled response type: log and fall through to cleanup. */
1384 dev_err(&adev
->dev
, "%d:unknown response type %u\n",
1385 resp
->stream_handle
, resp
->type
);
/* Return the response slot to the firmware receive queue. */
1390 ipu_fw_isys_put_resp(isys
->fwcom
, IPU_BASE_MSG_RECV_QUEUES
);
/*
 * isys_isr_poll() - polled interrupt service entry for the IS device.
 *
 * Bails out with a debug message when the device is not configured yet,
 * otherwise services the interrupt under isys->mutex.
 *
 * NOTE(review): mangled extraction — the guard condition before the
 * dev_dbg(), the early return, and the actual ISR call between
 * mutex_lock()/mutex_unlock() are missing from this view; verify against
 * the full source.  Visible tokens left untouched.
 */
1394 static void isys_isr_poll(struct ipu_bus_device
*adev
)
1396 struct ipu_isys
*isys
= ipu_bus_get_drvdata(adev
);
/* Device not ready: just note the spurious interrupt and bail. */
1399 dev_dbg(&isys
->adev
->dev
,
1400 "got interrupt but device not configured yet\n");
/* Serialize interrupt handling against stream setup/teardown. */
1404 mutex_lock(&isys
->mutex
);
1406 mutex_unlock(&isys
->mutex
);
/*
 * ipu_isys_isr_run() - kthread body that polls the IS device for firmware
 * responses instead of relying on a hardware interrupt line.
 *
 * Sleeps 500-1000 us per iteration and only polls while at least one
 * stream is open; the loop ends when kthread_should_stop() turns true.
 *
 * NOTE(review): the function's closing return statement is missing from
 * this mangled view.  Visible tokens left untouched.
 */
1409 int ipu_isys_isr_run(void *ptr
)
1411 struct ipu_isys
*isys
= ptr
;
1413 while (!kthread_should_stop()) {
/* Throttle the poll loop; exact period is not latency critical. */
1414 usleep_range(500, 1000);
1415 if (isys
->stream_opened
)
1416 isys_isr_poll(isys
->adev
);
/*
 * IPU bus driver descriptor binding the IS device name to its probe and
 * remove callbacks, registered below via module_ipu_bus_driver().
 *
 * NOTE(review): some initializer lines (e.g. the nested driver-core struct
 * framing) are missing from this mangled view.  Visible tokens untouched.
 */
1422 static struct ipu_bus_driver isys_driver
= {
1423 .probe
= isys_probe
,
1424 .remove
= isys_remove
,
1426 .wanted
= IPU_ISYS_NAME
,
1428 .name
= IPU_ISYS_NAME
,
1429 .owner
= THIS_MODULE
,
/* Register the bus driver and generate module init/exit boilerplate. */
1434 module_ipu_bus_driver(isys_driver
);
/*
 * PCI device IDs covered by this driver (IPU6, IPU6SE, IPU6EP); exported
 * through MODULE_DEVICE_TABLE so udev/modprobe can autoload the module.
 */
1436 static const struct pci_device_id ipu_pci_tbl
[] = {
1437 {PCI_DEVICE(PCI_VENDOR_ID_INTEL
, IPU6_PCI_ID
)},
1438 {PCI_DEVICE(PCI_VENDOR_ID_INTEL
, IPU6SE_PCI_ID
)},
1439 {PCI_DEVICE(PCI_VENDOR_ID_INTEL
, IPU6EP_PCI_ID
)},
1442 MODULE_DEVICE_TABLE(pci
, ipu_pci_tbl
);
/* Module metadata: authors, license (GPL, matching the SPDX tag), blurb. */
1444 MODULE_AUTHOR("Sakari Ailus <sakari.ailus@linux.intel.com>");
1445 MODULE_AUTHOR("Samu Onkalo <samu.onkalo@intel.com>");
1446 MODULE_AUTHOR("Jouni Högander <jouni.hogander@intel.com>");
1447 MODULE_AUTHOR("Jouni Ukkonen <jouni.ukkonen@intel.com>");
1448 MODULE_AUTHOR("Jianxu Zheng <jian.xu.zheng@intel.com>");
1449 MODULE_AUTHOR("Tianshu Qiu <tian.shu.qiu@intel.com>");
1450 MODULE_AUTHOR("Renwei Wu <renwei.wu@intel.com>");
1451 MODULE_AUTHOR("Bingbu Cao <bingbu.cao@intel.com>");
1452 MODULE_AUTHOR("Yunliang Ding <yunliang.ding@intel.com>");
1453 MODULE_AUTHOR("Zaikuo Wang <zaikuo.wang@intel.com>");
1454 MODULE_AUTHOR("Leifu Zhao <leifu.zhao@intel.com>");
1455 MODULE_AUTHOR("Xia Wu <xia.wu@intel.com>");
1456 MODULE_AUTHOR("Kun Jiang <kun.jiang@intel.com>");
1457 MODULE_AUTHOR("Yu Xia <yu.y.xia@intel.com>");
1458 MODULE_AUTHOR("Jerry Hu <jerry.w.hu@intel.com>");
1459 MODULE_LICENSE("GPL");
1460 MODULE_DESCRIPTION("Intel ipu input system driver");