/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
 * Copyright (C) 2017 Linaro Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
/* CCI register offsets */
#define CCI_HW_VERSION				0x0
#define CCI_RESET_CMD				0x004
#define CCI_RESET_CMD_MASK			0x0f73f3f7
#define CCI_RESET_CMD_M0_MASK			0x000003f1
#define CCI_RESET_CMD_M1_MASK			0x0003f001
#define CCI_QUEUE_START				0x008
#define CCI_HALT_REQ				0x034
#define CCI_HALT_REQ_I2C_M0_Q0Q1		BIT(0)
#define CCI_HALT_REQ_I2C_M1_Q0Q1		BIT(1)

/* Per-master (m = 0, 1) I2C timing and control registers */
#define CCI_I2C_Mm_SCL_CTL(m)			(0x100 + 0x100 * (m))
#define CCI_I2C_Mm_SDA_CTL_0(m)			(0x104 + 0x100 * (m))
#define CCI_I2C_Mm_SDA_CTL_1(m)			(0x108 + 0x100 * (m))
#define CCI_I2C_Mm_SDA_CTL_2(m)			(0x10c + 0x100 * (m))
#define CCI_I2C_Mm_MISC_CTL(m)			(0x110 + 0x100 * (m))
#define CCI_I2C_Mm_READ_DATA(m)			(0x118 + 0x100 * (m))
#define CCI_I2C_Mm_READ_BUF_LEVEL(m)		(0x11c + 0x100 * (m))

/* Per-master, per-queue (n = 0, 1) command queue registers */
#define CCI_I2C_Mm_Qn_EXEC_WORD_CNT(m, n)	(0x300 + 0x200 * (m) + 0x100 * (n))
#define CCI_I2C_Mm_Qn_CUR_WORD_CNT(m, n)	(0x304 + 0x200 * (m) + 0x100 * (n))
#define CCI_I2C_Mm_Qn_CUR_CMD(m, n)		(0x308 + 0x200 * (m) + 0x100 * (n))
#define CCI_I2C_Mm_Qn_REPORT_STATUS(m, n)	(0x30c + 0x200 * (m) + 0x100 * (n))
#define CCI_I2C_Mm_Qn_LOAD_DATA(m, n)		(0x310 + 0x200 * (m) + 0x100 * (n))

/* Interrupt mask / clear / status registers and bits */
#define CCI_IRQ_GLOBAL_CLEAR_CMD		0xc00
#define CCI_IRQ_MASK_0				0xc04
#define CCI_IRQ_MASK_0_I2C_M0_RD_DONE		BIT(0)
#define CCI_IRQ_MASK_0_I2C_M0_Q0_REPORT		BIT(4)
#define CCI_IRQ_MASK_0_I2C_M0_Q1_REPORT		BIT(8)
#define CCI_IRQ_MASK_0_I2C_M1_RD_DONE		BIT(12)
#define CCI_IRQ_MASK_0_I2C_M1_Q0_REPORT		BIT(16)
#define CCI_IRQ_MASK_0_I2C_M1_Q1_REPORT		BIT(20)
#define CCI_IRQ_MASK_0_RST_DONE_ACK		BIT(24)
#define CCI_IRQ_MASK_0_I2C_M0_Q0Q1_HALT_ACK	BIT(25)
#define CCI_IRQ_MASK_0_I2C_M1_Q0Q1_HALT_ACK	BIT(26)
#define CCI_IRQ_MASK_0_I2C_M0_ERROR		0x18000ee6
#define CCI_IRQ_MASK_0_I2C_M1_ERROR		0x60ee6000
#define CCI_IRQ_CLEAR_0				0xc08
#define CCI_IRQ_STATUS_0			0xc0c
#define CCI_IRQ_STATUS_0_I2C_M0_RD_DONE		BIT(0)
#define CCI_IRQ_STATUS_0_I2C_M0_Q0_REPORT	BIT(4)
#define CCI_IRQ_STATUS_0_I2C_M0_Q1_REPORT	BIT(8)
#define CCI_IRQ_STATUS_0_I2C_M1_RD_DONE		BIT(12)
#define CCI_IRQ_STATUS_0_I2C_M1_Q0_REPORT	BIT(16)
#define CCI_IRQ_STATUS_0_I2C_M1_Q1_REPORT	BIT(20)
#define CCI_IRQ_STATUS_0_RST_DONE_ACK		BIT(24)
#define CCI_IRQ_STATUS_0_I2C_M0_Q0Q1_HALT_ACK	BIT(25)
#define CCI_IRQ_STATUS_0_I2C_M1_Q0Q1_HALT_ACK	BIT(26)
#define CCI_IRQ_STATUS_0_I2C_M0_ERROR		0x18000ee6
#define CCI_IRQ_STATUS_0_I2C_M1_ERROR		0x60ee6000

/* Timeout for queue execution, halt and reset completions */
#define CCI_TIMEOUT_MS				100
78 /* Max number of resources + 1 for a NULL terminator */
82 CCI_I2C_SET_PARAM
= 1,
85 CCI_I2C_WAIT_GPIO_EVENT
,
86 CCI_I2C_TRIG_I2C_EVENT
,
92 CCI_I2C_WRITE_DISABLE_P
,
93 CCI_I2C_READ_DISABLE_P
,
102 enum cci_i2c_queue_t
{
108 char *clock
[CCI_RES_MAX
];
109 u32 clock_rate
[CCI_RES_MAX
];
133 bool complete_pending
;
134 struct completion irq_complete
;
139 struct i2c_adapter adap
;
142 struct clk_bulk_data
*clock
;
145 u16 queue_size
[NUM_QUEUES
];
146 struct cci_master master
[NUM_MASTERS
];
149 static const struct cci_res res_v1_0_8
= {
150 .clock
= { "camss_top_ahb",
160 static const struct cci_res res_v1_4_0
= {
161 .clock
= { "mmss_mmagic_ahb",
173 static const struct hw_params hw_params_v1_0_8
[3] = {
174 { /* I2C_MODE_STANDARD */
186 { /* I2C_MODE_FAST */
200 static const struct hw_params hw_params_v1_4_0
[3] = {
201 { /* I2C_MODE_STANDARD */
213 { /* I2C_MODE_FAST */
225 { /* I2C_MODE_FAST_PLUS */
239 static const u16 queue_0_size_v1_0_8
= 64;
240 static const u16 queue_1_size_v1_0_8
= 16;
242 static const u16 queue_0_size_v1_4_0
= 64;
243 static const u16 queue_1_size_v1_4_0
= 16;
/*
 * cci_clock_set_rate() - Set clock frequency rates
 * @nclocks: Number of clocks
 * @clock: Clock array
 * @clock_freq: Clock frequency rate array
 * @dev: Device used for error logging
 *
 * Return 0 on success or a negative error code otherwise
 */
254 int cci_clock_set_rate(int nclocks
, struct clk_bulk_data
*clock
,
255 u32
*clock_freq
, struct device
*dev
)
259 for (i
= 0; i
< nclocks
; i
++)
264 rate
= clk_round_rate(clock
[i
].clk
, clock_freq
[i
]);
266 dev_err(dev
, "clk round rate failed: %ld\n",
271 ret
= clk_set_rate(clock
[i
].clk
, clock_freq
[i
]);
273 dev_err(dev
, "clk set rate failed: %d\n", ret
);
281 static irqreturn_t
cci_isr(int irq
, void *dev
)
283 struct cci
*cci
= dev
;
287 val
= readl(cci
->base
+ CCI_IRQ_STATUS_0
);
288 writel(val
, cci
->base
+ CCI_IRQ_CLEAR_0
);
289 writel(0x1, cci
->base
+ CCI_IRQ_GLOBAL_CLEAR_CMD
);
291 if (val
& CCI_IRQ_STATUS_0_RST_DONE_ACK
) {
292 if (cci
->master
[0].complete_pending
) {
293 cci
->master
[0].complete_pending
= false;
294 complete(&cci
->master
[0].irq_complete
);
297 if (cci
->master
[1].complete_pending
) {
298 cci
->master
[1].complete_pending
= false;
299 complete(&cci
->master
[1].irq_complete
);
303 if (val
& CCI_IRQ_STATUS_0_I2C_M0_RD_DONE
||
304 val
& CCI_IRQ_STATUS_0_I2C_M0_Q0_REPORT
||
305 val
& CCI_IRQ_STATUS_0_I2C_M0_Q1_REPORT
) {
306 cci
->master
[0].status
= 0;
307 complete(&cci
->master
[0].irq_complete
);
310 if (val
& CCI_IRQ_STATUS_0_I2C_M1_RD_DONE
||
311 val
& CCI_IRQ_STATUS_0_I2C_M1_Q0_REPORT
||
312 val
& CCI_IRQ_STATUS_0_I2C_M1_Q1_REPORT
) {
313 cci
->master
[1].status
= 0;
314 complete(&cci
->master
[1].irq_complete
);
317 if (unlikely(val
& CCI_IRQ_STATUS_0_I2C_M0_Q0Q1_HALT_ACK
)) {
318 cci
->master
[0].complete_pending
= true;
319 reset
= CCI_RESET_CMD_M0_MASK
;
322 if (unlikely(val
& CCI_IRQ_STATUS_0_I2C_M1_Q0Q1_HALT_ACK
)) {
323 cci
->master
[1].complete_pending
= true;
324 reset
= CCI_RESET_CMD_M1_MASK
;
328 writel(reset
, cci
->base
+ CCI_RESET_CMD
);
330 if (unlikely(val
& CCI_IRQ_STATUS_0_I2C_M0_ERROR
)) {
331 dev_err_ratelimited(cci
->dev
, "Master 0 error 0x%08x\n", val
);
332 cci
->master
[0].status
= -EIO
;
333 writel(CCI_HALT_REQ_I2C_M0_Q0Q1
, cci
->base
+ CCI_HALT_REQ
);
336 if (unlikely(val
& CCI_IRQ_STATUS_0_I2C_M1_ERROR
)) {
337 dev_err_ratelimited(cci
->dev
, "Master 1 error 0x%08x\n", val
);
338 cci
->master
[1].status
= -EIO
;
339 writel(CCI_HALT_REQ_I2C_M1_Q0Q1
, cci
->base
+ CCI_HALT_REQ
);
345 static void cci_halt(struct cci
*cci
)
348 u32 val
= CCI_HALT_REQ_I2C_M0_Q0Q1
| CCI_HALT_REQ_I2C_M1_Q0Q1
;
350 cci
->master
[0].complete_pending
= true;
351 writel(val
, cci
->base
+ CCI_HALT_REQ
);
352 time
= wait_for_completion_timeout(
353 &cci
->master
[0].irq_complete
,
354 msecs_to_jiffies(CCI_TIMEOUT_MS
));
356 dev_err(cci
->dev
, "CCI halt timeout\n");
359 static int cci_reset(struct cci
*cci
)
363 cci
->master
[0].complete_pending
= true;
364 writel(CCI_RESET_CMD_MASK
, cci
->base
+ CCI_RESET_CMD
);
365 time
= wait_for_completion_timeout(
366 &cci
->master
[0].irq_complete
,
367 msecs_to_jiffies(CCI_TIMEOUT_MS
));
369 dev_err(cci
->dev
, "CCI reset timeout\n");
376 static int cci_init(struct cci
*cci
, const struct hw_params
*hw
)
378 u32 val
= CCI_IRQ_MASK_0_I2C_M0_RD_DONE
|
379 CCI_IRQ_MASK_0_I2C_M0_Q0_REPORT
|
380 CCI_IRQ_MASK_0_I2C_M0_Q1_REPORT
|
381 CCI_IRQ_MASK_0_I2C_M1_RD_DONE
|
382 CCI_IRQ_MASK_0_I2C_M1_Q0_REPORT
|
383 CCI_IRQ_MASK_0_I2C_M1_Q1_REPORT
|
384 CCI_IRQ_MASK_0_RST_DONE_ACK
|
385 CCI_IRQ_MASK_0_I2C_M0_Q0Q1_HALT_ACK
|
386 CCI_IRQ_MASK_0_I2C_M1_Q0Q1_HALT_ACK
|
387 CCI_IRQ_MASK_0_I2C_M0_ERROR
|
388 CCI_IRQ_MASK_0_I2C_M1_ERROR
;
391 writel(val
, cci
->base
+ CCI_IRQ_MASK_0
);
393 for (i
= 0; i
< NUM_MASTERS
; i
++) {
394 val
= hw
->thigh
<< 16 | hw
->tlow
;
395 writel(val
, cci
->base
+ CCI_I2C_Mm_SCL_CTL(i
));
397 val
= hw
->tsu_sto
<< 16 | hw
->tsu_sta
;
398 writel(val
, cci
->base
+ CCI_I2C_Mm_SDA_CTL_0(i
));
400 val
= hw
->thd_dat
<< 16 | hw
->thd_sta
;
401 writel(val
, cci
->base
+ CCI_I2C_Mm_SDA_CTL_1(i
));
404 writel(val
, cci
->base
+ CCI_I2C_Mm_SDA_CTL_2(i
));
406 val
= hw
->scl_stretch_en
<< 8 | hw
->trdhld
<< 4 | hw
->tsp
;
407 writel(val
, cci
->base
+ CCI_I2C_Mm_MISC_CTL(i
));
413 static int cci_run_queue(struct cci
*cci
, u8 master
, u8 queue
)
419 val
= readl(cci
->base
+ CCI_I2C_Mm_Qn_CUR_WORD_CNT(master
, queue
));
420 writel(val
, cci
->base
+ CCI_I2C_Mm_Qn_EXEC_WORD_CNT(master
, queue
));
422 val
= BIT(master
* 2 + queue
);
423 writel(val
, cci
->base
+ CCI_QUEUE_START
);
425 time
= wait_for_completion_timeout(&cci
->master
[master
].irq_complete
,
426 msecs_to_jiffies(CCI_TIMEOUT_MS
));
428 dev_err(cci
->dev
, "master %d queue %d timeout\n",
436 ret
= cci
->master
[master
].status
;
438 dev_err(cci
->dev
, "master %d queue %d error %d\n",
444 static int cci_validate_queue(struct cci
*cci
, u8 master
, u8 queue
)
449 val
= readl(cci
->base
+ CCI_I2C_Mm_Qn_CUR_WORD_CNT(master
, queue
));
451 if (val
== cci
->queue_size
[queue
])
455 val
= CCI_I2C_REPORT
| BIT(8);
456 writel(val
, cci
->base
+ CCI_I2C_Mm_Qn_LOAD_DATA(master
, queue
));
458 ret
= cci_run_queue(cci
, master
, queue
);
464 static int cci_i2c_read(struct cci
*cci
, u16 addr
, u8
*buf
, u16 len
)
469 u32 words_read
, words_exp
;
475 * Call validate queue to make sure queue is empty before starting.
476 * This is to avoid overflow / underflow of queue.
478 ret
= cci_validate_queue(cci
, master
, queue
);
482 val
= CCI_I2C_SET_PARAM
| ((addr
>> 1) & 0x7f) << 4;
483 writel(val
, cci
->base
+ CCI_I2C_Mm_Qn_LOAD_DATA(master
, queue
));
485 val
= CCI_I2C_READ
| len
<< 4;
486 writel(val
, cci
->base
+ CCI_I2C_Mm_Qn_LOAD_DATA(master
, queue
));
488 ret
= cci_run_queue(cci
, master
, queue
);
492 words_read
= readl(cci
->base
+ CCI_I2C_Mm_READ_BUF_LEVEL(master
));
493 words_exp
= len
/ 4 + 1;
494 if (words_read
!= words_exp
) {
495 dev_err(cci
->dev
, "words read = %d, words expected = %d\n",
496 words_read
, words_exp
);
503 val
= readl(cci
->base
+ CCI_I2C_Mm_READ_DATA(master
));
505 for (i
= 0; i
< 4 && index
< len
; i
++) {
510 buf
[index
++] = (val
>> (i
* 8)) & 0xff;
512 } while (--words_read
);
517 static int cci_i2c_write(struct cci
*cci
, u16 addr
, u8
*buf
, u16 len
)
527 * Call validate queue to make sure queue is empty before starting.
528 * This is to avoid overflow / underflow of queue.
530 ret
= cci_validate_queue(cci
, master
, queue
);
534 val
= CCI_I2C_SET_PARAM
| ((addr
>> 1) & 0x7f) << 4;
535 writel(val
, cci
->base
+ CCI_I2C_Mm_Qn_LOAD_DATA(master
, queue
));
538 load
[i
++] = CCI_I2C_WRITE
| len
<< 4;
540 for (j
= 0; j
< len
; j
++)
543 for (j
= 0; j
< i
; j
+= 4) {
545 val
|= load
[j
+ 1] << 8;
546 val
|= load
[j
+ 2] << 16;
547 val
|= load
[j
+ 3] << 24;
548 writel(val
, cci
->base
+ CCI_I2C_Mm_Qn_LOAD_DATA(master
, queue
));
551 val
= CCI_I2C_REPORT
| BIT(8);
552 writel(val
, cci
->base
+ CCI_I2C_Mm_Qn_LOAD_DATA(master
, queue
));
554 return cci_run_queue(cci
, master
, queue
);
557 static int cci_xfer(struct i2c_adapter
*adap
, struct i2c_msg msgs
[], int num
)
559 struct cci
*cci
= i2c_get_adapdata(adap
);
563 for (i
= 0; i
< num
; i
++) {
564 if (msgs
[i
].flags
& I2C_M_RD
)
565 ret
= cci_i2c_read(cci
, msgs
[i
].addr
, msgs
[i
].buf
,
568 ret
= cci_i2c_write(cci
, msgs
[i
].addr
, msgs
[i
].buf
,
572 dev_err(cci
->dev
, "cci i2c xfer error %d", ret
);
583 static u32
cci_func(struct i2c_adapter
*adap
)
588 static const struct i2c_algorithm cci_algo
= {
589 .master_xfer
= cci_xfer
,
590 .functionality
= cci_func
,
593 static const struct i2c_adapter_quirks cci_quirks_v1_0_8
= {
598 static const struct i2c_adapter_quirks cci_quirks_v1_4_0
= {
/*
 * cci_probe() - Probe CCI platform device
 * @pdev: Pointer to CCI platform device
 *
 * Return 0 on success or a negative error code on failure
 */
609 static int cci_probe(struct platform_device
*pdev
)
611 struct device
*dev
= &pdev
->dev
;
612 const struct cci_res
*res
;
613 const struct hw_params
*hw
;
621 cci
= devm_kzalloc(dev
, sizeof(*cci
), GFP_KERNEL
);
626 platform_set_drvdata(pdev
, cci
);
628 if (of_device_is_compatible(dev
->of_node
, "qcom,cci-v1.0.8")) {
630 hw
= hw_params_v1_0_8
;
631 cci
->queue_size
[0] = queue_0_size_v1_0_8
;
632 cci
->queue_size
[1] = queue_1_size_v1_0_8
;
633 cci
->adap
.quirks
= &cci_quirks_v1_0_8
;
634 } else if (of_device_is_compatible(dev
->of_node
, "qcom,cci-v1.4.0")) {
636 hw
= hw_params_v1_4_0
;
637 cci
->queue_size
[0] = queue_0_size_v1_4_0
;
638 cci
->queue_size
[1] = queue_1_size_v1_4_0
;
639 cci
->adap
.quirks
= &cci_quirks_v1_4_0
;
644 cci
->adap
.algo
= &cci_algo
;
645 cci
->adap
.dev
.parent
= cci
->dev
;
646 cci
->adap
.dev
.of_node
= dev
->of_node
;
647 i2c_set_adapdata(&cci
->adap
, cci
);
649 strlcpy(cci
->adap
.name
, "Qualcomm Camera Control Interface",
650 sizeof(cci
->adap
.name
));
652 mode
= I2C_MODE_STANDARD
;
653 ret
= of_property_read_u32(pdev
->dev
.of_node
, "clock-frequency", &val
);
656 mode
= I2C_MODE_FAST
;
657 else if (val
== 1000000)
658 mode
= I2C_MODE_FAST_PLUS
;
663 r
= platform_get_resource(pdev
, IORESOURCE_MEM
, 0);
664 cci
->base
= devm_ioremap_resource(dev
, r
);
665 if (IS_ERR(cci
->base
)) {
666 dev_err(dev
, "could not map memory\n");
667 return PTR_ERR(cci
->base
);
672 cci
->irq
= platform_get_irq(pdev
, 0);
674 dev_err(dev
, "missing IRQ\n");
678 ret
= devm_request_irq(dev
, cci
->irq
, cci_isr
,
679 IRQF_TRIGGER_RISING
, dev_name(dev
), cci
);
681 dev_err(dev
, "request_irq failed, ret: %d\n", ret
);
685 disable_irq(cci
->irq
);
690 while (res
->clock
[cci
->nclocks
])
693 cci
->clock
= devm_kzalloc(dev
, cci
->nclocks
*
694 sizeof(*cci
->clock
), GFP_KERNEL
);
698 cci
->clock_freq
= devm_kzalloc(dev
, cci
->nclocks
*
699 sizeof(*cci
->clock_freq
), GFP_KERNEL
);
700 if (!cci
->clock_freq
)
703 for (i
= 0; i
< cci
->nclocks
; i
++) {
704 struct clk_bulk_data
*clock
= &cci
->clock
[i
];
706 clock
->clk
= devm_clk_get(dev
, res
->clock
[i
]);
707 if (IS_ERR(clock
->clk
))
708 return PTR_ERR(clock
->clk
);
710 clock
->id
= res
->clock
[i
];
711 cci
->clock_freq
[i
] = res
->clock_rate
[i
];
714 ret
= cci_clock_set_rate(cci
->nclocks
, cci
->clock
,
715 cci
->clock_freq
, dev
);
719 ret
= clk_bulk_prepare_enable(cci
->nclocks
, cci
->clock
);
723 val
= readl_relaxed(cci
->base
+ CCI_HW_VERSION
);
724 dev_dbg(dev
, "%s: CCI HW version = 0x%08x", __func__
, val
);
726 init_completion(&cci
->master
[0].irq_complete
);
727 init_completion(&cci
->master
[1].irq_complete
);
729 enable_irq(cci
->irq
);
731 ret
= cci_reset(cci
);
735 ret
= cci_init(cci
, &hw
[mode
]);
739 ret
= i2c_add_adapter(&cci
->adap
);
746 clk_bulk_disable_unprepare(cci
->nclocks
, cci
->clock
);
/*
 * cci_remove() - Remove CCI platform device
 * @pdev: Pointer to CCI platform device
 *
 * Return 0 always
 */
757 static int cci_remove(struct platform_device
*pdev
)
759 struct cci
*cci
= platform_get_drvdata(pdev
);
761 disable_irq(cci
->irq
);
762 clk_bulk_disable_unprepare(cci
->nclocks
, cci
->clock
);
764 i2c_del_adapter(&cci
->adap
);
769 static const struct of_device_id cci_dt_match
[] = {
770 { .compatible
= "qcom,cci-v1.0.8" },
771 { .compatible
= "qcom,cci-v1.4.0" },
774 MODULE_DEVICE_TABLE(of
, cci_dt_match
);
776 static struct platform_driver qcom_cci_driver
= {
778 .remove
= cci_remove
,
780 .name
= "i2c-qcom-cci",
781 .of_match_table
= cci_dt_match
,
785 module_platform_driver(qcom_cci_driver
);
787 MODULE_DESCRIPTION("Qualcomm Camera Control Interface driver");
788 MODULE_AUTHOR("Todor Tomov <todor.tomov@linaro.org>");
789 MODULE_LICENSE("GPL v2");