// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018 Synopsys, Inc. and/or its affiliates.
 *
 * Author: Vitor Soares <vitor.soares@synopsys.com>
 */

#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/i3c/master.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/iopoll.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/slab.h>

#define DEVICE_CTRL 0x0
#define DEV_CTRL_ENABLE BIT(31)
#define DEV_CTRL_RESUME BIT(30)
#define DEV_CTRL_HOT_JOIN_NACK BIT(8)
#define DEV_CTRL_I2C_SLAVE_PRESENT BIT(7)

#define DEVICE_ADDR 0x4
#define DEV_ADDR_DYNAMIC_ADDR_VALID BIT(31)
#define DEV_ADDR_DYNAMIC(x) (((x) << 16) & GENMASK(22, 16))

#define HW_CAPABILITY 0x8
#define COMMAND_QUEUE_PORT 0xc
#define COMMAND_PORT_TOC BIT(30)
#define COMMAND_PORT_READ_TRANSFER BIT(28)
#define COMMAND_PORT_SDAP BIT(27)
#define COMMAND_PORT_ROC BIT(26)
#define COMMAND_PORT_SPEED(x) (((x) << 21) & GENMASK(23, 21))
#define COMMAND_PORT_DEV_INDEX(x) (((x) << 16) & GENMASK(20, 16))
#define COMMAND_PORT_CP BIT(15)
#define COMMAND_PORT_CMD(x) (((x) << 7) & GENMASK(14, 7))
#define COMMAND_PORT_TID(x) (((x) << 3) & GENMASK(6, 3))

#define COMMAND_PORT_ARG_DATA_LEN(x) (((x) << 16) & GENMASK(31, 16))
#define COMMAND_PORT_ARG_DATA_LEN_MAX 65536
#define COMMAND_PORT_TRANSFER_ARG 0x01

#define COMMAND_PORT_SDA_DATA_BYTE_3(x) (((x) << 24) & GENMASK(31, 24))
#define COMMAND_PORT_SDA_DATA_BYTE_2(x) (((x) << 16) & GENMASK(23, 16))
#define COMMAND_PORT_SDA_DATA_BYTE_1(x) (((x) << 8) & GENMASK(15, 8))
#define COMMAND_PORT_SDA_BYTE_STRB_3 BIT(5)
#define COMMAND_PORT_SDA_BYTE_STRB_2 BIT(4)
#define COMMAND_PORT_SDA_BYTE_STRB_1 BIT(3)
#define COMMAND_PORT_SHORT_DATA_ARG 0x02

#define COMMAND_PORT_DEV_COUNT(x) (((x) << 21) & GENMASK(25, 21))
#define COMMAND_PORT_ADDR_ASSGN_CMD 0x03

#define RESPONSE_QUEUE_PORT 0x10
#define RESPONSE_PORT_ERR_STATUS(x) (((x) & GENMASK(31, 28)) >> 28)
#define RESPONSE_NO_ERROR 0
#define RESPONSE_ERROR_CRC 1
#define RESPONSE_ERROR_PARITY 2
#define RESPONSE_ERROR_FRAME 3
#define RESPONSE_ERROR_IBA_NACK 4
#define RESPONSE_ERROR_ADDRESS_NACK 5
#define RESPONSE_ERROR_OVER_UNDER_FLOW 6
#define RESPONSE_ERROR_TRANSF_ABORT 8
#define RESPONSE_ERROR_I2C_W_NACK_ERR 9
#define RESPONSE_PORT_TID(x) (((x) & GENMASK(27, 24)) >> 24)
#define RESPONSE_PORT_DATA_LEN(x) ((x) & GENMASK(15, 0))

#define RX_TX_DATA_PORT 0x14
#define IBI_QUEUE_STATUS 0x18
#define QUEUE_THLD_CTRL 0x1c
#define QUEUE_THLD_CTRL_RESP_BUF_MASK GENMASK(15, 8)
#define QUEUE_THLD_CTRL_RESP_BUF(x) (((x) - 1) << 8)

#define DATA_BUFFER_THLD_CTRL 0x20
#define DATA_BUFFER_THLD_CTRL_RX_BUF GENMASK(11, 8)

#define IBI_QUEUE_CTRL 0x24
#define IBI_MR_REQ_REJECT 0x2C
#define IBI_SIR_REQ_REJECT 0x30
#define IBI_REQ_REJECT_ALL GENMASK(31, 0)

#define RESET_CTRL 0x34
#define RESET_CTRL_IBI_QUEUE BIT(5)
#define RESET_CTRL_RX_FIFO BIT(4)
#define RESET_CTRL_TX_FIFO BIT(3)
#define RESET_CTRL_RESP_QUEUE BIT(2)
#define RESET_CTRL_CMD_QUEUE BIT(1)
#define RESET_CTRL_SOFT BIT(0)

#define SLV_EVENT_CTRL 0x38
#define INTR_STATUS 0x3c
#define INTR_STATUS_EN 0x40
#define INTR_SIGNAL_EN 0x44
#define INTR_FORCE 0x48
#define INTR_BUSOWNER_UPDATE_STAT BIT(13)
#define INTR_IBI_UPDATED_STAT BIT(12)
#define INTR_READ_REQ_RECV_STAT BIT(11)
#define INTR_DEFSLV_STAT BIT(10)
#define INTR_TRANSFER_ERR_STAT BIT(9)
#define INTR_DYN_ADDR_ASSGN_STAT BIT(8)
#define INTR_CCC_UPDATED_STAT BIT(6)
#define INTR_TRANSFER_ABORT_STAT BIT(5)
#define INTR_RESP_READY_STAT BIT(4)
#define INTR_CMD_QUEUE_READY_STAT BIT(3)
#define INTR_IBI_THLD_STAT BIT(2)
#define INTR_RX_THLD_STAT BIT(1)
#define INTR_TX_THLD_STAT BIT(0)
#define INTR_ALL (INTR_BUSOWNER_UPDATE_STAT |	\
		  INTR_IBI_UPDATED_STAT |	\
		  INTR_READ_REQ_RECV_STAT |	\
		  INTR_DEFSLV_STAT |		\
		  INTR_TRANSFER_ERR_STAT |	\
		  INTR_DYN_ADDR_ASSGN_STAT |	\
		  INTR_CCC_UPDATED_STAT |	\
		  INTR_TRANSFER_ABORT_STAT |	\
		  INTR_RESP_READY_STAT |	\
		  INTR_CMD_QUEUE_READY_STAT |	\
		  INTR_IBI_THLD_STAT |		\
		  INTR_TX_THLD_STAT |		\
		  INTR_RX_THLD_STAT)

#define INTR_MASTER_MASK (INTR_TRANSFER_ERR_STAT |	\
			  INTR_RESP_READY_STAT)

#define QUEUE_STATUS_LEVEL 0x4c
#define QUEUE_STATUS_IBI_STATUS_CNT(x) (((x) & GENMASK(28, 24)) >> 24)
#define QUEUE_STATUS_IBI_BUF_BLR(x) (((x) & GENMASK(23, 16)) >> 16)
#define QUEUE_STATUS_LEVEL_RESP(x) (((x) & GENMASK(15, 8)) >> 8)
#define QUEUE_STATUS_LEVEL_CMD(x) ((x) & GENMASK(7, 0))

#define DATA_BUFFER_STATUS_LEVEL 0x50
#define DATA_BUFFER_STATUS_LEVEL_TX(x) ((x) & GENMASK(7, 0))

#define PRESENT_STATE 0x54
#define CCC_DEVICE_STATUS 0x58
#define DEVICE_ADDR_TABLE_POINTER 0x5c
#define DEVICE_ADDR_TABLE_DEPTH(x) (((x) & GENMASK(31, 16)) >> 16)
#define DEVICE_ADDR_TABLE_ADDR(x) ((x) & GENMASK(7, 0))

#define DEV_CHAR_TABLE_POINTER 0x60
#define VENDOR_SPECIFIC_REG_POINTER 0x6c
#define SLV_PID_VALUE 0x74
#define SLV_CHAR_CTRL 0x78
#define SLV_MAX_LEN 0x7c
#define MAX_READ_TURNAROUND 0x80
#define MAX_DATA_SPEED 0x84
#define SLV_DEBUG_STATUS 0x88
#define SLV_INTR_REQ 0x8c
#define DEVICE_CTRL_EXTENDED 0xb0
#define SCL_I3C_OD_TIMING 0xb4
#define SCL_I3C_PP_TIMING 0xb8
#define SCL_I3C_TIMING_HCNT(x) (((x) << 16) & GENMASK(23, 16))
#define SCL_I3C_TIMING_LCNT(x) ((x) & GENMASK(7, 0))
#define SCL_I3C_TIMING_CNT_MIN 5

#define SCL_I2C_FM_TIMING 0xbc
#define SCL_I2C_FM_TIMING_HCNT(x) (((x) << 16) & GENMASK(31, 16))
#define SCL_I2C_FM_TIMING_LCNT(x) ((x) & GENMASK(15, 0))

#define SCL_I2C_FMP_TIMING 0xc0
#define SCL_I2C_FMP_TIMING_HCNT(x) (((x) << 16) & GENMASK(23, 16))
#define SCL_I2C_FMP_TIMING_LCNT(x) ((x) & GENMASK(15, 0))

#define SCL_EXT_LCNT_TIMING 0xc8
#define SCL_EXT_LCNT_4(x) (((x) << 24) & GENMASK(31, 24))
#define SCL_EXT_LCNT_3(x) (((x) << 16) & GENMASK(23, 16))
#define SCL_EXT_LCNT_2(x) (((x) << 8) & GENMASK(15, 8))
#define SCL_EXT_LCNT_1(x) ((x) & GENMASK(7, 0))

#define SCL_EXT_TERMN_LCNT_TIMING 0xcc
#define BUS_FREE_TIMING 0xd4
#define BUS_I3C_MST_FREE(x) ((x) & GENMASK(15, 0))

#define BUS_IDLE_TIMING 0xd8
#define I3C_VER_ID 0xe0
#define I3C_VER_TYPE 0xe4
#define EXTENDED_CAPABILITY 0xe8
#define SLAVE_CONFIG 0xec

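/*
 * Device Address Table (DAT) entry layout: each attached device occupies
 * one 32-bit word, located at the offset reported by
 * DEVICE_ADDR_TABLE_POINTER plus 4 * index, holding the device's static
 * address, its dynamic address and a flag marking legacy I2C devices.
 */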
#define DEV_ADDR_TABLE_LEGACY_I2C_DEV BIT(31)
#define DEV_ADDR_TABLE_DYNAMIC_ADDR(x) (((x) << 16) & GENMASK(23, 16))
#define DEV_ADDR_TABLE_STATIC_ADDR(x) ((x) & GENMASK(6, 0))
#define DEV_ADDR_TABLE_LOC(start, idx) ((start) + ((idx) << 2))

#define MAX_DEVS 32

#define I3C_BUS_SDR1_SCL_RATE 8000000
#define I3C_BUS_SDR2_SCL_RATE 6000000
#define I3C_BUS_SDR3_SCL_RATE 4000000
#define I3C_BUS_SDR4_SCL_RATE 2000000
#define I3C_BUS_I2C_FM_TLOW_MIN_NS 1300
#define I3C_BUS_I2C_FMP_TLOW_MIN_NS 500
#define I3C_BUS_THIGH_MAX_NS 41

#define XFER_TIMEOUT (msecs_to_jiffies(1000))

struct dw_i3c_master_caps {
	u8 cmdfifodepth;
	u8 datafifodepth;
};

struct dw_i3c_cmd {
	u32 cmd_lo;
	u32 cmd_hi;
	u16 tx_len;
	const void *tx_buf;
	u16 rx_len;
	void *rx_buf;
	u8 error;
};

struct dw_i3c_xfer {
	struct list_head node;
	struct completion comp;
	int ret;
	unsigned int ncmds;
	struct dw_i3c_cmd cmds[0];
};

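/*
 * Driver-private controller state.  @addrs mirrors the hardware Device
 * Address Table: entry i holds the address programmed at DAT location
 * @datstartaddr + 4 * i, and bit i of @free_pos is set while that slot is
 * unused.  @xferqueue serializes transfers: @xferqueue.cur is the transfer
 * currently pushed to the hardware queues, the others wait on
 * @xferqueue.list.
 */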
struct dw_i3c_master {
	struct i3c_master_controller base;
	u16 maxdevs;
	u16 datstartaddr;
	u32 free_pos;
	struct {
		struct list_head list;
		struct dw_i3c_xfer *cur;
		spinlock_t lock;
	} xferqueue;
	struct dw_i3c_master_caps caps;
	void __iomem *regs;
	struct reset_control *core_rst;
	struct clk *core_clk;
	char version[5];
	char type[5];
	u8 addrs[MAX_DEVS];
};

struct dw_i3c_i2c_dev_data {
	u8 index;
};

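/*
 * Returns 1 when the 7-bit value @p contains an even number of set bits.
 * Folding the two nibbles together (p ^= p >> 4) preserves the overall
 * parity, so the result can be looked up in the 16-bit constant 0x9669,
 * whose bit n is set exactly when n has even parity.  During DAA this bit
 * is placed at bit 7 of the dynamic address written to the DAT, so the
 * address byte ends up with the odd overall parity ENTDAA expects.
 */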
static u8 even_parity(u8 p)
{
	p ^= p >> 4;
	p &= 0xf;

	return (0x9669 >> p) & 1;
}

static bool dw_i3c_master_supports_ccc_cmd(struct i3c_master_controller *m,
					   const struct i3c_ccc_cmd *cmd)
{
	if (cmd->ndests > 1)
		return false;

	switch (cmd->id) {
	case I3C_CCC_ENEC(true):
	case I3C_CCC_ENEC(false):
	case I3C_CCC_DISEC(true):
	case I3C_CCC_DISEC(false):
	case I3C_CCC_ENTAS(0, true):
	case I3C_CCC_ENTAS(0, false):
	case I3C_CCC_RSTDAA(true):
	case I3C_CCC_RSTDAA(false):
	case I3C_CCC_ENTDAA:
	case I3C_CCC_SETMWL(true):
	case I3C_CCC_SETMWL(false):
	case I3C_CCC_SETMRL(true):
	case I3C_CCC_SETMRL(false):
	case I3C_CCC_ENTHDR(0):
	case I3C_CCC_SETDASA:
	case I3C_CCC_SETNEWDA:
	case I3C_CCC_GETMWL:
	case I3C_CCC_GETMRL:
	case I3C_CCC_GETPID:
	case I3C_CCC_GETBCR:
	case I3C_CCC_GETDCR:
	case I3C_CCC_GETSTATUS:
	case I3C_CCC_GETMXDS:
	case I3C_CCC_GETHDRCAP:
		return true;
	default:
		return false;
	}
}

static inline struct dw_i3c_master *
to_dw_i3c_master(struct i3c_master_controller *master)
{
	return container_of(master, struct dw_i3c_master, base);
}

static void dw_i3c_master_disable(struct dw_i3c_master *master)
{
	writel(readl(master->regs + DEVICE_CTRL) & ~DEV_CTRL_ENABLE,
	       master->regs + DEVICE_CTRL);
}

static void dw_i3c_master_enable(struct dw_i3c_master *master)
{
	writel(readl(master->regs + DEVICE_CTRL) | DEV_CTRL_ENABLE,
	       master->regs + DEVICE_CTRL);
}

static int dw_i3c_master_get_addr_pos(struct dw_i3c_master *master, u8 addr)
{
	int pos;

	for (pos = 0; pos < master->maxdevs; pos++) {
		if (addr == master->addrs[pos])
			return pos;
	}

	return -EINVAL;
}

static int dw_i3c_master_get_free_pos(struct dw_i3c_master *master)
{
	if (!(master->free_pos & GENMASK(master->maxdevs - 1, 0)))
		return -ENOSPC;

	return ffs(master->free_pos) - 1;
}

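/*
 * The RX/TX data port is a single 32-bit register, so payloads are moved
 * one word at a time.  A trailing 1-3 bytes are staged in a temporary
 * word: zero-padded on transmit, extracted from the last word on receive.
 */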
static void dw_i3c_master_wr_tx_fifo(struct dw_i3c_master *master,
				     const u8 *bytes, int nbytes)
{
	writesl(master->regs + RX_TX_DATA_PORT, bytes, nbytes / 4);
	if (nbytes & 3) {
		u32 tmp = 0;

		memcpy(&tmp, bytes + (nbytes & ~3), nbytes & 3);
		writesl(master->regs + RX_TX_DATA_PORT, &tmp, 1);
	}
}

static void dw_i3c_master_read_rx_fifo(struct dw_i3c_master *master,
				       u8 *bytes, int nbytes)
{
	readsl(master->regs + RX_TX_DATA_PORT, bytes, nbytes / 4);
	if (nbytes & 3) {
		u32 tmp;

		readsl(master->regs + RX_TX_DATA_PORT, &tmp, 1);
		memcpy(bytes + (nbytes & ~3), &tmp, nbytes & 3);
	}
}

static struct dw_i3c_xfer *
dw_i3c_master_alloc_xfer(struct dw_i3c_master *master, unsigned int ncmds)
{
	struct dw_i3c_xfer *xfer;

	xfer = kzalloc(struct_size(xfer, cmds, ncmds), GFP_KERNEL);
	if (!xfer)
		return NULL;

	INIT_LIST_HEAD(&xfer->node);
	xfer->ncmds = ncmds;
	xfer->ret = -ETIMEDOUT;

	return xfer;
}

static void dw_i3c_master_free_xfer(struct dw_i3c_xfer *xfer)
{
	kfree(xfer);
}

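/*
 * Push the current transfer to the hardware: all TX payloads are written
 * to the data FIFO first, the response-queue threshold is set to the
 * number of commands so one RESP_READY interrupt covers the whole
 * transfer, and each command is then queued as two words, the transfer
 * argument (cmd_hi) followed by the transfer command (cmd_lo).
 * Must be called with xferqueue.lock held.
 */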
static void dw_i3c_master_start_xfer_locked(struct dw_i3c_master *master)
{
	struct dw_i3c_xfer *xfer = master->xferqueue.cur;
	unsigned int i;
	u32 thld_ctrl;

	if (!xfer)
		return;

	for (i = 0; i < xfer->ncmds; i++) {
		struct dw_i3c_cmd *cmd = &xfer->cmds[i];

		dw_i3c_master_wr_tx_fifo(master, cmd->tx_buf, cmd->tx_len);
	}

	thld_ctrl = readl(master->regs + QUEUE_THLD_CTRL);
	thld_ctrl &= ~QUEUE_THLD_CTRL_RESP_BUF_MASK;
	thld_ctrl |= QUEUE_THLD_CTRL_RESP_BUF(xfer->ncmds);
	writel(thld_ctrl, master->regs + QUEUE_THLD_CTRL);

	for (i = 0; i < xfer->ncmds; i++) {
		struct dw_i3c_cmd *cmd = &xfer->cmds[i];

		writel(cmd->cmd_hi, master->regs + COMMAND_QUEUE_PORT);
		writel(cmd->cmd_lo, master->regs + COMMAND_QUEUE_PORT);
	}
}

static void dw_i3c_master_enqueue_xfer(struct dw_i3c_master *master,
				       struct dw_i3c_xfer *xfer)
{
	unsigned long flags;

	init_completion(&xfer->comp);
	spin_lock_irqsave(&master->xferqueue.lock, flags);
	if (master->xferqueue.cur) {
		list_add_tail(&xfer->node, &master->xferqueue.list);
	} else {
		master->xferqueue.cur = xfer;
		dw_i3c_master_start_xfer_locked(master);
	}
	spin_unlock_irqrestore(&master->xferqueue.lock, flags);
}

static void dw_i3c_master_dequeue_xfer_locked(struct dw_i3c_master *master,
					      struct dw_i3c_xfer *xfer)
{
	if (master->xferqueue.cur == xfer) {
		u32 status;

		master->xferqueue.cur = NULL;

		writel(RESET_CTRL_RX_FIFO | RESET_CTRL_TX_FIFO |
		       RESET_CTRL_RESP_QUEUE | RESET_CTRL_CMD_QUEUE,
		       master->regs + RESET_CTRL);

		readl_poll_timeout_atomic(master->regs + RESET_CTRL, status,
					  !status, 10, 1000000);
	} else {
		list_del_init(&xfer->node);
	}
}

static void dw_i3c_master_dequeue_xfer(struct dw_i3c_master *master,
				       struct dw_i3c_xfer *xfer)
{
	unsigned long flags;

	spin_lock_irqsave(&master->xferqueue.lock, flags);
	dw_i3c_master_dequeue_xfer_locked(master, xfer);
	spin_unlock_irqrestore(&master->xferqueue.lock, flags);
}

static void dw_i3c_master_end_xfer_locked(struct dw_i3c_master *master, u32 isr)
{
	struct dw_i3c_xfer *xfer = master->xferqueue.cur;
	int i, ret = 0;
	u32 nresp;

	if (!xfer)
		return;

	nresp = readl(master->regs + QUEUE_STATUS_LEVEL);
	nresp = QUEUE_STATUS_LEVEL_RESP(nresp);

	for (i = 0; i < nresp; i++) {
		struct dw_i3c_cmd *cmd;
		u32 resp;

		resp = readl(master->regs + RESPONSE_QUEUE_PORT);

		cmd = &xfer->cmds[RESPONSE_PORT_TID(resp)];
		cmd->rx_len = RESPONSE_PORT_DATA_LEN(resp);
		cmd->error = RESPONSE_PORT_ERR_STATUS(resp);
		if (cmd->rx_len && !cmd->error)
			dw_i3c_master_read_rx_fifo(master, cmd->rx_buf,
						   cmd->rx_len);
	}

	for (i = 0; i < nresp; i++) {
		switch (xfer->cmds[i].error) {
		case RESPONSE_NO_ERROR:
			break;
		case RESPONSE_ERROR_PARITY:
		case RESPONSE_ERROR_IBA_NACK:
		case RESPONSE_ERROR_TRANSF_ABORT:
		case RESPONSE_ERROR_CRC:
		case RESPONSE_ERROR_FRAME:
			ret = -EIO;
			break;
		case RESPONSE_ERROR_OVER_UNDER_FLOW:
			ret = -ENOSPC;
			break;
		case RESPONSE_ERROR_I2C_W_NACK_ERR:
		case RESPONSE_ERROR_ADDRESS_NACK:
		default:
			ret = -EINVAL;
			break;
		}
	}

	xfer->ret = ret;
	complete(&xfer->comp);

	if (ret < 0) {
		/* xferqueue.lock is already held, use the _locked variant. */
		dw_i3c_master_dequeue_xfer_locked(master, xfer);
		writel(readl(master->regs + DEVICE_CTRL) | DEV_CTRL_RESUME,
		       master->regs + DEVICE_CTRL);
	}

	xfer = list_first_entry_or_null(&master->xferqueue.list,
					struct dw_i3c_xfer,
					node);
	if (xfer)
		list_del_init(&xfer->node);

	master->xferqueue.cur = xfer;
	dw_i3c_master_start_xfer_locked(master);
}

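/*
 * SCL timings are expressed in core clock cycles: core_period is the core
 * clock period rounded up to whole nanoseconds, the push-pull high count
 * is derived from I3C_BUS_THIGH_MAX_NS and the low counts from the target
 * SCL rates, with SCL_I3C_TIMING_CNT_MIN as a floor on the push-pull
 * counts.
 */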
static int dw_i3c_clk_cfg(struct dw_i3c_master *master)
{
	unsigned long core_rate, core_period;
	u32 scl_timing;
	u8 hcnt, lcnt;

	core_rate = clk_get_rate(master->core_clk);
	if (!core_rate)
		return -EINVAL;

	core_period = DIV_ROUND_UP(1000000000, core_rate);

	hcnt = DIV_ROUND_UP(I3C_BUS_THIGH_MAX_NS, core_period) - 1;
	if (hcnt < SCL_I3C_TIMING_CNT_MIN)
		hcnt = SCL_I3C_TIMING_CNT_MIN;

	lcnt = DIV_ROUND_UP(core_rate, I3C_BUS_TYP_I3C_SCL_RATE) - hcnt;
	if (lcnt < SCL_I3C_TIMING_CNT_MIN)
		lcnt = SCL_I3C_TIMING_CNT_MIN;

	scl_timing = SCL_I3C_TIMING_HCNT(hcnt) | SCL_I3C_TIMING_LCNT(lcnt);
	writel(scl_timing, master->regs + SCL_I3C_PP_TIMING);

	if (!(readl(master->regs + DEVICE_CTRL) & DEV_CTRL_I2C_SLAVE_PRESENT))
		writel(BUS_I3C_MST_FREE(lcnt), master->regs + BUS_FREE_TIMING);

	lcnt = DIV_ROUND_UP(I3C_BUS_TLOW_OD_MIN_NS, core_period);
	scl_timing = SCL_I3C_TIMING_HCNT(hcnt) | SCL_I3C_TIMING_LCNT(lcnt);
	writel(scl_timing, master->regs + SCL_I3C_OD_TIMING);

	lcnt = DIV_ROUND_UP(core_rate, I3C_BUS_SDR1_SCL_RATE) - hcnt;
	scl_timing = SCL_EXT_LCNT_1(lcnt);
	lcnt = DIV_ROUND_UP(core_rate, I3C_BUS_SDR2_SCL_RATE) - hcnt;
	scl_timing |= SCL_EXT_LCNT_2(lcnt);
	lcnt = DIV_ROUND_UP(core_rate, I3C_BUS_SDR3_SCL_RATE) - hcnt;
	scl_timing |= SCL_EXT_LCNT_3(lcnt);
	lcnt = DIV_ROUND_UP(core_rate, I3C_BUS_SDR4_SCL_RATE) - hcnt;
	scl_timing |= SCL_EXT_LCNT_4(lcnt);
	writel(scl_timing, master->regs + SCL_EXT_LCNT_TIMING);

	return 0;
}

static int dw_i2c_clk_cfg(struct dw_i3c_master *master)
{
	unsigned long core_rate, core_period;
	u16 hcnt, lcnt;
	u32 scl_timing;

	core_rate = clk_get_rate(master->core_clk);
	if (!core_rate)
		return -EINVAL;

	core_period = DIV_ROUND_UP(1000000000, core_rate);

	lcnt = DIV_ROUND_UP(I3C_BUS_I2C_FMP_TLOW_MIN_NS, core_period);
	hcnt = DIV_ROUND_UP(core_rate, I3C_BUS_I2C_FM_PLUS_SCL_RATE) - lcnt;
	scl_timing = SCL_I2C_FMP_TIMING_HCNT(hcnt) |
		     SCL_I2C_FMP_TIMING_LCNT(lcnt);
	writel(scl_timing, master->regs + SCL_I2C_FMP_TIMING);

	lcnt = DIV_ROUND_UP(I3C_BUS_I2C_FM_TLOW_MIN_NS, core_period);
	hcnt = DIV_ROUND_UP(core_rate, I3C_BUS_I2C_FM_SCL_RATE) - lcnt;
	scl_timing = SCL_I2C_FM_TIMING_HCNT(hcnt) |
		     SCL_I2C_FM_TIMING_LCNT(lcnt);
	writel(scl_timing, master->regs + SCL_I2C_FM_TIMING);

	writel(BUS_I3C_MST_FREE(lcnt), master->regs + BUS_FREE_TIMING);
	writel(readl(master->regs + DEVICE_CTRL) | DEV_CTRL_I2C_SLAVE_PRESENT,
	       master->regs + DEVICE_CTRL);

	return 0;
}

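/*
 * Bus initialization: program SCL timings for the requested bus mode,
 * clear the response-queue and RX-buffer threshold fields, enable only the
 * interrupts the master path uses (transfer error and response ready),
 * claim a dynamic address for the controller itself, and NACK IBIs,
 * mastership requests and hot-join until they are supported.
 */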
static int dw_i3c_master_bus_init(struct i3c_master_controller *m)
{
	struct dw_i3c_master *master = to_dw_i3c_master(m);
	struct i3c_bus *bus = i3c_master_get_bus(m);
	struct i3c_device_info info = { };
	u32 thld_ctrl;
	int ret;

	switch (bus->mode) {
	case I3C_BUS_MODE_MIXED_FAST:
		ret = dw_i2c_clk_cfg(master);
		if (ret)
			return ret;
		/* fall through */
	case I3C_BUS_MODE_PURE:
		ret = dw_i3c_clk_cfg(master);
		if (ret)
			return ret;
		break;
	default:
		return -EINVAL;
	}

	thld_ctrl = readl(master->regs + QUEUE_THLD_CTRL);
	thld_ctrl &= ~QUEUE_THLD_CTRL_RESP_BUF_MASK;
	writel(thld_ctrl, master->regs + QUEUE_THLD_CTRL);

	thld_ctrl = readl(master->regs + DATA_BUFFER_THLD_CTRL);
	thld_ctrl &= ~DATA_BUFFER_THLD_CTRL_RX_BUF;
	writel(thld_ctrl, master->regs + DATA_BUFFER_THLD_CTRL);

	writel(INTR_ALL, master->regs + INTR_STATUS);
	writel(INTR_MASTER_MASK, master->regs + INTR_STATUS_EN);
	writel(INTR_MASTER_MASK, master->regs + INTR_SIGNAL_EN);

	ret = i3c_master_get_free_addr(m, 0);
	if (ret < 0)
		return ret;

	writel(DEV_ADDR_DYNAMIC_ADDR_VALID | DEV_ADDR_DYNAMIC(ret),
	       master->regs + DEVICE_ADDR);

	memset(&info, 0, sizeof(info));
	info.dyn_addr = ret;

	ret = i3c_master_set_info(&master->base, &info);
	if (ret)
		return ret;

	writel(IBI_REQ_REJECT_ALL, master->regs + IBI_SIR_REQ_REJECT);
	writel(IBI_REQ_REJECT_ALL, master->regs + IBI_MR_REQ_REJECT);

	/* For now don't support Hot-Join */
	writel(readl(master->regs + DEVICE_CTRL) | DEV_CTRL_HOT_JOIN_NACK,
	       master->regs + DEVICE_CTRL);

	dw_i3c_master_enable(master);

	return 0;
}

static void dw_i3c_master_bus_cleanup(struct i3c_master_controller *m)
{
	struct dw_i3c_master *master = to_dw_i3c_master(m);

	dw_i3c_master_disable(master);
}

static int dw_i3c_ccc_set(struct dw_i3c_master *master,
			  struct i3c_ccc_cmd *ccc)
{
	struct dw_i3c_xfer *xfer;
	struct dw_i3c_cmd *cmd;
	int ret, pos = 0;

	if (ccc->id & I3C_CCC_DIRECT) {
		pos = dw_i3c_master_get_addr_pos(master, ccc->dests[0].addr);
		if (pos < 0)
			return pos;
	}

	xfer = dw_i3c_master_alloc_xfer(master, 1);
	if (!xfer)
		return -ENOMEM;

	cmd = xfer->cmds;
	cmd->tx_buf = ccc->dests[0].payload.data;
	cmd->tx_len = ccc->dests[0].payload.len;

	cmd->cmd_hi = COMMAND_PORT_ARG_DATA_LEN(ccc->dests[0].payload.len) |
		      COMMAND_PORT_TRANSFER_ARG;

	cmd->cmd_lo = COMMAND_PORT_CP |
		      COMMAND_PORT_DEV_INDEX(pos) |
		      COMMAND_PORT_CMD(ccc->id) |
		      COMMAND_PORT_TOC |
		      COMMAND_PORT_ROC;

	dw_i3c_master_enqueue_xfer(master, xfer);
	if (!wait_for_completion_timeout(&xfer->comp, XFER_TIMEOUT))
		dw_i3c_master_dequeue_xfer(master, xfer);

	ret = xfer->ret;
	if (xfer->cmds[0].error == RESPONSE_ERROR_IBA_NACK)
		ccc->err = I3C_ERROR_M2;

	dw_i3c_master_free_xfer(xfer);

	return ret;
}

static int dw_i3c_ccc_get(struct dw_i3c_master *master, struct i3c_ccc_cmd *ccc)
{
	struct dw_i3c_xfer *xfer;
	struct dw_i3c_cmd *cmd;
	int ret, pos;

	pos = dw_i3c_master_get_addr_pos(master, ccc->dests[0].addr);
	if (pos < 0)
		return pos;

	xfer = dw_i3c_master_alloc_xfer(master, 1);
	if (!xfer)
		return -ENOMEM;

	cmd = xfer->cmds;
	cmd->rx_buf = ccc->dests[0].payload.data;
	cmd->rx_len = ccc->dests[0].payload.len;

	cmd->cmd_hi = COMMAND_PORT_ARG_DATA_LEN(ccc->dests[0].payload.len) |
		      COMMAND_PORT_TRANSFER_ARG;

	cmd->cmd_lo = COMMAND_PORT_READ_TRANSFER |
		      COMMAND_PORT_CP |
		      COMMAND_PORT_DEV_INDEX(pos) |
		      COMMAND_PORT_CMD(ccc->id) |
		      COMMAND_PORT_TOC |
		      COMMAND_PORT_ROC;

	dw_i3c_master_enqueue_xfer(master, xfer);
	if (!wait_for_completion_timeout(&xfer->comp, XFER_TIMEOUT))
		dw_i3c_master_dequeue_xfer(master, xfer);

	ret = xfer->ret;
	if (xfer->cmds[0].error == RESPONSE_ERROR_IBA_NACK)
		ccc->err = I3C_ERROR_M2;
	dw_i3c_master_free_xfer(xfer);

	return ret;
}

static int dw_i3c_master_send_ccc_cmd(struct i3c_master_controller *m,
				      struct i3c_ccc_cmd *ccc)
{
	struct dw_i3c_master *master = to_dw_i3c_master(m);
	int ret = 0;

	if (ccc->id == I3C_CCC_ENTDAA)
		return -EINVAL;

	if (ccc->rnw)
		ret = dw_i3c_ccc_get(master, ccc);
	else
		ret = dw_i3c_ccc_set(master, ccc);

	return ret;
}

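/*
 * Dynamic Address Assignment: every free DAT slot is first loaded with a
 * candidate dynamic address (parity bit included), then a single address
 * assignment command covering those slots is queued.  On completion the
 * response length reports how many slots were left unassigned, and the
 * newly used slots are registered with the I3C core.
 */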
static int dw_i3c_master_daa(struct i3c_master_controller *m)
{
	struct dw_i3c_master *master = to_dw_i3c_master(m);
	struct dw_i3c_xfer *xfer;
	struct dw_i3c_cmd *cmd;
	u32 olddevs, newdevs;
	u8 p, last_addr = 0;
	int ret, pos;

	olddevs = ~(master->free_pos);

	/* Prepare DAT before launching DAA. */
	for (pos = 0; pos < master->maxdevs; pos++) {
		if (olddevs & BIT(pos))
			continue;

		ret = i3c_master_get_free_addr(m, last_addr + 1);
		if (ret < 0)
			return -ENOSPC;

		master->addrs[pos] = ret;
		p = even_parity(ret);
		last_addr = ret;
		ret |= (p << 7);

		writel(DEV_ADDR_TABLE_DYNAMIC_ADDR(ret),
		       master->regs +
		       DEV_ADDR_TABLE_LOC(master->datstartaddr, pos));
	}

	xfer = dw_i3c_master_alloc_xfer(master, 1);
	if (!xfer)
		return -ENOMEM;

	pos = dw_i3c_master_get_free_pos(master);
	cmd = &xfer->cmds[0];
	cmd->cmd_hi = 0x1;
	cmd->cmd_lo = COMMAND_PORT_DEV_COUNT(master->maxdevs - pos) |
		      COMMAND_PORT_DEV_INDEX(pos) |
		      COMMAND_PORT_CMD(I3C_CCC_ENTDAA) |
		      COMMAND_PORT_ADDR_ASSGN_CMD |
		      COMMAND_PORT_TOC |
		      COMMAND_PORT_ROC;

	dw_i3c_master_enqueue_xfer(master, xfer);
	if (!wait_for_completion_timeout(&xfer->comp, XFER_TIMEOUT))
		dw_i3c_master_dequeue_xfer(master, xfer);

	newdevs = GENMASK(master->maxdevs - cmd->rx_len - 1, 0);
	newdevs &= ~olddevs;

	for (pos = 0; pos < master->maxdevs; pos++) {
		if (newdevs & BIT(pos))
			i3c_master_add_i3c_dev_locked(m, master->addrs[pos]);
	}

	dw_i3c_master_free_xfer(xfer);

	i3c_master_disec_locked(m, I3C_BROADCAST_ADDR,
				I3C_CCC_EVENT_HJ |
				I3C_CCC_EVENT_MR |
				I3C_CCC_EVENT_SIR);

	return 0;
}

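/*
 * Private transfers are queued in one shot: the number of messages must
 * fit in the command queue and all payloads must fit in the data FIFOs,
 * since nothing is refilled while the transfer is running.  Requests
 * exceeding those depths, or a message longer than
 * COMMAND_PORT_ARG_DATA_LEN_MAX, are rejected with -ENOTSUPP.
 */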
static int dw_i3c_master_priv_xfers(struct i3c_dev_desc *dev,
				    struct i3c_priv_xfer *i3c_xfers,
				    int i3c_nxfers)
{
	struct dw_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct dw_i3c_master *master = to_dw_i3c_master(m);
	unsigned int nrxwords = 0, ntxwords = 0;
	struct dw_i3c_xfer *xfer;
	int i, ret = 0;

	if (!i3c_nxfers)
		return 0;

	if (i3c_nxfers > master->caps.cmdfifodepth)
		return -ENOTSUPP;

	for (i = 0; i < i3c_nxfers; i++) {
		if (i3c_xfers[i].len > COMMAND_PORT_ARG_DATA_LEN_MAX)
			return -ENOTSUPP;
	}

	for (i = 0; i < i3c_nxfers; i++) {
		if (i3c_xfers[i].rnw)
			nrxwords += DIV_ROUND_UP(i3c_xfers[i].len, 4);
		else
			ntxwords += DIV_ROUND_UP(i3c_xfers[i].len, 4);
	}

	if (ntxwords > master->caps.datafifodepth ||
	    nrxwords > master->caps.datafifodepth)
		return -ENOTSUPP;

	xfer = dw_i3c_master_alloc_xfer(master, i3c_nxfers);
	if (!xfer)
		return -ENOMEM;

	for (i = 0; i < i3c_nxfers; i++) {
		struct dw_i3c_cmd *cmd = &xfer->cmds[i];

		cmd->cmd_hi = COMMAND_PORT_ARG_DATA_LEN(i3c_xfers[i].len) |
			      COMMAND_PORT_TRANSFER_ARG;

		if (i3c_xfers[i].rnw) {
			cmd->rx_buf = i3c_xfers[i].data.in;
			cmd->rx_len = i3c_xfers[i].len;
			cmd->cmd_lo = COMMAND_PORT_READ_TRANSFER |
				      COMMAND_PORT_SPEED(dev->info.max_read_ds);

		} else {
			cmd->tx_buf = i3c_xfers[i].data.out;
			cmd->tx_len = i3c_xfers[i].len;
			cmd->cmd_lo =
				COMMAND_PORT_SPEED(dev->info.max_write_ds);
		}

		cmd->cmd_lo |= COMMAND_PORT_TID(i) |
			       COMMAND_PORT_DEV_INDEX(data->index) |
			       COMMAND_PORT_ROC;

		if (i == (i3c_nxfers - 1))
			cmd->cmd_lo |= COMMAND_PORT_TOC;
	}

	dw_i3c_master_enqueue_xfer(master, xfer);
	if (!wait_for_completion_timeout(&xfer->comp, XFER_TIMEOUT))
		dw_i3c_master_dequeue_xfer(master, xfer);

	ret = xfer->ret;
	dw_i3c_master_free_xfer(xfer);

	return ret;
}

static int dw_i3c_master_reattach_i3c_dev(struct i3c_dev_desc *dev,
					  u8 old_dyn_addr)
{
	struct dw_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct dw_i3c_master *master = to_dw_i3c_master(m);

	writel(DEV_ADDR_TABLE_DYNAMIC_ADDR(dev->info.dyn_addr),
	       master->regs +
	       DEV_ADDR_TABLE_LOC(master->datstartaddr, data->index));

	if (!old_dyn_addr)
		return 0;

	master->addrs[data->index] = dev->info.dyn_addr;

	return 0;
}

static int dw_i3c_master_attach_i3c_dev(struct i3c_dev_desc *dev)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct dw_i3c_master *master = to_dw_i3c_master(m);
	struct dw_i3c_i2c_dev_data *data;
	int pos;

	pos = dw_i3c_master_get_free_pos(master);
	if (pos < 0)
		return pos;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->index = pos;
	master->addrs[pos] = dev->info.dyn_addr;
	master->free_pos &= ~BIT(pos);
	i3c_dev_set_master_data(dev, data);

	writel(DEV_ADDR_TABLE_DYNAMIC_ADDR(dev->info.dyn_addr),
	       master->regs +
	       DEV_ADDR_TABLE_LOC(master->datstartaddr, data->index));

	return 0;
}

static void dw_i3c_master_detach_i3c_dev(struct i3c_dev_desc *dev)
{
	struct dw_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct dw_i3c_master *master = to_dw_i3c_master(m);

	writel(0,
	       master->regs +
	       DEV_ADDR_TABLE_LOC(master->datstartaddr, data->index));

	i3c_dev_set_master_data(dev, NULL);
	master->addrs[data->index] = 0;
	master->free_pos |= BIT(data->index);
	kfree(data);
}

static int dw_i3c_master_i2c_xfers(struct i2c_dev_desc *dev,
				   const struct i2c_msg *i2c_xfers,
				   int i2c_nxfers)
{
	struct dw_i3c_i2c_dev_data *data = i2c_dev_get_master_data(dev);
	struct i3c_master_controller *m = i2c_dev_get_master(dev);
	struct dw_i3c_master *master = to_dw_i3c_master(m);
	unsigned int nrxwords = 0, ntxwords = 0;
	struct dw_i3c_xfer *xfer;
	int i, ret = 0;

	if (!i2c_nxfers)
		return 0;

	if (i2c_nxfers > master->caps.cmdfifodepth)
		return -ENOTSUPP;

	for (i = 0; i < i2c_nxfers; i++) {
		if (i2c_xfers[i].len > COMMAND_PORT_ARG_DATA_LEN_MAX)
			return -ENOTSUPP;
	}

	for (i = 0; i < i2c_nxfers; i++) {
		if (i2c_xfers[i].flags & I2C_M_RD)
			nrxwords += DIV_ROUND_UP(i2c_xfers[i].len, 4);
		else
			ntxwords += DIV_ROUND_UP(i2c_xfers[i].len, 4);
	}

	if (ntxwords > master->caps.datafifodepth ||
	    nrxwords > master->caps.datafifodepth)
		return -ENOTSUPP;

	xfer = dw_i3c_master_alloc_xfer(master, i2c_nxfers);
	if (!xfer)
		return -ENOMEM;

	for (i = 0; i < i2c_nxfers; i++) {
		struct dw_i3c_cmd *cmd = &xfer->cmds[i];

		cmd->cmd_hi = COMMAND_PORT_ARG_DATA_LEN(i2c_xfers[i].len) |
			      COMMAND_PORT_TRANSFER_ARG;

		cmd->cmd_lo = COMMAND_PORT_TID(i) |
			      COMMAND_PORT_DEV_INDEX(data->index) |
			      COMMAND_PORT_ROC;

		if (i2c_xfers[i].flags & I2C_M_RD) {
			cmd->cmd_lo |= COMMAND_PORT_READ_TRANSFER;
			cmd->rx_buf = i2c_xfers[i].buf;
			cmd->rx_len = i2c_xfers[i].len;
		} else {
			cmd->tx_buf = i2c_xfers[i].buf;
			cmd->tx_len = i2c_xfers[i].len;
		}

		if (i == (i2c_nxfers - 1))
			cmd->cmd_lo |= COMMAND_PORT_TOC;
	}

	dw_i3c_master_enqueue_xfer(master, xfer);
	if (!wait_for_completion_timeout(&xfer->comp, XFER_TIMEOUT))
		dw_i3c_master_dequeue_xfer(master, xfer);

	ret = xfer->ret;
	dw_i3c_master_free_xfer(xfer);

	return ret;
}

static int dw_i3c_master_attach_i2c_dev(struct i2c_dev_desc *dev)
{
	struct i3c_master_controller *m = i2c_dev_get_master(dev);
	struct dw_i3c_master *master = to_dw_i3c_master(m);
	struct dw_i3c_i2c_dev_data *data;
	int pos;

	pos = dw_i3c_master_get_free_pos(master);
	if (pos < 0)
		return pos;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->index = pos;
	master->addrs[pos] = dev->boardinfo->base.addr;
	master->free_pos &= ~BIT(pos);
	i2c_dev_set_master_data(dev, data);

	writel(DEV_ADDR_TABLE_LEGACY_I2C_DEV |
	       DEV_ADDR_TABLE_STATIC_ADDR(dev->boardinfo->base.addr),
	       master->regs +
	       DEV_ADDR_TABLE_LOC(master->datstartaddr, data->index));

	return 0;
}

static void dw_i3c_master_detach_i2c_dev(struct i2c_dev_desc *dev)
{
	struct dw_i3c_i2c_dev_data *data = i2c_dev_get_master_data(dev);
	struct i3c_master_controller *m = i2c_dev_get_master(dev);
	struct dw_i3c_master *master = to_dw_i3c_master(m);

	writel(0,
	       master->regs +
	       DEV_ADDR_TABLE_LOC(master->datstartaddr, data->index));

	i2c_dev_set_master_data(dev, NULL);
	master->addrs[data->index] = 0;
	master->free_pos |= BIT(data->index);
	kfree(data);
}

static u32 dw_i3c_master_i2c_funcs(struct i3c_master_controller *m)
{
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}

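/*
 * Only the INTR_MASTER_MASK sources (transfer error and response ready)
 * are enabled, so an interrupt whose pending bits are all disabled is
 * treated as spurious: it is acknowledged and IRQ_NONE is returned.
 * Otherwise the current transfer is completed under xferqueue.lock and the
 * next queued transfer, if any, is started.
 */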
static irqreturn_t dw_i3c_master_irq_handler(int irq, void *dev_id)
{
	struct dw_i3c_master *master = dev_id;
	u32 status;

	status = readl(master->regs + INTR_STATUS);

	if (!(status & readl(master->regs + INTR_STATUS_EN))) {
		writel(INTR_ALL, master->regs + INTR_STATUS);
		return IRQ_NONE;
	}

	spin_lock(&master->xferqueue.lock);
	dw_i3c_master_end_xfer_locked(master, status);
	if (status & INTR_TRANSFER_ERR_STAT)
		writel(INTR_TRANSFER_ERR_STAT, master->regs + INTR_STATUS);
	spin_unlock(&master->xferqueue.lock);

	return IRQ_HANDLED;
}

static const struct i3c_master_controller_ops dw_mipi_i3c_ops = {
	.bus_init = dw_i3c_master_bus_init,
	.bus_cleanup = dw_i3c_master_bus_cleanup,
	.attach_i3c_dev = dw_i3c_master_attach_i3c_dev,
	.reattach_i3c_dev = dw_i3c_master_reattach_i3c_dev,
	.detach_i3c_dev = dw_i3c_master_detach_i3c_dev,
	.do_daa = dw_i3c_master_daa,
	.supports_ccc_cmd = dw_i3c_master_supports_ccc_cmd,
	.send_ccc_cmd = dw_i3c_master_send_ccc_cmd,
	.priv_xfers = dw_i3c_master_priv_xfers,
	.attach_i2c_dev = dw_i3c_master_attach_i2c_dev,
	.detach_i2c_dev = dw_i3c_master_detach_i2c_dev,
	.i2c_xfers = dw_i3c_master_i2c_xfers,
	.i2c_funcs = dw_i3c_master_i2c_funcs,
};

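/*
 * The hardware geometry is discovered from the controller itself: on an
 * idle controller QUEUE_STATUS_LEVEL and DATA_BUFFER_STATUS_LEVEL are
 * assumed to report the free command-queue and TX FIFO depths, and
 * DEVICE_ADDR_TABLE_POINTER gives the DAT offset and the number of device
 * slots.
 */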
static int dw_i3c_probe(struct platform_device *pdev)
{
	struct dw_i3c_master *master;
	struct resource *res;
	int ret, irq;

	master = devm_kzalloc(&pdev->dev, sizeof(*master), GFP_KERNEL);
	if (!master)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	master->regs = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(master->regs))
		return PTR_ERR(master->regs);

	master->core_clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(master->core_clk))
		return PTR_ERR(master->core_clk);

	master->core_rst = devm_reset_control_get_optional_exclusive(&pdev->dev,
								     "core_rst");
	if (IS_ERR(master->core_rst))
		return PTR_ERR(master->core_rst);

	ret = clk_prepare_enable(master->core_clk);
	if (ret)
		goto err_disable_core_clk;

	reset_control_deassert(master->core_rst);

	spin_lock_init(&master->xferqueue.lock);
	INIT_LIST_HEAD(&master->xferqueue.list);

	writel(INTR_ALL, master->regs + INTR_STATUS);
	irq = platform_get_irq(pdev, 0);
	ret = devm_request_irq(&pdev->dev, irq,
			       dw_i3c_master_irq_handler, 0,
			       dev_name(&pdev->dev), master);
	if (ret)
		goto err_assert_rst;

	platform_set_drvdata(pdev, master);

	/* Information regarding the FIFOs/QUEUEs depth */
	ret = readl(master->regs + QUEUE_STATUS_LEVEL);
	master->caps.cmdfifodepth = QUEUE_STATUS_LEVEL_CMD(ret);

	ret = readl(master->regs + DATA_BUFFER_STATUS_LEVEL);
	master->caps.datafifodepth = DATA_BUFFER_STATUS_LEVEL_TX(ret);

	ret = readl(master->regs + DEVICE_ADDR_TABLE_POINTER);
	master->datstartaddr = ret;
	master->maxdevs = ret >> 16;
	master->free_pos = GENMASK(master->maxdevs - 1, 0);

	ret = i3c_master_register(&master->base, &pdev->dev,
				  &dw_mipi_i3c_ops, false);
	if (ret)
		goto err_assert_rst;

	return 0;

err_assert_rst:
	reset_control_assert(master->core_rst);

err_disable_core_clk:
	clk_disable_unprepare(master->core_clk);

	return ret;
}

static int dw_i3c_remove(struct platform_device *pdev)
{
	struct dw_i3c_master *master = platform_get_drvdata(pdev);
	int ret;

	ret = i3c_master_unregister(&master->base);
	if (ret)
		return ret;

	reset_control_assert(master->core_rst);

	clk_disable_unprepare(master->core_clk);

	return 0;
}

static const struct of_device_id dw_i3c_master_of_match[] = {
	{ .compatible = "snps,dw-i3c-master-1.00a", },
	{},
};
MODULE_DEVICE_TABLE(of, dw_i3c_master_of_match);

static struct platform_driver dw_i3c_driver = {
	.probe = dw_i3c_probe,
	.remove = dw_i3c_remove,
	.driver = {
		.name = "dw-i3c-master",
		.of_match_table = of_match_ptr(dw_i3c_master_of_match),
	},
};
module_platform_driver(dw_i3c_driver);

MODULE_AUTHOR("Vitor Soares <vitor.soares@synopsys.com>");
MODULE_DESCRIPTION("DesignWare MIPI I3C driver");
MODULE_LICENSE("GPL v2");