drivers/spmi/spmi-pmic-arb.c
/*
 * Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/bitmap.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spmi.h>

/* PMIC Arbiter configuration registers */
#define PMIC_ARB_VERSION		0x0000
#define PMIC_ARB_VERSION_V2_MIN		0x20010000
#define PMIC_ARB_VERSION_V3_MIN		0x30000000
#define PMIC_ARB_INT_EN			0x0004

/* PMIC Arbiter channel registers offsets */
#define PMIC_ARB_CMD			0x00
#define PMIC_ARB_CONFIG			0x04
#define PMIC_ARB_STATUS			0x08
#define PMIC_ARB_WDATA0			0x10
#define PMIC_ARB_WDATA1			0x14
#define PMIC_ARB_RDATA0			0x18
#define PMIC_ARB_RDATA1			0x1C
#define PMIC_ARB_REG_CHNL(N)		(0x800 + 0x4 * (N))

/* Mapping Table */
#define SPMI_MAPPING_TABLE_REG(N)	(0x0B00 + (4 * (N)))
#define SPMI_MAPPING_BIT_INDEX(X)	(((X) >> 18) & 0xF)
#define SPMI_MAPPING_BIT_IS_0_FLAG(X)	(((X) >> 17) & 0x1)
#define SPMI_MAPPING_BIT_IS_0_RESULT(X)	(((X) >> 9) & 0xFF)
#define SPMI_MAPPING_BIT_IS_1_FLAG(X)	(((X) >> 8) & 0x1)
#define SPMI_MAPPING_BIT_IS_1_RESULT(X)	(((X) >> 0) & 0xFF)

#define SPMI_MAPPING_TABLE_TREE_DEPTH	16	/* Maximum of 16-bits */
#define PMIC_ARB_MAX_PPID		BIT(12)	/* PPID is 12bit */
#define PMIC_ARB_CHAN_VALID		BIT(15)

/* Ownership Table */
#define SPMI_OWNERSHIP_TABLE_REG(N)	(0x0700 + (4 * (N)))
#define SPMI_OWNERSHIP_PERIPH2OWNER(X)	((X) & 0x7)

/* Channel Status fields */
enum pmic_arb_chnl_status {
	PMIC_ARB_STATUS_DONE	= BIT(0),
	PMIC_ARB_STATUS_FAILURE	= BIT(1),
	PMIC_ARB_STATUS_DENIED	= BIT(2),
	PMIC_ARB_STATUS_DROPPED	= BIT(3),
};

/* Command register fields */
#define PMIC_ARB_CMD_MAX_BYTE_COUNT	8

/* Command Opcodes */
enum pmic_arb_cmd_op_code {
	PMIC_ARB_OP_EXT_WRITEL = 0,
	PMIC_ARB_OP_EXT_READL = 1,
	PMIC_ARB_OP_EXT_WRITE = 2,
	PMIC_ARB_OP_RESET = 3,
	PMIC_ARB_OP_SLEEP = 4,
	PMIC_ARB_OP_SHUTDOWN = 5,
	PMIC_ARB_OP_WAKEUP = 6,
	PMIC_ARB_OP_AUTHENTICATE = 7,
	PMIC_ARB_OP_MSTR_READ = 8,
	PMIC_ARB_OP_MSTR_WRITE = 9,
	PMIC_ARB_OP_EXT_READ = 13,
	PMIC_ARB_OP_WRITE = 14,
	PMIC_ARB_OP_READ = 15,
	PMIC_ARB_OP_ZERO_WRITE = 16,
};

/* Maximum number of supported PMIC peripherals */
#define PMIC_ARB_MAX_PERIPHS		512
#define PMIC_ARB_TIMEOUT_US		100
#define PMIC_ARB_MAX_TRANS_BYTES	(8)

#define PMIC_ARB_APID_MASK		0xFF
#define PMIC_ARB_PPID_MASK		0xFFF

/* interrupt enable bit */
#define SPMI_PIC_ACC_ENABLE_BIT		BIT(0)
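
/*
 * Layout of the composite hwirq used by the IRQ domain below (see the HWIRQ()
 * and HWIRQ_*() helpers): bits [31:28] SPMI slave ID, [27:20] peripheral ID,
 * [18:16] peripheral IRQ number, [8:0] arbiter APID.
 */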
#define HWIRQ(slave_id, periph_id, irq_id, apid) \
	((((slave_id) & 0xF) << 28) | \
	 (((periph_id) & 0xFF) << 20) | \
	 (((irq_id) & 0x7) << 16) | \
	 (((apid) & 0x1FF) << 0))

#define HWIRQ_SID(hwirq)  (((hwirq) >> 28) & 0xF)
#define HWIRQ_PER(hwirq)  (((hwirq) >> 20) & 0xFF)
#define HWIRQ_IRQ(hwirq)  (((hwirq) >> 16) & 0x7)
#define HWIRQ_APID(hwirq) (((hwirq) >> 0) & 0x1FF)

struct pmic_arb_ver_ops;

struct apid_data {
	u16		ppid;
	u8		owner;
};

/**
 * spmi_pmic_arb - SPMI PMIC Arbiter object
 *
 * @rd_base: on v1 "core", on v2 "observer" register base off DT.
 * @wr_base: on v1 "core", on v2 "chnls" register base off DT.
 * @intr: address of the SPMI interrupt control registers.
 * @cnfg: address of the PMIC Arbiter configuration registers.
 * @core: core register base; on v2 used to read the channel (APID) to
 *	  PPID mapping table.
 * @core_size: size of the core register resource.
 * @lock: lock to synchronize accesses.
 * @channel: execution environment channel to use for accesses.
 * @irq: PMIC ARB interrupt.
 * @ee: the current Execution Environment
 * @min_apid: minimum APID (used for bounding IRQ search)
 * @max_apid: maximum APID
 * @max_periph: maximum number of PMIC peripherals supported by HW.
 * @mapping_table: in-memory copy of PPID -> APID mapping table.
 * @mapping_table_valid: bitmap of mapping table entries already read from HW.
 * @domain: irq domain object for PMIC IRQ domain
 * @spmic: SPMI controller object
 * @ver_ops: version dependent operations.
 * @ppid_to_apid: in-memory copy of PPID -> channel (APID) mapping table.
 *		  v2 only.
 * @last_apid: APID at which the previous channel table scan stopped; the
 *	       next scan resumes from here. v2 only.
 * @apid_data: owner and PPID of each APID.
 */
struct spmi_pmic_arb {
	void __iomem		*rd_base;
	void __iomem		*wr_base;
	void __iomem		*intr;
	void __iomem		*cnfg;
	void __iomem		*core;
	resource_size_t		core_size;
	raw_spinlock_t		lock;
	u8			channel;
	int			irq;
	u8			ee;
	u16			min_apid;
	u16			max_apid;
	u16			max_periph;
	u32			*mapping_table;
	DECLARE_BITMAP(mapping_table_valid, PMIC_ARB_MAX_PERIPHS);
	struct irq_domain	*domain;
	struct spmi_controller	*spmic;
	const struct pmic_arb_ver_ops *ver_ops;
	u16			*ppid_to_apid;
	u16			last_apid;
	struct apid_data	apid_data[PMIC_ARB_MAX_PERIPHS];
};

/**
 * pmic_arb_ver_ops: version dependent functionality.
 *
 * @ver_str: version string.
 * @ppid_to_apid: finds the apid for a given ppid.
 * @mode: access rights to specified pmic peripheral.
 * @non_data_cmd: on v1 issues an spmi non-data command.
 *		on v2 no HW support, returns -EOPNOTSUPP.
 * @offset: on v1 offset of per-ee channel.
 *		on v2 offset of per-ee and per-ppid channel.
 * @fmt_cmd: formats a GENI/SPMI command.
 * @owner_acc_status: on v1 offset of PMIC_ARB_SPMI_PIC_OWNERm_ACC_STATUSn
 *		on v2 offset of SPMI_PIC_OWNERm_ACC_STATUSn.
 * @acc_enable: on v1 offset of PMIC_ARB_SPMI_PIC_ACC_ENABLEn
 *		on v2 offset of SPMI_PIC_ACC_ENABLEn.
 * @irq_status: on v1 offset of PMIC_ARB_SPMI_PIC_IRQ_STATUSn
 *		on v2 offset of SPMI_PIC_IRQ_STATUSn.
 * @irq_clear: on v1 offset of PMIC_ARB_SPMI_PIC_IRQ_CLEARn
 *		on v2 offset of SPMI_PIC_IRQ_CLEARn.
 */
struct pmic_arb_ver_ops {
	const char *ver_str;
	int (*ppid_to_apid)(struct spmi_pmic_arb *pa, u8 sid, u16 addr,
			u16 *apid);
	int (*mode)(struct spmi_pmic_arb *dev, u8 sid, u16 addr,
			mode_t *mode);
	/* spmi commands (read_cmd, write_cmd, cmd) functionality */
	int (*offset)(struct spmi_pmic_arb *dev, u8 sid, u16 addr,
		      u32 *offset);
	u32 (*fmt_cmd)(u8 opc, u8 sid, u16 addr, u8 bc);
	int (*non_data_cmd)(struct spmi_controller *ctrl, u8 opc, u8 sid);
	/* Interrupts controller functionality (offset of PIC registers) */
	u32 (*owner_acc_status)(u8 m, u16 n);
	u32 (*acc_enable)(u16 n);
	u32 (*irq_status)(u16 n);
	u32 (*irq_clear)(u16 n);
};

static inline void pmic_arb_base_write(struct spmi_pmic_arb *pa,
				       u32 offset, u32 val)
{
	writel_relaxed(val, pa->wr_base + offset);
}

static inline void pmic_arb_set_rd_cmd(struct spmi_pmic_arb *pa,
				       u32 offset, u32 val)
{
	writel_relaxed(val, pa->rd_base + offset);
}

/**
 * pa_read_data: reads pmic-arb's register and copies 1..4 bytes to buf
 * @bc: byte count -1. range: 0..3
 * @reg: register's address
 * @buf: output parameter, length must be bc + 1
 */
static void pa_read_data(struct spmi_pmic_arb *pa, u8 *buf, u32 reg, u8 bc)
{
	u32 data = __raw_readl(pa->rd_base + reg);

	memcpy(buf, &data, (bc & 3) + 1);
}

/**
 * pa_write_data: write 1..4 bytes from buf to pmic-arb's register
 * @bc: byte-count -1. range: 0..3.
 * @reg: register's address.
 * @buf: buffer to write. length must be bc + 1.
 */
static void
pa_write_data(struct spmi_pmic_arb *pa, const u8 *buf, u32 reg, u8 bc)
{
	u32 data = 0;

	memcpy(&data, buf, (bc & 3) + 1);
	pmic_arb_base_write(pa, reg, data);
}

static int pmic_arb_wait_for_done(struct spmi_controller *ctrl,
				  void __iomem *base, u8 sid, u16 addr)
{
	struct spmi_pmic_arb *pa = spmi_controller_get_drvdata(ctrl);
	u32 status = 0;
	u32 timeout = PMIC_ARB_TIMEOUT_US;
	u32 offset;
	int rc;

	rc = pa->ver_ops->offset(pa, sid, addr, &offset);
	if (rc)
		return rc;

	offset += PMIC_ARB_STATUS;

	while (timeout--) {
		status = readl_relaxed(base + offset);

		if (status & PMIC_ARB_STATUS_DONE) {
			if (status & PMIC_ARB_STATUS_DENIED) {
				dev_err(&ctrl->dev,
					"%s: transaction denied (0x%x)\n",
					__func__, status);
				return -EPERM;
			}

			if (status & PMIC_ARB_STATUS_FAILURE) {
				dev_err(&ctrl->dev,
					"%s: transaction failed (0x%x)\n",
					__func__, status);
				return -EIO;
			}

			if (status & PMIC_ARB_STATUS_DROPPED) {
				dev_err(&ctrl->dev,
					"%s: transaction dropped (0x%x)\n",
					__func__, status);
				return -EIO;
			}

			return 0;
		}
		udelay(1);
	}

	dev_err(&ctrl->dev,
		"%s: timeout, status 0x%x\n",
		__func__, status);
	return -ETIMEDOUT;
}

static int
pmic_arb_non_data_cmd_v1(struct spmi_controller *ctrl, u8 opc, u8 sid)
{
	struct spmi_pmic_arb *pa = spmi_controller_get_drvdata(ctrl);
	unsigned long flags;
	u32 cmd;
	int rc;
	u32 offset;

	rc = pa->ver_ops->offset(pa, sid, 0, &offset);
	if (rc)
		return rc;

	cmd = ((opc | 0x40) << 27) | ((sid & 0xf) << 20);

	raw_spin_lock_irqsave(&pa->lock, flags);
	pmic_arb_base_write(pa, offset + PMIC_ARB_CMD, cmd);
	rc = pmic_arb_wait_for_done(ctrl, pa->wr_base, sid, 0);
	raw_spin_unlock_irqrestore(&pa->lock, flags);

	return rc;
}

static int
pmic_arb_non_data_cmd_v2(struct spmi_controller *ctrl, u8 opc, u8 sid)
{
	return -EOPNOTSUPP;
}

/* Non-data command */
static int pmic_arb_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid)
{
	struct spmi_pmic_arb *pa = spmi_controller_get_drvdata(ctrl);

	dev_dbg(&ctrl->dev, "cmd op:0x%x sid:%d\n", opc, sid);

	/* Check for valid non-data command */
	if (opc < SPMI_CMD_RESET || opc > SPMI_CMD_WAKEUP)
		return -EINVAL;

	return pa->ver_ops->non_data_cmd(ctrl, opc, sid);
}

static int pmic_arb_read_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid,
			     u16 addr, u8 *buf, size_t len)
{
	struct spmi_pmic_arb *pa = spmi_controller_get_drvdata(ctrl);
	unsigned long flags;
	u8 bc = len - 1;
	u32 cmd;
	int rc;
	u32 offset;
	mode_t mode;

	rc = pa->ver_ops->offset(pa, sid, addr, &offset);
	if (rc)
		return rc;

	rc = pa->ver_ops->mode(pa, sid, addr, &mode);
	if (rc)
		return rc;

	if (!(mode & S_IRUSR)) {
		dev_err(&pa->spmic->dev,
			"error: impermissible read from peripheral sid:%d addr:0x%x\n",
			sid, addr);
		return -EPERM;
	}

	if (bc >= PMIC_ARB_MAX_TRANS_BYTES) {
		dev_err(&ctrl->dev,
			"pmic-arb supports 1..%d bytes per trans, but:%zu requested",
			PMIC_ARB_MAX_TRANS_BYTES, len);
		return -EINVAL;
	}

	/* Check the opcode */
	if (opc >= 0x60 && opc <= 0x7F)
		opc = PMIC_ARB_OP_READ;
	else if (opc >= 0x20 && opc <= 0x2F)
		opc = PMIC_ARB_OP_EXT_READ;
	else if (opc >= 0x38 && opc <= 0x3F)
		opc = PMIC_ARB_OP_EXT_READL;
	else
		return -EINVAL;

	cmd = pa->ver_ops->fmt_cmd(opc, sid, addr, bc);

	raw_spin_lock_irqsave(&pa->lock, flags);
	pmic_arb_set_rd_cmd(pa, offset + PMIC_ARB_CMD, cmd);
	rc = pmic_arb_wait_for_done(ctrl, pa->rd_base, sid, addr);
	if (rc)
		goto done;

	pa_read_data(pa, buf, offset + PMIC_ARB_RDATA0,
		     min_t(u8, bc, 3));

	if (bc > 3)
		pa_read_data(pa, buf + 4, offset + PMIC_ARB_RDATA1, bc - 4);

done:
	raw_spin_unlock_irqrestore(&pa->lock, flags);
	return rc;
}

static int pmic_arb_write_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid,
			      u16 addr, const u8 *buf, size_t len)
{
	struct spmi_pmic_arb *pa = spmi_controller_get_drvdata(ctrl);
	unsigned long flags;
	u8 bc = len - 1;
	u32 cmd;
	int rc;
	u32 offset;
	mode_t mode;

	rc = pa->ver_ops->offset(pa, sid, addr, &offset);
	if (rc)
		return rc;

	rc = pa->ver_ops->mode(pa, sid, addr, &mode);
	if (rc)
		return rc;

	if (!(mode & S_IWUSR)) {
		dev_err(&pa->spmic->dev,
			"error: impermissible write to peripheral sid:%d addr:0x%x\n",
			sid, addr);
		return -EPERM;
	}

	if (bc >= PMIC_ARB_MAX_TRANS_BYTES) {
		dev_err(&ctrl->dev,
			"pmic-arb supports 1..%d bytes per trans, but:%zu requested",
			PMIC_ARB_MAX_TRANS_BYTES, len);
		return -EINVAL;
	}

	/* Check the opcode */
	if (opc >= 0x40 && opc <= 0x5F)
		opc = PMIC_ARB_OP_WRITE;
	else if (opc >= 0x00 && opc <= 0x0F)
		opc = PMIC_ARB_OP_EXT_WRITE;
	else if (opc >= 0x30 && opc <= 0x37)
		opc = PMIC_ARB_OP_EXT_WRITEL;
	else if (opc >= 0x80)
		opc = PMIC_ARB_OP_ZERO_WRITE;
	else
		return -EINVAL;

	cmd = pa->ver_ops->fmt_cmd(opc, sid, addr, bc);

	/* Write data to FIFOs */
	raw_spin_lock_irqsave(&pa->lock, flags);
	pa_write_data(pa, buf, offset + PMIC_ARB_WDATA0, min_t(u8, bc, 3));
	if (bc > 3)
		pa_write_data(pa, buf + 4, offset + PMIC_ARB_WDATA1, bc - 4);

	/* Start the transaction */
	pmic_arb_base_write(pa, offset + PMIC_ARB_CMD, cmd);
	rc = pmic_arb_wait_for_done(ctrl, pa->wr_base, sid, addr);
	raw_spin_unlock_irqrestore(&pa->lock, flags);

	return rc;
}
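
/*
 * Per-peripheral QPNP interrupt control registers. They are accessed over
 * SPMI at address (peripheral_id << 8) + reg; see qpnpint_spmi_read() and
 * qpnpint_spmi_write() below.
 */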
enum qpnpint_regs {
	QPNPINT_REG_RT_STS		= 0x10,
	QPNPINT_REG_SET_TYPE		= 0x11,
	QPNPINT_REG_POLARITY_HIGH	= 0x12,
	QPNPINT_REG_POLARITY_LOW	= 0x13,
	QPNPINT_REG_LATCHED_CLR		= 0x14,
	QPNPINT_REG_EN_SET		= 0x15,
	QPNPINT_REG_EN_CLR		= 0x16,
	QPNPINT_REG_LATCHED_STS		= 0x18,
};

struct spmi_pmic_arb_qpnpint_type {
	u8 type; /* 1 -> edge */
	u8 polarity_high;
	u8 polarity_low;
} __packed;

/* Simplified accessor functions for irqchip callbacks */
static void qpnpint_spmi_write(struct irq_data *d, u8 reg, void *buf,
			       size_t len)
{
	struct spmi_pmic_arb *pa = irq_data_get_irq_chip_data(d);
	u8 sid = HWIRQ_SID(d->hwirq);
	u8 per = HWIRQ_PER(d->hwirq);

	if (pmic_arb_write_cmd(pa->spmic, SPMI_CMD_EXT_WRITEL, sid,
			       (per << 8) + reg, buf, len))
		dev_err_ratelimited(&pa->spmic->dev,
				    "failed irqchip transaction on %x\n",
				    d->irq);
}

static void qpnpint_spmi_read(struct irq_data *d, u8 reg, void *buf, size_t len)
{
	struct spmi_pmic_arb *pa = irq_data_get_irq_chip_data(d);
	u8 sid = HWIRQ_SID(d->hwirq);
	u8 per = HWIRQ_PER(d->hwirq);

	if (pmic_arb_read_cmd(pa->spmic, SPMI_CMD_EXT_READL, sid,
			      (per << 8) + reg, buf, len))
		dev_err_ratelimited(&pa->spmic->dev,
				    "failed irqchip transaction on %x\n",
				    d->irq);
}
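
/*
 * Ack and disable a peripheral interrupt that fired without a corresponding
 * Linux IRQ mapping: clear it in the arbiter and in the peripheral's
 * LATCHED_CLR and EN_CLR registers so it cannot keep firing.
 */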
static void cleanup_irq(struct spmi_pmic_arb *pa, u16 apid, int id)
{
	u16 ppid = pa->apid_data[apid].ppid;
	u8 sid = ppid >> 8;
	u8 per = ppid & 0xFF;
	u8 irq_mask = BIT(id);

	writel_relaxed(irq_mask, pa->intr + pa->ver_ops->irq_clear(apid));

	if (pmic_arb_write_cmd(pa->spmic, SPMI_CMD_EXT_WRITEL, sid,
			(per << 8) + QPNPINT_REG_LATCHED_CLR, &irq_mask, 1))
		dev_err_ratelimited(&pa->spmic->dev,
				"failed to ack irq_mask = 0x%x for ppid = %x\n",
				irq_mask, ppid);

	if (pmic_arb_write_cmd(pa->spmic, SPMI_CMD_EXT_WRITEL, sid,
			       (per << 8) + QPNPINT_REG_EN_CLR, &irq_mask, 1))
		dev_err_ratelimited(&pa->spmic->dev,
				"failed to ack irq_mask = 0x%x for ppid = %x\n",
				irq_mask, ppid);
}

static void periph_interrupt(struct spmi_pmic_arb *pa, u16 apid)
{
	unsigned int irq;
	u32 status;
	int id;
	u8 sid = (pa->apid_data[apid].ppid >> 8) & 0xF;
	u8 per = pa->apid_data[apid].ppid & 0xFF;

	status = readl_relaxed(pa->intr + pa->ver_ops->irq_status(apid));
	while (status) {
		id = ffs(status) - 1;
		status &= ~BIT(id);
		irq = irq_find_mapping(pa->domain, HWIRQ(sid, per, id, apid));
		if (irq == 0) {
			cleanup_irq(pa, apid, id);
			continue;
		}
		generic_handle_irq(irq);
	}
}
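
/*
 * Summary handler for the arbiter's "periph_irq" line: scan the per-owner
 * accumulator status registers (one bit per APID, 32 APIDs per register)
 * between min_apid and max_apid and dispatch every enabled, pending
 * peripheral interrupt.
 */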
static void pmic_arb_chained_irq(struct irq_desc *desc)
{
	struct spmi_pmic_arb *pa = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	void __iomem *intr = pa->intr;
	int first = pa->min_apid >> 5;
	int last = pa->max_apid >> 5;
	u32 status, enable;
	int i, id, apid;

	chained_irq_enter(chip, desc);

	for (i = first; i <= last; ++i) {
		status = readl_relaxed(intr +
				pa->ver_ops->owner_acc_status(pa->ee, i));
		while (status) {
			id = ffs(status) - 1;
			status &= ~BIT(id);
			apid = id + i * 32;
			enable = readl_relaxed(intr +
					pa->ver_ops->acc_enable(apid));
			if (enable & SPMI_PIC_ACC_ENABLE_BIT)
				periph_interrupt(pa, apid);
		}
	}

	chained_irq_exit(chip, desc);
}

static void qpnpint_irq_ack(struct irq_data *d)
{
	struct spmi_pmic_arb *pa = irq_data_get_irq_chip_data(d);
	u8 irq = HWIRQ_IRQ(d->hwirq);
	u16 apid = HWIRQ_APID(d->hwirq);
	u8 data;

	writel_relaxed(BIT(irq), pa->intr + pa->ver_ops->irq_clear(apid));

	data = BIT(irq);
	qpnpint_spmi_write(d, QPNPINT_REG_LATCHED_CLR, &data, 1);
}

static void qpnpint_irq_mask(struct irq_data *d)
{
	u8 irq = HWIRQ_IRQ(d->hwirq);
	u8 data = BIT(irq);

	qpnpint_spmi_write(d, QPNPINT_REG_EN_CLR, &data, 1);
}

static void qpnpint_irq_unmask(struct irq_data *d)
{
	struct spmi_pmic_arb *pa = irq_data_get_irq_chip_data(d);
	u8 irq = HWIRQ_IRQ(d->hwirq);
	u16 apid = HWIRQ_APID(d->hwirq);
	u8 buf[2];

	writel_relaxed(SPMI_PIC_ACC_ENABLE_BIT,
		pa->intr + pa->ver_ops->acc_enable(apid));

	qpnpint_spmi_read(d, QPNPINT_REG_EN_SET, &buf[0], 1);
	if (!(buf[0] & BIT(irq))) {
		/*
		 * Since the interrupt is currently disabled, write to both the
		 * LATCHED_CLR and EN_SET registers so that a spurious interrupt
		 * cannot be triggered when the interrupt is enabled
		 */
		buf[0] = BIT(irq);
		buf[1] = BIT(irq);
		qpnpint_spmi_write(d, QPNPINT_REG_LATCHED_CLR, &buf, 2);
	}
}

static int qpnpint_irq_set_type(struct irq_data *d, unsigned int flow_type)
{
	struct spmi_pmic_arb_qpnpint_type type;
	u8 irq = HWIRQ_IRQ(d->hwirq);
	u8 bit_mask_irq = BIT(irq);

	qpnpint_spmi_read(d, QPNPINT_REG_SET_TYPE, &type, sizeof(type));

	if (flow_type & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)) {
		type.type |= bit_mask_irq;
		if (flow_type & IRQF_TRIGGER_RISING)
			type.polarity_high |= bit_mask_irq;
		if (flow_type & IRQF_TRIGGER_FALLING)
			type.polarity_low |= bit_mask_irq;
	} else {
		if ((flow_type & (IRQF_TRIGGER_HIGH)) &&
		    (flow_type & (IRQF_TRIGGER_LOW)))
			return -EINVAL;

		type.type &= ~bit_mask_irq; /* level trig */
		if (flow_type & IRQF_TRIGGER_HIGH)
			type.polarity_high |= bit_mask_irq;
		else
			type.polarity_low |= bit_mask_irq;
	}

	qpnpint_spmi_write(d, QPNPINT_REG_SET_TYPE, &type, sizeof(type));

	if (flow_type & IRQ_TYPE_EDGE_BOTH)
		irq_set_handler_locked(d, handle_edge_irq);
	else
		irq_set_handler_locked(d, handle_level_irq);

	return 0;
}

static int qpnpint_get_irqchip_state(struct irq_data *d,
				     enum irqchip_irq_state which,
				     bool *state)
{
	u8 irq = HWIRQ_IRQ(d->hwirq);
	u8 status = 0;

	if (which != IRQCHIP_STATE_LINE_LEVEL)
		return -EINVAL;

	qpnpint_spmi_read(d, QPNPINT_REG_RT_STS, &status, 1);
	*state = !!(status & BIT(irq));

	return 0;
}

static struct irq_chip pmic_arb_irqchip = {
	.name		= "pmic_arb",
	.irq_ack	= qpnpint_irq_ack,
	.irq_mask	= qpnpint_irq_mask,
	.irq_unmask	= qpnpint_irq_unmask,
	.irq_set_type	= qpnpint_irq_set_type,
	.irq_get_irqchip_state	= qpnpint_get_irqchip_state,
	.flags		= IRQCHIP_MASK_ON_SUSPEND
			| IRQCHIP_SKIP_SET_WAKE,
};

static int qpnpint_irq_domain_dt_translate(struct irq_domain *d,
					   struct device_node *controller,
					   const u32 *intspec,
					   unsigned int intsize,
					   unsigned long *out_hwirq,
					   unsigned int *out_type)
{
	struct spmi_pmic_arb *pa = d->host_data;
	int rc;
	u16 apid;

	dev_dbg(&pa->spmic->dev,
		"intspec[0] 0x%1x intspec[1] 0x%02x intspec[2] 0x%02x\n",
		intspec[0], intspec[1], intspec[2]);

	if (irq_domain_get_of_node(d) != controller)
		return -EINVAL;
	if (intsize != 4)
		return -EINVAL;
	if (intspec[0] > 0xF || intspec[1] > 0xFF || intspec[2] > 0x7)
		return -EINVAL;

	rc = pa->ver_ops->ppid_to_apid(pa, intspec[0],
			(intspec[1] << 8), &apid);
	if (rc < 0) {
		dev_err(&pa->spmic->dev,
			"failed to xlate sid = 0x%x, periph = 0x%x, irq = %x rc = %d\n",
			intspec[0], intspec[1], intspec[2], rc);
		return rc;
	}

	/* Keep track of {max,min}_apid for bounding search during interrupt */
	if (apid > pa->max_apid)
		pa->max_apid = apid;
	if (apid < pa->min_apid)
		pa->min_apid = apid;

	*out_hwirq = HWIRQ(intspec[0], intspec[1], intspec[2], apid);
	*out_type = intspec[3] & IRQ_TYPE_SENSE_MASK;

	dev_dbg(&pa->spmic->dev, "out_hwirq = %lu\n", *out_hwirq);

	return 0;
}

static int qpnpint_irq_domain_map(struct irq_domain *d,
				  unsigned int virq,
				  irq_hw_number_t hwirq)
{
	struct spmi_pmic_arb *pa = d->host_data;

	dev_dbg(&pa->spmic->dev, "virq = %u, hwirq = %lu\n", virq, hwirq);

	irq_set_chip_and_handler(virq, &pmic_arb_irqchip, handle_level_irq);
	irq_set_chip_data(virq, d->host_data);
	irq_set_noprobe(virq);
	return 0;
}
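
/*
 * On v1 the PPID -> APID mapping is exposed as a binary tree in the "cnfg"
 * register space: each SPMI_MAPPING_TABLE_REG entry tests one bit of the
 * PPID and either points at another entry or terminates with the APID.
 * Entries are cached in pa->mapping_table on first use.
 */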
static int
pmic_arb_ppid_to_apid_v1(struct spmi_pmic_arb *pa, u8 sid, u16 addr, u16 *apid)
{
	u16 ppid = sid << 8 | ((addr >> 8) & 0xFF);
	u32 *mapping_table = pa->mapping_table;
	int index = 0, i;
	u16 apid_valid;
	u32 data;

	apid_valid = pa->ppid_to_apid[ppid];
	if (apid_valid & PMIC_ARB_CHAN_VALID) {
		*apid = (apid_valid & ~PMIC_ARB_CHAN_VALID);
		return 0;
	}

	for (i = 0; i < SPMI_MAPPING_TABLE_TREE_DEPTH; ++i) {
		if (!test_and_set_bit(index, pa->mapping_table_valid))
			mapping_table[index] = readl_relaxed(pa->cnfg +
						SPMI_MAPPING_TABLE_REG(index));

		data = mapping_table[index];

		if (ppid & BIT(SPMI_MAPPING_BIT_INDEX(data))) {
			if (SPMI_MAPPING_BIT_IS_1_FLAG(data)) {
				index = SPMI_MAPPING_BIT_IS_1_RESULT(data);
			} else {
				*apid = SPMI_MAPPING_BIT_IS_1_RESULT(data);
				pa->ppid_to_apid[ppid]
					= *apid | PMIC_ARB_CHAN_VALID;
				pa->apid_data[*apid].ppid = ppid;
				return 0;
			}
		} else {
			if (SPMI_MAPPING_BIT_IS_0_FLAG(data)) {
				index = SPMI_MAPPING_BIT_IS_0_RESULT(data);
			} else {
				*apid = SPMI_MAPPING_BIT_IS_0_RESULT(data);
				pa->ppid_to_apid[ppid]
					= *apid | PMIC_ARB_CHAN_VALID;
				pa->apid_data[*apid].ppid = ppid;
				return 0;
			}
		}
	}

	return -ENODEV;
}

static int
pmic_arb_mode_v1_v3(struct spmi_pmic_arb *pa, u8 sid, u16 addr, mode_t *mode)
{
	*mode = S_IRUSR | S_IWUSR;
	return 0;
}

/* v1 offset per ee */
static int
pmic_arb_offset_v1(struct spmi_pmic_arb *pa, u8 sid, u16 addr, u32 *offset)
{
	*offset = 0x800 + 0x80 * pa->channel;
	return 0;
}

static u16 pmic_arb_find_apid(struct spmi_pmic_arb *pa, u16 ppid)
{
	u32 regval, offset;
	u16 apid;
	u16 id;

	/*
	 * PMIC_ARB_REG_CHNL is a table in HW mapping channel to ppid.
	 * ppid_to_apid is an in-memory invert of that table.
	 */
	for (apid = pa->last_apid; apid < pa->max_periph; apid++) {
		regval = readl_relaxed(pa->cnfg +
				       SPMI_OWNERSHIP_TABLE_REG(apid));
		pa->apid_data[apid].owner = SPMI_OWNERSHIP_PERIPH2OWNER(regval);

		offset = PMIC_ARB_REG_CHNL(apid);
		if (offset >= pa->core_size)
			break;

		regval = readl_relaxed(pa->core + offset);
		if (!regval)
			continue;

		id = (regval >> 8) & PMIC_ARB_PPID_MASK;
		pa->ppid_to_apid[id] = apid | PMIC_ARB_CHAN_VALID;
		pa->apid_data[apid].ppid = id;
		if (id == ppid) {
			apid |= PMIC_ARB_CHAN_VALID;
			break;
		}
	}
	pa->last_apid = apid & ~PMIC_ARB_CHAN_VALID;

	return apid;
}

static int
pmic_arb_ppid_to_apid_v2(struct spmi_pmic_arb *pa, u8 sid, u16 addr, u16 *apid)
{
	u16 ppid = (sid << 8) | (addr >> 8);
	u16 apid_valid;

	apid_valid = pa->ppid_to_apid[ppid];
	if (!(apid_valid & PMIC_ARB_CHAN_VALID))
		apid_valid = pmic_arb_find_apid(pa, ppid);
	if (!(apid_valid & PMIC_ARB_CHAN_VALID))
		return -ENODEV;

	*apid = (apid_valid & ~PMIC_ARB_CHAN_VALID);
	return 0;
}

static int
pmic_arb_mode_v2(struct spmi_pmic_arb *pa, u8 sid, u16 addr, mode_t *mode)
{
	u16 apid;
	int rc;

	rc = pmic_arb_ppid_to_apid_v2(pa, sid, addr, &apid);
	if (rc < 0)
		return rc;

	*mode = 0;
	*mode |= S_IRUSR;

	if (pa->ee == pa->apid_data[apid].owner)
		*mode |= S_IWUSR;
	return 0;
}

/* v2 offset per ppid and per ee */
static int
pmic_arb_offset_v2(struct spmi_pmic_arb *pa, u8 sid, u16 addr, u32 *offset)
{
	u16 apid;
	int rc;

	rc = pmic_arb_ppid_to_apid_v2(pa, sid, addr, &apid);
	if (rc < 0)
		return rc;

	*offset = 0x1000 * pa->ee + 0x8000 * apid;
	return 0;
}

static u32 pmic_arb_fmt_cmd_v1(u8 opc, u8 sid, u16 addr, u8 bc)
{
	return (opc << 27) | ((sid & 0xf) << 20) | (addr << 4) | (bc & 0x7);
}

static u32 pmic_arb_fmt_cmd_v2(u8 opc, u8 sid, u16 addr, u8 bc)
{
	return (opc << 27) | ((addr & 0xff) << 4) | (bc & 0x7);
}

static u32 pmic_arb_owner_acc_status_v1(u8 m, u16 n)
{
	return 0x20 * m + 0x4 * n;
}

static u32 pmic_arb_owner_acc_status_v2(u8 m, u16 n)
{
	return 0x100000 + 0x1000 * m + 0x4 * n;
}

static u32 pmic_arb_owner_acc_status_v3(u8 m, u16 n)
{
	return 0x200000 + 0x1000 * m + 0x4 * n;
}

static u32 pmic_arb_acc_enable_v1(u16 n)
{
	return 0x200 + 0x4 * n;
}

static u32 pmic_arb_acc_enable_v2(u16 n)
{
	return 0x1000 * n;
}

static u32 pmic_arb_irq_status_v1(u16 n)
{
	return 0x600 + 0x4 * n;
}

static u32 pmic_arb_irq_status_v2(u16 n)
{
	return 0x4 + 0x1000 * n;
}

static u32 pmic_arb_irq_clear_v1(u16 n)
{
	return 0xA00 + 0x4 * n;
}

static u32 pmic_arb_irq_clear_v2(u16 n)
{
	return 0x8 + 0x1000 * n;
}

static const struct pmic_arb_ver_ops pmic_arb_v1 = {
	.ver_str		= "v1",
	.ppid_to_apid		= pmic_arb_ppid_to_apid_v1,
	.mode			= pmic_arb_mode_v1_v3,
	.non_data_cmd		= pmic_arb_non_data_cmd_v1,
	.offset			= pmic_arb_offset_v1,
	.fmt_cmd		= pmic_arb_fmt_cmd_v1,
	.owner_acc_status	= pmic_arb_owner_acc_status_v1,
	.acc_enable		= pmic_arb_acc_enable_v1,
	.irq_status		= pmic_arb_irq_status_v1,
	.irq_clear		= pmic_arb_irq_clear_v1,
};

static const struct pmic_arb_ver_ops pmic_arb_v2 = {
	.ver_str		= "v2",
	.ppid_to_apid		= pmic_arb_ppid_to_apid_v2,
	.mode			= pmic_arb_mode_v2,
	.non_data_cmd		= pmic_arb_non_data_cmd_v2,
	.offset			= pmic_arb_offset_v2,
	.fmt_cmd		= pmic_arb_fmt_cmd_v2,
	.owner_acc_status	= pmic_arb_owner_acc_status_v2,
	.acc_enable		= pmic_arb_acc_enable_v2,
	.irq_status		= pmic_arb_irq_status_v2,
	.irq_clear		= pmic_arb_irq_clear_v2,
};

static const struct pmic_arb_ver_ops pmic_arb_v3 = {
	.ver_str		= "v3",
	.ppid_to_apid		= pmic_arb_ppid_to_apid_v2,
	.mode			= pmic_arb_mode_v1_v3,
	.non_data_cmd		= pmic_arb_non_data_cmd_v2,
	.offset			= pmic_arb_offset_v2,
	.fmt_cmd		= pmic_arb_fmt_cmd_v2,
	.owner_acc_status	= pmic_arb_owner_acc_status_v3,
	.acc_enable		= pmic_arb_acc_enable_v2,
	.irq_status		= pmic_arb_irq_status_v2,
	.irq_clear		= pmic_arb_irq_clear_v2,
};

static const struct irq_domain_ops pmic_arb_irq_domain_ops = {
	.map	= qpnpint_irq_domain_map,
	.xlate	= qpnpint_irq_domain_dt_translate,
};
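
/*
 * Illustrative device-tree usage, inferred from the resources and properties
 * the probe below looks up; addresses, sizes and the parent interrupt are
 * placeholders, not taken from any particular board file:
 *
 *	spmi@... {
 *		compatible = "qcom,spmi-pmic-arb";
 *		reg-names = "core", "intr", "cnfg";	// v2+ also: "obsrvr", "chnls"
 *		reg = <...>;
 *		interrupt-names = "periph_irq";
 *		interrupts = <...>;
 *		qcom,ee = <0>;
 *		qcom,channel = <0>;
 *		interrupt-controller;
 *		#interrupt-cells = <4>;	// sid, periph, irq, IRQ_TYPE_*
 *	};
 */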
static int spmi_pmic_arb_probe(struct platform_device *pdev)
{
	struct spmi_pmic_arb *pa;
	struct spmi_controller *ctrl;
	struct resource *res;
	void __iomem *core;
	u32 channel, ee, hw_ver;
	int err;

	ctrl = spmi_controller_alloc(&pdev->dev, sizeof(*pa));
	if (!ctrl)
		return -ENOMEM;

	pa = spmi_controller_get_drvdata(ctrl);
	pa->spmic = ctrl;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "core");
	pa->core_size = resource_size(res);
	if (pa->core_size <= 0x800) {
		dev_err(&pdev->dev, "core_size is smaller than 0x800. Failing Probe\n");
		err = -EINVAL;
		goto err_put_ctrl;
	}

	core = devm_ioremap_resource(&ctrl->dev, res);
	if (IS_ERR(core)) {
		err = PTR_ERR(core);
		goto err_put_ctrl;
	}

	hw_ver = readl_relaxed(core + PMIC_ARB_VERSION);

	if (hw_ver < PMIC_ARB_VERSION_V2_MIN) {
		pa->ver_ops = &pmic_arb_v1;
		pa->wr_base = core;
		pa->rd_base = core;
	} else {
		pa->core = core;

		if (hw_ver < PMIC_ARB_VERSION_V3_MIN)
			pa->ver_ops = &pmic_arb_v2;
		else
			pa->ver_ops = &pmic_arb_v3;

		/* the apid to ppid table starts at PMIC_ARB_REG_CHNL(0) */
		pa->max_periph = (pa->core_size - PMIC_ARB_REG_CHNL(0)) / 4;

		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
						   "obsrvr");
		pa->rd_base = devm_ioremap_resource(&ctrl->dev, res);
		if (IS_ERR(pa->rd_base)) {
			err = PTR_ERR(pa->rd_base);
			goto err_put_ctrl;
		}

		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
						   "chnls");
		pa->wr_base = devm_ioremap_resource(&ctrl->dev, res);
		if (IS_ERR(pa->wr_base)) {
			err = PTR_ERR(pa->wr_base);
			goto err_put_ctrl;
		}

		pa->ppid_to_apid = devm_kcalloc(&ctrl->dev,
						PMIC_ARB_MAX_PPID,
						sizeof(*pa->ppid_to_apid),
						GFP_KERNEL);
		if (!pa->ppid_to_apid) {
			err = -ENOMEM;
			goto err_put_ctrl;
		}
	}

	dev_info(&ctrl->dev, "PMIC arbiter version %s (0x%x)\n",
		 pa->ver_ops->ver_str, hw_ver);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "intr");
	pa->intr = devm_ioremap_resource(&ctrl->dev, res);
	if (IS_ERR(pa->intr)) {
		err = PTR_ERR(pa->intr);
		goto err_put_ctrl;
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cnfg");
	pa->cnfg = devm_ioremap_resource(&ctrl->dev, res);
	if (IS_ERR(pa->cnfg)) {
		err = PTR_ERR(pa->cnfg);
		goto err_put_ctrl;
	}

	pa->irq = platform_get_irq_byname(pdev, "periph_irq");
	if (pa->irq < 0) {
		err = pa->irq;
		goto err_put_ctrl;
	}

	err = of_property_read_u32(pdev->dev.of_node, "qcom,channel", &channel);
	if (err) {
		dev_err(&pdev->dev, "channel unspecified.\n");
		goto err_put_ctrl;
	}

	if (channel > 5) {
		dev_err(&pdev->dev, "invalid channel (%u) specified.\n",
			channel);
		err = -EINVAL;
		goto err_put_ctrl;
	}

	pa->channel = channel;

	err = of_property_read_u32(pdev->dev.of_node, "qcom,ee", &ee);
	if (err) {
		dev_err(&pdev->dev, "EE unspecified.\n");
		goto err_put_ctrl;
	}

	if (ee > 5) {
		dev_err(&pdev->dev, "invalid EE (%u) specified\n", ee);
		err = -EINVAL;
		goto err_put_ctrl;
	}

	pa->ee = ee;

	pa->mapping_table = devm_kcalloc(&ctrl->dev, PMIC_ARB_MAX_PERIPHS - 1,
					 sizeof(*pa->mapping_table), GFP_KERNEL);
	if (!pa->mapping_table) {
		err = -ENOMEM;
		goto err_put_ctrl;
	}

	/* Initialize max_apid/min_apid to the opposite bounds, during
	 * the irq domain translation, we are sure to update these */
	pa->max_apid = 0;
	pa->min_apid = PMIC_ARB_MAX_PERIPHS - 1;

	platform_set_drvdata(pdev, ctrl);
	raw_spin_lock_init(&pa->lock);

	ctrl->cmd = pmic_arb_cmd;
	ctrl->read_cmd = pmic_arb_read_cmd;
	ctrl->write_cmd = pmic_arb_write_cmd;

	dev_dbg(&pdev->dev, "adding irq domain\n");
	pa->domain = irq_domain_add_tree(pdev->dev.of_node,
					 &pmic_arb_irq_domain_ops, pa);
	if (!pa->domain) {
		dev_err(&pdev->dev, "unable to create irq_domain\n");
		err = -ENOMEM;
		goto err_put_ctrl;
	}

	irq_set_chained_handler_and_data(pa->irq, pmic_arb_chained_irq, pa);
	enable_irq_wake(pa->irq);

	err = spmi_controller_add(ctrl);
	if (err)
		goto err_domain_remove;

	return 0;

err_domain_remove:
	irq_set_chained_handler_and_data(pa->irq, NULL, NULL);
	irq_domain_remove(pa->domain);
err_put_ctrl:
	spmi_controller_put(ctrl);
	return err;
}

static int spmi_pmic_arb_remove(struct platform_device *pdev)
{
	struct spmi_controller *ctrl = platform_get_drvdata(pdev);
	struct spmi_pmic_arb *pa = spmi_controller_get_drvdata(ctrl);

	spmi_controller_remove(ctrl);
	irq_set_chained_handler_and_data(pa->irq, NULL, NULL);
	irq_domain_remove(pa->domain);
	spmi_controller_put(ctrl);
	return 0;
}

static const struct of_device_id spmi_pmic_arb_match_table[] = {
	{ .compatible = "qcom,spmi-pmic-arb", },
	{},
};
MODULE_DEVICE_TABLE(of, spmi_pmic_arb_match_table);

static struct platform_driver spmi_pmic_arb_driver = {
	.probe		= spmi_pmic_arb_probe,
	.remove		= spmi_pmic_arb_remove,
	.driver		= {
		.name	= "spmi_pmic_arb",
		.of_match_table = spmi_pmic_arb_match_table,
	},
};
module_platform_driver(spmi_pmic_arb_driver);

MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:spmi_pmic_arb");