/*
 * Copyright (c) 2015, Sony Mobile Communications AB.
 * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/soc/qcom/smem.h>
#include <linux/soc/qcom/smem_state.h>
#include <linux/spinlock.h>

/*
 * The Shared Memory Point to Point (SMP2P) protocol facilitates communication
 * of a single 32-bit value between two processors. Each value has a single
 * writer (the local side) and a single reader (the remote side). Values are
 * uniquely identified in the system by the directed edge (local processor ID
 * to remote processor ID) and a string identifier.
 *
 * Each processor is responsible for creating the outgoing SMEM items and each
 * item is writable by the local processor and readable by the remote
 * processor. By using two separate SMEM items that are single-reader and
 * single-writer, SMP2P does not require any remote locking mechanisms.
 *
 * The driver uses the Linux GPIO and interrupt framework to expose a virtual
 * GPIO for each outbound entry and a virtual interrupt controller for each
 * inbound entry.
 */
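
/*
 * From a client's point of view (illustrative sketch only, not part of this
 * file): an outbound entry is driven through the smem_state API, e.g.
 *
 *	unsigned bit;
 *	struct qcom_smem_state *state;
 *
 *	state = qcom_smem_state_get(dev, "stop", &bit);
 *	if (!IS_ERR(state))
 *		qcom_smem_state_update_bits(state, BIT(bit), BIT(bit));
 *
 * where the "stop" con_id and the surrounding driver are hypothetical. An
 * inbound entry is consumed as an ordinary interrupt provided by the virtual
 * interrupt controller registered below.
 */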

#define SMP2P_MAX_ENTRY 16
#define SMP2P_MAX_ENTRY_NAME 16

#define SMP2P_FEATURE_SSR_ACK 0x1

#define SMP2P_MAGIC 0x504d5324

/**
 * struct smp2p_smem_item - in memory communication structure
 * @magic:	magic number
 * @version:	version - must be 1
 * @features:	features flag - currently unused
 * @local_pid:	processor id of sending end
 * @remote_pid:	processor id of receiving end
 * @total_entries: number of entries - always SMP2P_MAX_ENTRY
 * @valid_entries: number of allocated entries
 * @flags:	reserved - currently unused
 * @entries:	individual communication entries
 * @name:	name of the entry
 * @value:	content of the entry
 */
struct smp2p_smem_item {
	u32 magic;
	u8 version;
	unsigned features:24;
	u16 local_pid;
	u16 remote_pid;
	u16 total_entries;
	u16 valid_entries;
	u32 flags;

	struct {
		u8 name[SMP2P_MAX_ENTRY_NAME];
		u32 value;
	} entries[SMP2P_MAX_ENTRY];
} __packed;

/**
 * struct smp2p_entry - driver context matching one entry
 * @node:	list entry to keep track of allocated entries
 * @smp2p:	reference to the device driver context
 * @name:	name of the entry, to match against smp2p_smem_item
 * @value:	pointer to smp2p_smem_item entry value
 * @last_value:	last handled value
 * @domain:	irq_domain for inbound entries
 * @irq_enabled: bitmap to track enabled irq bits
 * @irq_rising:	bitmap to mark irq bits for rising detection
 * @irq_falling: bitmap to mark irq bits for falling detection
 * @state:	smem state handle
 * @lock:	spinlock to protect read-modify-write of the value
 */
struct smp2p_entry {
	struct list_head node;
	struct qcom_smp2p *smp2p;

	const char *name;
	u32 *value;
	u32 last_value;

	struct irq_domain *domain;
	DECLARE_BITMAP(irq_enabled, 32);
	DECLARE_BITMAP(irq_rising, 32);
	DECLARE_BITMAP(irq_falling, 32);

	struct qcom_smem_state *state;

	spinlock_t lock;
};

#define SMP2P_INBOUND	0
#define SMP2P_OUTBOUND	1

/**
 * struct qcom_smp2p - device driver context
 * @dev:	device driver handle
 * @in:		pointer to the inbound smem item
 * @out:	pointer to the outbound smem item
 * @smem_items:	ids of the two smem items
 * @valid_entries: already scanned inbound entries
 * @local_pid:	processor id of the inbound edge
 * @remote_pid:	processor id of the outbound edge
 * @ipc_regmap:	regmap for the outbound ipc
 * @ipc_offset:	offset within the regmap
 * @ipc_bit:	bit in regmap@offset to kick to signal remote processor
 * @inbound:	list of inbound entries
 * @outbound:	list of outbound entries
 */
struct qcom_smp2p {
	struct device *dev;

	struct smp2p_smem_item *in;
	struct smp2p_smem_item *out;

	unsigned smem_items[SMP2P_OUTBOUND + 1];

	unsigned valid_entries;

	unsigned local_pid;
	unsigned remote_pid;

	struct regmap *ipc_regmap;
	int ipc_offset;
	int ipc_bit;

	struct list_head inbound;
	struct list_head outbound;
};

static void qcom_smp2p_kick(struct qcom_smp2p *smp2p)
{
	/* Make sure any updated data is written before the kick */
	wmb();
	regmap_write(smp2p->ipc_regmap, smp2p->ipc_offset, BIT(smp2p->ipc_bit));
}

/**
 * qcom_smp2p_intr() - interrupt handler for incoming notifications
 * @irq:	unused
 * @data:	smp2p driver context
 *
 * Handle notifications from the remote side to handle newly allocated entries
 * or any changes to the state bits of existing entries.
 */
static irqreturn_t qcom_smp2p_intr(int irq, void *data)
{
	struct smp2p_smem_item *in;
	struct smp2p_entry *entry;
	struct qcom_smp2p *smp2p = data;
	unsigned smem_id = smp2p->smem_items[SMP2P_INBOUND];
	unsigned pid = smp2p->remote_pid;
	size_t size;
	int irq_pin;
	u32 status;
	char buf[SMP2P_MAX_ENTRY_NAME];
	u32 val;
	int i;

	in = smp2p->in;

	/* Acquire smem item, if not already found */
	if (!in) {
		in = qcom_smem_get(pid, smem_id, &size);
		if (IS_ERR(in)) {
			dev_err(smp2p->dev,
				"Unable to acquire remote smp2p item\n");
			return IRQ_HANDLED;
		}

		smp2p->in = in;
	}

	/* Match newly created entries */
	for (i = smp2p->valid_entries; i < in->valid_entries; i++) {
		list_for_each_entry(entry, &smp2p->inbound, node) {
			memcpy(buf, in->entries[i].name, sizeof(buf));
			if (!strcmp(buf, entry->name)) {
				entry->value = &in->entries[i].value;
				break;
			}
		}
	}
	smp2p->valid_entries = i;

	/* Fire interrupts based on any value changes */
	list_for_each_entry(entry, &smp2p->inbound, node) {
		/* Ignore entries not yet allocated by the remote side */
		if (!entry->value)
			continue;

		val = readl(entry->value);

		status = val ^ entry->last_value;
		entry->last_value = val;

		/* No changes of this entry? */
		if (!status)
			continue;

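		/*
		 * status holds the bits that toggled since the last pass.
		 * For each enabled and toggled bit, deliver the nested irq
		 * when the new level matches the requested edge: e.g. with
		 * last_value 0x0 and val 0x2, bit 1 is set in status, and if
		 * bit 1 of irq_rising is set the interrupt mapped to hwirq 1
		 * fires.
		 */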
		for_each_set_bit(i, entry->irq_enabled, 32) {
			if (!(status & BIT(i)))
				continue;

			if ((val & BIT(i) && test_bit(i, entry->irq_rising)) ||
			    (!(val & BIT(i)) && test_bit(i, entry->irq_falling))) {
				irq_pin = irq_find_mapping(entry->domain, i);
				handle_nested_irq(irq_pin);
			}
		}
	}

	return IRQ_HANDLED;
}

static void smp2p_mask_irq(struct irq_data *irqd)
{
	struct smp2p_entry *entry = irq_data_get_irq_chip_data(irqd);
	irq_hw_number_t irq = irqd_to_hwirq(irqd);

	clear_bit(irq, entry->irq_enabled);
}

static void smp2p_unmask_irq(struct irq_data *irqd)
{
	struct smp2p_entry *entry = irq_data_get_irq_chip_data(irqd);
	irq_hw_number_t irq = irqd_to_hwirq(irqd);

	set_bit(irq, entry->irq_enabled);
}

static int smp2p_set_irq_type(struct irq_data *irqd, unsigned int type)
{
	struct smp2p_entry *entry = irq_data_get_irq_chip_data(irqd);
	irq_hw_number_t irq = irqd_to_hwirq(irqd);

	if (!(type & IRQ_TYPE_EDGE_BOTH))
		return -EINVAL;

	if (type & IRQ_TYPE_EDGE_RISING)
		set_bit(irq, entry->irq_rising);
	else
		clear_bit(irq, entry->irq_rising);

	if (type & IRQ_TYPE_EDGE_FALLING)
		set_bit(irq, entry->irq_falling);
	else
		clear_bit(irq, entry->irq_falling);

	return 0;
}

static struct irq_chip smp2p_irq_chip = {
	.name = "smp2p",
	.irq_mask = smp2p_mask_irq,
	.irq_unmask = smp2p_unmask_irq,
	.irq_set_type = smp2p_set_irq_type,
};

static int smp2p_irq_map(struct irq_domain *d,
			 unsigned int irq,
			 irq_hw_number_t hw)
{
	struct smp2p_entry *entry = d->host_data;

	irq_set_chip_and_handler(irq, &smp2p_irq_chip, handle_level_irq);
	irq_set_chip_data(irq, entry);
	irq_set_nested_thread(irq, 1);
	irq_set_noprobe(irq);

	return 0;
}

static const struct irq_domain_ops smp2p_irq_ops = {
	.map = smp2p_irq_map,
	.xlate = irq_domain_xlate_twocell,
};

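/*
 * Each inbound entry is registered as a two-cell interrupt controller, so a
 * client that wants to be notified about a specific state bit references the
 * entry node as its interrupt parent. Illustrative only; the node name and
 * bit number are examples, not taken from this file:
 *
 *	interrupts-extended = <&modem_smp2p_in 0 IRQ_TYPE_EDGE_RISING>;
 *
 * where the first cell is the bit within the 32-bit entry value and the
 * second cell is the usual interrupt trigger type.
 */
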
static int qcom_smp2p_inbound_entry(struct qcom_smp2p *smp2p,
				    struct smp2p_entry *entry,
				    struct device_node *node)
{
	entry->domain = irq_domain_add_linear(node, 32, &smp2p_irq_ops, entry);
	if (!entry->domain) {
		dev_err(smp2p->dev, "failed to add irq_domain\n");
		return -ENOMEM;
	}

	return 0;
}

static int smp2p_update_bits(void *data, u32 mask, u32 value)
{
	struct smp2p_entry *entry = data;
	u32 orig;
	u32 val;

	spin_lock(&entry->lock);
	val = orig = readl(entry->value);
	val &= ~mask;
	val |= value;
	writel(val, entry->value);
	spin_unlock(&entry->lock);

	if (val != orig)
		qcom_smp2p_kick(entry->smp2p);

	return 0;
}

static const struct qcom_smem_state_ops smp2p_state_ops = {
	.update_bits = smp2p_update_bits,
};

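/*
 * Note: consumers never call smp2p_update_bits() directly; the smem_state
 * framework invokes it when a client calls qcom_smem_state_update_bits() on
 * the handle registered in qcom_smp2p_outbound_entry() below.
 */
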
static int qcom_smp2p_outbound_entry(struct qcom_smp2p *smp2p,
				     struct smp2p_entry *entry,
				     struct device_node *node)
{
	struct smp2p_smem_item *out = smp2p->out;
	char buf[SMP2P_MAX_ENTRY_NAME] = {};

	/* Allocate an entry from the smem item */
	strlcpy(buf, entry->name, SMP2P_MAX_ENTRY_NAME);
	memcpy(out->entries[out->valid_entries].name, buf, SMP2P_MAX_ENTRY_NAME);

	/* Make the logical entry reference the physical value */
	entry->value = &out->entries[out->valid_entries].value;

	out->valid_entries++;

	entry->state = qcom_smem_state_register(node, &smp2p_state_ops, entry);
	if (IS_ERR(entry->state)) {
		dev_err(smp2p->dev, "failed to register qcom_smem_state\n");
		return PTR_ERR(entry->state);
	}

	return 0;
}

static int qcom_smp2p_alloc_outbound_item(struct qcom_smp2p *smp2p)
{
	struct smp2p_smem_item *out;
	unsigned smem_id = smp2p->smem_items[SMP2P_OUTBOUND];
	unsigned pid = smp2p->remote_pid;
	int ret;

	ret = qcom_smem_alloc(pid, smem_id, sizeof(*out));
	if (ret < 0 && ret != -EEXIST) {
		if (ret != -EPROBE_DEFER)
			dev_err(smp2p->dev,
				"unable to allocate local smp2p item\n");
		return ret;
	}

	out = qcom_smem_get(pid, smem_id, NULL);
	if (IS_ERR(out)) {
		dev_err(smp2p->dev, "Unable to acquire local smp2p item\n");
		return PTR_ERR(out);
	}

	memset(out, 0, sizeof(*out));
	out->magic = SMP2P_MAGIC;
	out->local_pid = smp2p->local_pid;
	out->remote_pid = smp2p->remote_pid;
	out->total_entries = SMP2P_MAX_ENTRY;
	out->valid_entries = 0;

	/*
	 * Make sure the rest of the header is written before we validate the
	 * item by writing a valid version number.
	 */
	wmb();
	out->version = 1;

	qcom_smp2p_kick(smp2p);

	smp2p->out = out;

	return 0;
}

static int smp2p_parse_ipc(struct qcom_smp2p *smp2p)
{
	struct device_node *syscon;
	struct device *dev = smp2p->dev;
	const char *key;
	int ret;

	syscon = of_parse_phandle(dev->of_node, "qcom,ipc", 0);
	if (!syscon) {
		dev_err(dev, "no qcom,ipc node\n");
		return -ENODEV;
	}

	smp2p->ipc_regmap = syscon_node_to_regmap(syscon);
	if (IS_ERR(smp2p->ipc_regmap))
		return PTR_ERR(smp2p->ipc_regmap);

	key = "qcom,ipc";
	ret = of_property_read_u32_index(dev->of_node, key, 1, &smp2p->ipc_offset);
	if (ret < 0) {
		dev_err(dev, "no offset in %s\n", key);
		return -EINVAL;
	}

	ret = of_property_read_u32_index(dev->of_node, key, 2, &smp2p->ipc_bit);
	if (ret < 0) {
		dev_err(dev, "no bit in %s\n", key);
		return -EINVAL;
	}

	return 0;
}

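/*
 * The "qcom,ipc" property is expected to carry three cells: a phandle to the
 * syscon used for outgoing kicks, the register offset within that regmap and
 * the bit to write. Illustrative values only; the phandle, offset and bit are
 * platform specific:
 *
 *	qcom,ipc = <&apcs 8 14>;
 */
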
static int qcom_smp2p_probe(struct platform_device *pdev)
{
	struct smp2p_entry *entry;
	struct device_node *node;
	struct qcom_smp2p *smp2p;
	const char *key;
	int irq;
	int ret;

	smp2p = devm_kzalloc(&pdev->dev, sizeof(*smp2p), GFP_KERNEL);
	if (!smp2p)
		return -ENOMEM;

	smp2p->dev = &pdev->dev;
	INIT_LIST_HEAD(&smp2p->inbound);
	INIT_LIST_HEAD(&smp2p->outbound);

	platform_set_drvdata(pdev, smp2p);

	ret = smp2p_parse_ipc(smp2p);
	if (ret)
		return ret;

	key = "qcom,smem";
	ret = of_property_read_u32_array(pdev->dev.of_node, key,
					 smp2p->smem_items, 2);
	if (ret)
		return ret;

	key = "qcom,local-pid";
	ret = of_property_read_u32(pdev->dev.of_node, key, &smp2p->local_pid);
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to read %s\n", key);
		return -EINVAL;
	}

	key = "qcom,remote-pid";
	ret = of_property_read_u32(pdev->dev.of_node, key, &smp2p->remote_pid);
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to read %s\n", key);
		return -EINVAL;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "unable to acquire smp2p interrupt\n");
		return irq;
	}

	ret = qcom_smp2p_alloc_outbound_item(smp2p);
	if (ret < 0)
		return ret;

	for_each_available_child_of_node(pdev->dev.of_node, node) {
		entry = devm_kzalloc(&pdev->dev, sizeof(*entry), GFP_KERNEL);
		if (!entry) {
			ret = -ENOMEM;
			goto unwind_interfaces;
		}

		entry->smp2p = smp2p;
		spin_lock_init(&entry->lock);

		ret = of_property_read_string(node, "qcom,entry-name", &entry->name);
		if (ret < 0)
			goto unwind_interfaces;

		if (of_property_read_bool(node, "interrupt-controller")) {
			ret = qcom_smp2p_inbound_entry(smp2p, entry, node);
			if (ret < 0)
				goto unwind_interfaces;

			list_add(&entry->node, &smp2p->inbound);
		} else {
			ret = qcom_smp2p_outbound_entry(smp2p, entry, node);
			if (ret < 0)
				goto unwind_interfaces;

			list_add(&entry->node, &smp2p->outbound);
		}
	}

	/* Kick the outgoing edge after allocating entries */
	qcom_smp2p_kick(smp2p);

	ret = devm_request_threaded_irq(&pdev->dev, irq,
					NULL, qcom_smp2p_intr,
					IRQF_ONESHOT,
					"smp2p", (void *)smp2p);
	if (ret) {
		dev_err(&pdev->dev, "failed to request interrupt\n");
		goto unwind_interfaces;
	}

	return 0;

unwind_interfaces:
	list_for_each_entry(entry, &smp2p->inbound, node)
		irq_domain_remove(entry->domain);

	list_for_each_entry(entry, &smp2p->outbound, node)
		qcom_smem_state_unregister(entry->state);

	smp2p->out->valid_entries = 0;

	return ret;
}

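/*
 * Putting it together, an smp2p edge is described by a device tree node such
 * as the following. Illustrative sketch only; the smem item ids, interrupt,
 * mailbox cells, processor ids and entry names are platform specific and not
 * taken from this file:
 *
 *	smp2p-modem {
 *		compatible = "qcom,smp2p";
 *		qcom,smem = <435>, <428>;
 *		interrupts = <GIC_SPI 27 IRQ_TYPE_EDGE_RISING>;
 *		qcom,ipc = <&apcs 8 14>;
 *		qcom,local-pid = <0>;
 *		qcom,remote-pid = <1>;
 *
 *		modem_smp2p_out: master-kernel {
 *			qcom,entry-name = "master-kernel";
 *			#qcom,smem-state-cells = <1>;
 *		};
 *
 *		modem_smp2p_in: slave-kernel {
 *			qcom,entry-name = "slave-kernel";
 *			interrupt-controller;
 *			#interrupt-cells = <2>;
 *		};
 *	};
 */
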
static int qcom_smp2p_remove(struct platform_device *pdev)
{
	struct qcom_smp2p *smp2p = platform_get_drvdata(pdev);
	struct smp2p_entry *entry;

	list_for_each_entry(entry, &smp2p->inbound, node)
		irq_domain_remove(entry->domain);

	list_for_each_entry(entry, &smp2p->outbound, node)
		qcom_smem_state_unregister(entry->state);

	smp2p->out->valid_entries = 0;

	return 0;
}

static const struct of_device_id qcom_smp2p_of_match[] = {
	{ .compatible = "qcom,smp2p" },
	{}
};
MODULE_DEVICE_TABLE(of, qcom_smp2p_of_match);

static struct platform_driver qcom_smp2p_driver = {
	.probe = qcom_smp2p_probe,
	.remove = qcom_smp2p_remove,
	.driver = {
		.name = "qcom_smp2p",
		.of_match_table = qcom_smp2p_of_match,
	},
};
module_platform_driver(qcom_smp2p_driver);

MODULE_DESCRIPTION("Qualcomm Shared Memory Point to Point driver");
MODULE_LICENSE("GPL v2");