1 /* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
13 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
15 #include <linux/kernel.h>
16 #include <linux/module.h>
17 #include <linux/platform_device.h>
19 #include <linux/clk.h>
20 #include <linux/iommu.h>
21 #include <linux/interrupt.h>
22 #include <linux/msm-bus.h>
23 #include <linux/err.h>
24 #include <linux/slab.h>
26 #include <linux/of_address.h>
27 #include <linux/of_device.h>
29 #include "msm_iommu_hw-v1.h"
30 #include <linux/qcom_iommu.h>
31 #include "msm_iommu_perfmon.h"
32 #include <linux/qcom_scm.h>
/* Forward declaration: the context-bank match table is defined near the
 * bottom of this file and is needed by of_platform_populate() in
 * msm_iommu_probe(). */
34 static const struct of_device_id msm_iommu_ctx_match_table
[];
/* Device-tree property names for the BFB ("bus fabric bridge") tuning
 * register/data pairs.  The LPAE configuration uses differently-named
 * properties.
 * NOTE(review): the #else / #endif lines are not visible in this extract;
 * the second pair of definitions presumably sits in the #else branch —
 * confirm against the full file. */
36 #ifdef CONFIG_IOMMU_LPAE
37 static const char *BFB_REG_NODE_NAME
= "qcom,iommu-lpae-bfb-regs";
38 static const char *BFB_DATA_NODE_NAME
= "qcom,iommu-lpae-bfb-data";
40 static const char *BFB_REG_NODE_NAME
= "qcom,iommu-bfb-regs";
41 static const char *BFB_DATA_NODE_NAME
= "qcom,iommu-bfb-data";
/*
 * msm_iommu_parse_bfb_settings() - read optional BFB tuning settings from DT.
 * @pdev:    platform device whose of_node carries the BFB properties
 * @drvdata: driver data that receives the parsed settings
 *
 * Reads the BFB_REG_NODE_NAME / BFB_DATA_NODE_NAME u32 arrays into a
 * devm-allocated msm_iommu_bfb_settings and attaches it to @drvdata.
 * NOTE(review): several original lines are missing from this extract
 * (error returns, local declarations such as nreg/nval/ret); comments
 * below describe only what the visible fragments establish.
 */
44 static int msm_iommu_parse_bfb_settings(struct platform_device
*pdev
,
45 struct msm_iommu_drvdata
*drvdata
)
47 struct msm_iommu_bfb_settings
*bfb_settings
;
/* The two properties must be present (or absent) as a pair: */
52 * It is not valid for a device to have the BFB_REG_NODE_NAME
53 * property but not the BFB_DATA_NODE_NAME property, and vice versa.
55 if (!of_get_property(pdev
->dev
.of_node
, BFB_REG_NODE_NAME
, &nreg
)) {
56 if (of_get_property(pdev
->dev
.of_node
, BFB_DATA_NODE_NAME
,
62 if (!of_get_property(pdev
->dev
.of_node
, BFB_DATA_NODE_NAME
, &nval
))
/* Bound the property byte lengths against the fixed-size arrays in
 * bfb_settings before copying. */
65 if (nreg
>= sizeof(bfb_settings
->regs
))
68 if (nval
>= sizeof(bfb_settings
->data
))
/* devm allocation: freed automatically on probe failure / unbind. */
74 bfb_settings
= devm_kzalloc(&pdev
->dev
, sizeof(*bfb_settings
),
/* Copy the register-address array (nreg bytes -> element count). */
79 ret
= of_property_read_u32_array(pdev
->dev
.of_node
,
82 nreg
/ sizeof(*bfb_settings
->regs
));
/* Copy the matching register-value array. */
86 ret
= of_property_read_u32_array(pdev
->dev
.of_node
,
89 nval
/ sizeof(*bfb_settings
->data
));
93 bfb_settings
->length
= nreg
/ sizeof(*bfb_settings
->regs
);
95 drvdata
->bfb_settings
= bfb_settings
;
/*
 * __get_bus_vote_client() - register a bus-scaling client for this IOMMU.
 * @pdev:    platform device whose DT node may carry "qcom,msm-bus,name"
 * @drvdata: driver data receiving the bus client handle
 *
 * Bus scaling is optional; the "qcom,msm-bus,name" string property is used
 * to detect whether it was specified.  NOTE(review): interior lines
 * (locals, early-return on absence, error path) are missing from this
 * extract.
 */
100 static int __get_bus_vote_client(struct platform_device
*pdev
,
101 struct msm_iommu_drvdata
*drvdata
)
104 struct msm_bus_scale_pdata
*bs_table
;
107 /* Check whether bus scaling has been specified for this node */
108 ret
= of_property_read_string(pdev
->dev
.of_node
, "qcom,msm-bus,name",
113 bs_table
= msm_bus_cl_get_pdata(pdev
);
115 drvdata
->bus_client
= msm_bus_scale_register_client(bs_table
);
/* NOTE(review): BUG — IS_ERR(&drvdata->bus_client) tests the ADDRESS of
 * the bus_client member, which is never in the ERR_PTR range, so this
 * condition is always false and registration failure goes undetected.
 * msm_bus_scale_register_client() presumably signals failure via its
 * return value (0 handle) — verify and test that instead. */
116 if (IS_ERR(&drvdata
->bus_client
)) {
117 pr_err("%s(): Bus client register failed.\n", __func__
);
125 static void __put_bus_vote_client(struct msm_iommu_drvdata
*drvdata
)
127 msm_bus_scale_unregister_client(drvdata
->bus_client
);
128 drvdata
->bus_client
= 0;
/* Secure-designation helpers.  Two sets of implementations are selected
 * by CONFIG_IOMMU_NON_SECURE: no-op stubs (so every SMMU is treated as
 * non-secure and owned by this driver), or the real versions that read
 * the secure id / secure-context markers from the device tree.
 * NOTE(review): the #else and #endif lines are not visible in this
 * extract; the split between stub and real versions is inferred from the
 * surviving #ifdef and duplicated function names — confirm against the
 * full file. */
132 * CONFIG_IOMMU_NON_SECURE allows us to override the secure
133 * designation of SMMUs in device tree. With this config enabled
134 * all SMMUs will be programmed by this driver.
136 #ifdef CONFIG_IOMMU_NON_SECURE
/* Stub: leave drvdata->sec_id untouched (caller pre-sets it to -1). */
137 static inline void get_secure_id(struct device_node
*node
,
138 struct msm_iommu_drvdata
*drvdata
)
/* Stub: force every context to non-secure. */
142 static inline void get_secure_ctx(struct device_node
*node
,
143 struct msm_iommu_drvdata
*iommu_drvdata
,
144 struct msm_iommu_ctx_drvdata
*ctx_drvdata
)
146 ctx_drvdata
->secure_context
= 0;
/* Real version: read "qcom,iommu-secure-id" only when secure (SCM) calls
 * are actually available on this platform. */
149 static void get_secure_id(struct device_node
*node
,
150 struct msm_iommu_drvdata
*drvdata
)
152 if (msm_iommu_get_scm_call_avail())
153 of_property_read_u32(node
, "qcom,iommu-secure-id",
/* Real version: a context bank is secure iff SCM calls are available and
 * the node carries the "qcom,secure-context" boolean property. */
157 static void get_secure_ctx(struct device_node
*node
,
158 struct msm_iommu_drvdata
*iommu_drvdata
,
159 struct msm_iommu_ctx_drvdata
*ctx_drvdata
)
163 if (msm_iommu_get_scm_call_avail())
164 secure_ctx
= of_property_read_bool(node
, "qcom,secure-context");
166 ctx_drvdata
->secure_context
= secure_ctx
;
/*
 * msm_iommu_parse_dt() - populate @drvdata from the IOMMU's DT node.
 * @pdev:    the IOMMU platform device
 * @drvdata: driver data to fill in
 *
 * Registers the bus vote client, parses BFB settings, walks the child
 * (context-bank) nodes, reads the label, secure id and halt-enable flag,
 * then adds the drvdata to the driver-wide list.  On failure the bus
 * vote client is released (error-path fragment at the bottom).
 * NOTE(review): interior lines (ncb counting inside the child loop,
 * error checks after each ret) are missing from this extract.
 */
170 static int msm_iommu_parse_dt(struct platform_device
*pdev
,
171 struct msm_iommu_drvdata
*drvdata
)
173 struct device_node
*child
;
176 drvdata
->dev
= &pdev
->dev
;
178 ret
= __get_bus_vote_client(pdev
, drvdata
);
182 ret
= msm_iommu_parse_bfb_settings(pdev
, drvdata
);
/* Iterate the context-bank child nodes (body not visible here). */
186 for_each_available_child_of_node(pdev
->dev
.of_node
, child
)
189 ret
= of_property_read_string(pdev
->dev
.of_node
, "label",
/* Default to "no secure id"; get_secure_id() may overwrite it. */
194 drvdata
->sec_id
= -1;
195 get_secure_id(pdev
->dev
.of_node
, drvdata
);
197 drvdata
->halt_enabled
= of_property_read_bool(pdev
->dev
.of_node
,
198 "qcom,iommu-enable-halt");
200 msm_iommu_add_drv(drvdata
);
/* Error path: drop the bus vote client taken above. */
205 __put_bus_vote_client(drvdata
);
/*
 * msm_iommu_pmon_parse_dt() - parse performance-monitor DT properties.
 * @pdev:      the IOMMU platform device
 * @pmon_info: pmon descriptor to fill in
 *
 * Reads the event IRQ, group/counter counts and the supported event-class
 * array.  Returns -EPROBE_DEFER if the IRQ provider is not ready yet.
 * NOTE(review): several error-return lines and the evt_irq assignment
 * condition are missing from this extract.
 */
209 static int msm_iommu_pmon_parse_dt(struct platform_device
*pdev
,
210 struct iommu_pmon
*pmon_info
)
212 struct device
*dev
= &pdev
->dev
;
213 struct device_node
*np
= pdev
->dev
.of_node
;
214 unsigned int cls_prop_size
;
/* IRQ 0 of the IOMMU node is the pmon event overflow interrupt. */
217 irq
= platform_get_irq(pdev
, 0);
218 if (irq
< 0 && irq
== -EPROBE_DEFER
)
219 return -EPROBE_DEFER
;
/* -1 marks "no event IRQ"; replaced below when a valid IRQ was found. */
221 pmon_info
->iommu
.evt_irq
= -1;
225 pmon_info
->iommu
.evt_irq
= irq
;
227 ret
= of_property_read_u32(np
, "qcom,iommu-pmu-ngroups",
228 &pmon_info
->num_groups
);
230 dev_err(dev
, "Error reading qcom,iommu-pmu-ngroups\n");
234 ret
= of_property_read_u32(np
, "qcom,iommu-pmu-ncounters",
235 &pmon_info
->num_counters
);
237 dev_err(dev
, "Error reading qcom,iommu-pmu-ncounters\n");
/* cls_prop_size receives the property length in bytes. */
241 if (!of_get_property(np
, "qcom,iommu-pmu-event-classes",
243 dev_err(dev
, "Error reading qcom,iommu-pmu-event-classes\n");
247 pmon_info
->event_cls_supported
= devm_kzalloc(dev
, cls_prop_size
,
249 if (!pmon_info
->event_cls_supported
) {
250 dev_err(dev
, "Unable to get memory for event class array\n");
254 pmon_info
->nevent_cls_supported
= cls_prop_size
/ sizeof(u32
);
256 ret
= of_property_read_u32_array(np
, "qcom,iommu-pmu-event-classes",
257 pmon_info
->event_cls_supported
,
258 pmon_info
->nevent_cls_supported
);
260 dev_err(dev
, "Error reading qcom,iommu-pmu-event-classes\n");
/* SCM service id for the memory-protection (secure page table) calls. */
#define SCM_SVC_MP 0xc
/* Upper bound on the secure virtual address space reported to the
 * secure environment for its page-table pool. */
#define MAXIMUM_VIRT_SIZE (300 * SZ_1M)
/*
 * Pack a major.minor.patch triple into the 10/10/12-bit version word used
 * by the secure-world feature-version interface.  Arguments are fully
 * parenthesized so operator-expression arguments (e.g.
 * MAKE_VERSION(a | b, ...)) expand correctly; the previous form applied
 * the masks to only part of such an argument.
 */
#define MAKE_VERSION(major, minor, patch) \
	((((major) & 0x3FF) << 22) | (((minor) & 0x3FF) << 12) | \
	 ((patch) & 0xFFF))
/*
 * msm_iommu_sec_ptbl_init() - one-time setup of the secure page-table pool.
 * @dev: IOMMU device used for DMA allocation and logging
 *
 * Queries the secure-world feature version, programs the maximum virtual
 * size on new-enough firmware, asks the secure world how much memory it
 * needs for page tables, allocates that memory (without a kernel mapping,
 * since only the secure world touches it) and hands it over via
 * qcom_scm_iommu_secure_ptbl_init().  The static 'allocated' flag
 * presumably makes this run only once — the guard lines are missing from
 * this extract; confirm against the full file.
 */
272 static int msm_iommu_sec_ptbl_init(struct device
*dev
)
274 int psize
[2] = {0, 0};
275 unsigned int spare
= 0;
/* Legacy DMA-attrs API (pre-4.8 kernels). */
280 DEFINE_DMA_ATTRS(attrs
);
281 static bool allocated
= false;
286 version
= qcom_scm_get_feat_version(SCM_SVC_MP
);
/* Firmware >= 1.1.1 supports limiting the secure VA pool size. */
288 if (version
>= MAKE_VERSION(1, 1, 1)) {
289 ret
= qcom_scm_iommu_set_cp_pool_size(MAXIMUM_VIRT_SIZE
, 0);
291 dev_err(dev
, "failed setting max virtual size (%d)\n",
/* Ask secure world for the page-table allocation size (psize[0]). */
297 ret
= qcom_scm_iommu_secure_ptbl_size(spare
, psize
);
299 dev_err(dev
, "failed to get iommu secure pgtable size (%d)\n",
305 dev_err(dev
, "failed to get iommu secure pgtable size (%d)\n",
310 dev_info(dev
, "iommu sec: pgtable size: %d\n", psize
[0]);
/* The buffer is owned by the secure world; no kernel mapping needed. */
312 dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING
, &attrs
);
314 cpu_addr
= dma_alloc_attrs(dev
, psize
[0], &paddr
, GFP_KERNEL
, &attrs
);
316 dev_err(dev
, "failed to allocate %d bytes for pgtable\n",
321 ret
= qcom_scm_iommu_secure_ptbl_init(paddr
, psize
[0], spare
);
323 dev_err(dev
, "failed to init iommu pgtable (%d)\n", ret
);
/* Error path: free the handed-over buffer again. */
332 dma_free_attrs(dev
, psize
[0], cpu_addr
, paddr
, &attrs
);
/*
 * msm_iommu_probe() - probe one SMMU instance.
 * @pdev: the IOMMU platform device
 *
 * Maps registers, acquires clocks, parses the DT, initializes the secure
 * page table when a secure id is present, sets up the performance
 * monitor, requests the global fault IRQs and finally populates the
 * context-bank child devices.  Defers probing until SCM is available.
 * NOTE(review): many interior lines (allocation NULL checks, rate
 * conditionals, irq flag continuation lines) are missing from this
 * extract; comments describe only the visible fragments.
 */
336 static int msm_iommu_probe(struct platform_device
*pdev
)
338 struct device
*dev
= &pdev
->dev
;
339 struct device_node
*np
= pdev
->dev
.of_node
;
340 struct iommu_pmon
*pmon_info
;
341 struct msm_iommu_drvdata
*drvdata
;
342 struct resource
*res
;
344 int global_cfg_irq
, global_client_irq
;
/* Secure-world interface must be up before we can program anything. */
348 if (!qcom_scm_is_available())
349 return -EPROBE_DEFER
;
351 msm_iommu_check_scm_call_avail();
352 msm_set_iommu_access_ops(&iommu_access_ops_v1
);
353 msm_iommu_sec_set_access_ops(&iommu_access_ops_v1
);
355 drvdata
= devm_kzalloc(dev
, sizeof(*drvdata
), GFP_KERNEL
);
/* Map the main register space ("iommu_base"). */
361 res
= platform_get_resource_byname(pdev
, IORESOURCE_MEM
, "iommu_base");
362 drvdata
->base
= devm_ioremap_resource(dev
, res
);
363 if (IS_ERR(drvdata
->base
))
364 return PTR_ERR(drvdata
->base
);
366 drvdata
->glb_base
= drvdata
->base
;
367 drvdata
->phys_base
= res
->start
;
/* Optional second register space; absence is tolerated (set to NULL),
 * but a deferral from the resource provider is not swallowed. */
369 res
= platform_get_resource_byname(pdev
, IORESOURCE_MEM
,
371 drvdata
->smmu_local_base
= devm_ioremap_resource(dev
, res
);
372 if (IS_ERR(drvdata
->smmu_local_base
) &&
373 PTR_ERR(drvdata
->smmu_local_base
) != -EPROBE_DEFER
)
374 drvdata
->smmu_local_base
= NULL
;
376 if (of_device_is_compatible(np
, "qcom,msm-mmu-500"))
377 drvdata
->model
= MMU_500
;
/* Interface and core clocks are mandatory. */
379 drvdata
->iface
= devm_clk_get(dev
, "iface_clk");
380 if (IS_ERR(drvdata
->iface
))
381 return PTR_ERR(drvdata
->iface
);
383 drvdata
->core
= devm_clk_get(dev
, "core_clk");
384 if (IS_ERR(drvdata
->core
))
385 return PTR_ERR(drvdata
->core
);
/* Context banks start at an optional DT-supplied offset, else at the
 * conventional 0x8000. */
387 if (!of_property_read_u32(np
, "qcom,cb-base-offset", &temp
))
388 drvdata
->cb_base
= drvdata
->base
+ temp
;
390 drvdata
->cb_base
= drvdata
->base
+ 0x8000;
/* Give the core clock a sane (minimal) rate if none is set. */
392 rate
= clk_get_rate(drvdata
->core
);
394 rate
= clk_round_rate(drvdata
->core
, 1000);
395 clk_set_rate(drvdata
->core
, rate
);
398 dev_info(&pdev
->dev
, "iface: %lu, core: %lu\n",
399 clk_get_rate(drvdata
->iface
), clk_get_rate(drvdata
->core
));
401 ret
= msm_iommu_parse_dt(pdev
, drvdata
);
405 dev_info(dev
, "device %s (model: %d) mapped at %p, with %d ctx banks\n",
406 drvdata
->name
, drvdata
->model
, drvdata
->base
, drvdata
->ncb
);
/* sec_id != -1 means the DT designated this SMMU as secure-managed. */
408 if (drvdata
->sec_id
!= -1) {
409 ret
= msm_iommu_sec_ptbl_init(dev
);
414 platform_set_drvdata(pdev
, drvdata
);
/* Performance monitor setup is best-effort: parse failure only frees
 * the pmon allocation and logs, it does not fail the probe. */
416 pmon_info
= msm_iommu_pm_alloc(dev
);
418 ret
= msm_iommu_pmon_parse_dt(pdev
, pmon_info
);
420 msm_iommu_pm_free(dev
);
421 dev_info(dev
, "%s: pmon not available\n",
424 pmon_info
->iommu
.base
= drvdata
->base
;
425 pmon_info
->iommu
.ops
= msm_get_iommu_access_ops();
426 pmon_info
->iommu
.hw_ops
= iommu_pm_get_hw_ops_v1();
427 pmon_info
->iommu
.iommu_name
= drvdata
->name
;
428 ret
= msm_iommu_pm_iommu_register(pmon_info
);
430 dev_err(dev
, "%s iommu register fail\n",
432 msm_iommu_pm_free(dev
);
434 dev_dbg(dev
, "%s iommu registered for pmon\n",
435 pmon_info
->iommu
.iommu_name
);
/* Global (non-context) fault IRQs are optional; only a provider
 * deferral propagates, a missing IRQ is skipped. */
440 global_cfg_irq
= platform_get_irq_byname(pdev
, "global_cfg_NS_irq");
441 if (global_cfg_irq
< 0 && global_cfg_irq
== -EPROBE_DEFER
)
442 return -EPROBE_DEFER
;
443 if (global_cfg_irq
> 0) {
444 ret
= devm_request_threaded_irq(dev
, global_cfg_irq
,
446 msm_iommu_global_fault_handler
,
447 IRQF_ONESHOT
| IRQF_SHARED
|
449 "msm_iommu_global_cfg_irq",
452 dev_err(dev
, "Request Global CFG IRQ %d failed with ret=%d\n",
453 global_cfg_irq
, ret
);
457 platform_get_irq_byname(pdev
, "global_client_NS_irq");
458 if (global_client_irq
< 0 && global_client_irq
== -EPROBE_DEFER
)
459 return -EPROBE_DEFER
;
461 if (global_client_irq
> 0) {
462 ret
= devm_request_threaded_irq(dev
, global_client_irq
,
464 msm_iommu_global_fault_handler
,
465 IRQF_ONESHOT
| IRQF_SHARED
|
467 "msm_iommu_global_client_irq",
470 dev_err(dev
, "Request Global Client IRQ %d failed with ret=%d\n",
471 global_client_irq
, ret
);
/* Create the context-bank child platform devices from the DT. */
474 ret
= of_platform_populate(np
, msm_iommu_ctx_match_table
, NULL
, dev
);
476 dev_err(dev
, "Failed to create iommu context device\n");
/*
 * msm_iommu_remove() - unbind one SMMU instance.
 * @pdev: the IOMMU platform device
 *
 * Tears down in reverse of probe: unregisters/frees the performance
 * monitor, drops the bus vote, removes the drvdata from the driver-wide
 * list and clears the platform drvdata pointer.
 */
481 static int msm_iommu_remove(struct platform_device
*pdev
)
483 struct msm_iommu_drvdata
*drv
;
485 msm_iommu_pm_iommu_unregister(&pdev
->dev
);
486 msm_iommu_pm_free(&pdev
->dev
);
488 drv
= platform_get_drvdata(pdev
);
490 __put_bus_vote_client(drv
);
491 msm_iommu_remove_drv(drv
);
492 platform_set_drvdata(pdev
, NULL
);
/*
 * msm_iommu_ctx_parse_dt() - populate a context-bank drvdata from DT.
 * @pdev:        the context-bank platform device (child of the SMMU)
 * @ctx_drvdata: context drvdata to fill in
 *
 * Determines secure/non-secure designation, requests the matching fault
 * IRQ, derives the context-bank number from the MMIO addresses, and
 * parses the SID list and optional SID mask.
 * NOTE(review): interior lines (locals nsid/n_sid_mask, error returns,
 * closing braces) are missing from this extract.
 */
498 static int msm_iommu_ctx_parse_dt(struct platform_device
*pdev
,
499 struct msm_iommu_ctx_drvdata
*ctx_drvdata
)
501 struct resource
*r
, rp
;
502 int irq
= 0, ret
= 0;
503 struct msm_iommu_drvdata
*drvdata
;
506 unsigned long cb_offset
;
/* Parent platform device is the SMMU; its drvdata was set in probe. */
508 drvdata
= dev_get_drvdata(pdev
->dev
.parent
);
510 get_secure_ctx(pdev
->dev
.of_node
, drvdata
, ctx_drvdata
);
/* Secure contexts use IRQ index 1 and the secure fault handler;
 * non-secure contexts use IRQ index 0 (below). */
512 if (ctx_drvdata
->secure_context
) {
513 irq
= platform_get_irq(pdev
, 1);
514 if (irq
< 0 && irq
== -EPROBE_DEFER
)
515 return -EPROBE_DEFER
;
518 ret
= devm_request_threaded_irq(&pdev
->dev
, irq
, NULL
,
519 msm_iommu_secure_fault_handler_v2
,
520 IRQF_ONESHOT
| IRQF_SHARED
,
521 "msm_iommu_secure_irq", pdev
);
523 pr_err("Request IRQ %d failed with ret=%d\n",
529 irq
= platform_get_irq(pdev
, 0);
530 if (irq
< 0 && irq
== -EPROBE_DEFER
)
531 return -EPROBE_DEFER
;
534 ret
= devm_request_threaded_irq(&pdev
->dev
, irq
, NULL
,
535 msm_iommu_fault_handler_v2
,
536 IRQF_ONESHOT
| IRQF_SHARED
,
537 "msm_iommu_nonsecure_irq", pdev
);
539 pr_err("Request IRQ %d failed with ret=%d\n",
/* r = this context bank's MMIO window; rp = the parent SMMU's. */
546 r
= platform_get_resource(pdev
, IORESOURCE_MEM
, 0);
552 ret
= of_address_to_resource(pdev
->dev
.parent
->of_node
, 0, &rp
);
556 /* Calculate the context bank number using the base addresses.
557 * Typically CB0 base address is 0x8000 pages away if the number
558 * of CBs are <=8. So, assume the offset 0x8000 until mentioned
561 cb_offset
= drvdata
->cb_base
- drvdata
->base
;
562 ctx_drvdata
->num
= (r
->start
- rp
.start
- cb_offset
) >> CTX_SHIFT
;
/* Context name: DT "label" if present, else the device name. */
564 if (of_property_read_string(pdev
->dev
.of_node
, "label",
566 ctx_drvdata
->name
= dev_name(&pdev
->dev
);
/* nsid receives the byte length of the SID array. */
568 if (!of_get_property(pdev
->dev
.of_node
, "qcom,iommu-ctx-sids", &nsid
)) {
573 if (nsid
>= sizeof(ctx_drvdata
->sids
)) {
578 if (of_property_read_u32_array(pdev
->dev
.of_node
, "qcom,iommu-ctx-sids",
580 nsid
/ sizeof(*ctx_drvdata
->sids
))) {
585 ctx_drvdata
->nsid
= nsid
;
/* -1 = ASID not assigned yet. */
586 ctx_drvdata
->asid
= -1;
/* Optional per-SID mask; must have one entry per SID when present. */
588 if (!of_get_property(pdev
->dev
.of_node
, "qcom,iommu-sid-mask",
590 memset(ctx_drvdata
->sid_mask
, 0, MAX_NUM_SMR
);
594 if (n_sid_mask
!= nsid
) {
599 if (of_property_read_u32_array(pdev
->dev
.of_node
, "qcom,iommu-sid-mask",
600 ctx_drvdata
->sid_mask
,
601 n_sid_mask
/ sizeof(*ctx_drvdata
->sid_mask
))) {
606 ctx_drvdata
->n_sid_mask
= n_sid_mask
;
/*
 * msm_iommu_ctx_probe() - probe one context-bank child device.
 * @pdev: the context platform device created by of_platform_populate()
 *
 * Allocates the context drvdata, parses its DT node and stores it as the
 * device's drvdata.  Requires a parent (the SMMU) and SCM availability.
 * NOTE(review): allocation NULL check and error-path lines are missing
 * from this extract.
 */
612 static int msm_iommu_ctx_probe(struct platform_device
*pdev
)
614 struct msm_iommu_ctx_drvdata
*ctx_drvdata
;
617 if (!qcom_scm_is_available())
618 return -EPROBE_DEFER
;
/* A context device must be a child of an SMMU device. */
620 if (!pdev
->dev
.parent
)
623 ctx_drvdata
= devm_kzalloc(&pdev
->dev
, sizeof(*ctx_drvdata
),
628 ctx_drvdata
->pdev
= pdev
;
629 INIT_LIST_HEAD(&ctx_drvdata
->attached_elm
);
631 ret
= msm_iommu_ctx_parse_dt(pdev
, ctx_drvdata
);
635 platform_set_drvdata(pdev
, ctx_drvdata
);
637 dev_info(&pdev
->dev
, "context %s using bank %d\n",
638 ctx_drvdata
->name
, ctx_drvdata
->num
);
/*
 * msm_iommu_ctx_remove() - unbind a context-bank device.
 * @pdev: the context platform device
 *
 * Only clears the drvdata pointer; all resources were devm-managed.
 */
643 static int msm_iommu_ctx_remove(struct platform_device
*pdev
)
645 platform_set_drvdata(pdev
, NULL
);
/* OF match table for the parent SMMU devices (v1 and v2 bindings). */
650 static const struct of_device_id msm_iommu_match_table
[] = {
651 { .compatible
= "qcom,msm-smmu-v1", },
652 { .compatible
= "qcom,msm-smmu-v2", },
/* Platform driver for the SMMU instances themselves. */
656 static struct platform_driver msm_iommu_driver
= {
659 .of_match_table
= msm_iommu_match_table
,
661 .probe
= msm_iommu_probe
,
662 .remove
= msm_iommu_remove
,
/* OF match table for the per-context-bank child devices; also used by
 * of_platform_populate() in msm_iommu_probe(). */
665 static const struct of_device_id msm_iommu_ctx_match_table
[] = {
666 { .compatible
= "qcom,msm-smmu-v1-ctx", },
667 { .compatible
= "qcom,msm-smmu-v2-ctx", },
/* Platform driver for the context-bank child devices. */
671 static struct platform_driver msm_iommu_ctx_driver
= {
673 .name
= "msm_iommu_ctx",
674 .of_match_table
= msm_iommu_ctx_match_table
,
676 .probe
= msm_iommu_ctx_probe
,
677 .remove
= msm_iommu_ctx_remove
,
/*
 * msm_iommu_driver_init() - register both platform drivers.
 *
 * The SMMU driver is registered first (its probe creates the context
 * child devices); if the context driver fails to register, the SMMU
 * driver is unregistered again.
 */
680 static int __init
msm_iommu_driver_init(void)
684 ret
= platform_driver_register(&msm_iommu_driver
);
686 pr_err("Failed to register IOMMU driver\n");
690 ret
= platform_driver_register(&msm_iommu_ctx_driver
);
692 pr_err("Failed to register IOMMU context driver\n");
693 platform_driver_unregister(&msm_iommu_driver
);
700 static void __exit
msm_iommu_driver_exit(void)
702 platform_driver_unregister(&msm_iommu_ctx_driver
);
703 platform_driver_unregister(&msm_iommu_driver
);
/* Register at subsys_initcall time so the IOMMU is available before
 * client device drivers probe. */
705 subsys_initcall(msm_iommu_driver_init
);
706 module_exit(msm_iommu_driver_exit
);
708 MODULE_LICENSE("GPL v2");