iommu: qcom: check scm availability before initialization.
/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/iommu.h>
#include <linux/interrupt.h>
#include <linux/msm-bus.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>

#include "msm_iommu_hw-v1.h"
#include <linux/qcom_iommu.h>
#include "msm_iommu_perfmon.h"
#include <linux/qcom_scm.h>

static const struct of_device_id msm_iommu_ctx_match_table[];

#ifdef CONFIG_IOMMU_LPAE
static const char *BFB_REG_NODE_NAME = "qcom,iommu-lpae-bfb-regs";
static const char *BFB_DATA_NODE_NAME = "qcom,iommu-lpae-bfb-data";
#else
static const char *BFB_REG_NODE_NAME = "qcom,iommu-bfb-regs";
static const char *BFB_DATA_NODE_NAME = "qcom,iommu-bfb-data";
#endif

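/*
 * Read the optional BFB tuning settings from the IOMMU's device tree node.
 * The BFB_REG_NODE_NAME and BFB_DATA_NODE_NAME properties must either both
 * be present (with matching lengths that fit the fixed-size arrays in
 * struct msm_iommu_bfb_settings) or both be absent; anything else is
 * rejected with -EINVAL. On success the parsed register/value pairs are
 * stored in drvdata->bfb_settings.
 */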
static int msm_iommu_parse_bfb_settings(struct platform_device *pdev,
					struct msm_iommu_drvdata *drvdata)
{
	struct msm_iommu_bfb_settings *bfb_settings;
	u32 nreg, nval;
	int ret;

	/*
	 * It is not valid for a device to have the BFB_REG_NODE_NAME
	 * property but not the BFB_DATA_NODE_NAME property, and vice versa.
	 */
	if (!of_get_property(pdev->dev.of_node, BFB_REG_NODE_NAME, &nreg)) {
		if (of_get_property(pdev->dev.of_node, BFB_DATA_NODE_NAME,
				    &nval))
			return -EINVAL;
		return 0;
	}

	if (!of_get_property(pdev->dev.of_node, BFB_DATA_NODE_NAME, &nval))
		return -EINVAL;

	if (nreg >= sizeof(bfb_settings->regs))
		return -EINVAL;

	if (nval >= sizeof(bfb_settings->data))
		return -EINVAL;

	if (nval != nreg)
		return -EINVAL;

	bfb_settings = devm_kzalloc(&pdev->dev, sizeof(*bfb_settings),
				    GFP_KERNEL);
	if (!bfb_settings)
		return -ENOMEM;

	ret = of_property_read_u32_array(pdev->dev.of_node,
					 BFB_REG_NODE_NAME,
					 bfb_settings->regs,
					 nreg / sizeof(*bfb_settings->regs));
	if (ret)
		return ret;

	ret = of_property_read_u32_array(pdev->dev.of_node,
					 BFB_DATA_NODE_NAME,
					 bfb_settings->data,
					 nval / sizeof(*bfb_settings->data));
	if (ret)
		return ret;

	bfb_settings->length = nreg / sizeof(*bfb_settings->regs);

	drvdata->bfb_settings = bfb_settings;

	return 0;
}

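/*
 * Register a bus-scaling client if the node carries bus-scaling properties.
 * The "qcom,msm-bus,name" property is only used as a hint that a scaling
 * table exists; the table itself is fetched with msm_bus_cl_get_pdata().
 * A missing property is not an error.
 */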
static int __get_bus_vote_client(struct platform_device *pdev,
				 struct msm_iommu_drvdata *drvdata)
{
	int ret = 0;
	struct msm_bus_scale_pdata *bs_table;
	const char *dummy;

	/* Check whether bus scaling has been specified for this node */
	ret = of_property_read_string(pdev->dev.of_node, "qcom,msm-bus,name",
				      &dummy);
	if (ret)
		return 0;

	bs_table = msm_bus_cl_get_pdata(pdev);
	if (bs_table) {
		drvdata->bus_client = msm_bus_scale_register_client(bs_table);
		/* msm_bus_scale_register_client() returns 0 on failure */
		if (!drvdata->bus_client) {
			pr_err("%s(): Bus client register failed.\n", __func__);
			ret = -EINVAL;
		}
	}

	return ret;
}

static void __put_bus_vote_client(struct msm_iommu_drvdata *drvdata)
{
	msm_bus_scale_unregister_client(drvdata->bus_client);
	drvdata->bus_client = 0;
}

/*
 * CONFIG_IOMMU_NON_SECURE allows us to override the secure
 * designation of SMMUs in device tree. With this config enabled
 * all SMMUs will be programmed by this driver.
 */
#ifdef CONFIG_IOMMU_NON_SECURE
static inline void get_secure_id(struct device_node *node,
				 struct msm_iommu_drvdata *drvdata)
{
}

static inline void get_secure_ctx(struct device_node *node,
				  struct msm_iommu_drvdata *iommu_drvdata,
				  struct msm_iommu_ctx_drvdata *ctx_drvdata)
{
	ctx_drvdata->secure_context = 0;
}
#else
static void get_secure_id(struct device_node *node,
			  struct msm_iommu_drvdata *drvdata)
{
	if (msm_iommu_get_scm_call_avail())
		of_property_read_u32(node, "qcom,iommu-secure-id",
				     &drvdata->sec_id);
}

static void get_secure_ctx(struct device_node *node,
			   struct msm_iommu_drvdata *iommu_drvdata,
			   struct msm_iommu_ctx_drvdata *ctx_drvdata)
{
	u32 secure_ctx = 0;

	if (msm_iommu_get_scm_call_avail())
		secure_ctx = of_property_read_bool(node, "qcom,secure-context");

	ctx_drvdata->secure_context = secure_ctx;
}
#endif

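/*
 * Parse the per-IOMMU properties: bus-scaling client, BFB settings, number
 * of context-bank child nodes, "label", the secure id (when SCM calls are
 * available) and "qcom,iommu-enable-halt". On success the drvdata is added
 * to the global driver list via msm_iommu_add_drv().
 */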
static int msm_iommu_parse_dt(struct platform_device *pdev,
			      struct msm_iommu_drvdata *drvdata)
{
	struct device_node *child;
	int ret;

	drvdata->dev = &pdev->dev;

	ret = __get_bus_vote_client(pdev, drvdata);
	if (ret)
		goto fail;

	ret = msm_iommu_parse_bfb_settings(pdev, drvdata);
	if (ret)
		goto fail;

	for_each_available_child_of_node(pdev->dev.of_node, child)
		drvdata->ncb++;

	ret = of_property_read_string(pdev->dev.of_node, "label",
				      &drvdata->name);
	if (ret)
		goto fail;

	drvdata->sec_id = -1;
	get_secure_id(pdev->dev.of_node, drvdata);

	drvdata->halt_enabled = of_property_read_bool(pdev->dev.of_node,
						      "qcom,iommu-enable-halt");

	msm_iommu_add_drv(drvdata);

	return 0;

fail:
	__put_bus_vote_client(drvdata);
	return ret;
}

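/*
 * Parse the performance-monitor properties for this IOMMU: the event
 * interrupt, "qcom,iommu-pmu-ngroups", "qcom,iommu-pmu-ncounters" and the
 * list of supported event classes.
 */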
static int msm_iommu_pmon_parse_dt(struct platform_device *pdev,
				   struct iommu_pmon *pmon_info)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = pdev->dev.of_node;
	unsigned int cls_prop_size;
	int ret, irq;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0 && irq == -EPROBE_DEFER)
		return -EPROBE_DEFER;
	if (irq <= 0) {
		pmon_info->iommu.evt_irq = -1;
		return irq;
	}

	pmon_info->iommu.evt_irq = irq;

	ret = of_property_read_u32(np, "qcom,iommu-pmu-ngroups",
				   &pmon_info->num_groups);
	if (ret) {
		dev_err(dev, "Error reading qcom,iommu-pmu-ngroups\n");
		return ret;
	}

	ret = of_property_read_u32(np, "qcom,iommu-pmu-ncounters",
				   &pmon_info->num_counters);
	if (ret) {
		dev_err(dev, "Error reading qcom,iommu-pmu-ncounters\n");
		return ret;
	}

	if (!of_get_property(np, "qcom,iommu-pmu-event-classes",
			     &cls_prop_size)) {
		dev_err(dev, "Error reading qcom,iommu-pmu-event-classes\n");
		return -EINVAL;
	}

	pmon_info->event_cls_supported = devm_kzalloc(dev, cls_prop_size,
						      GFP_KERNEL);
	if (!pmon_info->event_cls_supported) {
		dev_err(dev, "Unable to get memory for event class array\n");
		return -ENOMEM;
	}

	pmon_info->nevent_cls_supported = cls_prop_size / sizeof(u32);

	ret = of_property_read_u32_array(np, "qcom,iommu-pmu-event-classes",
					 pmon_info->event_cls_supported,
					 pmon_info->nevent_cls_supported);
	if (ret) {
		dev_err(dev, "Error reading qcom,iommu-pmu-event-classes\n");
		return ret;
	}

	return 0;
}

#define SCM_SVC_MP 0xc
#define MAXIMUM_VIRT_SIZE (300 * SZ_1M)
#define MAKE_VERSION(major, minor, patch) \
	(((major & 0x3FF) << 22) | ((minor & 0x3FF) << 12) | (patch & 0xFFF))

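/*
 * Allocate and hand over the secure page-table pool. This is done once per
 * boot: the SCM firmware reports the required size, the pool is allocated
 * with DMA_ATTR_NO_KERNEL_MAPPING (the kernel never touches it directly),
 * and qcom_scm_iommu_secure_ptbl_init() passes ownership to the secure
 * world. SCM feature version 1.1.1 or newer is first told the maximum
 * virtual range (MAXIMUM_VIRT_SIZE) it may map from the pool.
 */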
static int msm_iommu_sec_ptbl_init(struct device *dev)
{
	int psize[2] = {0, 0};
	unsigned int spare = 0;
	int ret;
	int version;
	void *cpu_addr;
	dma_addr_t paddr;
	DEFINE_DMA_ATTRS(attrs);
	static bool allocated = false;

	if (allocated)
		return 0;

	version = qcom_scm_get_feat_version(SCM_SVC_MP);

	if (version >= MAKE_VERSION(1, 1, 1)) {
		ret = qcom_scm_iommu_set_cp_pool_size(MAXIMUM_VIRT_SIZE, 0);
		if (ret) {
			dev_err(dev, "failed setting max virtual size (%d)\n",
				ret);
			return ret;
		}
	}

	ret = qcom_scm_iommu_secure_ptbl_size(spare, psize);
	if (ret) {
		dev_err(dev, "failed to get iommu secure pgtable size (%d)\n",
			ret);
		return ret;
	}

	if (psize[1]) {
		dev_err(dev, "failed to get iommu secure pgtable size (%d)\n",
			psize[1]);
		return psize[1];
	}

	dev_info(dev, "iommu sec: pgtable size: %d\n", psize[0]);

	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);

	cpu_addr = dma_alloc_attrs(dev, psize[0], &paddr, GFP_KERNEL, &attrs);
	if (!cpu_addr) {
		dev_err(dev, "failed to allocate %d bytes for pgtable\n",
			psize[0]);
		return -ENOMEM;
	}

	ret = qcom_scm_iommu_secure_ptbl_init(paddr, psize[0], spare);
	if (ret) {
		dev_err(dev, "failed to init iommu pgtable (%d)\n", ret);
		goto free_mem;
	}

	allocated = true;

	return 0;

free_mem:
	dma_free_attrs(dev, psize[0], cpu_addr, paddr, &attrs);
	return ret;
}

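/*
 * Probe one IOMMU instance. Probing is deferred until the SCM interface is
 * available, since the secure id and the secure page-table setup both rely
 * on SCM calls. The probe path maps "iommu_base" (and optionally
 * "smmu_local_base"), acquires the interface and core clocks, parses the DT
 * properties, initialises the secure page table when a secure id was found,
 * registers the performance monitor if present, requests the global fault
 * interrupts and finally populates the context-bank child devices.
 */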
static int msm_iommu_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = pdev->dev.of_node;
	struct iommu_pmon *pmon_info;
	struct msm_iommu_drvdata *drvdata;
	struct resource *res;
	int ret;
	int global_cfg_irq, global_client_irq;
	u32 temp;
	unsigned long rate;

	if (!qcom_scm_is_available())
		return -EPROBE_DEFER;

	msm_iommu_check_scm_call_avail();
	msm_set_iommu_access_ops(&iommu_access_ops_v1);
	msm_iommu_sec_set_access_ops(&iommu_access_ops_v1);

	drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
	if (!drvdata)
		return -ENOMEM;

	drvdata->dev = dev;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "iommu_base");
	drvdata->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(drvdata->base))
		return PTR_ERR(drvdata->base);

	drvdata->glb_base = drvdata->base;
	drvdata->phys_base = res->start;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
					   "smmu_local_base");
	drvdata->smmu_local_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(drvdata->smmu_local_base) &&
	    PTR_ERR(drvdata->smmu_local_base) != -EPROBE_DEFER)
		drvdata->smmu_local_base = NULL;

	if (of_device_is_compatible(np, "qcom,msm-mmu-500"))
		drvdata->model = MMU_500;

	drvdata->iface = devm_clk_get(dev, "iface_clk");
	if (IS_ERR(drvdata->iface))
		return PTR_ERR(drvdata->iface);

	drvdata->core = devm_clk_get(dev, "core_clk");
	if (IS_ERR(drvdata->core))
		return PTR_ERR(drvdata->core);

	if (!of_property_read_u32(np, "qcom,cb-base-offset", &temp))
		drvdata->cb_base = drvdata->base + temp;
	else
		drvdata->cb_base = drvdata->base + 0x8000;

	rate = clk_get_rate(drvdata->core);
	if (!rate) {
		rate = clk_round_rate(drvdata->core, 1000);
		clk_set_rate(drvdata->core, rate);
	}

	dev_info(&pdev->dev, "iface: %lu, core: %lu\n",
		 clk_get_rate(drvdata->iface), clk_get_rate(drvdata->core));

	ret = msm_iommu_parse_dt(pdev, drvdata);
	if (ret)
		return ret;

	dev_info(dev, "device %s (model: %d) mapped at %p, with %d ctx banks\n",
		 drvdata->name, drvdata->model, drvdata->base, drvdata->ncb);

	if (drvdata->sec_id != -1) {
		ret = msm_iommu_sec_ptbl_init(dev);
		if (ret)
			return ret;
	}

	platform_set_drvdata(pdev, drvdata);

	pmon_info = msm_iommu_pm_alloc(dev);
	if (pmon_info) {
		ret = msm_iommu_pmon_parse_dt(pdev, pmon_info);
		if (ret) {
			msm_iommu_pm_free(dev);
			dev_info(dev, "%s: pmon not available\n",
				 drvdata->name);
		} else {
			pmon_info->iommu.base = drvdata->base;
			pmon_info->iommu.ops = msm_get_iommu_access_ops();
			pmon_info->iommu.hw_ops = iommu_pm_get_hw_ops_v1();
			pmon_info->iommu.iommu_name = drvdata->name;
			ret = msm_iommu_pm_iommu_register(pmon_info);
			if (ret) {
				dev_err(dev, "%s iommu register fail\n",
					drvdata->name);
				msm_iommu_pm_free(dev);
			} else {
				dev_dbg(dev, "%s iommu registered for pmon\n",
					pmon_info->iommu.iommu_name);
			}
		}
	}

	global_cfg_irq = platform_get_irq_byname(pdev, "global_cfg_NS_irq");
	if (global_cfg_irq < 0 && global_cfg_irq == -EPROBE_DEFER)
		return -EPROBE_DEFER;
	if (global_cfg_irq > 0) {
		ret = devm_request_threaded_irq(dev, global_cfg_irq,
						NULL,
						msm_iommu_global_fault_handler,
						IRQF_ONESHOT | IRQF_SHARED |
						IRQF_TRIGGER_RISING,
						"msm_iommu_global_cfg_irq",
						pdev);
		if (ret < 0)
			dev_err(dev, "Request Global CFG IRQ %d failed with ret=%d\n",
				global_cfg_irq, ret);
	}

	global_client_irq =
		platform_get_irq_byname(pdev, "global_client_NS_irq");
	if (global_client_irq < 0 && global_client_irq == -EPROBE_DEFER)
		return -EPROBE_DEFER;

	if (global_client_irq > 0) {
		ret = devm_request_threaded_irq(dev, global_client_irq,
						NULL,
						msm_iommu_global_fault_handler,
						IRQF_ONESHOT | IRQF_SHARED |
						IRQF_TRIGGER_RISING,
						"msm_iommu_global_client_irq",
						pdev);
		if (ret < 0)
			dev_err(dev, "Request Global Client IRQ %d failed with ret=%d\n",
				global_client_irq, ret);
	}

	ret = of_platform_populate(np, msm_iommu_ctx_match_table, NULL, dev);
	if (ret)
		dev_err(dev, "Failed to create iommu context device\n");

	return ret;
}

static int msm_iommu_remove(struct platform_device *pdev)
{
	struct msm_iommu_drvdata *drv;

	msm_iommu_pm_iommu_unregister(&pdev->dev);
	msm_iommu_pm_free(&pdev->dev);

	drv = platform_get_drvdata(pdev);
	if (drv) {
		__put_bus_vote_client(drv);
		msm_iommu_remove_drv(drv);
		platform_set_drvdata(pdev, NULL);
	}

	return 0;
}

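/*
 * Parse one context-bank child node: request the (secure or non-secure)
 * fault interrupt, derive the context-bank number from the offset of the
 * bank's registers relative to the parent's context-bank base, and read
 * "label", "qcom,iommu-ctx-sids" and the optional "qcom,iommu-sid-mask".
 */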
static int msm_iommu_ctx_parse_dt(struct platform_device *pdev,
				  struct msm_iommu_ctx_drvdata *ctx_drvdata)
{
	struct resource *r, rp;
	int irq = 0, ret = 0;
	struct msm_iommu_drvdata *drvdata;
	u32 nsid;
	u32 n_sid_mask;
	unsigned long cb_offset;

	drvdata = dev_get_drvdata(pdev->dev.parent);

	get_secure_ctx(pdev->dev.of_node, drvdata, ctx_drvdata);

	if (ctx_drvdata->secure_context) {
		irq = platform_get_irq(pdev, 1);
		if (irq < 0 && irq == -EPROBE_DEFER)
			return -EPROBE_DEFER;

		if (irq > 0) {
			ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
					msm_iommu_secure_fault_handler_v2,
					IRQF_ONESHOT | IRQF_SHARED,
					"msm_iommu_secure_irq", pdev);
			if (ret) {
				pr_err("Request IRQ %d failed with ret=%d\n",
				       irq, ret);
				return ret;
			}
		}
	} else {
		irq = platform_get_irq(pdev, 0);
		if (irq < 0 && irq == -EPROBE_DEFER)
			return -EPROBE_DEFER;

		if (irq > 0) {
			ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
					msm_iommu_fault_handler_v2,
					IRQF_ONESHOT | IRQF_SHARED,
					"msm_iommu_nonsecure_irq", pdev);
			if (ret) {
				pr_err("Request IRQ %d failed with ret=%d\n",
				       irq, ret);
				goto out;
			}
		}
	}

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!r) {
		ret = -EINVAL;
		goto out;
	}

	ret = of_address_to_resource(pdev->dev.parent->of_node, 0, &rp);
	if (ret)
		goto out;

	/*
	 * Calculate the context bank number using the base addresses.
	 * Typically the CB0 base address is 0x8000 bytes from the IOMMU
	 * base when there are <= 8 CBs, so assume an offset of 0x8000
	 * unless one is given explicitly.
	 */
	cb_offset = drvdata->cb_base - drvdata->base;
	ctx_drvdata->num = (r->start - rp.start - cb_offset) >> CTX_SHIFT;

	if (of_property_read_string(pdev->dev.of_node, "label",
				    &ctx_drvdata->name))
		ctx_drvdata->name = dev_name(&pdev->dev);

	if (!of_get_property(pdev->dev.of_node, "qcom,iommu-ctx-sids", &nsid)) {
		ret = -EINVAL;
		goto out;
	}

	if (nsid >= sizeof(ctx_drvdata->sids)) {
		ret = -EINVAL;
		goto out;
	}

	if (of_property_read_u32_array(pdev->dev.of_node, "qcom,iommu-ctx-sids",
				       ctx_drvdata->sids,
				       nsid / sizeof(*ctx_drvdata->sids))) {
		ret = -EINVAL;
		goto out;
	}

	ctx_drvdata->nsid = nsid;
	ctx_drvdata->asid = -1;

	if (!of_get_property(pdev->dev.of_node, "qcom,iommu-sid-mask",
			     &n_sid_mask)) {
		memset(ctx_drvdata->sid_mask, 0, MAX_NUM_SMR);
		goto out;
	}

	if (n_sid_mask != nsid) {
		ret = -EINVAL;
		goto out;
	}

	if (of_property_read_u32_array(pdev->dev.of_node, "qcom,iommu-sid-mask",
				       ctx_drvdata->sid_mask,
				       n_sid_mask / sizeof(*ctx_drvdata->sid_mask))) {
		ret = -EINVAL;
		goto out;
	}

	ctx_drvdata->n_sid_mask = n_sid_mask;

out:
	return ret;
}

static int msm_iommu_ctx_probe(struct platform_device *pdev)
{
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	int ret;

	if (!qcom_scm_is_available())
		return -EPROBE_DEFER;

	if (!pdev->dev.parent)
		return -EINVAL;

	ctx_drvdata = devm_kzalloc(&pdev->dev, sizeof(*ctx_drvdata),
				   GFP_KERNEL);
	if (!ctx_drvdata)
		return -ENOMEM;

	ctx_drvdata->pdev = pdev;
	INIT_LIST_HEAD(&ctx_drvdata->attached_elm);

	ret = msm_iommu_ctx_parse_dt(pdev, ctx_drvdata);
	if (ret)
		return ret;

	platform_set_drvdata(pdev, ctx_drvdata);

	dev_info(&pdev->dev, "context %s using bank %d\n",
		 ctx_drvdata->name, ctx_drvdata->num);

	return 0;
}

static int msm_iommu_ctx_remove(struct platform_device *pdev)
{
	platform_set_drvdata(pdev, NULL);

	return 0;
}

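/*
 * Illustrative device-tree fragment for these compatibles. This is a sketch
 * only: node names, unit addresses, register offsets, interrupt specifiers
 * and SID values are made up; the property and resource names are the ones
 * parsed by the code above.
 *
 *	iommu@1e00000 {
 *		compatible = "qcom,msm-smmu-v2";
 *		reg = <0x1e00000 0x10000>;
 *		reg-names = "iommu_base";
 *		clocks = <&gcc IFACE_CLK>, <&gcc CORE_CLK>;
 *		clock-names = "iface_clk", "core_clk";
 *		label = "example_iommu";
 *		qcom,iommu-secure-id = <1>;
 *		#address-cells = <1>;
 *		#size-cells = <1>;
 *		ranges;
 *
 *		iommu-ctx@1e08000 {
 *			compatible = "qcom,msm-smmu-v2-ctx";
 *			reg = <0x1e08000 0x1000>;
 *			interrupts = <0 70 0>;
 *			label = "example_ctx";
 *			qcom,iommu-ctx-sids = <0>;
 *		};
 *	};
 */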
static const struct of_device_id msm_iommu_match_table[] = {
	{ .compatible = "qcom,msm-smmu-v1", },
	{ .compatible = "qcom,msm-smmu-v2", },
	{}
};

static struct platform_driver msm_iommu_driver = {
	.driver = {
		.name = "msm_iommu",
		.of_match_table = msm_iommu_match_table,
	},
	.probe = msm_iommu_probe,
	.remove = msm_iommu_remove,
};

static const struct of_device_id msm_iommu_ctx_match_table[] = {
	{ .compatible = "qcom,msm-smmu-v1-ctx", },
	{ .compatible = "qcom,msm-smmu-v2-ctx", },
	{}
};

static struct platform_driver msm_iommu_ctx_driver = {
	.driver = {
		.name = "msm_iommu_ctx",
		.of_match_table = msm_iommu_ctx_match_table,
	},
	.probe = msm_iommu_ctx_probe,
	.remove = msm_iommu_ctx_remove,
};

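/*
 * Register both platform drivers. This is a subsys_initcall() rather than a
 * module_init() so that the IOMMUs are probed before the client devices
 * that sit behind them.
 */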
static int __init msm_iommu_driver_init(void)
{
	int ret;

	ret = platform_driver_register(&msm_iommu_driver);
	if (ret) {
		pr_err("Failed to register IOMMU driver\n");
		return ret;
	}

	ret = platform_driver_register(&msm_iommu_ctx_driver);
	if (ret) {
		pr_err("Failed to register IOMMU context driver\n");
		platform_driver_unregister(&msm_iommu_driver);
		return ret;
	}

	return 0;
}

static void __exit msm_iommu_driver_exit(void)
{
	platform_driver_unregister(&msm_iommu_ctx_driver);
	platform_driver_unregister(&msm_iommu_driver);
}
subsys_initcall(msm_iommu_driver_init);
module_exit(msm_iommu_driver_exit);

MODULE_LICENSE("GPL v2");