/*
 * Copyright (c) 2016, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/irq.h>
#include <linux/irqdomain.h>
24 * If needed, this can become more specific: something like struct mdp5_mdss,
25 * which contains a 'struct msm_mdss base' member.
28 struct drm_device
*dev
;
30 void __iomem
*mmio
, *vbif
;
32 struct regulator
*vdd
;
35 volatile unsigned long enabled_mask
;
36 struct irq_domain
*domain
;
40 static inline void mdss_write(struct msm_mdss
*mdss
, u32 reg
, u32 data
)
42 msm_writel(data
, mdss
->mmio
+ reg
);
45 static inline u32
mdss_read(struct msm_mdss
*mdss
, u32 reg
)
47 return msm_readl(mdss
->mmio
+ reg
);
50 static irqreturn_t
mdss_irq(int irq
, void *arg
)
52 struct msm_mdss
*mdss
= arg
;
55 intr
= mdss_read(mdss
, REG_MDSS_HW_INTR_STATUS
);
57 VERB("intr=%08x", intr
);
60 irq_hw_number_t hwirq
= fls(intr
) - 1;
62 generic_handle_irq(irq_find_mapping(
63 mdss
->irqcontroller
.domain
, hwirq
));
64 intr
&= ~(1 << hwirq
);
/*
 * interrupt-controller implementation, so sub-blocks (MDP/HDMI/eDP/DSI/etc)
 * can register to get their irq's delivered
 */
/* The MDSS-top interrupt sources that sub-blocks are allowed to map */
#define VALID_IRQS  (MDSS_HW_INTR_STATUS_INTR_MDP | \
		MDSS_HW_INTR_STATUS_INTR_DSI0 | \
		MDSS_HW_INTR_STATUS_INTR_DSI1 | \
		MDSS_HW_INTR_STATUS_INTR_HDMI | \
		MDSS_HW_INTR_STATUS_INTR_EDP)
81 static void mdss_hw_mask_irq(struct irq_data
*irqd
)
83 struct msm_mdss
*mdss
= irq_data_get_irq_chip_data(irqd
);
85 smp_mb__before_atomic();
86 clear_bit(irqd
->hwirq
, &mdss
->irqcontroller
.enabled_mask
);
87 smp_mb__after_atomic();
90 static void mdss_hw_unmask_irq(struct irq_data
*irqd
)
92 struct msm_mdss
*mdss
= irq_data_get_irq_chip_data(irqd
);
94 smp_mb__before_atomic();
95 set_bit(irqd
->hwirq
, &mdss
->irqcontroller
.enabled_mask
);
96 smp_mb__after_atomic();
99 static struct irq_chip mdss_hw_irq_chip
= {
101 .irq_mask
= mdss_hw_mask_irq
,
102 .irq_unmask
= mdss_hw_unmask_irq
,
105 static int mdss_hw_irqdomain_map(struct irq_domain
*d
, unsigned int irq
,
106 irq_hw_number_t hwirq
)
108 struct msm_mdss
*mdss
= d
->host_data
;
110 if (!(VALID_IRQS
& (1 << hwirq
)))
113 irq_set_chip_and_handler(irq
, &mdss_hw_irq_chip
, handle_level_irq
);
114 irq_set_chip_data(irq
, mdss
);
119 static const struct irq_domain_ops mdss_hw_irqdomain_ops
= {
120 .map
= mdss_hw_irqdomain_map
,
121 .xlate
= irq_domain_xlate_onecell
,
125 static int mdss_irq_domain_init(struct msm_mdss
*mdss
)
127 struct device
*dev
= mdss
->dev
->dev
;
128 struct irq_domain
*d
;
130 d
= irq_domain_add_linear(dev
->of_node
, 32, &mdss_hw_irqdomain_ops
,
133 dev_err(dev
, "mdss irq domain add failed\n");
137 mdss
->irqcontroller
.enabled_mask
= 0;
138 mdss
->irqcontroller
.domain
= d
;
143 void msm_mdss_destroy(struct drm_device
*dev
)
145 struct msm_drm_private
*priv
= dev
->dev_private
;
146 struct msm_mdss
*mdss
= priv
->mdss
;
151 irq_domain_remove(mdss
->irqcontroller
.domain
);
152 mdss
->irqcontroller
.domain
= NULL
;
154 regulator_disable(mdss
->vdd
);
156 pm_runtime_put_sync(dev
->dev
);
158 pm_runtime_disable(dev
->dev
);
161 int msm_mdss_init(struct drm_device
*dev
)
163 struct platform_device
*pdev
= to_platform_device(dev
->dev
);
164 struct msm_drm_private
*priv
= dev
->dev_private
;
165 struct msm_mdss
*mdss
;
170 if (!of_device_is_compatible(dev
->dev
->of_node
, "qcom,mdss"))
173 mdss
= devm_kzalloc(dev
->dev
, sizeof(*mdss
), GFP_KERNEL
);
181 mdss
->mmio
= msm_ioremap(pdev
, "mdss_phys", "MDSS");
182 if (IS_ERR(mdss
->mmio
)) {
183 ret
= PTR_ERR(mdss
->mmio
);
187 mdss
->vbif
= msm_ioremap(pdev
, "vbif_phys", "VBIF");
188 if (IS_ERR(mdss
->vbif
)) {
189 ret
= PTR_ERR(mdss
->vbif
);
193 /* Regulator to enable GDSCs in downstream kernels */
194 mdss
->vdd
= devm_regulator_get(dev
->dev
, "vdd");
195 if (IS_ERR(mdss
->vdd
)) {
196 ret
= PTR_ERR(mdss
->vdd
);
200 ret
= regulator_enable(mdss
->vdd
);
202 dev_err(dev
->dev
, "failed to enable regulator vdd: %d\n",
207 ret
= devm_request_irq(dev
->dev
, platform_get_irq(pdev
, 0),
208 mdss_irq
, 0, "mdss_isr", mdss
);
210 dev_err(dev
->dev
, "failed to init irq: %d\n", ret
);
214 ret
= mdss_irq_domain_init(mdss
);
216 dev_err(dev
->dev
, "failed to init sub-block irqs: %d\n", ret
);
222 pm_runtime_enable(dev
->dev
);
225 * TODO: This is needed as the MDSS GDSC is only tied to MDSS's power
226 * domain. Remove this once runtime PM is adapted for all the devices.
228 pm_runtime_get_sync(dev
->dev
);
232 regulator_disable(mdss
->vdd
);