/*
 * Qualcomm PCIe root complex driver
 *
 * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
 * Copyright 2015 Linaro Limited.
 *
 * Author: Stanimir Varbanov <svarbanov@mm-sol.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/phy/phy.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <linux/slab.h>
#include <linux/types.h>

#include "pcie-designware.h"

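/* Offsets into the Qualcomm "parf" wrapper register region (ioremapped in probe below) */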
#define PCIE20_PARF_PHY_CTRL			0x40
#define PCIE20_PARF_PHY_REFCLK			0x4C
#define PCIE20_PARF_DBI_BASE_ADDR		0x168
#define PCIE20_PARF_SLV_ADDR_SPACE_SIZE		0x16c
#define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT	0x178

#define PCIE20_ELBI_SYS_CTRL			0x04
#define PCIE20_ELBI_SYS_CTRL_LT_ENABLE		BIT(0)

#define PCIE20_CAP				0x70

#define PERST_DELAY_US				1000

struct qcom_pcie_resources_v0 {
	struct clk *iface_clk;
	struct clk *core_clk;
	struct clk *phy_clk;
	struct reset_control *pci_reset;
	struct reset_control *axi_reset;
	struct reset_control *ahb_reset;
	struct reset_control *por_reset;
	struct reset_control *phy_reset;
	struct regulator *vdda;
	struct regulator *vdda_phy;
	struct regulator *vdda_refclk;
};

struct qcom_pcie_resources_v1 {
	struct clk *iface;
	struct clk *aux;
	struct clk *master_bus;
	struct clk *slave_bus;
	struct reset_control *core;
	struct regulator *vdda;
};

union qcom_pcie_resources {
	struct qcom_pcie_resources_v0 v0;
	struct qcom_pcie_resources_v1 v1;
};

struct qcom_pcie;

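/*
 * SoC-generation-specific hooks: ops_v0 serves qcom,pcie-ipq8064 and
 * qcom,pcie-apq8064, ops_v1 serves qcom,pcie-apq8084 (see qcom_pcie_match
 * below). They differ in which clocks, resets and supplies the wrapper
 * needs and in how it is brought out of reset.
 */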
struct qcom_pcie_ops {
	int (*get_resources)(struct qcom_pcie *pcie);
	int (*init)(struct qcom_pcie *pcie);
	void (*deinit)(struct qcom_pcie *pcie);
};

struct qcom_pcie {
	struct pcie_port pp;			/* pp.dbi_base is DT dbi */
	void __iomem *parf;			/* DT parf */
	void __iomem *elbi;			/* DT elbi */
	union qcom_pcie_resources res;
	struct phy *phy;
	struct gpio_desc *reset;
	struct qcom_pcie_ops *ops;
};

#define to_qcom_pcie(x)		container_of(x, struct qcom_pcie, pp)

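/*
 * PERST# control: the "perst" GPIO comes from DT, so gpiod_set_value() already
 * honours the line polarity; the sleep gives the signal time to settle before
 * the endpoint is accessed.
 */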
static void qcom_ep_reset_assert(struct qcom_pcie *pcie)
{
	gpiod_set_value(pcie->reset, 1);
	usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
}

static void qcom_ep_reset_deassert(struct qcom_pcie *pcie)
{
	gpiod_set_value(pcie->reset, 0);
	usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
}

static irqreturn_t qcom_pcie_msi_irq_handler(int irq, void *arg)
{
	struct pcie_port *pp = arg;

	return dw_handle_msi_irq(pp);
}

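/* Enable link training via the ELBI block, then wait for the link to come up. */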
static int qcom_pcie_establish_link(struct qcom_pcie *pcie)
{
	u32 val;

	if (dw_pcie_link_up(&pcie->pp))
		return 0;

	/* enable link training */
	val = readl(pcie->elbi + PCIE20_ELBI_SYS_CTRL);
	val |= PCIE20_ELBI_SYS_CTRL_LT_ENABLE;
	writel(val, pcie->elbi + PCIE20_ELBI_SYS_CTRL);

	return dw_pcie_wait_for_link(&pcie->pp);
}

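/* v0 (IPQ8064/APQ8064) resources; everything is devm-managed, so failed lookups need no cleanup. */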
static int qcom_pcie_get_resources_v0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_v0 *res = &pcie->res.v0;
	struct device *dev = pcie->pp.dev;

	res->vdda = devm_regulator_get(dev, "vdda");
	if (IS_ERR(res->vdda))
		return PTR_ERR(res->vdda);

	res->vdda_phy = devm_regulator_get(dev, "vdda_phy");
	if (IS_ERR(res->vdda_phy))
		return PTR_ERR(res->vdda_phy);

	res->vdda_refclk = devm_regulator_get(dev, "vdda_refclk");
	if (IS_ERR(res->vdda_refclk))
		return PTR_ERR(res->vdda_refclk);

	res->iface_clk = devm_clk_get(dev, "iface");
	if (IS_ERR(res->iface_clk))
		return PTR_ERR(res->iface_clk);

	res->core_clk = devm_clk_get(dev, "core");
	if (IS_ERR(res->core_clk))
		return PTR_ERR(res->core_clk);

	res->phy_clk = devm_clk_get(dev, "phy");
	if (IS_ERR(res->phy_clk))
		return PTR_ERR(res->phy_clk);

	res->pci_reset = devm_reset_control_get(dev, "pci");
	if (IS_ERR(res->pci_reset))
		return PTR_ERR(res->pci_reset);

	res->axi_reset = devm_reset_control_get(dev, "axi");
	if (IS_ERR(res->axi_reset))
		return PTR_ERR(res->axi_reset);

	res->ahb_reset = devm_reset_control_get(dev, "ahb");
	if (IS_ERR(res->ahb_reset))
		return PTR_ERR(res->ahb_reset);

	res->por_reset = devm_reset_control_get(dev, "por");
	if (IS_ERR(res->por_reset))
		return PTR_ERR(res->por_reset);

	res->phy_reset = devm_reset_control_get(dev, "phy");
	if (IS_ERR(res->phy_reset))
		return PTR_ERR(res->phy_reset);

	return 0;
}

static int qcom_pcie_get_resources_v1(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_v1 *res = &pcie->res.v1;
	struct device *dev = pcie->pp.dev;

	res->vdda = devm_regulator_get(dev, "vdda");
	if (IS_ERR(res->vdda))
		return PTR_ERR(res->vdda);

	res->iface = devm_clk_get(dev, "iface");
	if (IS_ERR(res->iface))
		return PTR_ERR(res->iface);

	res->aux = devm_clk_get(dev, "aux");
	if (IS_ERR(res->aux))
		return PTR_ERR(res->aux);

	res->master_bus = devm_clk_get(dev, "master_bus");
	if (IS_ERR(res->master_bus))
		return PTR_ERR(res->master_bus);

	res->slave_bus = devm_clk_get(dev, "slave_bus");
	if (IS_ERR(res->slave_bus))
		return PTR_ERR(res->slave_bus);

	res->core = devm_reset_control_get(dev, "core");
	if (IS_ERR(res->core))
		return PTR_ERR(res->core);

	return 0;
}

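/* Power the v0 port back down: park all resets, then gate the clocks and supplies. */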
static void qcom_pcie_deinit_v0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_v0 *res = &pcie->res.v0;

	reset_control_assert(res->pci_reset);
	reset_control_assert(res->axi_reset);
	reset_control_assert(res->ahb_reset);
	reset_control_assert(res->por_reset);
	reset_control_assert(res->phy_reset);
	clk_disable_unprepare(res->iface_clk);
	clk_disable_unprepare(res->core_clk);
	clk_disable_unprepare(res->phy_clk);
	regulator_disable(res->vdda);
	regulator_disable(res->vdda_phy);
	regulator_disable(res->vdda_refclk);
}

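/*
 * v0 power-up sequence: supplies first, then the iface/phy/core clocks with
 * the AHB reset asserted, then release the AHB, PHY, PCI, POR and AXI resets
 * and give the clocks time to settle.
 */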
static int qcom_pcie_init_v0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_v0 *res = &pcie->res.v0;
	struct device *dev = pcie->pp.dev;
	u32 val;
	int ret;

	ret = regulator_enable(res->vdda);
	if (ret) {
		dev_err(dev, "cannot enable vdda regulator\n");
		return ret;
	}

	ret = regulator_enable(res->vdda_refclk);
	if (ret) {
		dev_err(dev, "cannot enable vdda_refclk regulator\n");
		goto err_refclk;
	}

	ret = regulator_enable(res->vdda_phy);
	if (ret) {
		dev_err(dev, "cannot enable vdda_phy regulator\n");
		goto err_vdda_phy;
	}

	ret = reset_control_assert(res->ahb_reset);
	if (ret) {
		dev_err(dev, "cannot assert ahb reset\n");
		goto err_assert_ahb;
	}

	ret = clk_prepare_enable(res->iface_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable iface clock\n");
		goto err_assert_ahb;
	}

	ret = clk_prepare_enable(res->phy_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable phy clock\n");
		goto err_clk_phy;
	}

	ret = clk_prepare_enable(res->core_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable core clock\n");
		goto err_clk_core;
	}

	ret = reset_control_deassert(res->ahb_reset);
	if (ret) {
		dev_err(dev, "cannot deassert ahb reset\n");
		goto err_deassert_ahb;
	}

	/* enable PCIe clocks and resets */
	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
	val &= ~BIT(0);
	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);

	/* enable external reference clock */
	val = readl(pcie->parf + PCIE20_PARF_PHY_REFCLK);
	val |= BIT(16);
	writel(val, pcie->parf + PCIE20_PARF_PHY_REFCLK);

	ret = reset_control_deassert(res->phy_reset);
	if (ret) {
		dev_err(dev, "cannot deassert phy reset\n");
		return ret;
	}

	ret = reset_control_deassert(res->pci_reset);
	if (ret) {
		dev_err(dev, "cannot deassert pci reset\n");
		return ret;
	}

	ret = reset_control_deassert(res->por_reset);
	if (ret) {
		dev_err(dev, "cannot deassert por reset\n");
		return ret;
	}

	ret = reset_control_deassert(res->axi_reset);
	if (ret) {
		dev_err(dev, "cannot deassert axi reset\n");
		return ret;
	}

	/* wait for clock acquisition */
	usleep_range(1000, 1500);

	return 0;

err_deassert_ahb:
	clk_disable_unprepare(res->core_clk);
err_clk_core:
	clk_disable_unprepare(res->phy_clk);
err_clk_phy:
	clk_disable_unprepare(res->iface_clk);
err_assert_ahb:
	regulator_disable(res->vdda_phy);
err_vdda_phy:
	regulator_disable(res->vdda_refclk);
err_refclk:
	regulator_disable(res->vdda);

	return ret;
}

static void qcom_pcie_deinit_v1(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_v1 *res = &pcie->res.v1;

	reset_control_assert(res->core);
	clk_disable_unprepare(res->slave_bus);
	clk_disable_unprepare(res->master_bus);
	clk_disable_unprepare(res->iface);
	clk_disable_unprepare(res->aux);
	regulator_disable(res->vdda);
}

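/*
 * v1 power-up: release the core reset and enable the clocks and the vdda
 * supply, then move the DBI base address to zero. When MSI is enabled,
 * BIT(31) of PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT is set as well.
 */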
static int qcom_pcie_init_v1(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_v1 *res = &pcie->res.v1;
	struct device *dev = pcie->pp.dev;
	int ret;

	ret = reset_control_deassert(res->core);
	if (ret) {
		dev_err(dev, "cannot deassert core reset\n");
		return ret;
	}

	ret = clk_prepare_enable(res->aux);
	if (ret) {
		dev_err(dev, "cannot prepare/enable aux clock\n");
		goto err_res;
	}

	ret = clk_prepare_enable(res->iface);
	if (ret) {
		dev_err(dev, "cannot prepare/enable iface clock\n");
		goto err_aux;
	}

	ret = clk_prepare_enable(res->master_bus);
	if (ret) {
		dev_err(dev, "cannot prepare/enable master_bus clock\n");
		goto err_iface;
	}

	ret = clk_prepare_enable(res->slave_bus);
	if (ret) {
		dev_err(dev, "cannot prepare/enable slave_bus clock\n");
		goto err_master;
	}

	ret = regulator_enable(res->vdda);
	if (ret) {
		dev_err(dev, "cannot enable vdda regulator\n");
		goto err_slave;
	}

	/* change DBI base address */
	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		u32 val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);

		val |= BIT(31);
		writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
	}

	return 0;
err_slave:
	clk_disable_unprepare(res->slave_bus);
err_master:
	clk_disable_unprepare(res->master_bus);
err_iface:
	clk_disable_unprepare(res->iface);
err_aux:
	clk_disable_unprepare(res->aux);
err_res:
	reset_control_assert(res->core);

	return ret;
}

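/* The link is up once the Data Link Layer Link Active bit is set in the Link Status register. */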
static int qcom_pcie_link_up(struct pcie_port *pp)
{
	struct qcom_pcie *pcie = to_qcom_pcie(pp);
	u16 val = readw(pcie->pp.dbi_base + PCIE20_CAP + PCI_EXP_LNKSTA);

	return !!(val & PCI_EXP_LNKSTA_DLLLA);
}

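/*
 * Bring the root complex up: enable the SoC resources and the PHY, program the
 * DesignWare core, release PERST# and train the link.
 */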
static void qcom_pcie_host_init(struct pcie_port *pp)
{
	struct qcom_pcie *pcie = to_qcom_pcie(pp);
	int ret;

	qcom_ep_reset_assert(pcie);

	ret = pcie->ops->init(pcie);
	if (ret)
		goto err_deinit;

	ret = phy_power_on(pcie->phy);
	if (ret)
		goto err_deinit;

	dw_pcie_setup_rc(pp);

	if (IS_ENABLED(CONFIG_PCI_MSI))
		dw_pcie_msi_init(pp);

	qcom_ep_reset_deassert(pcie);

	ret = qcom_pcie_establish_link(pcie);
	if (ret)
		goto err;

	return;
err:
	qcom_ep_reset_assert(pcie);
	phy_power_off(pcie->phy);
err_deinit:
	pcie->ops->deinit(pcie);
}

static int qcom_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
				 u32 *val)
{
	/* the device class is not reported correctly from the register */
	if (where == PCI_CLASS_REVISION && size == 4) {
		*val = readl(pp->dbi_base + PCI_CLASS_REVISION);
		*val &= 0xff;	/* keep revision id */
		*val |= PCI_CLASS_BRIDGE_PCI << 16;
		return PCIBIOS_SUCCESSFUL;
	}

	return dw_pcie_cfg_read(pp->dbi_base + where, size, val);
}

static struct pcie_host_ops qcom_pcie_dw_ops = {
	.link_up = qcom_pcie_link_up,
	.host_init = qcom_pcie_host_init,
	.rd_own_conf = qcom_pcie_rd_own_conf,
};

static const struct qcom_pcie_ops ops_v0 = {
	.get_resources = qcom_pcie_get_resources_v0,
	.init = qcom_pcie_init_v0,
	.deinit = qcom_pcie_deinit_v0,
};

static const struct qcom_pcie_ops ops_v1 = {
	.get_resources = qcom_pcie_get_resources_v1,
	.init = qcom_pcie_init_v1,
	.deinit = qcom_pcie_deinit_v1,
};

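/*
 * Probe: map the "parf", "dbi" and "elbi" regions named in DT, pick up the
 * optional PERST# GPIO and PHY, then hand the port to the DesignWare core.
 */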
static int qcom_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct resource *res;
	struct qcom_pcie *pcie;
	struct pcie_port *pp;
	int ret;

	pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
	if (!pcie)
		return -ENOMEM;

	pp = &pcie->pp;
	pcie->ops = (struct qcom_pcie_ops *)of_device_get_match_data(dev);

	pcie->reset = devm_gpiod_get_optional(dev, "perst", GPIOD_OUT_LOW);
	if (IS_ERR(pcie->reset))
		return PTR_ERR(pcie->reset);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "parf");
	pcie->parf = devm_ioremap_resource(dev, res);
	if (IS_ERR(pcie->parf))
		return PTR_ERR(pcie->parf);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
	pp->dbi_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(pp->dbi_base))
		return PTR_ERR(pp->dbi_base);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "elbi");
	pcie->elbi = devm_ioremap_resource(dev, res);
	if (IS_ERR(pcie->elbi))
		return PTR_ERR(pcie->elbi);

	pcie->phy = devm_phy_optional_get(dev, "pciephy");
	if (IS_ERR(pcie->phy))
		return PTR_ERR(pcie->phy);

	pp->dev = dev;
	ret = pcie->ops->get_resources(pcie);
	if (ret)
		return ret;

	pp->root_bus_nr = -1;
	pp->ops = &qcom_pcie_dw_ops;

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		pp->msi_irq = platform_get_irq_byname(pdev, "msi");
		if (pp->msi_irq < 0)
			return pp->msi_irq;

		ret = devm_request_irq(dev, pp->msi_irq,
				       qcom_pcie_msi_irq_handler,
				       IRQF_SHARED, "qcom-pcie-msi", pp);
		if (ret) {
			dev_err(dev, "cannot request msi irq\n");
			return ret;
		}
	}

	ret = phy_init(pcie->phy);
	if (ret)
		return ret;

	ret = dw_pcie_host_init(pp);
	if (ret) {
		dev_err(dev, "cannot initialize host\n");
		return ret;
	}

	return 0;
}

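/*
 * Rough sketch of the DT resources the driver looks up above (names taken
 * from the lookups in this file, not a complete binding example):
 *
 *	reg-names:	"parf", "dbi", "elbi"
 *	interrupt-names: "msi"
 *	phy-names:	"pciephy" (optional); perst-gpios (optional)
 *	v0 (ipq8064/apq8064): clocks "iface", "core", "phy"; resets "pci",
 *		"axi", "ahb", "por", "phy"; supplies vdda, vdda_phy, vdda_refclk
 *	v1 (apq8084): clocks "iface", "aux", "master_bus", "slave_bus";
 *		reset "core"; supply vdda
 */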
static const struct of_device_id qcom_pcie_match[] = {
	{ .compatible = "qcom,pcie-ipq8064", .data = &ops_v0 },
	{ .compatible = "qcom,pcie-apq8064", .data = &ops_v0 },
	{ .compatible = "qcom,pcie-apq8084", .data = &ops_v1 },
	{ }
};

static struct platform_driver qcom_pcie_driver = {
	.probe = qcom_pcie_probe,
	.driver = {
		.name = "qcom-pcie",
		.suppress_bind_attrs = true,
		.of_match_table = qcom_pcie_match,
	},
};
builtin_platform_driver(qcom_pcie_driver);