/*
 * Imported from the Ubuntu artful kernel mirror
 * (mirror_ubuntu-artful-kernel.git): drivers/pci/dwc/pcie-qcom.c
 */
1 /*
2 * Qualcomm PCIe root complex driver
3 *
4 * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
5 * Copyright 2015 Linaro Limited.
6 *
7 * Author: Stanimir Varbanov <svarbanov@mm-sol.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 and
11 * only version 2 as published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 */
18
19 #include <linux/clk.h>
20 #include <linux/delay.h>
21 #include <linux/gpio.h>
22 #include <linux/interrupt.h>
23 #include <linux/io.h>
24 #include <linux/iopoll.h>
25 #include <linux/kernel.h>
26 #include <linux/init.h>
27 #include <linux/of_device.h>
28 #include <linux/of_gpio.h>
29 #include <linux/pci.h>
30 #include <linux/platform_device.h>
31 #include <linux/phy/phy.h>
32 #include <linux/regulator/consumer.h>
33 #include <linux/reset.h>
34 #include <linux/slab.h>
35 #include <linux/types.h>
36
37 #include "pcie-designware.h"
38
/* PARF register offsets (qcom wrapper/control block around the DWC core) */
#define PCIE20_PARF_SYS_CTRL 0x00
#define PCIE20_PARF_PHY_CTRL 0x40
#define PCIE20_PARF_PHY_REFCLK 0x4C
#define PCIE20_PARF_DBI_BASE_ADDR 0x168
#define PCIE20_PARF_SLV_ADDR_SPACE_SIZE 0x16C
#define PCIE20_PARF_MHI_CLOCK_RESET_CTRL 0x174
#define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT 0x178
#define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2 0x1A8
#define PCIE20_PARF_LTSSM 0x1B0
#define PCIE20_PARF_SID_OFFSET 0x234
#define PCIE20_PARF_BDF_TRANSLATE_CFG 0x24C

/* ELBI (external local bus interface) register offsets */
#define PCIE20_ELBI_SYS_CTRL 0x04
#define PCIE20_ELBI_SYS_CTRL_LT_ENABLE BIT(0)

/* Offset of the PCIe capability structure in the DBI config space */
#define PCIE20_CAP 0x70

/* Settle delay (us) after toggling the endpoint PERST# GPIO */
#define PERST_DELAY_US 1000
57
/* Clocks, resets and regulators used by the v0 (IPQ8064/APQ8064) variant */
struct qcom_pcie_resources_v0 {
	struct clk *iface_clk;
	struct clk *core_clk;
	struct clk *phy_clk;
	struct reset_control *pci_reset;
	struct reset_control *axi_reset;
	struct reset_control *ahb_reset;
	struct reset_control *por_reset;
	struct reset_control *phy_reset;
	struct regulator *vdda;
	struct regulator *vdda_phy;
	struct regulator *vdda_refclk;
};

/* Clocks, reset and regulator used by the v1 (APQ8084) variant */
struct qcom_pcie_resources_v1 {
	struct clk *iface;
	struct clk *aux;
	struct clk *master_bus;
	struct clk *slave_bus;
	struct reset_control *core;
	struct regulator *vdda;
};

/* Clocks used by the v2 (MSM8996) variant */
struct qcom_pcie_resources_v2 {
	struct clk *aux_clk;
	struct clk *master_clk;
	struct clk *slave_clk;
	struct clk *cfg_clk;
	struct clk *pipe_clk;
};

/* Exactly one variant's resources is active per controller instance */
union qcom_pcie_resources {
	struct qcom_pcie_resources_v0 v0;
	struct qcom_pcie_resources_v1 v1;
	struct qcom_pcie_resources_v2 v2;
};
94
struct qcom_pcie;

/*
 * Per-variant hooks. get_resources/init/deinit are invoked unconditionally;
 * post_init and ltssm_enable are optional and NULL-checked before use.
 */
struct qcom_pcie_ops {
	int (*get_resources)(struct qcom_pcie *pcie);
	int (*init)(struct qcom_pcie *pcie);
	int (*post_init)(struct qcom_pcie *pcie);
	void (*deinit)(struct qcom_pcie *pcie);
	void (*ltssm_enable)(struct qcom_pcie *pcie);
};

/* Driver state for one root-complex instance */
struct qcom_pcie {
	struct dw_pcie *pci;
	void __iomem *parf; /* DT parf */
	void __iomem *elbi; /* DT elbi */
	union qcom_pcie_resources res;
	struct phy *phy;
	struct gpio_desc *reset; /* optional endpoint PERST# GPIO */
	struct qcom_pcie_ops *ops;
};

/* dw_pcie -> qcom_pcie; driver data is set in qcom_pcie_probe() */
#define to_qcom_pcie(x) dev_get_drvdata((x)->dev)
116
/* Drive the endpoint's PERST# GPIO to the asserted state and let it settle */
static void qcom_ep_reset_assert(struct qcom_pcie *pcie)
{
	gpiod_set_value(pcie->reset, 1);
	usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
}
122
/* Release the endpoint's PERST# GPIO and let it settle */
static void qcom_ep_reset_deassert(struct qcom_pcie *pcie)
{
	gpiod_set_value(pcie->reset, 0);
	usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
}
128
129 static irqreturn_t qcom_pcie_msi_irq_handler(int irq, void *arg)
130 {
131 struct pcie_port *pp = arg;
132
133 return dw_handle_msi_irq(pp);
134 }
135
136 static void qcom_pcie_v0_v1_ltssm_enable(struct qcom_pcie *pcie)
137 {
138 u32 val;
139
140 /* enable link training */
141 val = readl(pcie->elbi + PCIE20_ELBI_SYS_CTRL);
142 val |= PCIE20_ELBI_SYS_CTRL_LT_ENABLE;
143 writel(val, pcie->elbi + PCIE20_ELBI_SYS_CTRL);
144 }
145
146 static void qcom_pcie_v2_ltssm_enable(struct qcom_pcie *pcie)
147 {
148 u32 val;
149
150 /* enable link training */
151 val = readl(pcie->parf + PCIE20_PARF_LTSSM);
152 val |= BIT(8);
153 writel(val, pcie->parf + PCIE20_PARF_LTSSM);
154 }
155
156 static int qcom_pcie_establish_link(struct qcom_pcie *pcie)
157 {
158 struct dw_pcie *pci = pcie->pci;
159
160 if (dw_pcie_link_up(pci))
161 return 0;
162
163 /* Enable Link Training state machine */
164 if (pcie->ops->ltssm_enable)
165 pcie->ops->ltssm_enable(pcie);
166
167 return dw_pcie_wait_for_link(pci);
168 }
169
/*
 * Look up all v0 regulators, clocks and resets via devm_* helpers.
 * Nothing is enabled here; qcom_pcie_init_v0() powers things up.
 * Returns 0 or the first lookup error (devm handles cleanup).
 */
static int qcom_pcie_get_resources_v0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_v0 *res = &pcie->res.v0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;

	res->vdda = devm_regulator_get(dev, "vdda");
	if (IS_ERR(res->vdda))
		return PTR_ERR(res->vdda);

	res->vdda_phy = devm_regulator_get(dev, "vdda_phy");
	if (IS_ERR(res->vdda_phy))
		return PTR_ERR(res->vdda_phy);

	res->vdda_refclk = devm_regulator_get(dev, "vdda_refclk");
	if (IS_ERR(res->vdda_refclk))
		return PTR_ERR(res->vdda_refclk);

	res->iface_clk = devm_clk_get(dev, "iface");
	if (IS_ERR(res->iface_clk))
		return PTR_ERR(res->iface_clk);

	res->core_clk = devm_clk_get(dev, "core");
	if (IS_ERR(res->core_clk))
		return PTR_ERR(res->core_clk);

	res->phy_clk = devm_clk_get(dev, "phy");
	if (IS_ERR(res->phy_clk))
		return PTR_ERR(res->phy_clk);

	res->pci_reset = devm_reset_control_get(dev, "pci");
	if (IS_ERR(res->pci_reset))
		return PTR_ERR(res->pci_reset);

	res->axi_reset = devm_reset_control_get(dev, "axi");
	if (IS_ERR(res->axi_reset))
		return PTR_ERR(res->axi_reset);

	res->ahb_reset = devm_reset_control_get(dev, "ahb");
	if (IS_ERR(res->ahb_reset))
		return PTR_ERR(res->ahb_reset);

	res->por_reset = devm_reset_control_get(dev, "por");
	if (IS_ERR(res->por_reset))
		return PTR_ERR(res->por_reset);

	res->phy_reset = devm_reset_control_get(dev, "phy");
	return PTR_ERR_OR_ZERO(res->phy_reset);
}
219
/*
 * Look up all v1 resources via devm_* helpers; nothing is enabled here.
 * Returns 0 or the first lookup error (devm handles cleanup).
 */
static int qcom_pcie_get_resources_v1(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_v1 *res = &pcie->res.v1;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;

	res->vdda = devm_regulator_get(dev, "vdda");
	if (IS_ERR(res->vdda))
		return PTR_ERR(res->vdda);

	res->iface = devm_clk_get(dev, "iface");
	if (IS_ERR(res->iface))
		return PTR_ERR(res->iface);

	res->aux = devm_clk_get(dev, "aux");
	if (IS_ERR(res->aux))
		return PTR_ERR(res->aux);

	res->master_bus = devm_clk_get(dev, "master_bus");
	if (IS_ERR(res->master_bus))
		return PTR_ERR(res->master_bus);

	res->slave_bus = devm_clk_get(dev, "slave_bus");
	if (IS_ERR(res->slave_bus))
		return PTR_ERR(res->slave_bus);

	res->core = devm_reset_control_get(dev, "core");
	return PTR_ERR_OR_ZERO(res->core);
}
249
250 static void qcom_pcie_deinit_v0(struct qcom_pcie *pcie)
251 {
252 struct qcom_pcie_resources_v0 *res = &pcie->res.v0;
253
254 reset_control_assert(res->pci_reset);
255 reset_control_assert(res->axi_reset);
256 reset_control_assert(res->ahb_reset);
257 reset_control_assert(res->por_reset);
258 reset_control_assert(res->pci_reset);
259 clk_disable_unprepare(res->iface_clk);
260 clk_disable_unprepare(res->core_clk);
261 clk_disable_unprepare(res->phy_clk);
262 regulator_disable(res->vdda);
263 regulator_disable(res->vdda_phy);
264 regulator_disable(res->vdda_refclk);
265 }
266
267 static int qcom_pcie_init_v0(struct qcom_pcie *pcie)
268 {
269 struct qcom_pcie_resources_v0 *res = &pcie->res.v0;
270 struct dw_pcie *pci = pcie->pci;
271 struct device *dev = pci->dev;
272 u32 val;
273 int ret;
274
275 ret = regulator_enable(res->vdda);
276 if (ret) {
277 dev_err(dev, "cannot enable vdda regulator\n");
278 return ret;
279 }
280
281 ret = regulator_enable(res->vdda_refclk);
282 if (ret) {
283 dev_err(dev, "cannot enable vdda_refclk regulator\n");
284 goto err_refclk;
285 }
286
287 ret = regulator_enable(res->vdda_phy);
288 if (ret) {
289 dev_err(dev, "cannot enable vdda_phy regulator\n");
290 goto err_vdda_phy;
291 }
292
293 ret = reset_control_assert(res->ahb_reset);
294 if (ret) {
295 dev_err(dev, "cannot assert ahb reset\n");
296 goto err_assert_ahb;
297 }
298
299 ret = clk_prepare_enable(res->iface_clk);
300 if (ret) {
301 dev_err(dev, "cannot prepare/enable iface clock\n");
302 goto err_assert_ahb;
303 }
304
305 ret = clk_prepare_enable(res->phy_clk);
306 if (ret) {
307 dev_err(dev, "cannot prepare/enable phy clock\n");
308 goto err_clk_phy;
309 }
310
311 ret = clk_prepare_enable(res->core_clk);
312 if (ret) {
313 dev_err(dev, "cannot prepare/enable core clock\n");
314 goto err_clk_core;
315 }
316
317 ret = reset_control_deassert(res->ahb_reset);
318 if (ret) {
319 dev_err(dev, "cannot deassert ahb reset\n");
320 goto err_deassert_ahb;
321 }
322
323 /* enable PCIe clocks and resets */
324 val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
325 val &= ~BIT(0);
326 writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
327
328 /* enable external reference clock */
329 val = readl(pcie->parf + PCIE20_PARF_PHY_REFCLK);
330 val |= BIT(16);
331 writel(val, pcie->parf + PCIE20_PARF_PHY_REFCLK);
332
333 ret = reset_control_deassert(res->phy_reset);
334 if (ret) {
335 dev_err(dev, "cannot deassert phy reset\n");
336 return ret;
337 }
338
339 ret = reset_control_deassert(res->pci_reset);
340 if (ret) {
341 dev_err(dev, "cannot deassert pci reset\n");
342 return ret;
343 }
344
345 ret = reset_control_deassert(res->por_reset);
346 if (ret) {
347 dev_err(dev, "cannot deassert por reset\n");
348 return ret;
349 }
350
351 ret = reset_control_deassert(res->axi_reset);
352 if (ret) {
353 dev_err(dev, "cannot deassert axi reset\n");
354 return ret;
355 }
356
357 /* wait for clock acquisition */
358 usleep_range(1000, 1500);
359
360 return 0;
361
362 err_deassert_ahb:
363 clk_disable_unprepare(res->core_clk);
364 err_clk_core:
365 clk_disable_unprepare(res->phy_clk);
366 err_clk_phy:
367 clk_disable_unprepare(res->iface_clk);
368 err_assert_ahb:
369 regulator_disable(res->vdda_phy);
370 err_vdda_phy:
371 regulator_disable(res->vdda_refclk);
372 err_refclk:
373 regulator_disable(res->vdda);
374
375 return ret;
376 }
377
378 static void qcom_pcie_deinit_v1(struct qcom_pcie *pcie)
379 {
380 struct qcom_pcie_resources_v1 *res = &pcie->res.v1;
381
382 reset_control_assert(res->core);
383 clk_disable_unprepare(res->slave_bus);
384 clk_disable_unprepare(res->master_bus);
385 clk_disable_unprepare(res->iface);
386 clk_disable_unprepare(res->aux);
387 regulator_disable(res->vdda);
388 }
389
/*
 * Power-up sequence for the v1 variant: release the core reset, enable
 * clocks and the vdda regulator, then do the PARF register setup.
 * On failure, everything acquired so far is rolled back via the goto chain.
 *
 * Returns 0 on success or a negative errno.
 */
static int qcom_pcie_init_v1(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_v1 *res = &pcie->res.v1;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	ret = reset_control_deassert(res->core);
	if (ret) {
		dev_err(dev, "cannot deassert core reset\n");
		return ret;
	}

	ret = clk_prepare_enable(res->aux);
	if (ret) {
		dev_err(dev, "cannot prepare/enable aux clock\n");
		goto err_res;
	}

	ret = clk_prepare_enable(res->iface);
	if (ret) {
		dev_err(dev, "cannot prepare/enable iface clock\n");
		goto err_aux;
	}

	ret = clk_prepare_enable(res->master_bus);
	if (ret) {
		dev_err(dev, "cannot prepare/enable master_bus clock\n");
		goto err_iface;
	}

	ret = clk_prepare_enable(res->slave_bus);
	if (ret) {
		dev_err(dev, "cannot prepare/enable slave_bus clock\n");
		goto err_master;
	}

	ret = regulator_enable(res->vdda);
	if (ret) {
		dev_err(dev, "cannot enable vdda regulator\n");
		goto err_slave;
	}

	/* change DBI base address */
	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		/* bit 31 presumably un-halts AXI master writes for MSI; from
		 * the register name only -- confirm against HW docs */
		u32 val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);

		val |= BIT(31);
		writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
	}

	return 0;
err_slave:
	clk_disable_unprepare(res->slave_bus);
err_master:
	clk_disable_unprepare(res->master_bus);
err_iface:
	clk_disable_unprepare(res->iface);
err_aux:
	clk_disable_unprepare(res->aux);
err_res:
	reset_control_assert(res->core);

	return ret;
}
457
/*
 * Look up the five v2 clocks via devm_clk_get(); nothing is enabled here.
 * Returns 0 or the first lookup error (devm handles cleanup).
 */
static int qcom_pcie_get_resources_v2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_v2 *res = &pcie->res.v2;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;

	res->aux_clk = devm_clk_get(dev, "aux");
	if (IS_ERR(res->aux_clk))
		return PTR_ERR(res->aux_clk);

	res->cfg_clk = devm_clk_get(dev, "cfg");
	if (IS_ERR(res->cfg_clk))
		return PTR_ERR(res->cfg_clk);

	res->master_clk = devm_clk_get(dev, "bus_master");
	if (IS_ERR(res->master_clk))
		return PTR_ERR(res->master_clk);

	res->slave_clk = devm_clk_get(dev, "bus_slave");
	if (IS_ERR(res->slave_clk))
		return PTR_ERR(res->slave_clk);

	/* pipe_clk is enabled later, in qcom_pcie_post_init_v2() */
	res->pipe_clk = devm_clk_get(dev, "pipe");
	return PTR_ERR_OR_ZERO(res->pipe_clk);
}
483
/*
 * Power-up sequence for the v2 variant: enable the bus clocks (pipe_clk is
 * deferred to post_init, after the PHY is powered), then configure the PARF
 * registers. Error labels are named after the step that failed.
 *
 * Returns 0 on success or a negative errno.
 */
static int qcom_pcie_init_v2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_v2 *res = &pcie->res.v2;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	u32 val;
	int ret;

	ret = clk_prepare_enable(res->aux_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable aux clock\n");
		return ret;
	}

	ret = clk_prepare_enable(res->cfg_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable cfg clock\n");
		goto err_cfg_clk;
	}

	ret = clk_prepare_enable(res->master_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable master clock\n");
		goto err_master_clk;
	}

	ret = clk_prepare_enable(res->slave_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable slave clock\n");
		goto err_slave_clk;
	}

	/* enable PCIe clocks and resets */
	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
	val &= ~BIT(0);
	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);

	/* change DBI base address */
	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);

	/* MAC PHY_POWERDOWN MUX DISABLE */
	val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL);
	val &= ~BIT(29);
	writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL);

	val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
	val |= BIT(4);
	writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);

	val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
	val |= BIT(31);
	writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);

	return 0;

err_slave_clk:
	clk_disable_unprepare(res->master_clk);
err_master_clk:
	clk_disable_unprepare(res->cfg_clk);
err_cfg_clk:
	clk_disable_unprepare(res->aux_clk);

	return ret;
}
548
549 static int qcom_pcie_post_init_v2(struct qcom_pcie *pcie)
550 {
551 struct qcom_pcie_resources_v2 *res = &pcie->res.v2;
552 struct dw_pcie *pci = pcie->pci;
553 struct device *dev = pci->dev;
554 int ret;
555
556 ret = clk_prepare_enable(res->pipe_clk);
557 if (ret) {
558 dev_err(dev, "cannot prepare/enable pipe clock\n");
559 return ret;
560 }
561
562 return 0;
563 }
564
565 static int qcom_pcie_link_up(struct dw_pcie *pci)
566 {
567 u16 val = readw(pci->dbi_base + PCIE20_CAP + PCI_EXP_LNKSTA);
568
569 return !!(val & PCI_EXP_LNKSTA_DLLLA);
570 }
571
572 static void qcom_pcie_deinit_v2(struct qcom_pcie *pcie)
573 {
574 struct qcom_pcie_resources_v2 *res = &pcie->res.v2;
575
576 clk_disable_unprepare(res->pipe_clk);
577 clk_disable_unprepare(res->slave_clk);
578 clk_disable_unprepare(res->master_clk);
579 clk_disable_unprepare(res->cfg_clk);
580 clk_disable_unprepare(res->aux_clk);
581 }
582
583 static void qcom_pcie_host_init(struct pcie_port *pp)
584 {
585 struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
586 struct qcom_pcie *pcie = to_qcom_pcie(pci);
587 int ret;
588
589 qcom_ep_reset_assert(pcie);
590
591 ret = pcie->ops->init(pcie);
592 if (ret)
593 goto err_deinit;
594
595 ret = phy_power_on(pcie->phy);
596 if (ret)
597 goto err_deinit;
598
599 if (pcie->ops->post_init)
600 pcie->ops->post_init(pcie);
601
602 dw_pcie_setup_rc(pp);
603
604 if (IS_ENABLED(CONFIG_PCI_MSI))
605 dw_pcie_msi_init(pp);
606
607 qcom_ep_reset_deassert(pcie);
608
609 ret = qcom_pcie_establish_link(pcie);
610 if (ret)
611 goto err;
612
613 return;
614 err:
615 qcom_ep_reset_assert(pcie);
616 phy_power_off(pcie->phy);
617 err_deinit:
618 pcie->ops->deinit(pcie);
619 }
620
621 static int qcom_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
622 u32 *val)
623 {
624 struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
625
626 /* the device class is not reported correctly from the register */
627 if (where == PCI_CLASS_REVISION && size == 4) {
628 *val = readl(pci->dbi_base + PCI_CLASS_REVISION);
629 *val &= 0xff; /* keep revision id */
630 *val |= PCI_CLASS_BRIDGE_PCI << 16;
631 return PCIBIOS_SUCCESSFUL;
632 }
633
634 return dw_pcie_read(pci->dbi_base + where, size, val);
635 }
636
/* DWC host hooks shared by all variants */
static struct dw_pcie_host_ops qcom_pcie_dw_ops = {
	.host_init = qcom_pcie_host_init,
	.rd_own_conf = qcom_pcie_rd_own_conf,
};

/* IPQ8064/APQ8064 */
static const struct qcom_pcie_ops ops_v0 = {
	.get_resources = qcom_pcie_get_resources_v0,
	.init = qcom_pcie_init_v0,
	.deinit = qcom_pcie_deinit_v0,
	.ltssm_enable = qcom_pcie_v0_v1_ltssm_enable,
};

/* APQ8084 */
static const struct qcom_pcie_ops ops_v1 = {
	.get_resources = qcom_pcie_get_resources_v1,
	.init = qcom_pcie_init_v1,
	.deinit = qcom_pcie_deinit_v1,
	.ltssm_enable = qcom_pcie_v0_v1_ltssm_enable,
};

/* MSM8996; the only variant with a post_init step (pipe clock) */
static const struct qcom_pcie_ops ops_v2 = {
	.get_resources = qcom_pcie_get_resources_v2,
	.init = qcom_pcie_init_v2,
	.post_init = qcom_pcie_post_init_v2,
	.deinit = qcom_pcie_deinit_v2,
	.ltssm_enable = qcom_pcie_v2_ltssm_enable,
};

/* Core DWC callbacks */
static const struct dw_pcie_ops dw_pcie_ops = {
	.link_up = qcom_pcie_link_up,
};
667
/*
 * Probe: allocate state, map the parf/dbi/elbi register regions, look up
 * the optional PERST# GPIO and PHY, acquire variant resources, wire up the
 * MSI interrupt (if enabled), init the PHY and register the DWC host.
 *
 * Returns 0 on success or a negative errno (devm handles most cleanup).
 */
static int qcom_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct resource *res;
	struct pcie_port *pp;
	struct dw_pcie *pci;
	struct qcom_pcie *pcie;
	int ret;

	pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
	if (!pcie)
		return -ENOMEM;

	pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
	if (!pci)
		return -ENOMEM;

	pci->dev = dev;
	pci->ops = &dw_pcie_ops;
	pp = &pci->pp;

	pcie->pci = pci;

	/* variant ops selected through the OF match table below */
	pcie->ops = (struct qcom_pcie_ops *)of_device_get_match_data(dev);

	/* optional endpoint PERST# GPIO; NULL when absent from the DT */
	pcie->reset = devm_gpiod_get_optional(dev, "perst", GPIOD_OUT_LOW);
	if (IS_ERR(pcie->reset))
		return PTR_ERR(pcie->reset);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "parf");
	pcie->parf = devm_ioremap_resource(dev, res);
	if (IS_ERR(pcie->parf))
		return PTR_ERR(pcie->parf);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
	pci->dbi_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(pci->dbi_base))
		return PTR_ERR(pci->dbi_base);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "elbi");
	pcie->elbi = devm_ioremap_resource(dev, res);
	if (IS_ERR(pcie->elbi))
		return PTR_ERR(pcie->elbi);

	/* optional PHY; a no-op stub is returned when the DT omits it */
	pcie->phy = devm_phy_optional_get(dev, "pciephy");
	if (IS_ERR(pcie->phy))
		return PTR_ERR(pcie->phy);

	ret = pcie->ops->get_resources(pcie);
	if (ret)
		return ret;

	pp->root_bus_nr = -1;
	pp->ops = &qcom_pcie_dw_ops;

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		pp->msi_irq = platform_get_irq_byname(pdev, "msi");
		if (pp->msi_irq < 0)
			return pp->msi_irq;

		ret = devm_request_irq(dev, pp->msi_irq,
				       qcom_pcie_msi_irq_handler,
				       IRQF_SHARED, "qcom-pcie-msi", pp);
		if (ret) {
			dev_err(dev, "cannot request msi irq\n");
			return ret;
		}
	}

	ret = phy_init(pcie->phy);
	if (ret)
		return ret;

	/* needed before host init: to_qcom_pcie() reads this drvdata */
	platform_set_drvdata(pdev, pcie);

	ret = dw_pcie_host_init(pp);
	if (ret) {
		dev_err(dev, "cannot initialize host\n");
		return ret;
	}

	return 0;
}
751
/* OF match table; .data selects the per-variant ops */
static const struct of_device_id qcom_pcie_match[] = {
	{ .compatible = "qcom,pcie-ipq8064", .data = &ops_v0 },
	{ .compatible = "qcom,pcie-apq8064", .data = &ops_v0 },
	{ .compatible = "qcom,pcie-apq8084", .data = &ops_v1 },
	{ .compatible = "qcom,pcie-msm8996", .data = &ops_v2 },
	{ }
};

/* No .remove: the driver is built-in and bind/unbind is suppressed */
static struct platform_driver qcom_pcie_driver = {
	.probe = qcom_pcie_probe,
	.driver = {
		.name = "qcom-pcie",
		.suppress_bind_attrs = true,
		.of_match_table = qcom_pcie_match,
	},
};
builtin_platform_driver(qcom_pcie_driver);