]>
Commit | Line | Data |
---|---|---|
ed22aaae DK |
1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* | |
3 | * PCIe host controller driver for Intel Gateway SoCs | |
4 | * | |
5 | * Copyright (c) 2019 Intel Corporation. | |
6 | */ | |
7 | ||
8 | #include <linux/bitfield.h> | |
9 | #include <linux/clk.h> | |
10 | #include <linux/gpio/consumer.h> | |
11 | #include <linux/iopoll.h> | |
12 | #include <linux/pci_regs.h> | |
13 | #include <linux/phy/phy.h> | |
14 | #include <linux/platform_device.h> | |
15 | #include <linux/reset.h> | |
16 | ||
17 | #include "../../pci.h" | |
18 | #include "pcie-designware.h" | |
19 | ||
#define PORT_AFR_N_FTS_GEN12_DFT	(SZ_128 - 1)
#define PORT_AFR_N_FTS_GEN3		180
#define PORT_AFR_N_FTS_GEN4		196

/* PCIe Application logic Registers */
#define PCIE_APP_CCR			0x10
#define PCIE_APP_CCR_LTSSM_ENABLE	BIT(0)

#define PCIE_APP_MSG_CR			0x30
#define PCIE_APP_MSG_XMT_PM_TURNOFF	BIT(0)

#define PCIE_APP_PMC			0x44
#define PCIE_APP_PMC_IN_L2		BIT(20)

#define PCIE_APP_IRNEN			0xF4
#define PCIE_APP_IRNCR			0xF8
#define PCIE_APP_IRN_AER_REPORT		BIT(0)
#define PCIE_APP_IRN_PME		BIT(2)
#define PCIE_APP_IRN_RX_VDM_MSG		BIT(4)
#define PCIE_APP_IRN_PM_TO_ACK		BIT(9)
#define PCIE_APP_IRN_LINK_AUTO_BW_STAT	BIT(11)
#define PCIE_APP_IRN_BW_MGT		BIT(12)
#define PCIE_APP_IRN_MSG_LTR		BIT(18)
#define PCIE_APP_IRN_SYS_ERR_RC		BIT(29)
/* INTA..INTD status/enable bits start at bit (12 + PCI_INTERRUPT_INTx) */
#define PCIE_APP_INTX_OFST		12

/*
 * Fix: the INTx members must be single-bit masks (bits 13..16), not the
 * raw bit numbers.  The previous "(PCIE_APP_INTX_OFST + PCI_INTERRUPT_INTx)"
 * form OR'ed the integers 13..16 into the mask, corrupting bits 0-4 and
 * never touching the real INTx enable/ack bits.
 */
#define PCIE_APP_IRN_INT \
	(PCIE_APP_IRN_AER_REPORT | PCIE_APP_IRN_PME | \
	PCIE_APP_IRN_RX_VDM_MSG | PCIE_APP_IRN_SYS_ERR_RC | \
	PCIE_APP_IRN_PM_TO_ACK | PCIE_APP_IRN_MSG_LTR | \
	PCIE_APP_IRN_BW_MGT | PCIE_APP_IRN_LINK_AUTO_BW_STAT | \
	BIT(PCIE_APP_INTX_OFST + PCI_INTERRUPT_INTA) | \
	BIT(PCIE_APP_INTX_OFST + PCI_INTERRUPT_INTB) | \
	BIT(PCIE_APP_INTX_OFST + PCI_INTERRUPT_INTC) | \
	BIT(PCIE_APP_INTX_OFST + PCI_INTERRUPT_INTD))

/* Outbound iATU: bus addresses are CPU addresses shifted by 256 MB */
#define BUS_IATU_OFFSET			SZ_256M
/* Default endpoint reset-assert pulse width (ms) if DT gives none */
#define RESET_INTERVAL_MS		100
58 | ||
/* Per-SoC configuration, attached as OF match data. */
struct intel_pcie_soc {
	unsigned int pcie_ver;	/* DWC PCIe core IP version */
	u32 num_viewport;	/* number of iATU viewports to use */
};
63 | ||
/* Per-controller driver state; embeds the DWC core instance. */
struct intel_pcie_port {
	struct dw_pcie		pci;		/* DWC core device */
	void __iomem		*app_base;	/* application-logic registers */
	struct gpio_desc	*reset_gpio;	/* endpoint reset GPIO */
	u32			rst_intrvl;	/* reset assert time in ms */
	struct clk		*core_clk;	/* PCIe core clock */
	struct reset_control	*core_rst;	/* PCIe core reset line */
	struct phy		*phy;		/* "pcie" PHY for this port */
};
73 | ||
74 | static void pcie_update_bits(void __iomem *base, u32 ofs, u32 mask, u32 val) | |
75 | { | |
76 | u32 old; | |
77 | ||
78 | old = readl(base + ofs); | |
79 | val = (old & ~mask) | (val & mask); | |
80 | ||
81 | if (val != old) | |
82 | writel(val, base + ofs); | |
83 | } | |
84 | ||
/* Read a 32-bit register from the PCIe application-logic block. */
static inline u32 pcie_app_rd(struct intel_pcie_port *lpp, u32 ofs)
{
	return readl(lpp->app_base + ofs);
}
89 | ||
/* Write a 32-bit register in the PCIe application-logic block. */
static inline void pcie_app_wr(struct intel_pcie_port *lpp, u32 ofs, u32 val)
{
	writel(val, lpp->app_base + ofs);
}
94 | ||
/* Read-modify-write the masked bits of an application-logic register. */
static void pcie_app_wr_mask(struct intel_pcie_port *lpp, u32 ofs,
			     u32 mask, u32 val)
{
	pcie_update_bits(lpp->app_base, ofs, mask, val);
}
100 | ||
/* Read RC configuration space through the DWC DBI interface. */
static inline u32 pcie_rc_cfg_rd(struct intel_pcie_port *lpp, u32 ofs)
{
	return dw_pcie_readl_dbi(&lpp->pci, ofs);
}
105 | ||
/* Write RC configuration space through the DWC DBI interface. */
static inline void pcie_rc_cfg_wr(struct intel_pcie_port *lpp, u32 ofs, u32 val)
{
	dw_pcie_writel_dbi(&lpp->pci, ofs, val);
}
110 | ||
/*
 * Read-modify-write the masked bits of an RC config register.
 * Operates directly on dbi_base with plain readl/writel via
 * pcie_update_bits() rather than the dw_pcie_*_dbi accessors.
 */
static void pcie_rc_cfg_wr_mask(struct intel_pcie_port *lpp, u32 ofs,
				u32 mask, u32 val)
{
	pcie_update_bits(lpp->pci.dbi_base, ofs, mask, val);
}
116 | ||
/* Start link training by setting the LTSSM enable bit in APP CCR. */
static void intel_pcie_ltssm_enable(struct intel_pcie_port *lpp)
{
	pcie_app_wr_mask(lpp, PCIE_APP_CCR, PCIE_APP_CCR_LTSSM_ENABLE,
			 PCIE_APP_CCR_LTSSM_ENABLE);
}
122 | ||
/* Stop the LTSSM by clearing the enable bit in APP CCR. */
static void intel_pcie_ltssm_disable(struct intel_pcie_port *lpp)
{
	pcie_app_wr_mask(lpp, PCIE_APP_CCR, PCIE_APP_CCR_LTSSM_ENABLE, 0);
}
127 | ||
/*
 * Pre-training link setup: clear the Link Disable and ASPM control
 * bits in the RC's PCIe capability Link Control register.
 */
static void intel_pcie_link_setup(struct intel_pcie_port *lpp)
{
	u32 val;
	u8 offset = dw_pcie_find_capability(&lpp->pci, PCI_CAP_ID_EXP);

	val = pcie_rc_cfg_rd(lpp, offset + PCI_EXP_LNKCTL);

	val &= ~(PCI_EXP_LNKCTL_LD | PCI_EXP_LNKCTL_ASPMC);
	pcie_rc_cfg_wr(lpp, offset + PCI_EXP_LNKCTL, val);
}
138 | ||
aeaa0bfe | 139 | static void intel_pcie_init_n_fts(struct dw_pcie *pci) |
ed22aaae | 140 | { |
aeaa0bfe RH |
141 | switch (pci->link_gen) { |
142 | case 3: | |
143 | pci->n_fts[1] = PORT_AFR_N_FTS_GEN3; | |
ed22aaae | 144 | break; |
aeaa0bfe RH |
145 | case 4: |
146 | pci->n_fts[1] = PORT_AFR_N_FTS_GEN4; | |
ed22aaae DK |
147 | break; |
148 | default: | |
aeaa0bfe | 149 | pci->n_fts[1] = PORT_AFR_N_FTS_GEN12_DFT; |
ed22aaae DK |
150 | break; |
151 | } | |
aeaa0bfe | 152 | pci->n_fts[0] = PORT_AFR_N_FTS_GEN12_DFT; |
ed22aaae DK |
153 | } |
154 | ||
/*
 * Request the endpoint "reset" GPIO (initialized driven low) and give
 * the endpoint a short settling delay after the initial state is set.
 *
 * Returns 0 on success or a negative errno from the GPIO lookup;
 * -EPROBE_DEFER is passed through silently so probing can retry.
 */
static int intel_pcie_ep_rst_init(struct intel_pcie_port *lpp)
{
	struct device *dev = lpp->pci.dev;
	int ret;

	lpp->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
	if (IS_ERR(lpp->reset_gpio)) {
		ret = PTR_ERR(lpp->reset_gpio);
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "Failed to request PCIe GPIO: %d\n", ret);
		return ret;
	}

	/* Make initial reset last for 100us */
	usleep_range(100, 200);

	return 0;
}
173 | ||
/* Put the PCIe core into reset. */
static void intel_pcie_core_rst_assert(struct intel_pcie_port *lpp)
{
	reset_control_assert(lpp->core_rst);
}
178 | ||
/* Release the PCIe core reset and let the core (and PHY) settle. */
static void intel_pcie_core_rst_deassert(struct intel_pcie_port *lpp)
{
	/*
	 * One micro-second delay to make sure the reset pulse
	 * wide enough so that core reset is clean.
	 */
	udelay(1);
	reset_control_deassert(lpp->core_rst);

	/*
	 * Some SoC core reset also reset PHY, more delay needed
	 * to make sure the reset process is done.
	 */
	usleep_range(1000, 2000);
}
194 | ||
/* Drive the endpoint reset GPIO active (endpoint held in reset). */
static void intel_pcie_device_rst_assert(struct intel_pcie_port *lpp)
{
	gpiod_set_value_cansleep(lpp->reset_gpio, 1);
}
199 | ||
/*
 * Release the endpoint from reset after holding it for the configured
 * interval (rst_intrvl, from "reset-assert-ms" or the 100 ms default).
 */
static void intel_pcie_device_rst_deassert(struct intel_pcie_port *lpp)
{
	msleep(lpp->rst_intrvl);
	gpiod_set_value_cansleep(lpp->reset_gpio, 0);
}
205 | ||
/* Mask all application-logic interrupts and ack any pending ones. */
static void intel_pcie_core_irq_disable(struct intel_pcie_port *lpp)
{
	pcie_app_wr(lpp, PCIE_APP_IRNEN, 0);
	pcie_app_wr(lpp, PCIE_APP_IRNCR, PCIE_APP_IRN_INT);
}
211 | ||
/*
 * Acquire all device resources: core clock, core reset line, the
 * optional "reset-assert-ms" property, the "app" register window and
 * the "pcie" PHY.
 *
 * Returns 0 on success or a negative errno; error prints are
 * suppressed for -EPROBE_DEFER since the probe will be retried.
 */
static int intel_pcie_get_resources(struct platform_device *pdev)
{
	struct intel_pcie_port *lpp = platform_get_drvdata(pdev);
	struct dw_pcie *pci = &lpp->pci;
	struct device *dev = pci->dev;
	int ret;

	lpp->core_clk = devm_clk_get(dev, NULL);
	if (IS_ERR(lpp->core_clk)) {
		ret = PTR_ERR(lpp->core_clk);
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "Failed to get clks: %d\n", ret);
		return ret;
	}

	lpp->core_rst = devm_reset_control_get(dev, NULL);
	if (IS_ERR(lpp->core_rst)) {
		ret = PTR_ERR(lpp->core_rst);
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "Failed to get resets: %d\n", ret);
		return ret;
	}

	/* Missing property is not an error: fall back to the default */
	ret = device_property_read_u32(dev, "reset-assert-ms",
				       &lpp->rst_intrvl);
	if (ret)
		lpp->rst_intrvl = RESET_INTERVAL_MS;

	lpp->app_base = devm_platform_ioremap_resource_byname(pdev, "app");
	if (IS_ERR(lpp->app_base))
		return PTR_ERR(lpp->app_base);

	lpp->phy = devm_phy_get(dev, "pcie");
	if (IS_ERR(lpp->phy)) {
		ret = PTR_ERR(lpp->phy);
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "Couldn't get pcie-phy: %d\n", ret);
		return ret;
	}

	return 0;
}
254 | ||
/*
 * Move the link into the L2 power state: transmit a PME_TURN_OFF
 * message, then poll the PMC register until the in-L2 flag is set
 * (5 second timeout).
 *
 * NOTE(review): the handshake is skipped (returns 0) for link_gen < 3
 * — presumably not required on pre-Gen3 configurations; confirm
 * against the SoC documentation.
 *
 * Returns 0 on success or the readl_poll_timeout() error on timeout.
 */
static int intel_pcie_wait_l2(struct intel_pcie_port *lpp)
{
	u32 value;
	int ret;
	struct dw_pcie *pci = &lpp->pci;

	if (pci->link_gen < 3)
		return 0;

	/* Send PME_TURN_OFF message */
	pcie_app_wr_mask(lpp, PCIE_APP_MSG_CR, PCIE_APP_MSG_XMT_PM_TURNOFF,
			 PCIE_APP_MSG_XMT_PM_TURNOFF);

	/* Read PMC status and wait for falling into L2 link state */
	ret = readl_poll_timeout(lpp->app_base + PCIE_APP_PMC, value,
				 value & PCIE_APP_PMC_IN_L2, 20,
				 jiffies_to_usecs(5 * HZ));
	if (ret)
		dev_err(lpp->pci.dev, "PCIe link enter L2 timeout!\n");

	return ret;
}
277 | ||
/*
 * Quiesce the port: park an active link in L2, hold the endpoint in
 * reset, and clear the RC's Memory Space Enable bit.
 */
static void intel_pcie_turn_off(struct intel_pcie_port *lpp)
{
	if (dw_pcie_link_up(&lpp->pci))
		intel_pcie_wait_l2(lpp);

	/* Put endpoint device in reset state */
	intel_pcie_device_rst_assert(lpp);
	pcie_rc_cfg_wr_mask(lpp, PCI_COMMAND, PCI_COMMAND_MEMORY, 0);
}
287 | ||
/*
 * Bring up the PCIe core and train the link.  Sequence (order matters):
 * assert core+EP resets -> init PHY -> release core reset -> enable
 * core clock -> configure RC with LTSSM disabled -> release EP reset
 * -> enable LTSSM -> wait for link -> unmask app interrupts.
 *
 * Returns 0 on success; on failure, resources acquired so far are
 * unwound via the goto labels in reverse order.
 */
static int intel_pcie_host_setup(struct intel_pcie_port *lpp)
{
	int ret;
	struct dw_pcie *pci = &lpp->pci;

	intel_pcie_core_rst_assert(lpp);
	intel_pcie_device_rst_assert(lpp);

	ret = phy_init(lpp->phy);
	if (ret)
		return ret;

	intel_pcie_core_rst_deassert(lpp);

	ret = clk_prepare_enable(lpp->core_clk);
	if (ret) {
		dev_err(lpp->pci.dev, "Core clock enable failed: %d\n", ret);
		goto clk_err;
	}

	/* iATU registers sit at a fixed 0xC0000 offset into the DBI space */
	pci->atu_base = pci->dbi_base + 0xC0000;

	/* Keep the LTSSM off while the RC is being configured */
	intel_pcie_ltssm_disable(lpp);
	intel_pcie_link_setup(lpp);
	intel_pcie_init_n_fts(pci);
	dw_pcie_setup_rc(&pci->pp);
	dw_pcie_upconfig_setup(pci);

	intel_pcie_device_rst_deassert(lpp);
	intel_pcie_ltssm_enable(lpp);

	ret = dw_pcie_wait_for_link(pci);
	if (ret)
		goto app_init_err;

	/* Enable integrated interrupts */
	pcie_app_wr_mask(lpp, PCIE_APP_IRNEN, PCIE_APP_IRN_INT,
			 PCIE_APP_IRN_INT);

	return 0;

app_init_err:
	clk_disable_unprepare(lpp->core_clk);
clk_err:
	intel_pcie_core_rst_assert(lpp);
	phy_exit(lpp->phy);

	return ret;
}
337 | ||
/*
 * Common power-down path: mask interrupts, quiesce the link, then stop
 * the core clock, assert the core reset and shut down the PHY.
 */
static void __intel_pcie_remove(struct intel_pcie_port *lpp)
{
	intel_pcie_core_irq_disable(lpp);
	intel_pcie_turn_off(lpp);
	clk_disable_unprepare(lpp->core_clk);
	intel_pcie_core_rst_assert(lpp);
	phy_exit(lpp->phy);
}
346 | ||
/* Platform remove: unregister the DWC host, then power the port down. */
static int intel_pcie_remove(struct platform_device *pdev)
{
	struct intel_pcie_port *lpp = platform_get_drvdata(pdev);
	struct pcie_port *pp = &lpp->pci.pp;

	dw_pcie_host_deinit(pp);
	__intel_pcie_remove(lpp);

	return 0;
}
357 | ||
/*
 * Noirq-phase suspend: mask app interrupts, park the link in L2, then
 * power down the PHY and gate the core clock.  Aborts (keeping the
 * port powered) if L2 entry times out.
 */
static int __maybe_unused intel_pcie_suspend_noirq(struct device *dev)
{
	struct intel_pcie_port *lpp = dev_get_drvdata(dev);
	int ret;

	intel_pcie_core_irq_disable(lpp);
	ret = intel_pcie_wait_l2(lpp);
	if (ret)
		return ret;

	phy_exit(lpp->phy);
	clk_disable_unprepare(lpp->core_clk);
	return ret;
}
372 | ||
/* Noirq-phase resume: rerun the full bring-up sequence. */
static int __maybe_unused intel_pcie_resume_noirq(struct device *dev)
{
	struct intel_pcie_port *lpp = dev_get_drvdata(dev);

	return intel_pcie_host_setup(lpp);
}
379 | ||
/* dw_pcie_host_ops.host_init callback: bring up the whole port. */
static int intel_pcie_rc_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct intel_pcie_port *lpp = dev_get_drvdata(pci->dev);

	return intel_pcie_host_setup(lpp);
}
387 | ||
/*
 * Dummy function so that DW core doesn't configure MSI.
 * Returning 0 reports success while leaving MSI setup untouched.
 */
static int intel_pcie_msi_init(struct pcie_port *pp)
{
	return 0;
}
395 | ||
/*
 * dw_pcie_ops.cpu_addr_fixup: addresses programmed into the outbound
 * iATU are offset from CPU addresses by BUS_IATU_OFFSET (256 MB).
 */
static u64 intel_pcie_cpu_addr(struct dw_pcie *pcie, u64 cpu_addr)
{
	return cpu_addr + BUS_IATU_OFFSET;
}
400 | ||
/* DWC core ops: only the CPU-to-bus address fixup is required. */
static const struct dw_pcie_ops intel_pcie_ops = {
	.cpu_addr_fixup = intel_pcie_cpu_addr,
};
404 | ||
/* Host ops: msi_host_init is a no-op stub so the core skips MSI setup. */
static const struct dw_pcie_host_ops intel_pcie_dw_ops = {
	.host_init = intel_pcie_rc_init,
	.msi_host_init = intel_pcie_msi_init,
};
409 | ||
/* Per-SoC data for the "intel,lgm-pcie" compatible. */
static const struct intel_pcie_soc pcie_data = {
	.pcie_ver = 0x520A,	/* DWC PCIe core IP version */
	.num_viewport = 3,
};
414 | ||
/*
 * Probe: allocate driver state, acquire clocks/resets/regs/PHY, set up
 * the endpoint reset GPIO, then register the DWC host (which calls
 * back into intel_pcie_rc_init() to bring the link up).
 */
static int intel_pcie_probe(struct platform_device *pdev)
{
	const struct intel_pcie_soc *data;
	struct device *dev = &pdev->dev;
	struct intel_pcie_port *lpp;
	struct pcie_port *pp;
	struct dw_pcie *pci;
	int ret;

	lpp = devm_kzalloc(dev, sizeof(*lpp), GFP_KERNEL);
	if (!lpp)
		return -ENOMEM;

	platform_set_drvdata(pdev, lpp);
	pci = &lpp->pci;
	pci->dev = dev;
	pp = &pci->pp;

	ret = intel_pcie_get_resources(pdev);
	if (ret)
		return ret;

	ret = intel_pcie_ep_rst_init(lpp);
	if (ret)
		return ret;

	data = device_get_match_data(dev);
	if (!data)
		return -ENODEV;

	pci->ops = &intel_pcie_ops;
	pci->version = data->pcie_ver;
	pp->ops = &intel_pcie_dw_ops;

	ret = dw_pcie_host_init(pp);
	if (ret) {
		dev_err(dev, "Cannot initialize host\n");
		return ret;
	}

	/*
	 * Intel PCIe doesn't configure IO region, so set viewport
	 * to not perform IO region access.
	 */
	pci->num_viewport = data->num_viewport;

	return 0;
}
463 | ||
/* System sleep PM callbacks, run in the noirq phase. */
static const struct dev_pm_ops intel_pcie_pm_ops = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(intel_pcie_suspend_noirq,
				      intel_pcie_resume_noirq)
};
468 | ||
/* OF match table; .data carries the per-SoC configuration. */
static const struct of_device_id of_intel_pcie_match[] = {
	{ .compatible = "intel,lgm-pcie", .data = &pcie_data },
	{}
};
473 | ||
/* Platform driver registration; built-in only (no module support). */
static struct platform_driver intel_pcie_driver = {
	.probe = intel_pcie_probe,
	.remove = intel_pcie_remove,
	.driver = {
		.name = "intel-gw-pcie",
		.of_match_table = of_intel_pcie_match,
		.pm = &intel_pcie_pm_ops,
	},
};
builtin_platform_driver(intel_pcie_driver);