]>
Commit | Line | Data |
---|---|---|
637cfaca RL |
1 | /* |
2 | * MediaTek PCIe host controller driver. | |
3 | * | |
4 | * Copyright (c) 2017 MediaTek Inc. | |
5 | * Author: Ryder Lee <ryder.lee@mediatek.com> | |
b099631d | 6 | * Honghui Zhang <honghui.zhang@mediatek.com> |
637cfaca RL |
7 | * |
8 | * This program is free software; you can redistribute it and/or modify | |
9 | * it under the terms of the GNU General Public License version 2 as | |
10 | * published by the Free Software Foundation. | |
11 | * | |
12 | * This program is distributed in the hope that it will be useful, | |
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
15 | * GNU General Public License for more details. | |
16 | */ | |
17 | ||
18 | #include <linux/clk.h> | |
19 | #include <linux/delay.h> | |
e10b7a18 | 20 | #include <linux/iopoll.h> |
b099631d RL |
21 | #include <linux/irq.h> |
22 | #include <linux/irqdomain.h> | |
637cfaca RL |
23 | #include <linux/kernel.h> |
24 | #include <linux/of_address.h> | |
25 | #include <linux/of_pci.h> | |
26 | #include <linux/of_platform.h> | |
27 | #include <linux/pci.h> | |
28 | #include <linux/phy/phy.h> | |
29 | #include <linux/platform_device.h> | |
30 | #include <linux/pm_runtime.h> | |
31 | #include <linux/reset.h> | |
32 | ||
33 | /* PCIe shared registers */ | |
34 | #define PCIE_SYS_CFG 0x00 | |
35 | #define PCIE_INT_ENABLE 0x0c | |
36 | #define PCIE_CFG_ADDR 0x20 | |
37 | #define PCIE_CFG_DATA 0x24 | |
38 | ||
39 | /* PCIe per port registers */ | |
40 | #define PCIE_BAR0_SETUP 0x10 | |
41 | #define PCIE_CLASS 0x34 | |
42 | #define PCIE_LINK_STATUS 0x50 | |
43 | ||
44 | #define PCIE_PORT_INT_EN(x) BIT(20 + (x)) | |
45 | #define PCIE_PORT_PERST(x) BIT(1 + (x)) | |
46 | #define PCIE_PORT_LINKUP BIT(0) | |
47 | #define PCIE_BAR_MAP_MAX GENMASK(31, 16) | |
48 | ||
49 | #define PCIE_BAR_ENABLE BIT(0) | |
50 | #define PCIE_REVISION_ID BIT(0) | |
51 | #define PCIE_CLASS_CODE (0x60400 << 8) | |
52 | #define PCIE_CONF_REG(regn) (((regn) & GENMASK(7, 2)) | \ | |
53 | ((((regn) >> 8) & GENMASK(3, 0)) << 24)) | |
54 | #define PCIE_CONF_FUN(fun) (((fun) << 8) & GENMASK(10, 8)) | |
55 | #define PCIE_CONF_DEV(dev) (((dev) << 11) & GENMASK(15, 11)) | |
56 | #define PCIE_CONF_BUS(bus) (((bus) << 16) & GENMASK(23, 16)) | |
57 | #define PCIE_CONF_ADDR(regn, fun, dev, bus) \ | |
58 | (PCIE_CONF_REG(regn) | PCIE_CONF_FUN(fun) | \ | |
59 | PCIE_CONF_DEV(dev) | PCIE_CONF_BUS(bus)) | |
60 | ||
/* MediaTek specific configuration registers */
#define PCIE_FTS_NUM			0x70c
#define PCIE_FTS_NUM_MASK		GENMASK(15, 8)
/*
 * Fix: mask the FTS count first, then shift it into bits [15:8] of the
 * register.  The previous form "(x) & 0xff << 8" parsed as
 * "(x) & 0xff00" because << binds tighter than &, so
 * PCIE_FTS_NUM_L0(0x50) evaluated to 0 and the FTS field was cleared
 * instead of programmed.
 */
#define PCIE_FTS_NUM_L0(x)		(((x) & 0xff) << 8)
65 | ||
66 | #define PCIE_FC_CREDIT 0x73c | |
67 | #define PCIE_FC_CREDIT_MASK (GENMASK(31, 31) | GENMASK(28, 16)) | |
68 | #define PCIE_FC_CREDIT_VAL(x) ((x) << 16) | |
69 | ||
b099631d RL |
70 | /* PCIe V2 share registers */ |
71 | #define PCIE_SYS_CFG_V2 0x0 | |
72 | #define PCIE_CSR_LTSSM_EN(x) BIT(0 + (x) * 8) | |
73 | #define PCIE_CSR_ASPM_L1_EN(x) BIT(1 + (x) * 8) | |
74 | ||
75 | /* PCIe V2 per-port registers */ | |
43e6409d | 76 | #define PCIE_MSI_VECTOR 0x0c0 |
b099631d RL |
77 | #define PCIE_INT_MASK 0x420 |
78 | #define INTX_MASK GENMASK(19, 16) | |
79 | #define INTX_SHIFT 16 | |
b099631d | 80 | #define PCIE_INT_STATUS 0x424 |
43e6409d HZ |
81 | #define MSI_STATUS BIT(23) |
82 | #define PCIE_IMSI_STATUS 0x42c | |
83 | #define PCIE_IMSI_ADDR 0x430 | |
84 | #define MSI_MASK BIT(23) | |
85 | #define MTK_MSI_IRQS_NUM 32 | |
b099631d RL |
86 | |
87 | #define PCIE_AHB_TRANS_BASE0_L 0x438 | |
88 | #define PCIE_AHB_TRANS_BASE0_H 0x43c | |
89 | #define AHB2PCIE_SIZE(x) ((x) & GENMASK(4, 0)) | |
90 | #define PCIE_AXI_WINDOW0 0x448 | |
91 | #define WIN_ENABLE BIT(7) | |
92 | ||
93 | /* PCIe V2 configuration transaction header */ | |
94 | #define PCIE_CFG_HEADER0 0x460 | |
95 | #define PCIE_CFG_HEADER1 0x464 | |
96 | #define PCIE_CFG_HEADER2 0x468 | |
97 | #define PCIE_CFG_WDATA 0x470 | |
98 | #define PCIE_APP_TLP_REQ 0x488 | |
99 | #define PCIE_CFG_RDATA 0x48c | |
100 | #define APP_CFG_REQ BIT(0) | |
101 | #define APP_CPL_STATUS GENMASK(7, 5) | |
102 | ||
103 | #define CFG_WRRD_TYPE_0 4 | |
104 | #define CFG_WR_FMT 2 | |
105 | #define CFG_RD_FMT 0 | |
106 | ||
107 | #define CFG_DW0_LENGTH(length) ((length) & GENMASK(9, 0)) | |
108 | #define CFG_DW0_TYPE(type) (((type) << 24) & GENMASK(28, 24)) | |
109 | #define CFG_DW0_FMT(fmt) (((fmt) << 29) & GENMASK(31, 29)) | |
110 | #define CFG_DW2_REGN(regn) ((regn) & GENMASK(11, 2)) | |
111 | #define CFG_DW2_FUN(fun) (((fun) << 16) & GENMASK(18, 16)) | |
112 | #define CFG_DW2_DEV(dev) (((dev) << 19) & GENMASK(23, 19)) | |
113 | #define CFG_DW2_BUS(bus) (((bus) << 24) & GENMASK(31, 24)) | |
114 | #define CFG_HEADER_DW0(type, fmt) \ | |
115 | (CFG_DW0_LENGTH(1) | CFG_DW0_TYPE(type) | CFG_DW0_FMT(fmt)) | |
116 | #define CFG_HEADER_DW1(where, size) \ | |
117 | (GENMASK(((size) - 1), 0) << ((where) & 0x3)) | |
118 | #define CFG_HEADER_DW2(regn, fun, dev, bus) \ | |
119 | (CFG_DW2_REGN(regn) | CFG_DW2_FUN(fun) | \ | |
120 | CFG_DW2_DEV(dev) | CFG_DW2_BUS(bus)) | |
121 | ||
122 | #define PCIE_RST_CTRL 0x510 | |
123 | #define PCIE_PHY_RSTB BIT(0) | |
124 | #define PCIE_PIPE_SRSTB BIT(1) | |
125 | #define PCIE_MAC_SRSTB BIT(2) | |
126 | #define PCIE_CRSTB BIT(3) | |
127 | #define PCIE_PERSTB BIT(8) | |
128 | #define PCIE_LINKDOWN_RST_EN GENMASK(15, 13) | |
129 | #define PCIE_LINK_STATUS_V2 0x804 | |
130 | #define PCIE_PORT_LINKUP_V2 BIT(10) | |
131 | ||
c681c930 HZ |
132 | struct mtk_pcie_port; |
133 | ||
134 | /** | |
135 | * struct mtk_pcie_soc - differentiate between host generations | |
43e6409d | 136 | * @has_msi: whether this host supports MSI interrupts or not |
c681c930 HZ |
137 | * @ops: pointer to configuration access functions |
138 | * @startup: pointer to controller setting functions | |
b099631d | 139 | * @setup_irq: pointer to initialize IRQ functions |
c681c930 HZ |
140 | */ |
141 | struct mtk_pcie_soc { | |
43e6409d | 142 | bool has_msi; |
c681c930 HZ |
143 | struct pci_ops *ops; |
144 | int (*startup)(struct mtk_pcie_port *port); | |
b099631d | 145 | int (*setup_irq)(struct mtk_pcie_port *port, struct device_node *node); |
c681c930 HZ |
146 | }; |
147 | ||
637cfaca RL |
148 | /** |
149 | * struct mtk_pcie_port - PCIe port information | |
150 | * @base: IO mapped register base | |
151 | * @list: port list | |
152 | * @pcie: pointer to PCIe host info | |
153 | * @reset: pointer to port reset control | |
b099631d RL |
154 | * @sys_ck: pointer to transaction/data link layer clock |
155 | * @ahb_ck: pointer to AHB slave interface operating clock for CSR access | |
156 | * and RC initiated MMIO access | |
157 | * @axi_ck: pointer to application layer MMIO channel operating clock | |
158 | * @aux_ck: pointer to pe2_mac_bridge and pe2_mac_core operating clock | |
159 | * when pcie_mac_ck/pcie_pipe_ck is turned off | |
160 | * @obff_ck: pointer to OBFF functional block operating clock | |
161 | * @pipe_ck: pointer to LTSSM and PHY/MAC layer operating clock | |
162 | * @phy: pointer to PHY control block | |
637cfaca | 163 | * @lane: lane count |
4f6f0460 | 164 | * @slot: port slot |
b099631d | 165 | * @irq_domain: legacy INTx IRQ domain |
43e6409d HZ |
166 | * @msi_domain: MSI IRQ domain |
167 | * @msi_irq_in_use: bit map for assigned MSI IRQ | |
637cfaca RL |
168 | */ |
169 | struct mtk_pcie_port { | |
170 | void __iomem *base; | |
171 | struct list_head list; | |
172 | struct mtk_pcie *pcie; | |
173 | struct reset_control *reset; | |
174 | struct clk *sys_ck; | |
b099631d RL |
175 | struct clk *ahb_ck; |
176 | struct clk *axi_ck; | |
177 | struct clk *aux_ck; | |
178 | struct clk *obff_ck; | |
179 | struct clk *pipe_ck; | |
637cfaca RL |
180 | struct phy *phy; |
181 | u32 lane; | |
4f6f0460 | 182 | u32 slot; |
b099631d | 183 | struct irq_domain *irq_domain; |
43e6409d HZ |
184 | struct irq_domain *msi_domain; |
185 | DECLARE_BITMAP(msi_irq_in_use, MTK_MSI_IRQS_NUM); | |
637cfaca RL |
186 | }; |
187 | ||
188 | /** | |
189 | * struct mtk_pcie - PCIe host information | |
190 | * @dev: pointer to PCIe device | |
191 | * @base: IO mapped register base | |
192 | * @free_ck: free-run reference clock | |
193 | * @io: IO resource | |
194 | * @pio: PIO resource | |
195 | * @mem: non-prefetchable memory resource | |
196 | * @busn: bus range | |
197 | * @offset: IO / Memory offset | |
198 | * @ports: pointer to PCIe port information | |
c681c930 | 199 | * @soc: pointer to SoC-dependent operations |
637cfaca RL |
200 | */ |
201 | struct mtk_pcie { | |
202 | struct device *dev; | |
203 | void __iomem *base; | |
204 | struct clk *free_ck; | |
205 | ||
206 | struct resource io; | |
207 | struct resource pio; | |
208 | struct resource mem; | |
209 | struct resource busn; | |
210 | struct { | |
211 | resource_size_t mem; | |
212 | resource_size_t io; | |
213 | } offset; | |
214 | struct list_head ports; | |
c681c930 | 215 | const struct mtk_pcie_soc *soc; |
637cfaca RL |
216 | }; |
217 | ||
637cfaca RL |
/*
 * Power down the shared PCIe subsystem: gate the free-running reference
 * clock and, when the device sits in a PM domain, drop the runtime-PM
 * reference and disable runtime PM.
 */
static void mtk_pcie_subsys_powerdown(struct mtk_pcie *pcie)
{
	struct device *dev = pcie->dev;

	clk_disable_unprepare(pcie->free_ck);

	/* Runtime PM is only managed when a power domain is attached */
	if (dev->pm_domain) {
		pm_runtime_put_sync(dev);
		pm_runtime_disable(dev);
	}
}
229 | ||
/*
 * Release one port's resources: unmap its register window, unlink it
 * from the host's port list and free the port structure.  The devm_*
 * variants are used so the managed-resource bookkeeping stays balanced.
 */
static void mtk_pcie_port_free(struct mtk_pcie_port *port)
{
	struct mtk_pcie *pcie = port->pcie;
	struct device *dev = pcie->dev;

	devm_iounmap(dev, port->base);
	list_del(&port->list);
	devm_kfree(dev, port);
}
239 | ||
/*
 * Tear down every port: power off and exit the PHY, gate the per-port
 * clocks in the reverse order of mtk_pcie_enable_port(), free the port,
 * then power down the shared subsystem.  _safe iteration is required
 * because mtk_pcie_port_free() deletes the list entry.
 */
static void mtk_pcie_put_resources(struct mtk_pcie *pcie)
{
	struct mtk_pcie_port *port, *tmp;

	list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
		phy_power_off(port->phy);
		phy_exit(port->phy);
		clk_disable_unprepare(port->pipe_ck);
		clk_disable_unprepare(port->obff_ck);
		clk_disable_unprepare(port->axi_ck);
		clk_disable_unprepare(port->aux_ck);
		clk_disable_unprepare(port->ahb_ck);
		clk_disable_unprepare(port->sys_ck);
		mtk_pcie_port_free(port);
	}

	mtk_pcie_subsys_powerdown(pcie);
}
258 | ||
b099631d RL |
259 | static int mtk_pcie_check_cfg_cpld(struct mtk_pcie_port *port) |
260 | { | |
261 | u32 val; | |
262 | int err; | |
263 | ||
264 | err = readl_poll_timeout_atomic(port->base + PCIE_APP_TLP_REQ, val, | |
265 | !(val & APP_CFG_REQ), 10, | |
266 | 100 * USEC_PER_MSEC); | |
267 | if (err) | |
268 | return PCIBIOS_SET_FAILED; | |
269 | ||
270 | if (readl(port->base + PCIE_APP_TLP_REQ) & APP_CPL_STATUS) | |
271 | return PCIBIOS_SET_FAILED; | |
272 | ||
273 | return PCIBIOS_SUCCESSFUL; | |
274 | } | |
275 | ||
/*
 * Read PCI configuration space on a V2 host by building a Cfgrd TLP in
 * the per-port header registers and triggering the hardware to emit it.
 * Returns PCIBIOS_SUCCESSFUL, or PCIBIOS_SET_FAILED if the completion
 * times out or reports an error.
 */
static int mtk_pcie_hw_rd_cfg(struct mtk_pcie_port *port, u32 bus, u32 devfn,
			      int where, int size, u32 *val)
{
	u32 tmp;

	/* Write PCIe configuration transaction header for Cfgrd */
	writel(CFG_HEADER_DW0(CFG_WRRD_TYPE_0, CFG_RD_FMT),
	       port->base + PCIE_CFG_HEADER0);
	writel(CFG_HEADER_DW1(where, size), port->base + PCIE_CFG_HEADER1);
	writel(CFG_HEADER_DW2(where, PCI_FUNC(devfn), PCI_SLOT(devfn), bus),
	       port->base + PCIE_CFG_HEADER2);

	/* Trigger h/w to transmit Cfgrd TLP */
	tmp = readl(port->base + PCIE_APP_TLP_REQ);
	tmp |= APP_CFG_REQ;
	writel(tmp, port->base + PCIE_APP_TLP_REQ);

	/* Check completion status */
	if (mtk_pcie_check_cfg_cpld(port))
		return PCIBIOS_SET_FAILED;

	/* Read cpld payload of Cfgrd */
	*val = readl(port->base + PCIE_CFG_RDATA);

	/* Extract the byte/word lane selected by the offset within the DW */
	if (size == 1)
		*val = (*val >> (8 * (where & 3))) & 0xff;
	else if (size == 2)
		*val = (*val >> (8 * (where & 3))) & 0xffff;

	return PCIBIOS_SUCCESSFUL;
}
307 | ||
/*
 * Write PCI configuration space on a V2 host by building a Cfgwr TLP in
 * the per-port header registers and triggering the hardware to emit it.
 * Returns the completion status from mtk_pcie_check_cfg_cpld().
 */
static int mtk_pcie_hw_wr_cfg(struct mtk_pcie_port *port, u32 bus, u32 devfn,
			      int where, int size, u32 val)
{
	/* Write PCIe configuration transaction header for Cfgwr */
	writel(CFG_HEADER_DW0(CFG_WRRD_TYPE_0, CFG_WR_FMT),
	       port->base + PCIE_CFG_HEADER0);
	writel(CFG_HEADER_DW1(where, size), port->base + PCIE_CFG_HEADER1);
	writel(CFG_HEADER_DW2(where, PCI_FUNC(devfn), PCI_SLOT(devfn), bus),
	       port->base + PCIE_CFG_HEADER2);

	/* Write Cfgwr data, shifted to the byte lane selected by 'where' */
	val = val << 8 * (where & 3);
	writel(val, port->base + PCIE_CFG_WDATA);

	/* Trigger h/w to transmit Cfgwr TLP */
	val = readl(port->base + PCIE_APP_TLP_REQ);
	val |= APP_CFG_REQ;
	writel(val, port->base + PCIE_APP_TLP_REQ);

	/* Check completion status */
	return mtk_pcie_check_cfg_cpld(port);
}
330 | ||
331 | static struct mtk_pcie_port *mtk_pcie_find_port(struct pci_bus *bus, | |
332 | unsigned int devfn) | |
333 | { | |
334 | struct mtk_pcie *pcie = bus->sysdata; | |
335 | struct mtk_pcie_port *port; | |
336 | ||
337 | list_for_each_entry(port, &pcie->ports, list) | |
338 | if (port->slot == PCI_SLOT(devfn)) | |
339 | return port; | |
340 | ||
341 | return NULL; | |
342 | } | |
343 | ||
344 | static int mtk_pcie_config_read(struct pci_bus *bus, unsigned int devfn, | |
345 | int where, int size, u32 *val) | |
346 | { | |
347 | struct mtk_pcie_port *port; | |
348 | u32 bn = bus->number; | |
349 | int ret; | |
350 | ||
351 | port = mtk_pcie_find_port(bus, devfn); | |
352 | if (!port) { | |
353 | *val = ~0; | |
354 | return PCIBIOS_DEVICE_NOT_FOUND; | |
355 | } | |
356 | ||
357 | ret = mtk_pcie_hw_rd_cfg(port, bn, devfn, where, size, val); | |
358 | if (ret) | |
359 | *val = ~0; | |
360 | ||
361 | return ret; | |
362 | } | |
363 | ||
364 | static int mtk_pcie_config_write(struct pci_bus *bus, unsigned int devfn, | |
365 | int where, int size, u32 val) | |
366 | { | |
367 | struct mtk_pcie_port *port; | |
368 | u32 bn = bus->number; | |
369 | ||
370 | port = mtk_pcie_find_port(bus, devfn); | |
371 | if (!port) | |
372 | return PCIBIOS_DEVICE_NOT_FOUND; | |
373 | ||
374 | return mtk_pcie_hw_wr_cfg(port, bn, devfn, where, size, val); | |
375 | } | |
376 | ||
/* Config accessors for V2 hosts: TLP-based, no map_bus window */
static struct pci_ops mtk_pcie_ops_v2 = {
	.read  = mtk_pcie_config_read,
	.write = mtk_pcie_config_write,
};
381 | ||
/*
 * Bring up a V2-class port: enable LTSSM/ASPM in the subsys block where
 * present, sequence the resets, wait for link training and program the
 * AHB<->PCIe translation windows.
 * Returns 0 on success, -ETIMEDOUT if the link does not come up.
 */
static int mtk_pcie_startup_port_v2(struct mtk_pcie_port *port)
{
	struct mtk_pcie *pcie = port->pcie;
	struct resource *mem = &pcie->mem;
	u32 val;
	size_t size;
	int err;

	/* MT7622 platforms need to enable LTSSM and ASPM from PCIe subsys */
	if (pcie->base) {
		val = readl(pcie->base + PCIE_SYS_CFG_V2);
		val |= PCIE_CSR_LTSSM_EN(port->slot) |
		       PCIE_CSR_ASPM_L1_EN(port->slot);
		writel(val, pcie->base + PCIE_SYS_CFG_V2);
	}

	/* Assert all reset signals */
	writel(0, port->base + PCIE_RST_CTRL);

	/*
	 * Enable PCIe link down reset, if link status changed from link up to
	 * link down, this will reset MAC control registers and configuration
	 * space.
	 */
	writel(PCIE_LINKDOWN_RST_EN, port->base + PCIE_RST_CTRL);

	/* De-assert PHY, PE, PIPE, MAC and configuration reset	*/
	val = readl(port->base + PCIE_RST_CTRL);
	val |= PCIE_PHY_RSTB | PCIE_PERSTB | PCIE_PIPE_SRSTB |
	       PCIE_MAC_SRSTB | PCIE_CRSTB;
	writel(val, port->base + PCIE_RST_CTRL);

	/* 100ms timeout value should be enough for Gen1/2 training */
	err = readl_poll_timeout(port->base + PCIE_LINK_STATUS_V2, val,
				 !!(val & PCIE_PORT_LINKUP_V2), 20,
				 100 * USEC_PER_MSEC);
	if (err)
		return -ETIMEDOUT;

	/* Set INTx mask */
	val = readl(port->base + PCIE_INT_MASK);
	val &= ~INTX_MASK;
	writel(val, port->base + PCIE_INT_MASK);

	/* Set AHB to PCIe translation windows */
	/*
	 * NOTE(review): this uses end - start rather than resource_size()
	 * (end - start + 1) before fls() — confirm the intended window
	 * rounding against the AHB2PCIE_SIZE field definition.
	 */
	size = mem->end - mem->start;
	val = lower_32_bits(mem->start) | AHB2PCIE_SIZE(fls(size));
	writel(val, port->base + PCIE_AHB_TRANS_BASE0_L);

	val = upper_32_bits(mem->start);
	writel(val, port->base + PCIE_AHB_TRANS_BASE0_H);

	/* Set PCIe to AXI translation memory space.*/
	val = fls(0xffffffff) | WIN_ENABLE;
	writel(val, port->base + PCIE_AXI_WINDOW0);

	return 0;
}
440 | ||
43e6409d HZ |
441 | static int mtk_pcie_msi_alloc(struct mtk_pcie_port *port) |
442 | { | |
443 | int msi; | |
444 | ||
445 | msi = find_first_zero_bit(port->msi_irq_in_use, MTK_MSI_IRQS_NUM); | |
446 | if (msi < MTK_MSI_IRQS_NUM) | |
447 | set_bit(msi, port->msi_irq_in_use); | |
448 | else | |
449 | return -ENOSPC; | |
450 | ||
451 | return msi; | |
452 | } | |
453 | ||
/* Return an MSI hwirq to the port's allocation bitmap */
static void mtk_pcie_msi_free(struct mtk_pcie_port *port, unsigned long hwirq)
{
	clear_bit(hwirq, port->msi_irq_in_use);
}
458 | ||
/*
 * msi_controller .setup_irq: allocate a hwirq for @pdev, map it into the
 * port's MSI domain, and compose the MSI message the endpoint will
 * write.  The message address is the physical address of the port's
 * PCIE_MSI_VECTOR register; the payload is the hwirq number.
 * Returns 0 on success or a negative errno.
 */
static int mtk_pcie_msi_setup_irq(struct msi_controller *chip,
				  struct pci_dev *pdev, struct msi_desc *desc)
{
	struct mtk_pcie_port *port;
	struct msi_msg msg;
	unsigned int irq;
	int hwirq;
	phys_addr_t msg_addr;

	port = mtk_pcie_find_port(pdev->bus, pdev->devfn);
	if (!port)
		return -EINVAL;

	hwirq = mtk_pcie_msi_alloc(port);
	if (hwirq < 0)
		return hwirq;

	irq = irq_create_mapping(port->msi_domain, hwirq);
	if (!irq) {
		/* Undo the allocation so the hwirq is not leaked */
		mtk_pcie_msi_free(port, hwirq);
		return -EINVAL;
	}

	chip->dev = &pdev->dev;

	irq_set_msi_desc(irq, desc);

	/* MT2712/MT7622 only support 32-bit MSI addresses */
	msg_addr = virt_to_phys(port->base + PCIE_MSI_VECTOR);
	msg.address_hi = 0;
	msg.address_lo = lower_32_bits(msg_addr);
	msg.data = hwirq;

	pci_write_msi_msg(irq, &msg);

	return 0;
}
496 | ||
/*
 * msi_controller .teardown_irq: dispose of the virq mapping and release
 * the hwirq back to the owning port's bitmap.
 */
static void mtk_msi_teardown_irq(struct msi_controller *chip, unsigned int irq)
{
	struct pci_dev *pdev = to_pci_dev(chip->dev);
	struct irq_data *d = irq_get_irq_data(irq);
	irq_hw_number_t hwirq = irqd_to_hwirq(d);
	struct mtk_pcie_port *port;

	port = mtk_pcie_find_port(pdev->bus, pdev->devfn);
	if (!port)
		return;

	irq_dispose_mapping(irq);
	mtk_pcie_msi_free(port, hwirq);
}
511 | ||
/* Legacy msi_controller used as host_data for the per-port MSI domain */
static struct msi_controller mtk_pcie_msi_chip = {
	.setup_irq = mtk_pcie_msi_setup_irq,
	.teardown_irq = mtk_msi_teardown_irq,
};
516 | ||
/* irq_chip for mapped MSI virqs; mask/unmask delegate to PCI MSI helpers */
static struct irq_chip mtk_msi_irq_chip = {
	.name = "MTK PCIe MSI",
	.irq_enable = pci_msi_unmask_irq,
	.irq_disable = pci_msi_mask_irq,
	.irq_mask = pci_msi_mask_irq,
	.irq_unmask = pci_msi_unmask_irq,
};
524 | ||
/* irq_domain .map: attach chip and handler to a newly mapped MSI virq */
static int mtk_pcie_msi_map(struct irq_domain *domain, unsigned int irq,
			    irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &mtk_msi_irq_chip, handle_simple_irq);
	irq_set_chip_data(irq, domain->host_data);

	return 0;
}
533 | ||
/* Domain ops for the per-port MSI IRQ domain */
static const struct irq_domain_ops msi_domain_ops = {
	.map = mtk_pcie_msi_map,
};
537 | ||
/*
 * Enable MSI delivery on a port: program the physical address of the
 * PCIE_MSI_VECTOR register as the MSI target and clear the MSI bit in
 * the interrupt mask register.
 */
static void mtk_pcie_enable_msi(struct mtk_pcie_port *port)
{
	u32 val;
	phys_addr_t msg_addr;

	msg_addr = virt_to_phys(port->base + PCIE_MSI_VECTOR);
	val = lower_32_bits(msg_addr);
	writel(val, port->base + PCIE_IMSI_ADDR);

	val = readl(port->base + PCIE_INT_MASK);
	val &= ~MSI_MASK;
	writel(val, port->base + PCIE_INT_MASK);
}
551 | ||
b099631d RL |
/* irq_domain .map for INTx: dummy chip, simple-IRQ flow handler */
static int mtk_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
			     irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
	irq_set_chip_data(irq, domain->host_data);

	return 0;
}
560 | ||
/* Domain ops for the per-port legacy INTx IRQ domain */
static const struct irq_domain_ops intx_domain_ops = {
	.map = mtk_pcie_intx_map,
};
564 | ||
565 | static int mtk_pcie_init_irq_domain(struct mtk_pcie_port *port, | |
566 | struct device_node *node) | |
567 | { | |
568 | struct device *dev = port->pcie->dev; | |
569 | struct device_node *pcie_intc_node; | |
570 | ||
571 | /* Setup INTx */ | |
572 | pcie_intc_node = of_get_next_child(node, NULL); | |
573 | if (!pcie_intc_node) { | |
574 | dev_err(dev, "no PCIe Intc node found\n"); | |
575 | return -ENODEV; | |
576 | } | |
577 | ||
d84c246b | 578 | port->irq_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX, |
b099631d RL |
579 | &intx_domain_ops, port); |
580 | if (!port->irq_domain) { | |
581 | dev_err(dev, "failed to get INTx IRQ domain\n"); | |
582 | return -ENODEV; | |
583 | } | |
584 | ||
43e6409d HZ |
585 | if (IS_ENABLED(CONFIG_PCI_MSI)) { |
586 | port->msi_domain = irq_domain_add_linear(node, MTK_MSI_IRQS_NUM, | |
587 | &msi_domain_ops, | |
588 | &mtk_pcie_msi_chip); | |
589 | if (!port->msi_domain) { | |
590 | dev_err(dev, "failed to create MSI IRQ domain\n"); | |
591 | return -ENODEV; | |
592 | } | |
593 | mtk_pcie_enable_msi(port); | |
594 | } | |
595 | ||
b099631d RL |
596 | return 0; |
597 | } | |
598 | ||
/*
 * Shared interrupt handler for a port.  Drains and dispatches all
 * pending INTx interrupts, then (when MSI support is built in) drains
 * the MSI status registers, acking each source before dispatching its
 * mapped virq.
 */
static irqreturn_t mtk_pcie_intr_handler(int irq, void *data)
{
	struct mtk_pcie_port *port = (struct mtk_pcie_port *)data;
	unsigned long status;
	u32 virq;
	u32 bit = INTX_SHIFT;	/* INTx bits live at [19:16] of INT_STATUS */

	while ((status = readl(port->base + PCIE_INT_STATUS)) & INTX_MASK) {
		for_each_set_bit_from(bit, &status, PCI_NUM_INTX + INTX_SHIFT) {
			/* Clear the INTx */
			writel(1 << bit, port->base + PCIE_INT_STATUS);
			virq = irq_find_mapping(port->irq_domain,
						bit - INTX_SHIFT);
			generic_handle_irq(virq);
		}
	}

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		while ((status = readl(port->base + PCIE_INT_STATUS)) & MSI_STATUS) {
			unsigned long imsi_status;

			/* Per-vector status; ack each vector before handling */
			while ((imsi_status = readl(port->base + PCIE_IMSI_STATUS))) {
				for_each_set_bit(bit, &imsi_status, MTK_MSI_IRQS_NUM) {
					/* Clear the MSI */
					writel(1 << bit, port->base + PCIE_IMSI_STATUS);
					virq = irq_find_mapping(port->msi_domain, bit);
					generic_handle_irq(virq);
				}
			}
			/* Clear MSI interrupt status */
			writel(MSI_STATUS, port->base + PCIE_INT_STATUS);
		}
	}

	return IRQ_HANDLED;
}
635 | ||
636 | static int mtk_pcie_setup_irq(struct mtk_pcie_port *port, | |
637 | struct device_node *node) | |
638 | { | |
639 | struct mtk_pcie *pcie = port->pcie; | |
640 | struct device *dev = pcie->dev; | |
641 | struct platform_device *pdev = to_platform_device(dev); | |
642 | int err, irq; | |
643 | ||
644 | irq = platform_get_irq(pdev, port->slot); | |
645 | err = devm_request_irq(dev, irq, mtk_pcie_intr_handler, | |
646 | IRQF_SHARED, "mtk-pcie", port); | |
647 | if (err) { | |
648 | dev_err(dev, "unable to request IRQ %d\n", irq); | |
649 | return err; | |
650 | } | |
651 | ||
652 | err = mtk_pcie_init_irq_domain(port, node); | |
653 | if (err) { | |
43e6409d | 654 | dev_err(dev, "failed to init PCIe IRQ domain\n"); |
b099631d RL |
655 | return err; |
656 | } | |
657 | ||
658 | return 0; | |
659 | } | |
660 | ||
637cfaca RL |
/*
 * V1 config access: latch the target BDF/register into the shared
 * PCIE_CFG_ADDR register, then return the matching byte offset inside
 * the PCIE_CFG_DATA window for pci_generic_config_read/write to use.
 */
static void __iomem *mtk_pcie_map_bus(struct pci_bus *bus,
				      unsigned int devfn, int where)
{
	struct mtk_pcie *pcie = bus->sysdata;

	writel(PCIE_CONF_ADDR(where, PCI_FUNC(devfn), PCI_SLOT(devfn),
			      bus->number), pcie->base + PCIE_CFG_ADDR);

	return pcie->base + PCIE_CFG_DATA + (where & 3);
}
671 | ||
/* Config accessors for V1 hosts: address/data window via map_bus */
static struct pci_ops mtk_pcie_ops = {
	.map_bus = mtk_pcie_map_bus,
	.read  = pci_generic_config_read,
	.write = pci_generic_config_write,
};
677 | ||
/*
 * Bring up a V1 port: toggle PERST#, wait for link training, enable the
 * port interrupt, and program BAR0, class code and link parameters
 * through the shared address/data config window.
 * Returns 0 on success, -ETIMEDOUT if the link does not come up.
 */
static int mtk_pcie_startup_port(struct mtk_pcie_port *port)
{
	struct mtk_pcie *pcie = port->pcie;
	/* Reconstruct func/slot from the slot number encoded as devfn */
	u32 func = PCI_FUNC(port->slot << 3);
	u32 slot = PCI_SLOT(port->slot << 3);
	u32 val;
	int err;

	/* assert port PERST_N */
	val = readl(pcie->base + PCIE_SYS_CFG);
	val |= PCIE_PORT_PERST(port->slot);
	writel(val, pcie->base + PCIE_SYS_CFG);

	/* de-assert port PERST_N */
	val = readl(pcie->base + PCIE_SYS_CFG);
	val &= ~PCIE_PORT_PERST(port->slot);
	writel(val, pcie->base + PCIE_SYS_CFG);

	/* 100ms timeout value should be enough for Gen1/2 training */
	err = readl_poll_timeout(port->base + PCIE_LINK_STATUS, val,
				 !!(val & PCIE_PORT_LINKUP), 20,
				 100 * USEC_PER_MSEC);
	if (err)
		return -ETIMEDOUT;

	/* enable interrupt */
	val = readl(pcie->base + PCIE_INT_ENABLE);
	val |= PCIE_PORT_INT_EN(port->slot);
	writel(val, pcie->base + PCIE_INT_ENABLE);

	/* map to all DDR region. We need to set it before cfg operation. */
	writel(PCIE_BAR_MAP_MAX | PCIE_BAR_ENABLE,
	       port->base + PCIE_BAR0_SETUP);

	/* configure class code and revision ID */
	writel(PCIE_CLASS_CODE | PCIE_REVISION_ID, port->base + PCIE_CLASS);

	/* configure FC credit (read-modify-write via the CFG window; the
	 * address register is re-written before the data write because the
	 * intervening read cycles the window) */
	writel(PCIE_CONF_ADDR(PCIE_FC_CREDIT, func, slot, 0),
	       pcie->base + PCIE_CFG_ADDR);
	val = readl(pcie->base + PCIE_CFG_DATA);
	val &= ~PCIE_FC_CREDIT_MASK;
	val |= PCIE_FC_CREDIT_VAL(0x806c);
	writel(PCIE_CONF_ADDR(PCIE_FC_CREDIT, func, slot, 0),
	       pcie->base + PCIE_CFG_ADDR);
	writel(val, pcie->base + PCIE_CFG_DATA);

	/* configure RC FTS number to 250 when it leaves L0s */
	writel(PCIE_CONF_ADDR(PCIE_FTS_NUM, func, slot, 0),
	       pcie->base + PCIE_CFG_ADDR);
	val = readl(pcie->base + PCIE_CFG_DATA);
	val &= ~PCIE_FTS_NUM_MASK;
	val |= PCIE_FTS_NUM_L0(0x50);
	writel(PCIE_CONF_ADDR(PCIE_FTS_NUM, func, slot, 0),
	       pcie->base + PCIE_CFG_ADDR);
	writel(val, pcie->base + PCIE_CFG_DATA);

	return 0;
}
737 | ||
/*
 * Power up one port: enable its clocks (optional ones are NULL and
 * become no-ops), cycle the reset line, bring up the PHY and run the
 * SoC-specific startup hook.  On any failure the goto chain unwinds in
 * strict reverse order and the port is freed, so a dead port never
 * remains on the host's list.
 */
static void mtk_pcie_enable_port(struct mtk_pcie_port *port)
{
	struct mtk_pcie *pcie = port->pcie;
	struct device *dev = pcie->dev;
	int err;

	err = clk_prepare_enable(port->sys_ck);
	if (err) {
		dev_err(dev, "failed to enable sys_ck%d clock\n", port->slot);
		goto err_sys_clk;
	}

	err = clk_prepare_enable(port->ahb_ck);
	if (err) {
		dev_err(dev, "failed to enable ahb_ck%d\n", port->slot);
		goto err_ahb_clk;
	}

	err = clk_prepare_enable(port->aux_ck);
	if (err) {
		dev_err(dev, "failed to enable aux_ck%d\n", port->slot);
		goto err_aux_clk;
	}

	err = clk_prepare_enable(port->axi_ck);
	if (err) {
		dev_err(dev, "failed to enable axi_ck%d\n", port->slot);
		goto err_axi_clk;
	}

	err = clk_prepare_enable(port->obff_ck);
	if (err) {
		dev_err(dev, "failed to enable obff_ck%d\n", port->slot);
		goto err_obff_clk;
	}

	err = clk_prepare_enable(port->pipe_ck);
	if (err) {
		dev_err(dev, "failed to enable pipe_ck%d\n", port->slot);
		goto err_pipe_clk;
	}

	/* Pulse the port reset before touching the PHY */
	reset_control_assert(port->reset);
	reset_control_deassert(port->reset);

	err = phy_init(port->phy);
	if (err) {
		dev_err(dev, "failed to initialize port%d phy\n", port->slot);
		goto err_phy_init;
	}

	err = phy_power_on(port->phy);
	if (err) {
		dev_err(dev, "failed to power on port%d phy\n", port->slot);
		goto err_phy_on;
	}

	/* SoC-specific bring-up; zero return means the link came up */
	if (!pcie->soc->startup(port))
		return;

	dev_info(dev, "Port%d link down\n", port->slot);

	/* Unwind in reverse acquisition order */
	phy_power_off(port->phy);
err_phy_on:
	phy_exit(port->phy);
err_phy_init:
	clk_disable_unprepare(port->pipe_ck);
err_pipe_clk:
	clk_disable_unprepare(port->obff_ck);
err_obff_clk:
	clk_disable_unprepare(port->axi_ck);
err_axi_clk:
	clk_disable_unprepare(port->aux_ck);
err_aux_clk:
	clk_disable_unprepare(port->ahb_ck);
err_ahb_clk:
	clk_disable_unprepare(port->sys_ck);
err_sys_clk:
	mtk_pcie_port_free(port);
}
818 | ||
4f6f0460 HZ |
819 | static int mtk_pcie_parse_port(struct mtk_pcie *pcie, |
820 | struct device_node *node, | |
821 | int slot) | |
637cfaca RL |
822 | { |
823 | struct mtk_pcie_port *port; | |
824 | struct resource *regs; | |
825 | struct device *dev = pcie->dev; | |
826 | struct platform_device *pdev = to_platform_device(dev); | |
827 | char name[10]; | |
828 | int err; | |
829 | ||
830 | port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL); | |
831 | if (!port) | |
832 | return -ENOMEM; | |
833 | ||
834 | err = of_property_read_u32(node, "num-lanes", &port->lane); | |
835 | if (err) { | |
836 | dev_err(dev, "missing num-lanes property\n"); | |
837 | return err; | |
838 | } | |
839 | ||
1eacd7b8 RL |
840 | snprintf(name, sizeof(name), "port%d", slot); |
841 | regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, name); | |
637cfaca RL |
842 | port->base = devm_ioremap_resource(dev, regs); |
843 | if (IS_ERR(port->base)) { | |
4f6f0460 | 844 | dev_err(dev, "failed to map port%d base\n", slot); |
637cfaca RL |
845 | return PTR_ERR(port->base); |
846 | } | |
847 | ||
4f6f0460 | 848 | snprintf(name, sizeof(name), "sys_ck%d", slot); |
637cfaca RL |
849 | port->sys_ck = devm_clk_get(dev, name); |
850 | if (IS_ERR(port->sys_ck)) { | |
b099631d | 851 | dev_err(dev, "failed to get sys_ck%d clock\n", slot); |
637cfaca RL |
852 | return PTR_ERR(port->sys_ck); |
853 | } | |
854 | ||
b099631d RL |
855 | /* sys_ck might be divided into the following parts in some chips */ |
856 | snprintf(name, sizeof(name), "ahb_ck%d", slot); | |
857 | port->ahb_ck = devm_clk_get(dev, name); | |
858 | if (IS_ERR(port->ahb_ck)) { | |
859 | if (PTR_ERR(port->ahb_ck) == -EPROBE_DEFER) | |
860 | return -EPROBE_DEFER; | |
861 | ||
862 | port->ahb_ck = NULL; | |
863 | } | |
864 | ||
865 | snprintf(name, sizeof(name), "axi_ck%d", slot); | |
866 | port->axi_ck = devm_clk_get(dev, name); | |
867 | if (IS_ERR(port->axi_ck)) { | |
868 | if (PTR_ERR(port->axi_ck) == -EPROBE_DEFER) | |
869 | return -EPROBE_DEFER; | |
870 | ||
871 | port->axi_ck = NULL; | |
872 | } | |
873 | ||
874 | snprintf(name, sizeof(name), "aux_ck%d", slot); | |
875 | port->aux_ck = devm_clk_get(dev, name); | |
876 | if (IS_ERR(port->aux_ck)) { | |
877 | if (PTR_ERR(port->aux_ck) == -EPROBE_DEFER) | |
878 | return -EPROBE_DEFER; | |
879 | ||
880 | port->aux_ck = NULL; | |
881 | } | |
882 | ||
883 | snprintf(name, sizeof(name), "obff_ck%d", slot); | |
884 | port->obff_ck = devm_clk_get(dev, name); | |
885 | if (IS_ERR(port->obff_ck)) { | |
886 | if (PTR_ERR(port->obff_ck) == -EPROBE_DEFER) | |
887 | return -EPROBE_DEFER; | |
888 | ||
889 | port->obff_ck = NULL; | |
890 | } | |
891 | ||
892 | snprintf(name, sizeof(name), "pipe_ck%d", slot); | |
893 | port->pipe_ck = devm_clk_get(dev, name); | |
894 | if (IS_ERR(port->pipe_ck)) { | |
895 | if (PTR_ERR(port->pipe_ck) == -EPROBE_DEFER) | |
896 | return -EPROBE_DEFER; | |
897 | ||
898 | port->pipe_ck = NULL; | |
899 | } | |
900 | ||
4f6f0460 | 901 | snprintf(name, sizeof(name), "pcie-rst%d", slot); |
608fcac7 | 902 | port->reset = devm_reset_control_get_optional_exclusive(dev, name); |
637cfaca RL |
903 | if (PTR_ERR(port->reset) == -EPROBE_DEFER) |
904 | return PTR_ERR(port->reset); | |
905 | ||
906 | /* some platforms may use default PHY setting */ | |
4f6f0460 | 907 | snprintf(name, sizeof(name), "pcie-phy%d", slot); |
637cfaca RL |
908 | port->phy = devm_phy_optional_get(dev, name); |
909 | if (IS_ERR(port->phy)) | |
910 | return PTR_ERR(port->phy); | |
911 | ||
4f6f0460 | 912 | port->slot = slot; |
637cfaca RL |
913 | port->pcie = pcie; |
914 | ||
b099631d RL |
915 | if (pcie->soc->setup_irq) { |
916 | err = pcie->soc->setup_irq(port, node); | |
917 | if (err) | |
918 | return err; | |
919 | } | |
920 | ||
637cfaca RL |
921 | INIT_LIST_HEAD(&port->list); |
922 | list_add_tail(&port->list, &pcie->ports); | |
923 | ||
924 | return 0; | |
925 | } | |
926 | ||
927 | static int mtk_pcie_subsys_powerup(struct mtk_pcie *pcie) | |
928 | { | |
929 | struct device *dev = pcie->dev; | |
930 | struct platform_device *pdev = to_platform_device(dev); | |
931 | struct resource *regs; | |
932 | int err; | |
933 | ||
1eacd7b8 RL |
934 | /* get shared registers, which are optional */ |
935 | regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "subsys"); | |
936 | if (regs) { | |
937 | pcie->base = devm_ioremap_resource(dev, regs); | |
938 | if (IS_ERR(pcie->base)) { | |
939 | dev_err(dev, "failed to map shared register\n"); | |
940 | return PTR_ERR(pcie->base); | |
941 | } | |
637cfaca RL |
942 | } |
943 | ||
944 | pcie->free_ck = devm_clk_get(dev, "free_ck"); | |
945 | if (IS_ERR(pcie->free_ck)) { | |
946 | if (PTR_ERR(pcie->free_ck) == -EPROBE_DEFER) | |
947 | return -EPROBE_DEFER; | |
948 | ||
949 | pcie->free_ck = NULL; | |
950 | } | |
951 | ||
952 | if (dev->pm_domain) { | |
953 | pm_runtime_enable(dev); | |
954 | pm_runtime_get_sync(dev); | |
955 | } | |
956 | ||
957 | /* enable top level clock */ | |
958 | err = clk_prepare_enable(pcie->free_ck); | |
959 | if (err) { | |
960 | dev_err(dev, "failed to enable free_ck\n"); | |
961 | goto err_free_ck; | |
962 | } | |
963 | ||
964 | return 0; | |
965 | ||
966 | err_free_ck: | |
967 | if (dev->pm_domain) { | |
968 | pm_runtime_put_sync(dev); | |
969 | pm_runtime_disable(dev); | |
970 | } | |
971 | ||
972 | return err; | |
973 | } | |
974 | ||
975 | static int mtk_pcie_setup(struct mtk_pcie *pcie) | |
976 | { | |
977 | struct device *dev = pcie->dev; | |
978 | struct device_node *node = dev->of_node, *child; | |
979 | struct of_pci_range_parser parser; | |
980 | struct of_pci_range range; | |
981 | struct resource res; | |
982 | struct mtk_pcie_port *port, *tmp; | |
983 | int err; | |
984 | ||
985 | if (of_pci_range_parser_init(&parser, node)) { | |
986 | dev_err(dev, "missing \"ranges\" property\n"); | |
987 | return -EINVAL; | |
988 | } | |
989 | ||
990 | for_each_of_pci_range(&parser, &range) { | |
991 | err = of_pci_range_to_resource(&range, node, &res); | |
992 | if (err < 0) | |
993 | return err; | |
994 | ||
995 | switch (res.flags & IORESOURCE_TYPE_BITS) { | |
996 | case IORESOURCE_IO: | |
997 | pcie->offset.io = res.start - range.pci_addr; | |
998 | ||
999 | memcpy(&pcie->pio, &res, sizeof(res)); | |
1000 | pcie->pio.name = node->full_name; | |
1001 | ||
1002 | pcie->io.start = range.cpu_addr; | |
1003 | pcie->io.end = range.cpu_addr + range.size - 1; | |
1004 | pcie->io.flags = IORESOURCE_MEM; | |
1005 | pcie->io.name = "I/O"; | |
1006 | ||
1007 | memcpy(&res, &pcie->io, sizeof(res)); | |
1008 | break; | |
1009 | ||
1010 | case IORESOURCE_MEM: | |
1011 | pcie->offset.mem = res.start - range.pci_addr; | |
1012 | ||
1013 | memcpy(&pcie->mem, &res, sizeof(res)); | |
1014 | pcie->mem.name = "non-prefetchable"; | |
1015 | break; | |
1016 | } | |
1017 | } | |
1018 | ||
1019 | err = of_pci_parse_bus_range(node, &pcie->busn); | |
1020 | if (err < 0) { | |
1021 | dev_err(dev, "failed to parse bus ranges property: %d\n", err); | |
1022 | pcie->busn.name = node->name; | |
1023 | pcie->busn.start = 0; | |
1024 | pcie->busn.end = 0xff; | |
1025 | pcie->busn.flags = IORESOURCE_BUS; | |
1026 | } | |
1027 | ||
1028 | for_each_available_child_of_node(node, child) { | |
4f6f0460 | 1029 | int slot; |
637cfaca RL |
1030 | |
1031 | err = of_pci_get_devfn(child); | |
1032 | if (err < 0) { | |
1033 | dev_err(dev, "failed to parse devfn: %d\n", err); | |
1034 | return err; | |
1035 | } | |
1036 | ||
4f6f0460 | 1037 | slot = PCI_SLOT(err); |
637cfaca | 1038 | |
4f6f0460 | 1039 | err = mtk_pcie_parse_port(pcie, child, slot); |
637cfaca RL |
1040 | if (err) |
1041 | return err; | |
1042 | } | |
1043 | ||
1044 | err = mtk_pcie_subsys_powerup(pcie); | |
1045 | if (err) | |
1046 | return err; | |
1047 | ||
1048 | /* enable each port, and then check link status */ | |
1049 | list_for_each_entry_safe(port, tmp, &pcie->ports, list) | |
4f6f0460 | 1050 | mtk_pcie_enable_port(port); |
637cfaca RL |
1051 | |
1052 | /* power down PCIe subsys if slots are all empty (link down) */ | |
1053 | if (list_empty(&pcie->ports)) | |
1054 | mtk_pcie_subsys_powerdown(pcie); | |
1055 | ||
1056 | return 0; | |
1057 | } | |
1058 | ||
1059 | static int mtk_pcie_request_resources(struct mtk_pcie *pcie) | |
1060 | { | |
1061 | struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie); | |
1062 | struct list_head *windows = &host->windows; | |
1063 | struct device *dev = pcie->dev; | |
1064 | int err; | |
1065 | ||
1066 | pci_add_resource_offset(windows, &pcie->pio, pcie->offset.io); | |
1067 | pci_add_resource_offset(windows, &pcie->mem, pcie->offset.mem); | |
1068 | pci_add_resource(windows, &pcie->busn); | |
1069 | ||
1070 | err = devm_request_pci_bus_resources(dev, windows); | |
1071 | if (err < 0) | |
1072 | return err; | |
1073 | ||
1074 | pci_remap_iospace(&pcie->pio, pcie->io.start); | |
1075 | ||
1076 | return 0; | |
1077 | } | |
1078 | ||
1079 | static int mtk_pcie_register_host(struct pci_host_bridge *host) | |
1080 | { | |
1081 | struct mtk_pcie *pcie = pci_host_bridge_priv(host); | |
1082 | struct pci_bus *child; | |
1083 | int err; | |
1084 | ||
1085 | host->busnr = pcie->busn.start; | |
1086 | host->dev.parent = pcie->dev; | |
c681c930 | 1087 | host->ops = pcie->soc->ops; |
637cfaca RL |
1088 | host->map_irq = of_irq_parse_and_map_pci; |
1089 | host->swizzle_irq = pci_common_swizzle; | |
b099631d | 1090 | host->sysdata = pcie; |
43e6409d HZ |
1091 | if (IS_ENABLED(CONFIG_PCI_MSI) && pcie->soc->has_msi) |
1092 | host->msi = &mtk_pcie_msi_chip; | |
637cfaca RL |
1093 | |
1094 | err = pci_scan_root_bus_bridge(host); | |
1095 | if (err < 0) | |
1096 | return err; | |
1097 | ||
1098 | pci_bus_size_bridges(host->bus); | |
1099 | pci_bus_assign_resources(host->bus); | |
1100 | ||
1101 | list_for_each_entry(child, &host->bus->children, node) | |
1102 | pcie_bus_configure_settings(child); | |
1103 | ||
1104 | pci_bus_add_devices(host->bus); | |
1105 | ||
1106 | return 0; | |
1107 | } | |
1108 | ||
1109 | static int mtk_pcie_probe(struct platform_device *pdev) | |
1110 | { | |
1111 | struct device *dev = &pdev->dev; | |
1112 | struct mtk_pcie *pcie; | |
1113 | struct pci_host_bridge *host; | |
1114 | int err; | |
1115 | ||
1116 | host = devm_pci_alloc_host_bridge(dev, sizeof(*pcie)); | |
1117 | if (!host) | |
1118 | return -ENOMEM; | |
1119 | ||
1120 | pcie = pci_host_bridge_priv(host); | |
1121 | ||
1122 | pcie->dev = dev; | |
c681c930 | 1123 | pcie->soc = of_device_get_match_data(dev); |
637cfaca RL |
1124 | platform_set_drvdata(pdev, pcie); |
1125 | INIT_LIST_HEAD(&pcie->ports); | |
1126 | ||
1127 | err = mtk_pcie_setup(pcie); | |
1128 | if (err) | |
1129 | return err; | |
1130 | ||
1131 | err = mtk_pcie_request_resources(pcie); | |
1132 | if (err) | |
1133 | goto put_resources; | |
1134 | ||
1135 | err = mtk_pcie_register_host(host); | |
1136 | if (err) | |
1137 | goto put_resources; | |
1138 | ||
1139 | return 0; | |
1140 | ||
1141 | put_resources: | |
1142 | if (!list_empty(&pcie->ports)) | |
1143 | mtk_pcie_put_resources(pcie); | |
1144 | ||
1145 | return err; | |
1146 | } | |
1147 | ||
c681c930 HZ |
1148 | static const struct mtk_pcie_soc mtk_pcie_soc_v1 = { |
1149 | .ops = &mtk_pcie_ops, | |
1150 | .startup = mtk_pcie_startup_port, | |
1151 | }; | |
1152 | ||
b099631d | 1153 | static const struct mtk_pcie_soc mtk_pcie_soc_v2 = { |
43e6409d | 1154 | .has_msi = true, |
b099631d RL |
1155 | .ops = &mtk_pcie_ops_v2, |
1156 | .startup = mtk_pcie_startup_port_v2, | |
1157 | .setup_irq = mtk_pcie_setup_irq, | |
1158 | }; | |
1159 | ||
637cfaca | 1160 | static const struct of_device_id mtk_pcie_ids[] = { |
c681c930 HZ |
1161 | { .compatible = "mediatek,mt2701-pcie", .data = &mtk_pcie_soc_v1 }, |
1162 | { .compatible = "mediatek,mt7623-pcie", .data = &mtk_pcie_soc_v1 }, | |
b099631d RL |
1163 | { .compatible = "mediatek,mt2712-pcie", .data = &mtk_pcie_soc_v2 }, |
1164 | { .compatible = "mediatek,mt7622-pcie", .data = &mtk_pcie_soc_v2 }, | |
637cfaca RL |
1165 | {}, |
1166 | }; | |
1167 | ||
1168 | static struct platform_driver mtk_pcie_driver = { | |
1169 | .probe = mtk_pcie_probe, | |
1170 | .driver = { | |
1171 | .name = "mtk-pcie", | |
1172 | .of_match_table = mtk_pcie_ids, | |
1173 | .suppress_bind_attrs = true, | |
1174 | }, | |
1175 | }; | |
1176 | builtin_platform_driver(mtk_pcie_driver); |