/* drivers/pci/host/pci-aardvark.c */
/*
 * Driver for the Aardvark PCIe controller, used on Marvell Armada
 * 3700.
 *
 * Copyright (C) 2016 Marvell
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */
11
12#include <linux/delay.h>
13#include <linux/interrupt.h>
14#include <linux/irq.h>
15#include <linux/irqdomain.h>
16#include <linux/kernel.h>
17#include <linux/pci.h>
18#include <linux/module.h>
19#include <linux/platform_device.h>
20#include <linux/of_address.h>
21#include <linux/of_pci.h>
22
/* PCIe core registers */
#define PCIE_CORE_CMD_STATUS_REG 0x4
#define PCIE_CORE_CMD_IO_ACCESS_EN BIT(0)
#define PCIE_CORE_CMD_MEM_ACCESS_EN BIT(1)
#define PCIE_CORE_CMD_MEM_IO_REQ_EN BIT(2)
#define PCIE_CORE_DEV_CTRL_STATS_REG 0xc8
#define PCIE_CORE_DEV_CTRL_STATS_RELAX_ORDER_DISABLE (0 << 4)
#define PCIE_CORE_DEV_CTRL_STATS_MAX_PAYLOAD_SZ_SHIFT 5
#define PCIE_CORE_DEV_CTRL_STATS_SNOOP_DISABLE (0 << 11)
#define PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SIZE_SHIFT 12
#define PCIE_CORE_LINK_CTRL_STAT_REG 0xd0
#define PCIE_CORE_LINK_L0S_ENTRY BIT(0)
#define PCIE_CORE_LINK_TRAINING BIT(5)
#define PCIE_CORE_LINK_WIDTH_SHIFT 20
#define PCIE_CORE_ERR_CAPCTL_REG 0x118
#define PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX BIT(5)
#define PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX_EN BIT(6)
#define PCIE_CORE_ERR_CAPCTL_ECRC_CHCK BIT(7)
#define PCIE_CORE_ERR_CAPCTL_ECRC_CHCK_RCV BIT(8)

/* PIO registers base address and register offsets */
#define PIO_BASE_ADDR 0x4000
#define PIO_CTRL (PIO_BASE_ADDR + 0x0)
#define PIO_CTRL_TYPE_MASK GENMASK(3, 0)
#define PIO_CTRL_ADDR_WIN_DISABLE BIT(24)
#define PIO_STAT (PIO_BASE_ADDR + 0x4)
#define PIO_COMPLETION_STATUS_SHIFT 7
#define PIO_COMPLETION_STATUS_MASK GENMASK(9, 7)
#define PIO_COMPLETION_STATUS_OK 0
#define PIO_COMPLETION_STATUS_UR 1
#define PIO_COMPLETION_STATUS_CRS 2
#define PIO_COMPLETION_STATUS_CA 4
#define PIO_NON_POSTED_REQ BIT(0)
#define PIO_ADDR_LS (PIO_BASE_ADDR + 0x8)
#define PIO_ADDR_MS (PIO_BASE_ADDR + 0xc)
#define PIO_WR_DATA (PIO_BASE_ADDR + 0x10)
#define PIO_WR_DATA_STRB (PIO_BASE_ADDR + 0x14)
#define PIO_RD_DATA (PIO_BASE_ADDR + 0x18)
#define PIO_START (PIO_BASE_ADDR + 0x1c)
#define PIO_ISR (PIO_BASE_ADDR + 0x20)
#define PIO_ISRM (PIO_BASE_ADDR + 0x24)

/* Aardvark Control registers */
#define CONTROL_BASE_ADDR 0x4800
#define PCIE_CORE_CTRL0_REG (CONTROL_BASE_ADDR + 0x0)
#define PCIE_GEN_SEL_MSK 0x3
#define PCIE_GEN_SEL_SHIFT 0x0
#define SPEED_GEN_1 0
#define SPEED_GEN_2 1
#define SPEED_GEN_3 2
#define IS_RC_MSK 1
#define IS_RC_SHIFT 2
#define LANE_CNT_MSK 0x18
#define LANE_CNT_SHIFT 0x3
#define LANE_COUNT_1 (0 << LANE_CNT_SHIFT)
#define LANE_COUNT_2 (1 << LANE_CNT_SHIFT)
#define LANE_COUNT_4 (2 << LANE_CNT_SHIFT)
#define LANE_COUNT_8 (3 << LANE_CNT_SHIFT)
#define LINK_TRAINING_EN BIT(6)
#define LEGACY_INTA BIT(28)
#define LEGACY_INTB BIT(29)
#define LEGACY_INTC BIT(30)
#define LEGACY_INTD BIT(31)
#define PCIE_CORE_CTRL1_REG (CONTROL_BASE_ADDR + 0x4)
#define HOT_RESET_GEN BIT(0)
#define PCIE_CORE_CTRL2_REG (CONTROL_BASE_ADDR + 0x8)
#define PCIE_CORE_CTRL2_RESERVED 0x7
#define PCIE_CORE_CTRL2_TD_ENABLE BIT(4)
#define PCIE_CORE_CTRL2_STRICT_ORDER_ENABLE BIT(5)
#define PCIE_CORE_CTRL2_OB_WIN_ENABLE BIT(6)
#define PCIE_CORE_CTRL2_MSI_ENABLE BIT(10)
#define PCIE_ISR0_REG (CONTROL_BASE_ADDR + 0x40)
#define PCIE_ISR0_MASK_REG (CONTROL_BASE_ADDR + 0x44)
#define PCIE_ISR0_MSI_INT_PENDING BIT(24)
#define PCIE_ISR0_INTX_ASSERT(val) BIT(16 + (val))
#define PCIE_ISR0_INTX_DEASSERT(val) BIT(20 + (val))
#define PCIE_ISR0_ALL_MASK GENMASK(26, 0)
#define PCIE_ISR1_REG (CONTROL_BASE_ADDR + 0x48)
#define PCIE_ISR1_MASK_REG (CONTROL_BASE_ADDR + 0x4C)
#define PCIE_ISR1_POWER_STATE_CHANGE BIT(4)
#define PCIE_ISR1_FLUSH BIT(5)
#define PCIE_ISR1_ALL_MASK GENMASK(5, 4)
#define PCIE_MSI_ADDR_LOW_REG (CONTROL_BASE_ADDR + 0x50)
#define PCIE_MSI_ADDR_HIGH_REG (CONTROL_BASE_ADDR + 0x54)
#define PCIE_MSI_STATUS_REG (CONTROL_BASE_ADDR + 0x58)
#define PCIE_MSI_MASK_REG (CONTROL_BASE_ADDR + 0x5C)
#define PCIE_MSI_PAYLOAD_REG (CONTROL_BASE_ADDR + 0x9C)

/* PCIe window configuration (outbound windows, one 0x20-byte block each) */
#define OB_WIN_BASE_ADDR 0x4c00
#define OB_WIN_BLOCK_SIZE 0x20
#define OB_WIN_REG_ADDR(win, offset) (OB_WIN_BASE_ADDR + \
 OB_WIN_BLOCK_SIZE * (win) + \
 (offset))
#define OB_WIN_MATCH_LS(win) OB_WIN_REG_ADDR(win, 0x00)
#define OB_WIN_MATCH_MS(win) OB_WIN_REG_ADDR(win, 0x04)
#define OB_WIN_REMAP_LS(win) OB_WIN_REG_ADDR(win, 0x08)
#define OB_WIN_REMAP_MS(win) OB_WIN_REG_ADDR(win, 0x0c)
#define OB_WIN_MASK_LS(win) OB_WIN_REG_ADDR(win, 0x10)
#define OB_WIN_MASK_MS(win) OB_WIN_REG_ADDR(win, 0x14)
#define OB_WIN_ACTIONS(win) OB_WIN_REG_ADDR(win, 0x18)

/* PCIe window types */
#define OB_PCIE_MEM 0x0
#define OB_PCIE_IO 0x4

/* LMI registers base address and register offsets */
#define LMI_BASE_ADDR 0x6000
#define CFG_REG (LMI_BASE_ADDR + 0x0)
#define LTSSM_SHIFT 24
#define LTSSM_MASK 0x3f
#define LTSSM_L0 0x10
#define RC_BAR_CONFIG 0x300

/* PCIe core controller registers */
#define CTRL_CORE_BASE_ADDR 0x18000
#define CTRL_CONFIG_REG (CTRL_CORE_BASE_ADDR + 0x0)
#define CTRL_MODE_SHIFT 0x0
#define CTRL_MODE_MASK 0x1
#define PCIE_CORE_MODE_DIRECT 0x0
#define PCIE_CORE_MODE_COMMAND 0x1

/* PCIe Central Interrupts Registers */
#define CENTRAL_INT_BASE_ADDR 0x1b000
#define HOST_CTRL_INT_STATUS_REG (CENTRAL_INT_BASE_ADDR + 0x0)
#define HOST_CTRL_INT_MASK_REG (CENTRAL_INT_BASE_ADDR + 0x4)
#define PCIE_IRQ_CMDQ_INT BIT(0)
#define PCIE_IRQ_MSI_STATUS_INT BIT(1)
#define PCIE_IRQ_CMD_SENT_DONE BIT(3)
#define PCIE_IRQ_DMA_INT BIT(4)
#define PCIE_IRQ_IB_DXFERDONE BIT(5)
#define PCIE_IRQ_OB_DXFERDONE BIT(6)
#define PCIE_IRQ_OB_RXFERDONE BIT(7)
#define PCIE_IRQ_COMPQ_INT BIT(12)
#define PCIE_IRQ_DIR_RD_DDR_DET BIT(13)
#define PCIE_IRQ_DIR_WR_DDR_DET BIT(14)
#define PCIE_IRQ_CORE_INT BIT(16)
#define PCIE_IRQ_CORE_INT_PIO BIT(17)
#define PCIE_IRQ_DPMU_INT BIT(18)
#define PCIE_IRQ_PCIE_MIS_INT BIT(19)
#define PCIE_IRQ_MSI_INT1_DET BIT(20)
#define PCIE_IRQ_MSI_INT2_DET BIT(21)
#define PCIE_IRQ_RC_DBELL_DET BIT(22)
#define PCIE_IRQ_EP_STATUS BIT(23)
#define PCIE_IRQ_ALL_MASK 0xfff0fb
#define PCIE_IRQ_ENABLE_INTS_MASK PCIE_IRQ_CORE_INT

/* Transaction types (programmed into PIO_CTRL low nibble) */
#define PCIE_CONFIG_RD_TYPE0 0x8
#define PCIE_CONFIG_RD_TYPE1 0x9
#define PCIE_CONFIG_WR_TYPE0 0xa
#define PCIE_CONFIG_WR_TYPE1 0xb

/* PCI_BDF shifts 8bit, so we need extra 4bit shift */
#define PCIE_BDF(dev) (dev << 4)
#define PCIE_CONF_BUS(bus) (((bus) & 0xff) << 20)
#define PCIE_CONF_DEV(dev) (((dev) & 0x1f) << 15)
#define PCIE_CONF_FUNC(fun) (((fun) & 0x7) << 12)
#define PCIE_CONF_REG(reg) ((reg) & 0xffc)
#define PCIE_CONF_ADDR(bus, devfn, where) \
 (PCIE_CONF_BUS(bus) | PCIE_CONF_DEV(PCI_SLOT(devfn)) | \
 PCIE_CONF_FUNC(PCI_FUNC(devfn)) | PCIE_CONF_REG(where))

#define PIO_TIMEOUT_MS 1

#define LINK_WAIT_MAX_RETRIES 10
#define LINK_WAIT_USLEEP_MIN 90000
#define LINK_WAIT_USLEEP_MAX 100000

#define LEGACY_IRQ_NUM 4
#define MSI_IRQ_NUM 32
/* Per-controller state for one Aardvark PCIe host bridge. */
struct advk_pcie {
	struct platform_device *pdev;
	void __iomem *base;		/* mapped controller register block */
	struct list_head resources;	/* host bridge windows parsed from DT */
	struct irq_domain *irq_domain;	/* legacy INTx domain */
	struct irq_chip irq_chip;	/* INTx chip (mask/unmask via ISR0) */
	struct msi_controller msi;
	struct irq_domain *msi_domain;
	struct irq_chip msi_irq_chip;
	DECLARE_BITMAP(msi_irq_in_use, MSI_IRQ_NUM);	/* allocated MSI hwirqs */
	struct mutex msi_used_lock;	/* protects msi_irq_in_use */
	u16 msi_msg;	/* MSI doorbell target; its phys addr is programmed into the HW */
	int root_bus_nr;
};
209
/* Write a 32-bit value to the controller register at offset @reg. */
static inline void advk_writel(struct advk_pcie *pcie, u32 val, u64 reg)
{
	writel(val, pcie->base + reg);
}
214
/* Read the 32-bit controller register at offset @reg. */
static inline u32 advk_readl(struct advk_pcie *pcie, u64 reg)
{
	return readl(pcie->base + reg);
}
219
220static int advk_pcie_link_up(struct advk_pcie *pcie)
221{
222 u32 val, ltssm_state;
223
224 val = advk_readl(pcie, CFG_REG);
225 ltssm_state = (val >> LTSSM_SHIFT) & LTSSM_MASK;
226 return ltssm_state >= LTSSM_L0;
227}
228
229static int advk_pcie_wait_for_link(struct advk_pcie *pcie)
230{
231 int retries;
232
233 /* check if the link is up or not */
234 for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
235 if (advk_pcie_link_up(pcie)) {
236 dev_info(&pcie->pdev->dev, "link up\n");
237 return 0;
238 }
239
240 usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);
241 }
242
243 dev_err(&pcie->pdev->dev, "link never came up\n");
244
245 return -ETIMEDOUT;
246}
247
/*
 * Set PCIe address window register which could be used for memory
 * mapping.
 *
 * Programs one outbound window: the match/mask pair selects the CPU
 * address range, remap gives the PCIe-side address, and action selects
 * the transaction type.  The match LS register is written again last
 * with BIT(0) set — BIT(0) appears to be the window-enable bit, so the
 * window only goes live once fully configured (TODO: confirm against
 * the Armada 3700 datasheet).
 */
static void advk_pcie_set_ob_win(struct advk_pcie *pcie,
				 u32 win_num, u32 match_ms,
				 u32 match_ls, u32 mask_ms,
				 u32 mask_ls, u32 remap_ms,
				 u32 remap_ls, u32 action)
{
	advk_writel(pcie, match_ls, OB_WIN_MATCH_LS(win_num));
	advk_writel(pcie, match_ms, OB_WIN_MATCH_MS(win_num));
	advk_writel(pcie, mask_ms, OB_WIN_MASK_MS(win_num));
	advk_writel(pcie, mask_ls, OB_WIN_MASK_LS(win_num));
	advk_writel(pcie, remap_ms, OB_WIN_REMAP_MS(win_num));
	advk_writel(pcie, remap_ls, OB_WIN_REMAP_LS(win_num));
	advk_writel(pcie, action, OB_WIN_ACTIONS(win_num));
	advk_writel(pcie, match_ls | BIT(0), OB_WIN_MATCH_LS(win_num));
}
267
268static void advk_pcie_setup_hw(struct advk_pcie *pcie)
269{
270 u32 reg;
271 int i;
272
273 /* Point PCIe unit MBUS decode windows to DRAM space */
274 for (i = 0; i < 8; i++)
275 advk_pcie_set_ob_win(pcie, i, 0, 0, 0, 0, 0, 0, 0);
276
277 /* Set to Direct mode */
278 reg = advk_readl(pcie, CTRL_CONFIG_REG);
279 reg &= ~(CTRL_MODE_MASK << CTRL_MODE_SHIFT);
280 reg |= ((PCIE_CORE_MODE_DIRECT & CTRL_MODE_MASK) << CTRL_MODE_SHIFT);
281 advk_writel(pcie, reg, CTRL_CONFIG_REG);
282
283 /* Set PCI global control register to RC mode */
284 reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
285 reg |= (IS_RC_MSK << IS_RC_SHIFT);
286 advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);
287
288 /* Set Advanced Error Capabilities and Control PF0 register */
289 reg = PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX |
290 PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX_EN |
291 PCIE_CORE_ERR_CAPCTL_ECRC_CHCK |
292 PCIE_CORE_ERR_CAPCTL_ECRC_CHCK_RCV;
293 advk_writel(pcie, reg, PCIE_CORE_ERR_CAPCTL_REG);
294
295 /* Set PCIe Device Control and Status 1 PF0 register */
296 reg = PCIE_CORE_DEV_CTRL_STATS_RELAX_ORDER_DISABLE |
297 (7 << PCIE_CORE_DEV_CTRL_STATS_MAX_PAYLOAD_SZ_SHIFT) |
298 PCIE_CORE_DEV_CTRL_STATS_SNOOP_DISABLE |
299 PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SIZE_SHIFT;
300 advk_writel(pcie, reg, PCIE_CORE_DEV_CTRL_STATS_REG);
301
302 /* Program PCIe Control 2 to disable strict ordering */
303 reg = PCIE_CORE_CTRL2_RESERVED |
304 PCIE_CORE_CTRL2_TD_ENABLE;
305 advk_writel(pcie, reg, PCIE_CORE_CTRL2_REG);
306
307 /* Set GEN2 */
308 reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
309 reg &= ~PCIE_GEN_SEL_MSK;
310 reg |= SPEED_GEN_2;
311 advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);
312
313 /* Set lane X1 */
314 reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
315 reg &= ~LANE_CNT_MSK;
316 reg |= LANE_COUNT_1;
317 advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);
318
319 /* Enable link training */
320 reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
321 reg |= LINK_TRAINING_EN;
322 advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);
323
324 /* Enable MSI */
325 reg = advk_readl(pcie, PCIE_CORE_CTRL2_REG);
326 reg |= PCIE_CORE_CTRL2_MSI_ENABLE;
327 advk_writel(pcie, reg, PCIE_CORE_CTRL2_REG);
328
329 /* Clear all interrupts */
330 advk_writel(pcie, PCIE_ISR0_ALL_MASK, PCIE_ISR0_REG);
331 advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_REG);
332 advk_writel(pcie, PCIE_IRQ_ALL_MASK, HOST_CTRL_INT_STATUS_REG);
333
334 /* Disable All ISR0/1 Sources */
335 reg = PCIE_ISR0_ALL_MASK;
336 reg &= ~PCIE_ISR0_MSI_INT_PENDING;
337 advk_writel(pcie, reg, PCIE_ISR0_MASK_REG);
338
339 advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_MASK_REG);
340
341 /* Unmask all MSI's */
342 advk_writel(pcie, 0, PCIE_MSI_MASK_REG);
343
344 /* Enable summary interrupt for GIC SPI source */
345 reg = PCIE_IRQ_ALL_MASK & (~PCIE_IRQ_ENABLE_INTS_MASK);
346 advk_writel(pcie, reg, HOST_CTRL_INT_MASK_REG);
347
348 reg = advk_readl(pcie, PCIE_CORE_CTRL2_REG);
349 reg |= PCIE_CORE_CTRL2_OB_WIN_ENABLE;
350 advk_writel(pcie, reg, PCIE_CORE_CTRL2_REG);
351
352 /* Bypass the address window mapping for PIO */
353 reg = advk_readl(pcie, PIO_CTRL);
354 reg |= PIO_CTRL_ADDR_WIN_DISABLE;
355 advk_writel(pcie, reg, PIO_CTRL);
356
357 /* Start link training */
358 reg = advk_readl(pcie, PCIE_CORE_LINK_CTRL_STAT_REG);
359 reg |= PCIE_CORE_LINK_TRAINING;
360 advk_writel(pcie, reg, PCIE_CORE_LINK_CTRL_STAT_REG);
361
362 advk_pcie_wait_for_link(pcie);
363
364 reg = PCIE_CORE_LINK_L0S_ENTRY |
365 (1 << PCIE_CORE_LINK_WIDTH_SHIFT);
366 advk_writel(pcie, reg, PCIE_CORE_LINK_CTRL_STAT_REG);
367
368 reg = advk_readl(pcie, PCIE_CORE_CMD_STATUS_REG);
369 reg |= PCIE_CORE_CMD_MEM_ACCESS_EN |
370 PCIE_CORE_CMD_IO_ACCESS_EN |
371 PCIE_CORE_CMD_MEM_IO_REQ_EN;
372 advk_writel(pcie, reg, PCIE_CORE_CMD_STATUS_REG);
373}
374
/*
 * Decode the completion status of the last PIO config transaction and
 * log an error if it did not complete OK.  A zero completion-status
 * field (PIO_COMPLETION_STATUS_OK) means success and nothing is printed.
 */
static void advk_pcie_check_pio_status(struct advk_pcie *pcie)
{
	u32 reg;
	unsigned int status;
	char *strcomp_status, *str_posted;

	reg = advk_readl(pcie, PIO_STAT);
	status = (reg & PIO_COMPLETION_STATUS_MASK) >>
		PIO_COMPLETION_STATUS_SHIFT;

	if (!status)
		return;

	switch (status) {
	case PIO_COMPLETION_STATUS_UR:
		strcomp_status = "UR";		/* Unsupported Request */
		break;
	case PIO_COMPLETION_STATUS_CRS:
		strcomp_status = "CRS";		/* Config Retry Status */
		break;
	case PIO_COMPLETION_STATUS_CA:
		strcomp_status = "CA";		/* Completer Abort */
		break;
	default:
		strcomp_status = "Unknown";
		break;
	}

	if (reg & PIO_NON_POSTED_REQ)
		str_posted = "Non-posted";
	else
		str_posted = "Posted";

	dev_err(&pcie->pdev->dev, "%s PIO Response Status: %s, %#x @ %#x\n",
		str_posted, strcomp_status, reg, advk_readl(pcie, PIO_ADDR_LS));
}
411
412static int advk_pcie_wait_pio(struct advk_pcie *pcie)
413{
414 unsigned long timeout;
415
416 timeout = jiffies + msecs_to_jiffies(PIO_TIMEOUT_MS);
417
418 while (time_before(jiffies, timeout)) {
419 u32 start, isr;
420
421 start = advk_readl(pcie, PIO_START);
422 isr = advk_readl(pcie, PIO_ISR);
423 if (!start && isr)
424 return 0;
425 }
426
427 dev_err(&pcie->pdev->dev, "config read/write timed out\n");
428 return -ETIMEDOUT;
429}
430
431static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
432 int where, int size, u32 *val)
433{
434 struct advk_pcie *pcie = bus->sysdata;
435 u32 reg;
436 int ret;
437
438 if (PCI_SLOT(devfn) != 0) {
439 *val = 0xffffffff;
440 return PCIBIOS_DEVICE_NOT_FOUND;
441 }
442
443 /* Start PIO */
444 advk_writel(pcie, 0, PIO_START);
445 advk_writel(pcie, 1, PIO_ISR);
446
447 /* Program the control register */
448 reg = advk_readl(pcie, PIO_CTRL);
449 reg &= ~PIO_CTRL_TYPE_MASK;
450 if (bus->number == pcie->root_bus_nr)
451 reg |= PCIE_CONFIG_RD_TYPE0;
452 else
453 reg |= PCIE_CONFIG_RD_TYPE1;
454 advk_writel(pcie, reg, PIO_CTRL);
455
456 /* Program the address registers */
457 reg = PCIE_BDF(devfn) | PCIE_CONF_REG(where);
458 advk_writel(pcie, reg, PIO_ADDR_LS);
459 advk_writel(pcie, 0, PIO_ADDR_MS);
460
461 /* Program the data strobe */
462 advk_writel(pcie, 0xf, PIO_WR_DATA_STRB);
463
464 /* Start the transfer */
465 advk_writel(pcie, 1, PIO_START);
466
467 ret = advk_pcie_wait_pio(pcie);
468 if (ret < 0)
469 return PCIBIOS_SET_FAILED;
470
471 advk_pcie_check_pio_status(pcie);
472
473 /* Get the read result */
474 *val = advk_readl(pcie, PIO_RD_DATA);
475 if (size == 1)
476 *val = (*val >> (8 * (where & 3))) & 0xff;
477 else if (size == 2)
478 *val = (*val >> (8 * (where & 3))) & 0xffff;
479
480 return PCIBIOS_SUCCESSFUL;
481}
482
/*
 * pci_ops ->write(): perform a PIO config-space write of @size bytes
 * at @where for @devfn on @bus.  Only slot 0 is addressable (single
 * root port); unaligned accesses are rejected.
 */
static int advk_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
			     int where, int size, u32 val)
{
	struct advk_pcie *pcie = bus->sysdata;
	u32 reg;
	u32 data_strobe = 0x0;
	int offset;
	int ret;

	if (PCI_SLOT(devfn) != 0)
		return PCIBIOS_DEVICE_NOT_FOUND;

	/* Config writes must be naturally aligned */
	if (where % size)
		return PCIBIOS_SET_FAILED;

	/* Start PIO */
	advk_writel(pcie, 0, PIO_START);
	advk_writel(pcie, 1, PIO_ISR);

	/* Program the control register: Type 0 on the root bus, else Type 1 */
	reg = advk_readl(pcie, PIO_CTRL);
	reg &= ~PIO_CTRL_TYPE_MASK;
	if (bus->number == pcie->root_bus_nr)
		reg |= PCIE_CONFIG_WR_TYPE0;
	else
		reg |= PCIE_CONFIG_WR_TYPE1;
	advk_writel(pcie, reg, PIO_CTRL);

	/* Program the address registers */
	reg = PCIE_CONF_ADDR(bus->number, devfn, where);
	advk_writel(pcie, reg, PIO_ADDR_LS);
	advk_writel(pcie, 0, PIO_ADDR_MS);

	/* Calculate the write strobe: one byte-enable bit per valid byte */
	offset = where & 0x3;
	reg = val << (8 * offset);
	data_strobe = GENMASK(size - 1, 0) << offset;

	/* Program the data register */
	advk_writel(pcie, reg, PIO_WR_DATA);

	/* Program the data strobe */
	advk_writel(pcie, data_strobe, PIO_WR_DATA_STRB);

	/* Start the transfer */
	advk_writel(pcie, 1, PIO_START);

	ret = advk_pcie_wait_pio(pcie);
	if (ret < 0)
		return PCIBIOS_SET_FAILED;

	advk_pcie_check_pio_status(pcie);

	return PCIBIOS_SUCCESSFUL;
}
538
/* Config-space accessors used by the PCI core for this host bridge. */
static struct pci_ops advk_pcie_ops = {
	.read = advk_pcie_rd_conf,
	.write = advk_pcie_wr_conf,
};
543
544static int advk_pcie_alloc_msi(struct advk_pcie *pcie)
545{
546 int hwirq;
547
548 mutex_lock(&pcie->msi_used_lock);
549 hwirq = find_first_zero_bit(pcie->msi_irq_in_use, MSI_IRQ_NUM);
550 if (hwirq >= MSI_IRQ_NUM)
551 hwirq = -ENOSPC;
552 else
553 set_bit(hwirq, pcie->msi_irq_in_use);
554 mutex_unlock(&pcie->msi_used_lock);
555
556 return hwirq;
557}
558
559static void advk_pcie_free_msi(struct advk_pcie *pcie, int hwirq)
560{
561 mutex_lock(&pcie->msi_used_lock);
562 if (!test_bit(hwirq, pcie->msi_irq_in_use))
563 dev_err(&pcie->pdev->dev, "trying to free unused MSI#%d\n",
564 hwirq);
565 else
566 clear_bit(hwirq, pcie->msi_irq_in_use);
567 mutex_unlock(&pcie->msi_used_lock);
568}
569
/*
 * msi_controller ->setup_irq() hook: allocate a hwirq from the local
 * bitmap, map it into the MSI irq_domain and compose the MSI message.
 * The message address is the physical address of pcie->msi_msg (also
 * programmed into the controller's doorbell registers at init time),
 * and the payload is the virq number — advk_pcie_handle_msi() hands
 * the payload straight to generic_handle_irq().
 */
static int advk_pcie_setup_msi_irq(struct msi_controller *chip,
				   struct pci_dev *pdev,
				   struct msi_desc *desc)
{
	struct advk_pcie *pcie = pdev->bus->sysdata;
	struct msi_msg msg;
	int virq, hwirq;
	phys_addr_t msi_msg_phys;

	/* We support MSI, but not MSI-X */
	if (desc->msi_attrib.is_msix)
		return -EINVAL;

	hwirq = advk_pcie_alloc_msi(pcie);
	if (hwirq < 0)
		return hwirq;

	virq = irq_create_mapping(pcie->msi_domain, hwirq);
	if (!virq) {
		advk_pcie_free_msi(pcie, hwirq);
		return -EINVAL;
	}

	irq_set_msi_desc(virq, desc);

	msi_msg_phys = virt_to_phys(&pcie->msi_msg);

	msg.address_lo = lower_32_bits(msi_msg_phys);
	msg.address_hi = upper_32_bits(msi_msg_phys);
	msg.data = virq;

	pci_write_msi_msg(virq, &msg);

	return 0;
}
605
/* msi_controller ->teardown_irq() hook: undo advk_pcie_setup_msi_irq(). */
static void advk_pcie_teardown_msi_irq(struct msi_controller *chip,
				       unsigned int irq)
{
	struct irq_data *d = irq_get_irq_data(irq);
	struct msi_desc *msi = irq_data_get_msi_desc(d);
	struct advk_pcie *pcie = msi_desc_to_pci_sysdata(msi);
	/* hwirq must be captured before the mapping is disposed */
	unsigned long hwirq = d->hwirq;

	irq_dispose_mapping(irq);
	advk_pcie_free_msi(pcie, hwirq);
}
617
/* Map one MSI hwirq: attach the per-controller MSI chip and simple handler. */
static int advk_pcie_msi_map(struct irq_domain *domain,
			     unsigned int virq, irq_hw_number_t hw)
{
	struct advk_pcie *pcie = domain->host_data;

	irq_set_chip_and_handler(virq, &pcie->msi_irq_chip,
				 handle_simple_irq);

	return 0;
}
628
/* Domain ops for the linear MSI irq_domain (map only). */
static const struct irq_domain_ops advk_pcie_msi_irq_ops = {
	.map = advk_pcie_msi_map,
};
632
633static void advk_pcie_irq_mask(struct irq_data *d)
634{
635 struct advk_pcie *pcie = d->domain->host_data;
636 irq_hw_number_t hwirq = irqd_to_hwirq(d);
637 u32 mask;
638
639 mask = advk_readl(pcie, PCIE_ISR0_MASK_REG);
640 mask |= PCIE_ISR0_INTX_ASSERT(hwirq);
641 advk_writel(pcie, mask, PCIE_ISR0_MASK_REG);
642}
643
644static void advk_pcie_irq_unmask(struct irq_data *d)
645{
646 struct advk_pcie *pcie = d->domain->host_data;
647 irq_hw_number_t hwirq = irqd_to_hwirq(d);
648 u32 mask;
649
650 mask = advk_readl(pcie, PCIE_ISR0_MASK_REG);
651 mask &= ~PCIE_ISR0_INTX_ASSERT(hwirq);
652 advk_writel(pcie, mask, PCIE_ISR0_MASK_REG);
653}
654
/*
 * Map one legacy INTx hwirq: start masked, level-triggered, using the
 * per-controller INTx chip.
 */
static int advk_pcie_irq_map(struct irq_domain *h,
			     unsigned int virq, irq_hw_number_t hwirq)
{
	struct advk_pcie *pcie = h->host_data;

	advk_pcie_irq_mask(irq_get_irq_data(virq));
	irq_set_status_flags(virq, IRQ_LEVEL);
	irq_set_chip_and_handler(virq, &pcie->irq_chip,
				 handle_level_irq);
	irq_set_chip_data(virq, pcie);

	return 0;
}
668
/* Domain ops for the legacy INTx irq_domain. */
static const struct irq_domain_ops advk_pcie_irq_domain_ops = {
	.map = advk_pcie_irq_map,
	.xlate = irq_domain_xlate_onecell,
};
673
/*
 * Create the MSI irq_domain and register the msi_controller.
 *
 * The controller's MSI doorbell address registers are pointed at the
 * physical address of pcie->msi_msg, so inbound MSI writes from
 * endpoints target that location and raise the MSI summary interrupt.
 * Returns 0 on success or a negative errno.
 */
static int advk_pcie_init_msi_irq_domain(struct advk_pcie *pcie)
{
	struct device *dev = &pcie->pdev->dev;
	struct device_node *node = dev->of_node;
	struct irq_chip *msi_irq_chip;
	struct msi_controller *msi;
	phys_addr_t msi_msg_phys;
	int ret;

	msi_irq_chip = &pcie->msi_irq_chip;

	msi_irq_chip->name = devm_kasprintf(dev, GFP_KERNEL, "%s-msi",
					    dev_name(dev));
	if (!msi_irq_chip->name)
		return -ENOMEM;

	/* Standard PCI MSI mask/unmask callbacks */
	msi_irq_chip->irq_enable = pci_msi_unmask_irq;
	msi_irq_chip->irq_disable = pci_msi_mask_irq;
	msi_irq_chip->irq_mask = pci_msi_mask_irq;
	msi_irq_chip->irq_unmask = pci_msi_unmask_irq;

	msi = &pcie->msi;

	msi->setup_irq = advk_pcie_setup_msi_irq;
	msi->teardown_irq = advk_pcie_teardown_msi_irq;
	msi->of_node = node;

	mutex_init(&pcie->msi_used_lock);

	/* Point the hardware MSI doorbell at pcie->msi_msg */
	msi_msg_phys = virt_to_phys(&pcie->msi_msg);

	advk_writel(pcie, lower_32_bits(msi_msg_phys),
		    PCIE_MSI_ADDR_LOW_REG);
	advk_writel(pcie, upper_32_bits(msi_msg_phys),
		    PCIE_MSI_ADDR_HIGH_REG);

	pcie->msi_domain =
		irq_domain_add_linear(NULL, MSI_IRQ_NUM,
				      &advk_pcie_msi_irq_ops, pcie);
	if (!pcie->msi_domain)
		return -ENOMEM;

	ret = of_pci_msi_chip_add(msi);
	if (ret < 0) {
		irq_domain_remove(pcie->msi_domain);
		return ret;
	}

	return 0;
}
724
/* Undo advk_pcie_init_msi_irq_domain(): unregister chip, free domain. */
static void advk_pcie_remove_msi_irq_domain(struct advk_pcie *pcie)
{
	of_pci_msi_chip_remove(&pcie->msi);
	irq_domain_remove(pcie->msi_domain);
}
730
/*
 * Create the legacy INTx irq_domain from the interrupt-controller
 * child node of the controller's DT node.  The child node reference is
 * dropped on every error path; on success it is kept since the live
 * domain continues to reference the node.
 */
static int advk_pcie_init_irq_domain(struct advk_pcie *pcie)
{
	struct device *dev = &pcie->pdev->dev;
	struct device_node *node = dev->of_node;
	struct device_node *pcie_intc_node;
	struct irq_chip *irq_chip;

	pcie_intc_node = of_get_next_child(node, NULL);
	if (!pcie_intc_node) {
		dev_err(dev, "No PCIe Intc node found\n");
		return -ENODEV;
	}

	irq_chip = &pcie->irq_chip;

	irq_chip->name = devm_kasprintf(dev, GFP_KERNEL, "%s-irq",
					dev_name(dev));
	if (!irq_chip->name) {
		of_node_put(pcie_intc_node);
		return -ENOMEM;
	}

	irq_chip->irq_mask = advk_pcie_irq_mask;
	irq_chip->irq_mask_ack = advk_pcie_irq_mask;
	irq_chip->irq_unmask = advk_pcie_irq_unmask;

	pcie->irq_domain =
		irq_domain_add_linear(pcie_intc_node, LEGACY_IRQ_NUM,
				      &advk_pcie_irq_domain_ops, pcie);
	if (!pcie->irq_domain) {
		dev_err(dev, "Failed to get a INTx IRQ domain\n");
		of_node_put(pcie_intc_node);
		return -ENOMEM;
	}

	return 0;
}
768
/* Undo advk_pcie_init_irq_domain(). */
static void advk_pcie_remove_irq_domain(struct advk_pcie *pcie)
{
	irq_domain_remove(pcie->irq_domain);
}
773
/*
 * Service pending MSIs: for each unmasked pending bit in the MSI
 * status register, ack it and dispatch.  The MSI payload is the virq
 * number that advk_pcie_setup_msi_irq() composed into the message, so
 * it is handed directly to generic_handle_irq().
 */
static void advk_pcie_handle_msi(struct advk_pcie *pcie)
{
	u32 msi_val, msi_mask, msi_status, msi_idx;
	u16 msi_data;

	msi_mask = advk_readl(pcie, PCIE_MSI_MASK_REG);
	msi_val = advk_readl(pcie, PCIE_MSI_STATUS_REG);
	msi_status = msi_val & ~msi_mask;

	for (msi_idx = 0; msi_idx < MSI_IRQ_NUM; msi_idx++) {
		if (!(BIT(msi_idx) & msi_status))
			continue;

		/* Ack this MSI, then read its payload (low byte = virq) */
		advk_writel(pcie, BIT(msi_idx), PCIE_MSI_STATUS_REG);
		msi_data = advk_readl(pcie, PCIE_MSI_PAYLOAD_REG) & 0xFF;
		generic_handle_irq(msi_data);
	}

	/* Clear the summary MSI-pending bit in ISR0 */
	advk_writel(pcie, PCIE_ISR0_MSI_INT_PENDING,
		    PCIE_ISR0_REG);
}
795
/*
 * Demultiplex the ISR0 summary status: dispatch MSI and legacy INTx
 * events to their respective handlers/domains.
 */
static void advk_pcie_handle_int(struct advk_pcie *pcie)
{
	u32 val, mask, status;
	int i, virq;

	val = advk_readl(pcie, PCIE_ISR0_REG);
	mask = advk_readl(pcie, PCIE_ISR0_MASK_REG);
	status = val & ((~mask) & PCIE_ISR0_ALL_MASK);

	/* Nothing unmasked pending: just ack whatever was latched */
	if (!status) {
		advk_writel(pcie, val, PCIE_ISR0_REG);
		return;
	}

	/* Process MSI interrupts */
	if (status & PCIE_ISR0_MSI_INT_PENDING)
		advk_pcie_handle_msi(pcie);

	/* Process legacy interrupts */
	for (i = 0; i < LEGACY_IRQ_NUM; i++) {
		if (!(status & PCIE_ISR0_INTX_ASSERT(i)))
			continue;

		/* Ack the assert bit, then fire the mapped virq */
		advk_writel(pcie, PCIE_ISR0_INTX_ASSERT(i),
			    PCIE_ISR0_REG);

		virq = irq_find_mapping(pcie->irq_domain, i);
		generic_handle_irq(virq);
	}
}
826
/*
 * Top-level handler for the controller's summary interrupt.  Returns
 * IRQ_NONE when the core-interrupt bit is not set (the line is
 * registered IRQF_SHARED).
 */
static irqreturn_t advk_pcie_irq_handler(int irq, void *arg)
{
	struct advk_pcie *pcie = arg;
	u32 status;

	status = advk_readl(pcie, HOST_CTRL_INT_STATUS_REG);
	if (!(status & PCIE_IRQ_CORE_INT))
		return IRQ_NONE;

	advk_pcie_handle_int(pcie);

	/* Clear interrupt */
	advk_writel(pcie, PCIE_IRQ_CORE_INT, HOST_CTRL_INT_STATUS_REG);

	return IRQ_HANDLED;
}
843
/*
 * Parse the host bridge ranges/bus-range from DT, claim the resources
 * and program the matching outbound windows (window 0 for MEM,
 * window 1 for I/O).  At least one non-prefetchable memory resource is
 * required.  On failure the resource list is freed before returning.
 */
static int advk_pcie_parse_request_of_pci_ranges(struct advk_pcie *pcie)
{
	int err, res_valid = 0;
	struct device *dev = &pcie->pdev->dev;
	struct device_node *np = dev->of_node;
	struct resource_entry *win;
	resource_size_t iobase;

	INIT_LIST_HEAD(&pcie->resources);

	err = of_pci_get_host_bridge_resources(np, 0, 0xff, &pcie->resources,
					       &iobase);
	if (err)
		return err;

	resource_list_for_each_entry(win, &pcie->resources) {
		struct resource *parent = NULL;
		struct resource *res = win->res;

		switch (resource_type(res)) {
		case IORESOURCE_IO:
			parent = &ioport_resource;
			/* Window 1: CPU I/O range -> PCIe I/O transactions */
			advk_pcie_set_ob_win(pcie, 1,
					     upper_32_bits(res->start),
					     lower_32_bits(res->start),
					     0, 0xF8000000, 0,
					     lower_32_bits(res->start),
					     OB_PCIE_IO);
			err = pci_remap_iospace(res, iobase);
			if (err) {
				dev_warn(dev, "error %d: failed to map resource %pR\n",
					 err, res);
				continue;
			}
			break;
		case IORESOURCE_MEM:
			parent = &iomem_resource;
			/* Window 0: CPU memory range -> PCIe MEM transactions */
			advk_pcie_set_ob_win(pcie, 0,
					     upper_32_bits(res->start),
					     lower_32_bits(res->start),
					     0x0, 0xF8000000, 0,
					     lower_32_bits(res->start),
					     (2 << 20) | OB_PCIE_MEM);
			res_valid |= !(res->flags & IORESOURCE_PREFETCH);
			break;
		case IORESOURCE_BUS:
			pcie->root_bus_nr = res->start;
			break;
		default:
			continue;
		}

		if (parent) {
			err = devm_request_resource(dev, parent, res);
			if (err)
				goto out_release_res;
		}
	}

	if (!res_valid) {
		dev_err(dev, "non-prefetchable memory resource required\n");
		err = -EINVAL;
		goto out_release_res;
	}

	return 0;

out_release_res:
	pci_free_resource_list(&pcie->resources);
	return err;
}
915
916static int advk_pcie_probe(struct platform_device *pdev)
917{
918 struct advk_pcie *pcie;
919 struct resource *res;
920 struct pci_bus *bus, *child;
921 struct msi_controller *msi;
922 struct device_node *msi_node;
923 int ret, irq;
924
925 pcie = devm_kzalloc(&pdev->dev, sizeof(struct advk_pcie),
926 GFP_KERNEL);
927 if (!pcie)
928 return -ENOMEM;
929
930 pcie->pdev = pdev;
931 platform_set_drvdata(pdev, pcie);
932
933 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
934 pcie->base = devm_ioremap_resource(&pdev->dev, res);
935 if (IS_ERR(pcie->base)) {
936 dev_err(&pdev->dev, "Failed to map registers\n");
937 return PTR_ERR(pcie->base);
938 }
939
940 irq = platform_get_irq(pdev, 0);
941 ret = devm_request_irq(&pdev->dev, irq, advk_pcie_irq_handler,
942 IRQF_SHARED | IRQF_NO_THREAD, "advk-pcie",
943 pcie);
944 if (ret) {
945 dev_err(&pdev->dev, "Failed to register interrupt\n");
946 return ret;
947 }
948
949 ret = advk_pcie_parse_request_of_pci_ranges(pcie);
950 if (ret) {
951 dev_err(&pdev->dev, "Failed to parse resources\n");
952 return ret;
953 }
954
955 advk_pcie_setup_hw(pcie);
956
957 ret = advk_pcie_init_irq_domain(pcie);
958 if (ret) {
959 dev_err(&pdev->dev, "Failed to initialize irq\n");
960 return ret;
961 }
962
963 ret = advk_pcie_init_msi_irq_domain(pcie);
964 if (ret) {
965 dev_err(&pdev->dev, "Failed to initialize irq\n");
966 advk_pcie_remove_irq_domain(pcie);
967 return ret;
968 }
969
970 msi_node = of_parse_phandle(pdev->dev.of_node, "msi-parent", 0);
971 if (msi_node)
972 msi = of_pci_find_msi_chip_by_node(msi_node);
973 else
974 msi = NULL;
975
976 bus = pci_scan_root_bus_msi(&pdev->dev, 0, &advk_pcie_ops,
977 pcie, &pcie->resources, &pcie->msi);
978 if (!bus) {
979 advk_pcie_remove_msi_irq_domain(pcie);
980 advk_pcie_remove_irq_domain(pcie);
981 return -ENOMEM;
982 }
983
984 pci_bus_assign_resources(bus);
985
986 list_for_each_entry(child, &bus->children, node)
987 pcie_bus_configure_settings(child);
988
989 pci_bus_add_devices(bus);
990
991 return 0;
992}
993
/* DT match table: one compatible for the Armada 3700 PCIe controller. */
static const struct of_device_id advk_pcie_of_match_table[] = {
	{ .compatible = "marvell,armada-3700-pcie", },
	{},
};

static struct platform_driver advk_pcie_driver = {
	.driver = {
		.name = "advk-pcie",
		.of_match_table = advk_pcie_of_match_table,
		/* Driver unloading/unbinding currently not supported */
		.suppress_bind_attrs = true,
	},
	.probe = advk_pcie_probe,
	/* No .remove: suppress_bind_attrs above prevents unbind */
};
module_platform_driver(advk_pcie_driver);

MODULE_AUTHOR("Hezi Shahmoon <hezi.shahmoon@marvell.com>");
MODULE_DESCRIPTION("Aardvark PCIe driver");
MODULE_LICENSE("GPL v2");