// SPDX-License-Identifier: GPL-2.0
/*
 * Driver for the Aardvark PCIe controller, used on Marvell Armada
 * 3700.
 *
 * Copyright (C) 2016 Marvell
 *
 * Author: Hezi Shahmoon <hezi.shahmoon@marvell.com>
 */

#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pci-ecam.h>
#include <linux/init.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_gpio.h>
#include <linux/of_pci.h>

#include "../pci.h"
#include "../pci-bridge-emul.h"
/* PCIe core registers */
#define PCIE_CORE_DEV_ID_REG                    0x0
#define PCIE_CORE_CMD_STATUS_REG                0x4
#define PCIE_CORE_DEV_REV_REG                   0x8
#define PCIE_CORE_PCIEXP_CAP                    0xc0
#define PCIE_CORE_ERR_CAPCTL_REG                0x118
#define     PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX    BIT(5)
#define     PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX_EN BIT(6)
#define     PCIE_CORE_ERR_CAPCTL_ECRC_CHCK      BIT(7)
#define     PCIE_CORE_ERR_CAPCTL_ECRC_CHCK_RCV  BIT(8)

/* PIO registers base address and register offsets */
#define PIO_BASE_ADDR                           0x4000
#define PIO_CTRL                                (PIO_BASE_ADDR + 0x0)
#define   PIO_CTRL_TYPE_MASK                    GENMASK(3, 0)
#define   PIO_CTRL_ADDR_WIN_DISABLE             BIT(24)
#define PIO_STAT                                (PIO_BASE_ADDR + 0x4)
#define   PIO_COMPLETION_STATUS_SHIFT           7
#define   PIO_COMPLETION_STATUS_MASK            GENMASK(9, 7)
#define     PIO_COMPLETION_STATUS_OK            0
#define     PIO_COMPLETION_STATUS_UR            1
#define     PIO_COMPLETION_STATUS_CRS           2
#define     PIO_COMPLETION_STATUS_CA            4
#define   PIO_NON_POSTED_REQ                    BIT(10)
#define   PIO_ERR_STATUS                        BIT(11)
#define PIO_ADDR_LS                             (PIO_BASE_ADDR + 0x8)
#define PIO_ADDR_MS                             (PIO_BASE_ADDR + 0xc)
#define PIO_WR_DATA                             (PIO_BASE_ADDR + 0x10)
#define PIO_WR_DATA_STRB                        (PIO_BASE_ADDR + 0x14)
#define PIO_RD_DATA                             (PIO_BASE_ADDR + 0x18)
#define PIO_START                               (PIO_BASE_ADDR + 0x1c)
#define PIO_ISR                                 (PIO_BASE_ADDR + 0x20)
#define PIO_ISRM                                (PIO_BASE_ADDR + 0x24)
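/*
 * The PIO registers above are the only mechanism this driver uses for
 * issuing configuration transactions: advk_pcie_rd_conf() and
 * advk_pcie_wr_conf() below program PIO_CTRL with the transaction type,
 * PIO_ADDR_LS/MS with the ECAM-style address, PIO_WR_DATA(_STRB) with the
 * data and byte enables, kick the transfer via PIO_START, poll PIO_ISR
 * until completion and finally decode the COMPLETION_STATUS field of
 * PIO_STAT.
 */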
/* Aardvark Control registers */
#define CONTROL_BASE_ADDR                       0x4800
#define PCIE_CORE_CTRL0_REG                     (CONTROL_BASE_ADDR + 0x0)
#define     PCIE_GEN_SEL_MSK                    0x3
#define     PCIE_GEN_SEL_SHIFT                  0x0
#define     SPEED_GEN_1                         0
#define     SPEED_GEN_2                         1
#define     SPEED_GEN_3                         2
#define     IS_RC_MSK                           1
#define     IS_RC_SHIFT                         2
#define     LANE_CNT_MSK                        0x18
#define     LANE_CNT_SHIFT                      0x3
#define     LANE_COUNT_1                        (0 << LANE_CNT_SHIFT)
#define     LANE_COUNT_2                        (1 << LANE_CNT_SHIFT)
#define     LANE_COUNT_4                        (2 << LANE_CNT_SHIFT)
#define     LANE_COUNT_8                        (3 << LANE_CNT_SHIFT)
#define     LINK_TRAINING_EN                    BIT(6)
#define     LEGACY_INTA                         BIT(28)
#define     LEGACY_INTB                         BIT(29)
#define     LEGACY_INTC                         BIT(30)
#define     LEGACY_INTD                         BIT(31)
#define PCIE_CORE_CTRL1_REG                     (CONTROL_BASE_ADDR + 0x4)
#define     HOT_RESET_GEN                       BIT(0)
#define PCIE_CORE_CTRL2_REG                     (CONTROL_BASE_ADDR + 0x8)
#define     PCIE_CORE_CTRL2_RESERVED            0x7
#define     PCIE_CORE_CTRL2_TD_ENABLE           BIT(4)
#define     PCIE_CORE_CTRL2_STRICT_ORDER_ENABLE BIT(5)
#define     PCIE_CORE_CTRL2_OB_WIN_ENABLE       BIT(6)
#define     PCIE_CORE_CTRL2_MSI_ENABLE          BIT(10)
#define PCIE_CORE_REF_CLK_REG                   (CONTROL_BASE_ADDR + 0x14)
#define     PCIE_CORE_REF_CLK_TX_ENABLE         BIT(1)
#define     PCIE_CORE_REF_CLK_RX_ENABLE         BIT(2)
#define PCIE_MSG_LOG_REG                        (CONTROL_BASE_ADDR + 0x30)
#define PCIE_ISR0_REG                           (CONTROL_BASE_ADDR + 0x40)
#define PCIE_MSG_PM_PME_MASK                    BIT(7)
#define PCIE_ISR0_MASK_REG                      (CONTROL_BASE_ADDR + 0x44)
#define     PCIE_ISR0_MSI_INT_PENDING           BIT(24)
#define     PCIE_ISR0_CORR_ERR                  BIT(11)
#define     PCIE_ISR0_NFAT_ERR                  BIT(12)
#define     PCIE_ISR0_FAT_ERR                   BIT(13)
#define     PCIE_ISR0_ERR_MASK                  GENMASK(13, 11)
#define     PCIE_ISR0_INTX_ASSERT(val)          BIT(16 + (val))
#define     PCIE_ISR0_INTX_DEASSERT(val)        BIT(20 + (val))
#define     PCIE_ISR0_ALL_MASK                  GENMASK(31, 0)
#define PCIE_ISR1_REG                           (CONTROL_BASE_ADDR + 0x48)
#define PCIE_ISR1_MASK_REG                      (CONTROL_BASE_ADDR + 0x4C)
#define     PCIE_ISR1_POWER_STATE_CHANGE        BIT(4)
#define     PCIE_ISR1_FLUSH                     BIT(5)
#define     PCIE_ISR1_INTX_ASSERT(val)          BIT(8 + (val))
#define     PCIE_ISR1_ALL_MASK                  GENMASK(31, 0)
#define PCIE_MSI_ADDR_LOW_REG                   (CONTROL_BASE_ADDR + 0x50)
#define PCIE_MSI_ADDR_HIGH_REG                  (CONTROL_BASE_ADDR + 0x54)
#define PCIE_MSI_STATUS_REG                     (CONTROL_BASE_ADDR + 0x58)
#define PCIE_MSI_MASK_REG                       (CONTROL_BASE_ADDR + 0x5C)
#define     PCIE_MSI_ALL_MASK                   GENMASK(31, 0)
#define PCIE_MSI_PAYLOAD_REG                    (CONTROL_BASE_ADDR + 0x9C)
#define     PCIE_MSI_DATA_MASK                  GENMASK(15, 0)
/* PCIe window configuration */
#define OB_WIN_BASE_ADDR                        0x4c00
#define OB_WIN_BLOCK_SIZE                       0x20
#define OB_WIN_COUNT                            8

#define OB_WIN_REG_ADDR(win, offset)            (OB_WIN_BASE_ADDR + \
                                                 OB_WIN_BLOCK_SIZE * (win) + \
                                                 (offset))

#define OB_WIN_MATCH_LS(win)                    OB_WIN_REG_ADDR(win, 0x00)
#define     OB_WIN_ENABLE                       BIT(0)
#define OB_WIN_MATCH_MS(win)                    OB_WIN_REG_ADDR(win, 0x04)
#define OB_WIN_REMAP_LS(win)                    OB_WIN_REG_ADDR(win, 0x08)
#define OB_WIN_REMAP_MS(win)                    OB_WIN_REG_ADDR(win, 0x0c)
#define OB_WIN_MASK_LS(win)                     OB_WIN_REG_ADDR(win, 0x10)
#define OB_WIN_MASK_MS(win)                     OB_WIN_REG_ADDR(win, 0x14)
#define OB_WIN_ACTIONS(win)                     OB_WIN_REG_ADDR(win, 0x18)
#define OB_WIN_DEFAULT_ACTIONS                  (OB_WIN_ACTIONS(OB_WIN_COUNT-1) + 0x4)
#define     OB_WIN_FUNC_NUM_MASK                GENMASK(31, 24)
#define     OB_WIN_FUNC_NUM_SHIFT               24
#define     OB_WIN_FUNC_NUM_ENABLE              BIT(23)
#define     OB_WIN_BUS_NUM_BITS_MASK            GENMASK(22, 20)
#define     OB_WIN_BUS_NUM_BITS_SHIFT           20
#define     OB_WIN_MSG_CODE_ENABLE              BIT(22)
#define     OB_WIN_MSG_CODE_MASK                GENMASK(21, 14)
#define     OB_WIN_MSG_CODE_SHIFT               14
#define     OB_WIN_MSG_PAYLOAD_LEN              BIT(12)
#define     OB_WIN_ATTR_ENABLE                  BIT(11)
#define     OB_WIN_ATTR_TC_MASK                 GENMASK(10, 8)
#define     OB_WIN_ATTR_TC_SHIFT                8
#define     OB_WIN_ATTR_RELAXED                 BIT(7)
#define     OB_WIN_ATTR_NOSNOOP                 BIT(6)
#define     OB_WIN_ATTR_POISON                  BIT(5)
#define     OB_WIN_ATTR_IDO                     BIT(4)
#define     OB_WIN_TYPE_MASK                    GENMASK(3, 0)
#define     OB_WIN_TYPE_SHIFT                   0
#define     OB_WIN_TYPE_MEM                     0x0
#define     OB_WIN_TYPE_IO                      0x4
#define     OB_WIN_TYPE_CONFIG_TYPE0            0x8
#define     OB_WIN_TYPE_CONFIG_TYPE1            0x9
#define     OB_WIN_TYPE_MSG                     0xc
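/*
 * An outbound access to address A is routed through window n when
 * (A & MASK) == (MATCH & MASK). As a worked example (illustrative values
 * only), a 2 MiB I/O window at CPU address 0xe8000000 would be programmed
 * as:
 *
 *     match = 0xe8000000, remap = <PCIe bus address>,
 *     mask  = ~(0x200000 - 1) = 0xffe00000
 *
 * which is exactly the tuple advk_pcie_set_ob_win() writes into the
 * OB_WIN_MATCH/REMAP/MASK register pairs below.
 */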
/* LMI registers base address and register offsets */
#define LMI_BASE_ADDR                           0x6000
#define CFG_REG                                 (LMI_BASE_ADDR + 0x0)
#define     LTSSM_SHIFT                         24
#define     LTSSM_MASK                          0x3f
#define     RC_BAR_CONFIG                       0x300
/* LTSSM values in CFG_REG */
enum {
        LTSSM_DETECT_QUIET                      = 0x0,
        LTSSM_DETECT_ACTIVE                     = 0x1,
        LTSSM_POLLING_ACTIVE                    = 0x2,
        LTSSM_POLLING_COMPLIANCE                = 0x3,
        LTSSM_POLLING_CONFIGURATION             = 0x4,
        LTSSM_CONFIG_LINKWIDTH_START            = 0x5,
        LTSSM_CONFIG_LINKWIDTH_ACCEPT           = 0x6,
        LTSSM_CONFIG_LANENUM_ACCEPT             = 0x7,
        LTSSM_CONFIG_LANENUM_WAIT               = 0x8,
        LTSSM_CONFIG_COMPLETE                   = 0x9,
        LTSSM_CONFIG_IDLE                       = 0xa,
        LTSSM_RECOVERY_RCVR_LOCK                = 0xb,
        LTSSM_RECOVERY_SPEED                    = 0xc,
        LTSSM_RECOVERY_RCVR_CFG                 = 0xd,
        LTSSM_RECOVERY_IDLE                     = 0xe,
        LTSSM_L0                                = 0x10,
        LTSSM_RX_L0S_ENTRY                      = 0x11,
        LTSSM_RX_L0S_IDLE                       = 0x12,
        LTSSM_RX_L0S_FTS                        = 0x13,
        LTSSM_TX_L0S_ENTRY                      = 0x14,
        LTSSM_TX_L0S_IDLE                       = 0x15,
        LTSSM_TX_L0S_FTS                        = 0x16,
        LTSSM_L1_ENTRY                          = 0x17,
        LTSSM_L1_IDLE                           = 0x18,
        LTSSM_L2_IDLE                           = 0x19,
        LTSSM_L2_TRANSMIT_WAKE                  = 0x1a,
        LTSSM_DISABLED                          = 0x20,
        LTSSM_LOOPBACK_ENTRY_MASTER             = 0x21,
        LTSSM_LOOPBACK_ACTIVE_MASTER            = 0x22,
        LTSSM_LOOPBACK_EXIT_MASTER              = 0x23,
        LTSSM_LOOPBACK_ENTRY_SLAVE              = 0x24,
        LTSSM_LOOPBACK_ACTIVE_SLAVE             = 0x25,
        LTSSM_LOOPBACK_EXIT_SLAVE               = 0x26,
        LTSSM_HOT_RESET                         = 0x27,
        LTSSM_RECOVERY_EQUALIZATION_PHASE0      = 0x28,
        LTSSM_RECOVERY_EQUALIZATION_PHASE1      = 0x29,
        LTSSM_RECOVERY_EQUALIZATION_PHASE2      = 0x2a,
        LTSSM_RECOVERY_EQUALIZATION_PHASE3      = 0x2b,
};
#define VENDOR_ID_REG                           (LMI_BASE_ADDR + 0x44)
/* PCIe core controller registers */
#define CTRL_CORE_BASE_ADDR                     0x18000
#define CTRL_CONFIG_REG                         (CTRL_CORE_BASE_ADDR + 0x0)
#define     CTRL_MODE_SHIFT                     0x0
#define     CTRL_MODE_MASK                      0x1
#define     PCIE_CORE_MODE_DIRECT               0x0
#define     PCIE_CORE_MODE_COMMAND              0x1
/* PCIe Central Interrupts Registers */
#define CENTRAL_INT_BASE_ADDR                   0x1b000
#define HOST_CTRL_INT_STATUS_REG                (CENTRAL_INT_BASE_ADDR + 0x0)
#define HOST_CTRL_INT_MASK_REG                  (CENTRAL_INT_BASE_ADDR + 0x4)
#define     PCIE_IRQ_CMDQ_INT                   BIT(0)
#define     PCIE_IRQ_MSI_STATUS_INT             BIT(1)
#define     PCIE_IRQ_CMD_SENT_DONE              BIT(3)
#define     PCIE_IRQ_DMA_INT                    BIT(4)
#define     PCIE_IRQ_IB_DXFERDONE               BIT(5)
#define     PCIE_IRQ_OB_DXFERDONE               BIT(6)
#define     PCIE_IRQ_OB_RXFERDONE               BIT(7)
#define     PCIE_IRQ_COMPQ_INT                  BIT(12)
#define     PCIE_IRQ_DIR_RD_DDR_DET             BIT(13)
#define     PCIE_IRQ_DIR_WR_DDR_DET             BIT(14)
#define     PCIE_IRQ_CORE_INT                   BIT(16)
#define     PCIE_IRQ_CORE_INT_PIO               BIT(17)
#define     PCIE_IRQ_DPMU_INT                   BIT(18)
#define     PCIE_IRQ_PCIE_MIS_INT               BIT(19)
#define     PCIE_IRQ_MSI_INT1_DET               BIT(20)
#define     PCIE_IRQ_MSI_INT2_DET               BIT(21)
#define     PCIE_IRQ_RC_DBELL_DET               BIT(22)
#define     PCIE_IRQ_EP_STATUS                  BIT(23)
#define     PCIE_IRQ_ALL_MASK                   GENMASK(31, 0)
#define     PCIE_IRQ_ENABLE_INTS_MASK           PCIE_IRQ_CORE_INT
/* Transaction types */
#define PCIE_CONFIG_RD_TYPE0                    0x8
#define PCIE_CONFIG_RD_TYPE1                    0x9
#define PCIE_CONFIG_WR_TYPE0                    0xa
#define PCIE_CONFIG_WR_TYPE1                    0xb

#define PIO_RETRY_CNT                           750000 /* 1.5 s */
#define PIO_RETRY_DELAY                         2 /* 2 us */

#define LINK_WAIT_MAX_RETRIES                   10
#define LINK_WAIT_USLEEP_MIN                    90000
#define LINK_WAIT_USLEEP_MAX                    100000
#define RETRAIN_WAIT_MAX_RETRIES                10
#define RETRAIN_WAIT_USLEEP_US                  2000

#define MSI_IRQ_NUM                             32

#define CFG_RD_CRS_VAL                          0xffff0001
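/*
 * Worked out: PIO_RETRY_CNT * PIO_RETRY_DELAY = 750000 * 2 us = 1.5 s,
 * which is where the "1.5 s" figure above comes from. CFG_RD_CRS_VAL is
 * the read value that PCIe r4.0, sec 2.3.2 mandates for a CRS completion
 * when CRS Software Visibility is enabled: 0x0001 in the Vendor ID field
 * and all-ones in the remaining bytes, i.e. 0xffff0001.
 */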
struct advk_pcie {
        struct platform_device *pdev;
        void __iomem *base;
        struct {
                phys_addr_t match;
                phys_addr_t remap;
                phys_addr_t mask;
                u32 actions;
        } wins[OB_WIN_COUNT];
        u8 wins_count;
        struct irq_domain *irq_domain;
        struct irq_chip irq_chip;
        raw_spinlock_t irq_lock;
        struct irq_domain *msi_domain;
        struct irq_domain *msi_inner_domain;
        raw_spinlock_t msi_irq_lock;
        DECLARE_BITMAP(msi_used, MSI_IRQ_NUM);
        struct mutex msi_used_lock;
        int link_gen;
        struct pci_bridge_emul bridge;
        struct gpio_desc *reset_gpio;
        struct phy *phy;
};
static inline void advk_writel(struct advk_pcie *pcie, u32 val, u64 reg)
{
        writel(val, pcie->base + reg);
}

static inline u32 advk_readl(struct advk_pcie *pcie, u64 reg)
{
        return readl(pcie->base + reg);
}
static u8 advk_pcie_ltssm_state(struct advk_pcie *pcie)
{
        u32 val;
        u8 ltssm_state;

        val = advk_readl(pcie, CFG_REG);
        ltssm_state = (val >> LTSSM_SHIFT) & LTSSM_MASK;
        return ltssm_state;
}
static inline bool advk_pcie_link_up(struct advk_pcie *pcie)
{
        /* check if LTSSM is in normal operation - some L* state */
        u8 ltssm_state = advk_pcie_ltssm_state(pcie);
        return ltssm_state >= LTSSM_L0 && ltssm_state < LTSSM_DISABLED;
}
static inline bool advk_pcie_link_active(struct advk_pcie *pcie)
{
        /*
         * According to PCIe Base specification 3.0, Table 4-14: Link
         * Status Mapped to the LTSSM, and 4.2.6.3.6 Configuration.Idle,
         * Link Up is mapped to the LTSSM Configuration.Idle, Recovery,
         * L0, L0s, L1 and L2 states. And according to 3.2.1. Data Link
         * Control and Management State Machine Rules, DL Up status is
         * reported in the DL Active state.
         */
        u8 ltssm_state = advk_pcie_ltssm_state(pcie);
        return ltssm_state >= LTSSM_CONFIG_IDLE && ltssm_state < LTSSM_DISABLED;
}
static inline bool advk_pcie_link_training(struct advk_pcie *pcie)
{
        /*
         * According to PCIe Base specification 3.0, Table 4-14: Link
         * Status Mapped to the LTSSM, Link Training is mapped to the
         * LTSSM Configuration and Recovery states.
         */
        u8 ltssm_state = advk_pcie_ltssm_state(pcie);
        return ((ltssm_state >= LTSSM_CONFIG_LINKWIDTH_START &&
                 ltssm_state < LTSSM_L0) ||
                (ltssm_state >= LTSSM_RECOVERY_EQUALIZATION_PHASE0 &&
                 ltssm_state <= LTSSM_RECOVERY_EQUALIZATION_PHASE3));
}
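/*
 * Summary of how the three LTSSM helpers above classify the encodings
 * from the enum (follows directly from the comparisons):
 *
 *   LTSSM state                    training  active  up
 *   Detect/Polling   (0x0 - 0x4)   no        no      no
 *   Configuration    (0x5 - 0x9)   yes       no      no
 *   Config.Idle      (0xa)         yes       yes     no
 *   Recovery         (0xb - 0xe)   yes       yes     no
 *   L0/L0s/L1/L2     (0x10 - 0x1a) no        yes     yes
 *   Disabled et al.  (0x20 - 0x27) no        no      no
 *   Recovery EQ      (0x28 - 0x2b) yes       no      no
 */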
static int advk_pcie_wait_for_link(struct advk_pcie *pcie)
{
        int retries;

        /* check if the link is up or not */
        for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
                if (advk_pcie_link_up(pcie))
                        return 0;

                usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);
        }

        return -ETIMEDOUT;
}
static void advk_pcie_wait_for_retrain(struct advk_pcie *pcie)
{
        size_t retries;

        for (retries = 0; retries < RETRAIN_WAIT_MAX_RETRIES; ++retries) {
                if (advk_pcie_link_training(pcie))
                        break;
                udelay(RETRAIN_WAIT_USLEEP_US);
        }
}
static void advk_pcie_issue_perst(struct advk_pcie *pcie)
{
        if (!pcie->reset_gpio)
                return;

        /* 10ms delay is needed for some cards */
        dev_info(&pcie->pdev->dev, "issuing PERST via reset GPIO for 10ms\n");
        gpiod_set_value_cansleep(pcie->reset_gpio, 1);
        usleep_range(10000, 11000);
        gpiod_set_value_cansleep(pcie->reset_gpio, 0);
}
static void advk_pcie_train_link(struct advk_pcie *pcie)
{
        struct device *dev = &pcie->pdev->dev;
        u32 reg;
        int ret;

        /*
         * Setup PCIe rev / gen compliance based on device tree property
         * 'max-link-speed' which also forces maximal link speed.
         */
        reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
        reg &= ~PCIE_GEN_SEL_MSK;
        if (pcie->link_gen == 3)
                reg |= SPEED_GEN_3;
        else if (pcie->link_gen == 2)
                reg |= SPEED_GEN_2;
        else
                reg |= SPEED_GEN_1;
        advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);

        /*
         * Set maximal link speed value also into PCIe Link Control 2 register.
         * Armada 3700 Functional Specification says that default value is based
         * on SPEED_GEN but tests showed that default value is always 8.0 GT/s.
         */
        reg = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + PCI_EXP_LNKCTL2);
        reg &= ~PCI_EXP_LNKCTL2_TLS;
        if (pcie->link_gen == 3)
                reg |= PCI_EXP_LNKCTL2_TLS_8_0GT;
        else if (pcie->link_gen == 2)
                reg |= PCI_EXP_LNKCTL2_TLS_5_0GT;
        else
                reg |= PCI_EXP_LNKCTL2_TLS_2_5GT;
        advk_writel(pcie, reg, PCIE_CORE_PCIEXP_CAP + PCI_EXP_LNKCTL2);

        /* Enable link training after selecting PCIe generation */
        reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
        reg |= LINK_TRAINING_EN;
        advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);

        /*
         * Reset PCIe card via PERST# signal. Some cards are not detected
         * during link training when they are in some non-initial state.
         */
        advk_pcie_issue_perst(pcie);

        /*
         * PERST# signal could have been asserted by pinctrl subsystem before
         * probe() callback has been called or issued explicitly by reset gpio
         * function advk_pcie_issue_perst(), making the endpoint go into
         * fundamental reset. As required by PCI Express spec (PCI Express
         * Base Specification, REV. 4.0 PCI Express, February 19 2014, 6.6.1
         * Conventional Reset) a delay for at least 100ms after such a reset
         * before sending a Configuration Request to the device is needed.
         * So wait until PCIe link is up. Function advk_pcie_wait_for_link()
         * waits for link at least 900ms.
         */
        ret = advk_pcie_wait_for_link(pcie);
        if (ret < 0)
                dev_err(dev, "link never came up\n");
        else
                dev_info(dev, "link up\n");
}
/*
 * Set PCIe address window register which could be used for memory
 * mapping.
 */
static void advk_pcie_set_ob_win(struct advk_pcie *pcie, u8 win_num,
                                 phys_addr_t match, phys_addr_t remap,
                                 phys_addr_t mask, u32 actions)
{
        advk_writel(pcie, OB_WIN_ENABLE |
                          lower_32_bits(match), OB_WIN_MATCH_LS(win_num));
        advk_writel(pcie, upper_32_bits(match), OB_WIN_MATCH_MS(win_num));
        advk_writel(pcie, lower_32_bits(remap), OB_WIN_REMAP_LS(win_num));
        advk_writel(pcie, upper_32_bits(remap), OB_WIN_REMAP_MS(win_num));
        advk_writel(pcie, lower_32_bits(mask), OB_WIN_MASK_LS(win_num));
        advk_writel(pcie, upper_32_bits(mask), OB_WIN_MASK_MS(win_num));
        advk_writel(pcie, actions, OB_WIN_ACTIONS(win_num));
}
static void advk_pcie_disable_ob_win(struct advk_pcie *pcie, u8 win_num)
{
        advk_writel(pcie, 0, OB_WIN_MATCH_LS(win_num));
        advk_writel(pcie, 0, OB_WIN_MATCH_MS(win_num));
        advk_writel(pcie, 0, OB_WIN_REMAP_LS(win_num));
        advk_writel(pcie, 0, OB_WIN_REMAP_MS(win_num));
        advk_writel(pcie, 0, OB_WIN_MASK_LS(win_num));
        advk_writel(pcie, 0, OB_WIN_MASK_MS(win_num));
        advk_writel(pcie, 0, OB_WIN_ACTIONS(win_num));
}
static void advk_pcie_setup_hw(struct advk_pcie *pcie)
{
        phys_addr_t msi_addr;
        u32 reg;
        int i;

        /*
         * Configure PCIe Reference clock. Direction is from the PCIe
         * controller to the endpoint card, so enable transmitting of
         * Reference clock differential signal off-chip and disable
         * receiving off-chip differential signal.
         */
        reg = advk_readl(pcie, PCIE_CORE_REF_CLK_REG);
        reg |= PCIE_CORE_REF_CLK_TX_ENABLE;
        reg &= ~PCIE_CORE_REF_CLK_RX_ENABLE;
        advk_writel(pcie, reg, PCIE_CORE_REF_CLK_REG);

        /* Set to Direct mode */
        reg = advk_readl(pcie, CTRL_CONFIG_REG);
        reg &= ~(CTRL_MODE_MASK << CTRL_MODE_SHIFT);
        reg |= ((PCIE_CORE_MODE_DIRECT & CTRL_MODE_MASK) << CTRL_MODE_SHIFT);
        advk_writel(pcie, reg, CTRL_CONFIG_REG);

        /* Set PCI global control register to RC mode */
        reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
        reg |= (IS_RC_MSK << IS_RC_SHIFT);
        advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);

        /*
         * Replace incorrect PCI vendor id value 0x1b4b by correct value 0x11ab.
         * VENDOR_ID_REG contains vendor id in low 16 bits and subsystem vendor
         * id in high 16 bits. Updating this register changes readback value of
         * read-only vendor id bits in PCIE_CORE_DEV_ID_REG register. Workaround
         * for erratum 4.1: "The value of device and vendor ID is incorrect".
         */
        reg = (PCI_VENDOR_ID_MARVELL << 16) | PCI_VENDOR_ID_MARVELL;
        advk_writel(pcie, reg, VENDOR_ID_REG);

        /*
         * Change Class Code of PCI Bridge device to PCI Bridge (0x600400),
         * because the default value is Mass storage controller (0x010400).
         *
         * Note that this Aardvark PCI Bridge does not have compliant Type 1
         * Configuration Space and it even cannot be accessed via Aardvark's
         * PCI config space access method. Something like config space is
         * available in internal Aardvark registers starting at offset 0x0
         * and is reported as Type 0. In range 0x10 - 0x34 it has totally
         * different registers.
         *
         * Therefore driver uses emulation of PCI Bridge which emulates
         * access to configuration space via internal Aardvark registers or
         * emulated configuration buffer.
         */
        reg = advk_readl(pcie, PCIE_CORE_DEV_REV_REG);
        reg &= ~0xffffff00;
        reg |= (PCI_CLASS_BRIDGE_PCI << 8) << 8;
        advk_writel(pcie, reg, PCIE_CORE_DEV_REV_REG);

        /* Disable Root Bridge I/O space, memory space and bus mastering */
        reg = advk_readl(pcie, PCIE_CORE_CMD_STATUS_REG);
        reg &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
        advk_writel(pcie, reg, PCIE_CORE_CMD_STATUS_REG);

        /* Set Advanced Error Capabilities and Control PF0 register */
        reg = PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX |
              PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX_EN |
              PCIE_CORE_ERR_CAPCTL_ECRC_CHCK |
              PCIE_CORE_ERR_CAPCTL_ECRC_CHCK_RCV;
        advk_writel(pcie, reg, PCIE_CORE_ERR_CAPCTL_REG);

        /* Set PCIe Device Control register */
        reg = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + PCI_EXP_DEVCTL);
        reg &= ~PCI_EXP_DEVCTL_RELAX_EN;
        reg &= ~PCI_EXP_DEVCTL_NOSNOOP_EN;
        reg &= ~PCI_EXP_DEVCTL_PAYLOAD;
        reg &= ~PCI_EXP_DEVCTL_READRQ;
        reg |= PCI_EXP_DEVCTL_PAYLOAD_512B;
        reg |= PCI_EXP_DEVCTL_READRQ_512B;
        advk_writel(pcie, reg, PCIE_CORE_PCIEXP_CAP + PCI_EXP_DEVCTL);

        /* Program PCIe Control 2 to disable strict ordering */
        reg = PCIE_CORE_CTRL2_RESERVED |
              PCIE_CORE_CTRL2_TD_ENABLE;
        advk_writel(pcie, reg, PCIE_CORE_CTRL2_REG);

        /* Set lane X1 */
        reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
        reg &= ~LANE_CNT_MSK;
        reg |= LANE_COUNT_1;
        advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);

        /* Set MSI address */
        msi_addr = virt_to_phys(pcie);
        advk_writel(pcie, lower_32_bits(msi_addr), PCIE_MSI_ADDR_LOW_REG);
        advk_writel(pcie, upper_32_bits(msi_addr), PCIE_MSI_ADDR_HIGH_REG);

        /* Enable MSI */
        reg = advk_readl(pcie, PCIE_CORE_CTRL2_REG);
        reg |= PCIE_CORE_CTRL2_MSI_ENABLE;
        advk_writel(pcie, reg, PCIE_CORE_CTRL2_REG);

        /* Clear all interrupts */
        advk_writel(pcie, PCIE_MSI_ALL_MASK, PCIE_MSI_STATUS_REG);
        advk_writel(pcie, PCIE_ISR0_ALL_MASK, PCIE_ISR0_REG);
        advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_REG);
        advk_writel(pcie, PCIE_IRQ_ALL_MASK, HOST_CTRL_INT_STATUS_REG);

        /* Disable All ISR0/1 and MSI Sources */
        advk_writel(pcie, PCIE_ISR0_ALL_MASK, PCIE_ISR0_MASK_REG);
        advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_MASK_REG);
        advk_writel(pcie, PCIE_MSI_ALL_MASK, PCIE_MSI_MASK_REG);

        /* Unmask summary MSI interrupt */
        reg = advk_readl(pcie, PCIE_ISR0_MASK_REG);
        reg &= ~PCIE_ISR0_MSI_INT_PENDING;
        advk_writel(pcie, reg, PCIE_ISR0_MASK_REG);

        /* Unmask PME interrupt for processing of PME requester */
        reg = advk_readl(pcie, PCIE_ISR0_MASK_REG);
        reg &= ~PCIE_MSG_PM_PME_MASK;
        advk_writel(pcie, reg, PCIE_ISR0_MASK_REG);

        /* Enable summary interrupt for GIC SPI source */
        reg = PCIE_IRQ_ALL_MASK & (~PCIE_IRQ_ENABLE_INTS_MASK);
        advk_writel(pcie, reg, HOST_CTRL_INT_MASK_REG);

        /*
         * Enable AXI address window location generation:
         * When it is enabled, the default outbound window
         * configurations (Default User Field: 0xD0074CFC)
         * are used for transparent address translation of
         * the outbound transactions. Thus, PCIe address
         * windows are not required for transparent memory
         * access when default outbound window configuration
         * is set for memory access.
         */
        reg = advk_readl(pcie, PCIE_CORE_CTRL2_REG);
        reg |= PCIE_CORE_CTRL2_OB_WIN_ENABLE;
        advk_writel(pcie, reg, PCIE_CORE_CTRL2_REG);

        /*
         * Set memory access in Default User Field so it
         * is not required to configure PCIe address for
         * transparent memory access.
         */
        advk_writel(pcie, OB_WIN_TYPE_MEM, OB_WIN_DEFAULT_ACTIONS);

        /*
         * Bypass the address window mapping for PIO:
         * Since PIO access already contains all required
         * info over AXI interface by PIO registers, the
         * address window is not required.
         */
        reg = advk_readl(pcie, PIO_CTRL);
        reg |= PIO_CTRL_ADDR_WIN_DISABLE;
        advk_writel(pcie, reg, PIO_CTRL);

        /*
         * Configure PCIe address windows for non-memory or
         * non-transparent access as by default PCIe uses
         * transparent memory access.
         */
        for (i = 0; i < pcie->wins_count; i++)
                advk_pcie_set_ob_win(pcie, i,
                                     pcie->wins[i].match, pcie->wins[i].remap,
                                     pcie->wins[i].mask, pcie->wins[i].actions);

        /* Disable remaining PCIe outbound windows */
        for (i = pcie->wins_count; i < OB_WIN_COUNT; i++)
                advk_pcie_disable_ob_win(pcie, i);

        advk_pcie_train_link(pcie);
}
static int advk_pcie_check_pio_status(struct advk_pcie *pcie, bool allow_crs, u32 *val)
{
        struct device *dev = &pcie->pdev->dev;
        u32 reg;
        unsigned int status;
        char *strcomp_status, *str_posted;
        int ret;

        reg = advk_readl(pcie, PIO_STAT);
        status = (reg & PIO_COMPLETION_STATUS_MASK) >>
                PIO_COMPLETION_STATUS_SHIFT;

        /*
         * According to HW spec, the PIO status check sequence as below:
         * 1) even if COMPLETION_STATUS(bit9:7) indicates successful,
         *    it still needs to check Error Status(bit11), only when this bit
         *    indicates no error happen, the operation is successful.
         * 2) value Unsupported Request(1) of COMPLETION_STATUS(bit9:7) only
         *    means a PIO write error, and for PIO read it is successful with
         *    a read value of 0xFFFFFFFF.
         * 3) value Completion Retry Status(CRS) of COMPLETION_STATUS(bit9:7)
         *    only means a PIO write error, and for PIO read it is successful
         *    with a read value of 0xFFFF0001.
         * 4) value Completer Abort (CA) of COMPLETION_STATUS(bit9:7) means
         *    error for both PIO read and PIO write operation.
         * 5) other errors are indicated as 'unknown'.
         */
        switch (status) {
        case PIO_COMPLETION_STATUS_OK:
                if (reg & PIO_ERR_STATUS) {
                        strcomp_status = "COMP_ERR";
                        ret = -EFAULT;
                        break;
                }
                /* Get the read result */
                if (val)
                        *val = advk_readl(pcie, PIO_RD_DATA);
                /* No error */
                strcomp_status = NULL;
                ret = 0;
                break;
        case PIO_COMPLETION_STATUS_UR:
                strcomp_status = "UR";
                ret = -EOPNOTSUPP;
                break;
        case PIO_COMPLETION_STATUS_CRS:
                if (allow_crs && val) {
                        /* PCIe r4.0, sec 2.3.2, says:
                         * If CRS Software Visibility is enabled:
                         * For a Configuration Read Request that includes both
                         * bytes of the Vendor ID field of a device Function's
                         * Configuration Space Header, the Root Complex must
                         * complete the Request to the host by returning a
                         * read-data value of 0001h for the Vendor ID field and
                         * all '1's for any additional bytes included in the
                         * request.
                         *
                         * So CRS in this case is not an error status.
                         */
                        *val = CFG_RD_CRS_VAL;
                        strcomp_status = NULL;
                        ret = 0;
                        break;
                }
                /* PCIe r4.0, sec 2.3.2, says:
                 * If CRS Software Visibility is not enabled, the Root Complex
                 * must re-issue the Configuration Request as a new Request.
                 * If CRS Software Visibility is enabled: For a Configuration
                 * Write Request or for any other Configuration Read Request,
                 * the Root Complex must re-issue the Configuration Request as
                 * a new Request.
                 * A Root Complex implementation may choose to limit the number
                 * of Configuration Request/CRS Completion Status loops before
                 * determining that something is wrong with the target of the
                 * Request and taking appropriate action, e.g., complete the
                 * Request to the host as a failed transaction.
                 *
                 * So return -EAGAIN and caller (pci-aardvark.c driver) will
                 * re-issue request again up to the PIO_RETRY_CNT retries.
                 */
                strcomp_status = "CRS";
                ret = -EAGAIN;
                break;
        case PIO_COMPLETION_STATUS_CA:
                strcomp_status = "CA";
                ret = -ECANCELED;
                break;
        default:
                strcomp_status = "Unknown";
                ret = -EINVAL;
                break;
        }

        if (!strcomp_status)
                return ret;

        if (reg & PIO_NON_POSTED_REQ)
                str_posted = "Non-posted";
        else
                str_posted = "Posted";

        dev_dbg(dev, "%s PIO Response Status: %s, %#x @ %#x\n",
                str_posted, strcomp_status, reg, advk_readl(pcie, PIO_ADDR_LS));

        return ret;
}
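/*
 * Callers wrap the whole PIO sequence in a retry loop keyed on the
 * -EAGAIN return above; the pattern used by advk_pcie_rd_conf() and
 * advk_pcie_wr_conf() below is:
 *
 *     do {
 *             advk_writel(pcie, 1, PIO_ISR);
 *             advk_writel(pcie, 1, PIO_START);
 *             ret = advk_pcie_wait_pio(pcie);
 *             ...
 *             ret = advk_pcie_check_pio_status(pcie, allow_crs, val);
 *     } while (ret == -EAGAIN && retry_count < PIO_RETRY_CNT);
 */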
static int advk_pcie_wait_pio(struct advk_pcie *pcie)
{
        struct device *dev = &pcie->pdev->dev;
        int i;

        for (i = 1; i <= PIO_RETRY_CNT; i++) {
                u32 start, isr;

                start = advk_readl(pcie, PIO_START);
                isr = advk_readl(pcie, PIO_ISR);
                if (!start && isr)
                        return i;
                udelay(PIO_RETRY_DELAY);
        }

        dev_err(dev, "PIO read/write transfer time out\n");
        return -ETIMEDOUT;
}
static pci_bridge_emul_read_status_t
advk_pci_bridge_emul_base_conf_read(struct pci_bridge_emul *bridge,
                                    int reg, u32 *value)
{
        struct advk_pcie *pcie = bridge->data;

        switch (reg) {
        case PCI_COMMAND:
                *value = advk_readl(pcie, PCIE_CORE_CMD_STATUS_REG);
                return PCI_BRIDGE_EMUL_HANDLED;

        case PCI_INTERRUPT_LINE: {
                /*
                 * From the whole 32bit register we support reading from HW only
                 * two bits: PCI_BRIDGE_CTL_BUS_RESET and PCI_BRIDGE_CTL_SERR.
                 * Other bits are retrieved only from emulated config buffer.
                 */
                __le32 *cfgspace = (__le32 *)&bridge->conf;
                u32 val = le32_to_cpu(cfgspace[PCI_INTERRUPT_LINE / 4]);
                if (advk_readl(pcie, PCIE_ISR0_MASK_REG) & PCIE_ISR0_ERR_MASK)
                        val &= ~(PCI_BRIDGE_CTL_SERR << 16);
                else
                        val |= PCI_BRIDGE_CTL_SERR << 16;
                if (advk_readl(pcie, PCIE_CORE_CTRL1_REG) & HOT_RESET_GEN)
                        val |= PCI_BRIDGE_CTL_BUS_RESET << 16;
                else
                        val &= ~(PCI_BRIDGE_CTL_BUS_RESET << 16);
                *value = val;
                return PCI_BRIDGE_EMUL_HANDLED;
        }

        default:
                return PCI_BRIDGE_EMUL_NOT_HANDLED;
        }
}
static void
advk_pci_bridge_emul_base_conf_write(struct pci_bridge_emul *bridge,
                                     int reg, u32 old, u32 new, u32 mask)
{
        struct advk_pcie *pcie = bridge->data;

        switch (reg) {
        case PCI_COMMAND:
                advk_writel(pcie, new, PCIE_CORE_CMD_STATUS_REG);
                break;

        case PCI_INTERRUPT_LINE:
                /*
                 * According to Figure 6-3: Pseudo Logic Diagram for Error
                 * Message Controls in PCIe base specification, the SERR# Enable
                 * bit in the Bridge Control register enables receiving of
                 * ERR_* messages.
                 */
                if (mask & (PCI_BRIDGE_CTL_SERR << 16)) {
                        u32 val = advk_readl(pcie, PCIE_ISR0_MASK_REG);
                        if (new & (PCI_BRIDGE_CTL_SERR << 16))
                                val &= ~PCIE_ISR0_ERR_MASK;
                        else
                                val |= PCIE_ISR0_ERR_MASK;
                        advk_writel(pcie, val, PCIE_ISR0_MASK_REG);
                }
                if (mask & (PCI_BRIDGE_CTL_BUS_RESET << 16)) {
                        u32 val = advk_readl(pcie, PCIE_CORE_CTRL1_REG);
                        if (new & (PCI_BRIDGE_CTL_BUS_RESET << 16))
                                val |= HOT_RESET_GEN;
                        else
                                val &= ~HOT_RESET_GEN;
                        advk_writel(pcie, val, PCIE_CORE_CTRL1_REG);
                }
                break;

        default:
                break;
        }
}
static pci_bridge_emul_read_status_t
advk_pci_bridge_emul_pcie_conf_read(struct pci_bridge_emul *bridge,
                                    int reg, u32 *value)
{
        struct advk_pcie *pcie = bridge->data;

        switch (reg) {
        case PCI_EXP_SLTCTL:
                *value = PCI_EXP_SLTSTA_PDS << 16;
                return PCI_BRIDGE_EMUL_HANDLED;

        /*
         * PCI_EXP_RTCTL and PCI_EXP_RTSTA are also supported, but do not need
         * to be handled here, because their values are stored in emulated
         * config space buffer, and we read them from there when needed.
         */

        case PCI_EXP_LNKCAP: {
                u32 val = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + reg);
                /*
                 * PCI_EXP_LNKCAP_DLLLARC bit is hardwired in aardvark HW to 0.
                 * But support for PCI_EXP_LNKSTA_DLLLA is emulated via ltssm
                 * state so explicitly enable PCI_EXP_LNKCAP_DLLLARC flag.
                 */
                val |= PCI_EXP_LNKCAP_DLLLARC;
                *value = val;
                return PCI_BRIDGE_EMUL_HANDLED;
        }

        case PCI_EXP_LNKCTL: {
                /* u32 contains both PCI_EXP_LNKCTL and PCI_EXP_LNKSTA */
                u32 val = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + reg) &
                          ~(PCI_EXP_LNKSTA_LT << 16);
                if (advk_pcie_link_training(pcie))
                        val |= (PCI_EXP_LNKSTA_LT << 16);
                if (advk_pcie_link_active(pcie))
                        val |= (PCI_EXP_LNKSTA_DLLLA << 16);
                *value = val;
                return PCI_BRIDGE_EMUL_HANDLED;
        }

        case PCI_EXP_DEVCAP:
        case PCI_EXP_DEVCTL:
        case PCI_EXP_DEVCAP2:
        case PCI_EXP_DEVCTL2:
        case PCI_EXP_LNKCAP2:
        case PCI_EXP_LNKCTL2:
                *value = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + reg);
                return PCI_BRIDGE_EMUL_HANDLED;

        default:
                return PCI_BRIDGE_EMUL_NOT_HANDLED;
        }
}
static void
advk_pci_bridge_emul_pcie_conf_write(struct pci_bridge_emul *bridge,
                                     int reg, u32 old, u32 new, u32 mask)
{
        struct advk_pcie *pcie = bridge->data;

        switch (reg) {
        case PCI_EXP_LNKCTL:
                advk_writel(pcie, new, PCIE_CORE_PCIEXP_CAP + reg);
                if (new & PCI_EXP_LNKCTL_RL)
                        advk_pcie_wait_for_retrain(pcie);
                break;

        case PCI_EXP_RTCTL: {
                u16 rootctl = le16_to_cpu(bridge->pcie_conf.rootctl);
                /* Only emulation of PMEIE and CRSSVE bits is provided */
                rootctl &= PCI_EXP_RTCTL_PMEIE | PCI_EXP_RTCTL_CRSSVE;
                bridge->pcie_conf.rootctl = cpu_to_le16(rootctl);
                break;
        }

        /*
         * PCI_EXP_RTSTA is also supported, but does not need to be handled
         * here, because its value is stored in emulated config space buffer,
         * and we write it there when needed.
         */

        case PCI_EXP_DEVCTL:
        case PCI_EXP_DEVCTL2:
        case PCI_EXP_LNKCTL2:
                advk_writel(pcie, new, PCIE_CORE_PCIEXP_CAP + reg);
                break;

        default:
                break;
        }
}
static struct pci_bridge_emul_ops advk_pci_bridge_emul_ops = {
        .read_base = advk_pci_bridge_emul_base_conf_read,
        .write_base = advk_pci_bridge_emul_base_conf_write,
        .read_pcie = advk_pci_bridge_emul_pcie_conf_read,
        .write_pcie = advk_pci_bridge_emul_pcie_conf_write,
};
/*
 * Initialize the configuration space of the PCI-to-PCI bridge
 * associated with the given PCIe interface.
 */
static int advk_sw_pci_bridge_init(struct advk_pcie *pcie)
{
        struct pci_bridge_emul *bridge = &pcie->bridge;

        bridge->conf.vendor =
                cpu_to_le16(advk_readl(pcie, PCIE_CORE_DEV_ID_REG) & 0xffff);
        bridge->conf.device =
                cpu_to_le16(advk_readl(pcie, PCIE_CORE_DEV_ID_REG) >> 16);
        bridge->conf.class_revision =
                cpu_to_le32(advk_readl(pcie, PCIE_CORE_DEV_REV_REG) & 0xff);

        /* Support 32 bits I/O addressing */
        bridge->conf.iobase = PCI_IO_RANGE_TYPE_32;
        bridge->conf.iolimit = PCI_IO_RANGE_TYPE_32;

        /* Support 64 bits memory pref */
        bridge->conf.pref_mem_base = cpu_to_le16(PCI_PREF_RANGE_TYPE_64);
        bridge->conf.pref_mem_limit = cpu_to_le16(PCI_PREF_RANGE_TYPE_64);

        /* Support interrupt A for MSI feature */
        bridge->conf.intpin = PCI_INTERRUPT_INTA;

        /* Aardvark HW provides PCIe Capability structure in version 2 */
        bridge->pcie_conf.cap = cpu_to_le16(2);

        /* Indicate support for Completion Retry Status */
        bridge->pcie_conf.rootcap = cpu_to_le16(PCI_EXP_RTCAP_CRSVIS);

        bridge->has_pcie = true;
        bridge->data = pcie;
        bridge->ops = &advk_pci_bridge_emul_ops;

        return pci_bridge_emul_init(bridge, 0);
}
static bool advk_pcie_valid_device(struct advk_pcie *pcie, struct pci_bus *bus,
                                   int devfn)
{
        if (pci_is_root_bus(bus) && PCI_SLOT(devfn) != 0)
                return false;

        /*
         * If the link goes down after we check for link-up, nothing bad
         * happens but the config access times out.
         */
        if (!pci_is_root_bus(bus) && !advk_pcie_link_up(pcie))
                return false;

        return true;
}
static bool advk_pcie_pio_is_running(struct advk_pcie *pcie)
{
        struct device *dev = &pcie->pdev->dev;

        /*
         * Trying to start a new PIO transfer when a previous one has not
         * completed causes an External Abort on the CPU, which results in a
         * kernel panic:
         *
         *     SError Interrupt on CPU0, code 0xbf000002 -- SError
         *     Kernel panic - not syncing: Asynchronous SError Interrupt
         *
         * Functions advk_pcie_rd_conf() and advk_pcie_wr_conf() are protected
         * by raw_spin_lock_irqsave() at pci_lock_config() level to prevent
         * concurrent calls at the same time. But because PIO transfer may take
         * about 1.5s when link is down or card is disconnected, it means that
         * advk_pcie_wait_pio() does not always have to wait for completion.
         *
         * Some versions of ARM Trusted Firmware handle this External Abort at
         * EL3 level and mask it to prevent kernel panic. Relevant TF-A commit:
         * https://git.trustedfirmware.org/TF-A/trusted-firmware-a.git/commit/?id=3c7dcdac5c50
         */
        if (advk_readl(pcie, PIO_START)) {
                dev_err(dev, "Previous PIO read/write transfer is still running\n");
                return true;
        }

        return false;
}
static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
                             int where, int size, u32 *val)
{
        struct advk_pcie *pcie = bus->sysdata;
        int retry_count;
        bool allow_crs;
        u32 reg;
        int ret;

        if (!advk_pcie_valid_device(pcie, bus, devfn)) {
                *val = 0xffffffff;
                return PCIBIOS_DEVICE_NOT_FOUND;
        }

        if (pci_is_root_bus(bus))
                return pci_bridge_emul_conf_read(&pcie->bridge, where,
                                                 size, val);

        /*
         * Completion Retry Status is possible to return only when reading all
         * 4 bytes from PCI_VENDOR_ID and PCI_DEVICE_ID registers at once and
         * CRSSVE flag on Root Bridge is enabled.
         */
        allow_crs = (where == PCI_VENDOR_ID) && (size == 4) &&
                    (le16_to_cpu(pcie->bridge.pcie_conf.rootctl) &
                     PCI_EXP_RTCTL_CRSSVE);

        if (advk_pcie_pio_is_running(pcie))
                goto try_crs;

        /* Program the control register */
        reg = advk_readl(pcie, PIO_CTRL);
        reg &= ~PIO_CTRL_TYPE_MASK;
        if (pci_is_root_bus(bus->parent))
                reg |= PCIE_CONFIG_RD_TYPE0;
        else
                reg |= PCIE_CONFIG_RD_TYPE1;
        advk_writel(pcie, reg, PIO_CTRL);

        /* Program the address registers */
        reg = ALIGN_DOWN(PCIE_ECAM_OFFSET(bus->number, devfn, where), 4);
        advk_writel(pcie, reg, PIO_ADDR_LS);
        advk_writel(pcie, 0, PIO_ADDR_MS);

        /* Program the data strobe */
        advk_writel(pcie, 0xf, PIO_WR_DATA_STRB);

        retry_count = 0;
        do {
                /* Clear PIO DONE ISR and start the transfer */
                advk_writel(pcie, 1, PIO_ISR);
                advk_writel(pcie, 1, PIO_START);

                ret = advk_pcie_wait_pio(pcie);
                if (ret < 0)
                        goto try_crs;

                retry_count += ret;

                /* Check PIO status and get the read result */
                ret = advk_pcie_check_pio_status(pcie, allow_crs, val);
        } while (ret == -EAGAIN && retry_count < PIO_RETRY_CNT);

        if (ret < 0)
                goto fail;

        if (size == 1)
                *val = (*val >> (8 * (where & 3))) & 0xff;
        else if (size == 2)
                *val = (*val >> (8 * (where & 3))) & 0xffff;

        return PCIBIOS_SUCCESSFUL;

try_crs:
        /*
         * If it is possible, return Completion Retry Status so that caller
         * tries to issue the request again instead of failing.
         */
        if (allow_crs) {
                *val = CFG_RD_CRS_VAL;
                return PCIBIOS_SUCCESSFUL;
        }

fail:
        *val = 0xffffffff;
        return PCIBIOS_SET_FAILED;
}
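/*
 * Worked example of the sub-dword extraction above: a 2-byte read at
 * where = 0x2e is issued as a 4-byte PIO read of the dword at 0x2c; the
 * result is then shifted right by 8 * (0x2e & 3) = 16 bits and masked
 * with 0xffff to recover the requested 16-bit value.
 */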
static int advk_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
                             int where, int size, u32 val)
{
        struct advk_pcie *pcie = bus->sysdata;
        u32 reg;
        u32 data_strobe = 0x0;
        int retry_count;
        int offset;
        int ret;

        if (!advk_pcie_valid_device(pcie, bus, devfn))
                return PCIBIOS_DEVICE_NOT_FOUND;

        if (pci_is_root_bus(bus))
                return pci_bridge_emul_conf_write(&pcie->bridge, where,
                                                  size, val);

        if (where % size)
                return PCIBIOS_SET_FAILED;

        if (advk_pcie_pio_is_running(pcie))
                return PCIBIOS_SET_FAILED;

        /* Program the control register */
        reg = advk_readl(pcie, PIO_CTRL);
        reg &= ~PIO_CTRL_TYPE_MASK;
        if (pci_is_root_bus(bus->parent))
                reg |= PCIE_CONFIG_WR_TYPE0;
        else
                reg |= PCIE_CONFIG_WR_TYPE1;
        advk_writel(pcie, reg, PIO_CTRL);

        /* Program the address registers */
        reg = ALIGN_DOWN(PCIE_ECAM_OFFSET(bus->number, devfn, where), 4);
        advk_writel(pcie, reg, PIO_ADDR_LS);
        advk_writel(pcie, 0, PIO_ADDR_MS);

        /* Calculate the write strobe */
        offset = where & 0x3;
        reg = val << (8 * offset);
        data_strobe = GENMASK(size - 1, 0) << offset;

        /* Program the data register */
        advk_writel(pcie, reg, PIO_WR_DATA);

        /* Program the data strobe */
        advk_writel(pcie, data_strobe, PIO_WR_DATA_STRB);

        retry_count = 0;
        do {
                /* Clear PIO DONE ISR and start the transfer */
                advk_writel(pcie, 1, PIO_ISR);
                advk_writel(pcie, 1, PIO_START);

                ret = advk_pcie_wait_pio(pcie);
                if (ret < 0)
                        return PCIBIOS_SET_FAILED;

                retry_count += ret;

                ret = advk_pcie_check_pio_status(pcie, false, NULL);
        } while (ret == -EAGAIN && retry_count < PIO_RETRY_CNT);

        return ret < 0 ? PCIBIOS_SET_FAILED : PCIBIOS_SUCCESSFUL;
}
static struct pci_ops advk_pcie_ops = {
        .read = advk_pcie_rd_conf,
        .write = advk_pcie_wr_conf,
};
static void advk_msi_irq_compose_msi_msg(struct irq_data *data,
                                         struct msi_msg *msg)
{
        struct advk_pcie *pcie = irq_data_get_irq_chip_data(data);
        phys_addr_t msi_addr = virt_to_phys(pcie);

        msg->address_lo = lower_32_bits(msi_addr);
        msg->address_hi = upper_32_bits(msi_addr);
        msg->data = data->hwirq;
}

static int advk_msi_set_affinity(struct irq_data *irq_data,
                                 const struct cpumask *mask, bool force)
{
        return -EINVAL;
}
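/*
 * The MSI doorbell address handed out above is simply virt_to_phys(pcie),
 * matching what advk_pcie_setup_hw() programmed into PCIE_MSI_ADDR_LOW/HIGH.
 * The controller matches inbound memory writes against that address and
 * latches the written value (the hwirq number) into its MSI status logic,
 * so the backing memory is not expected to actually be written; any unique
 * DMA-addressable location would do as the doorbell.
 */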
static void advk_msi_irq_mask(struct irq_data *d)
{
        struct advk_pcie *pcie = d->domain->host_data;
        irq_hw_number_t hwirq = irqd_to_hwirq(d);
        unsigned long flags;
        u32 mask;

        raw_spin_lock_irqsave(&pcie->msi_irq_lock, flags);
        mask = advk_readl(pcie, PCIE_MSI_MASK_REG);
        mask |= BIT(hwirq);
        advk_writel(pcie, mask, PCIE_MSI_MASK_REG);
        raw_spin_unlock_irqrestore(&pcie->msi_irq_lock, flags);
}

static void advk_msi_irq_unmask(struct irq_data *d)
{
        struct advk_pcie *pcie = d->domain->host_data;
        irq_hw_number_t hwirq = irqd_to_hwirq(d);
        unsigned long flags;
        u32 mask;

        raw_spin_lock_irqsave(&pcie->msi_irq_lock, flags);
        mask = advk_readl(pcie, PCIE_MSI_MASK_REG);
        mask &= ~BIT(hwirq);
        advk_writel(pcie, mask, PCIE_MSI_MASK_REG);
        raw_spin_unlock_irqrestore(&pcie->msi_irq_lock, flags);
}

static void advk_msi_top_irq_mask(struct irq_data *d)
{
        pci_msi_mask_irq(d);
        irq_chip_mask_parent(d);
}

static void advk_msi_top_irq_unmask(struct irq_data *d)
{
        pci_msi_unmask_irq(d);
        irq_chip_unmask_parent(d);
}
static struct irq_chip advk_msi_bottom_irq_chip = {
        .name                   = "MSI",
        .irq_compose_msi_msg    = advk_msi_irq_compose_msi_msg,
        .irq_set_affinity       = advk_msi_set_affinity,
        .irq_mask               = advk_msi_irq_mask,
        .irq_unmask             = advk_msi_irq_unmask,
};
static int advk_msi_irq_domain_alloc(struct irq_domain *domain,
                                     unsigned int virq,
                                     unsigned int nr_irqs, void *args)
{
        struct advk_pcie *pcie = domain->host_data;
        int hwirq, i;

        mutex_lock(&pcie->msi_used_lock);
        hwirq = bitmap_find_free_region(pcie->msi_used, MSI_IRQ_NUM,
                                        order_base_2(nr_irqs));
        mutex_unlock(&pcie->msi_used_lock);
        if (hwirq < 0)
                return -ENOSPC;

        for (i = 0; i < nr_irqs; i++)
                irq_domain_set_info(domain, virq + i, hwirq + i,
                                    &advk_msi_bottom_irq_chip,
                                    domain->host_data, handle_simple_irq,
                                    NULL, NULL);

        return 0;
}

static void advk_msi_irq_domain_free(struct irq_domain *domain,
                                     unsigned int virq, unsigned int nr_irqs)
{
        struct irq_data *d = irq_domain_get_irq_data(domain, virq);
        struct advk_pcie *pcie = domain->host_data;

        mutex_lock(&pcie->msi_used_lock);
        bitmap_release_region(pcie->msi_used, d->hwirq, order_base_2(nr_irqs));
        mutex_unlock(&pcie->msi_used_lock);
}
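/*
 * bitmap_find_free_region() above allocates a naturally aligned
 * power-of-two block of hwirqs (order_base_2(nr_irqs)). This is what
 * MSI_FLAG_MULTI_PCI_MSI requires: a device granted N vectors encodes the
 * vector index in the low log2(N) bits of the MSI data, so the hwirq block
 * must be contiguous and aligned on its own size.
 */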
static const struct irq_domain_ops advk_msi_domain_ops = {
        .alloc = advk_msi_irq_domain_alloc,
        .free = advk_msi_irq_domain_free,
};
static void advk_pcie_irq_mask(struct irq_data *d)
{
        struct advk_pcie *pcie = d->domain->host_data;
        irq_hw_number_t hwirq = irqd_to_hwirq(d);
        unsigned long flags;
        u32 mask;

        raw_spin_lock_irqsave(&pcie->irq_lock, flags);
        mask = advk_readl(pcie, PCIE_ISR1_MASK_REG);
        mask |= PCIE_ISR1_INTX_ASSERT(hwirq);
        advk_writel(pcie, mask, PCIE_ISR1_MASK_REG);
        raw_spin_unlock_irqrestore(&pcie->irq_lock, flags);
}

static void advk_pcie_irq_unmask(struct irq_data *d)
{
        struct advk_pcie *pcie = d->domain->host_data;
        irq_hw_number_t hwirq = irqd_to_hwirq(d);
        unsigned long flags;
        u32 mask;

        raw_spin_lock_irqsave(&pcie->irq_lock, flags);
        mask = advk_readl(pcie, PCIE_ISR1_MASK_REG);
        mask &= ~PCIE_ISR1_INTX_ASSERT(hwirq);
        advk_writel(pcie, mask, PCIE_ISR1_MASK_REG);
        raw_spin_unlock_irqrestore(&pcie->irq_lock, flags);
}
static int advk_pcie_irq_map(struct irq_domain *h,
                             unsigned int virq, irq_hw_number_t hwirq)
{
        struct advk_pcie *pcie = h->host_data;

        advk_pcie_irq_mask(irq_get_irq_data(virq));
        irq_set_status_flags(virq, IRQ_LEVEL);
        irq_set_chip_and_handler(virq, &pcie->irq_chip,
                                 handle_level_irq);
        irq_set_chip_data(virq, pcie);

        return 0;
}

static const struct irq_domain_ops advk_pcie_irq_domain_ops = {
        .map = advk_pcie_irq_map,
        .xlate = irq_domain_xlate_onecell,
};
static struct irq_chip advk_msi_irq_chip = {
        .name           = "advk-MSI",
        .irq_mask       = advk_msi_top_irq_mask,
        .irq_unmask     = advk_msi_top_irq_unmask,
};

static struct msi_domain_info advk_msi_domain_info = {
        .flags  = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
                  MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX,
        .chip   = &advk_msi_irq_chip,
};
static int advk_pcie_init_msi_irq_domain(struct advk_pcie *pcie)
{
        struct device *dev = &pcie->pdev->dev;

        raw_spin_lock_init(&pcie->msi_irq_lock);
        mutex_init(&pcie->msi_used_lock);

        pcie->msi_inner_domain =
                irq_domain_add_linear(NULL, MSI_IRQ_NUM,
                                      &advk_msi_domain_ops, pcie);
        if (!pcie->msi_inner_domain)
                return -ENOMEM;

        pcie->msi_domain =
                pci_msi_create_irq_domain(dev_fwnode(dev),
                                          &advk_msi_domain_info,
                                          pcie->msi_inner_domain);
        if (!pcie->msi_domain) {
                irq_domain_remove(pcie->msi_inner_domain);
                return -ENOMEM;
        }

        return 0;
}

static void advk_pcie_remove_msi_irq_domain(struct advk_pcie *pcie)
{
        irq_domain_remove(pcie->msi_domain);
        irq_domain_remove(pcie->msi_inner_domain);
}
static int advk_pcie_init_irq_domain(struct advk_pcie *pcie)
{
        struct device *dev = &pcie->pdev->dev;
        struct device_node *node = dev->of_node;
        struct device_node *pcie_intc_node;
        struct irq_chip *irq_chip;
        int ret = 0;

        raw_spin_lock_init(&pcie->irq_lock);

        pcie_intc_node = of_get_next_child(node, NULL);
        if (!pcie_intc_node) {
                dev_err(dev, "No PCIe Intc node found\n");
                return -ENODEV;
        }

        irq_chip = &pcie->irq_chip;

        irq_chip->name = devm_kasprintf(dev, GFP_KERNEL, "%s-irq",
                                        dev_name(dev));
        if (!irq_chip->name) {
                ret = -ENOMEM;
                goto out_put_node;
        }

        irq_chip->irq_mask = advk_pcie_irq_mask;
        irq_chip->irq_mask_ack = advk_pcie_irq_mask;
        irq_chip->irq_unmask = advk_pcie_irq_unmask;

        pcie->irq_domain =
                irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
                                      &advk_pcie_irq_domain_ops, pcie);
        if (!pcie->irq_domain) {
                dev_err(dev, "Failed to get a INTx IRQ domain\n");
                ret = -ENOMEM;
                goto out_put_node;
        }

out_put_node:
        of_node_put(pcie_intc_node);
        return ret;
}

static void advk_pcie_remove_irq_domain(struct advk_pcie *pcie)
{
        irq_domain_remove(pcie->irq_domain);
}
static void advk_pcie_handle_pme(struct advk_pcie *pcie)
{
        u32 requester = advk_readl(pcie, PCIE_MSG_LOG_REG) >> 16;

        advk_writel(pcie, PCIE_MSG_PM_PME_MASK, PCIE_ISR0_REG);

        /*
         * PCIE_MSG_LOG_REG contains the last inbound message, so store
         * the requester ID only when PME was not asserted yet.
         * Also do not trigger PME interrupt when PME is still asserted.
         */
        if (!(le32_to_cpu(pcie->bridge.pcie_conf.rootsta) & PCI_EXP_RTSTA_PME)) {
                pcie->bridge.pcie_conf.rootsta = cpu_to_le32(requester | PCI_EXP_RTSTA_PME);

                /*
                 * Trigger PME interrupt only if PMEIE bit in Root Control is set.
                 * Aardvark HW returns zero for PCI_EXP_FLAGS_IRQ, so use PCIe interrupt 0.
                 */
                if (!(le16_to_cpu(pcie->bridge.pcie_conf.rootctl) & PCI_EXP_RTCTL_PMEIE))
                        return;

                if (generic_handle_domain_irq(pcie->irq_domain, 0) == -EINVAL)
                        dev_err_ratelimited(&pcie->pdev->dev, "unhandled PME IRQ\n");
        }
}
static void advk_pcie_handle_msi(struct advk_pcie *pcie)
{
        u32 msi_val, msi_mask, msi_status, msi_idx;

        msi_mask = advk_readl(pcie, PCIE_MSI_MASK_REG);
        msi_val = advk_readl(pcie, PCIE_MSI_STATUS_REG);
        msi_status = msi_val & ((~msi_mask) & PCIE_MSI_ALL_MASK);

        for (msi_idx = 0; msi_idx < MSI_IRQ_NUM; msi_idx++) {
                if (!(BIT(msi_idx) & msi_status))
                        continue;

                advk_writel(pcie, BIT(msi_idx), PCIE_MSI_STATUS_REG);
                if (generic_handle_domain_irq(pcie->msi_inner_domain, msi_idx) == -EINVAL)
                        dev_err_ratelimited(&pcie->pdev->dev, "unexpected MSI 0x%02x\n", msi_idx);
        }

        advk_writel(pcie, PCIE_ISR0_MSI_INT_PENDING,
                    PCIE_ISR0_REG);
}
static void advk_pcie_handle_int(struct advk_pcie *pcie)
{
        u32 isr0_val, isr0_mask, isr0_status;
        u32 isr1_val, isr1_mask, isr1_status;
        int i;

        isr0_val = advk_readl(pcie, PCIE_ISR0_REG);
        isr0_mask = advk_readl(pcie, PCIE_ISR0_MASK_REG);
        isr0_status = isr0_val & ((~isr0_mask) & PCIE_ISR0_ALL_MASK);

        isr1_val = advk_readl(pcie, PCIE_ISR1_REG);
        isr1_mask = advk_readl(pcie, PCIE_ISR1_MASK_REG);
        isr1_status = isr1_val & ((~isr1_mask) & PCIE_ISR1_ALL_MASK);

        /* Process the PME interrupt first so the PME requester ID is not missed */
        if (isr0_status & PCIE_MSG_PM_PME_MASK)
                advk_pcie_handle_pme(pcie);

        /* Process ERR interrupt */
        if (isr0_status & PCIE_ISR0_ERR_MASK) {
                advk_writel(pcie, PCIE_ISR0_ERR_MASK, PCIE_ISR0_REG);

                /*
                 * Aardvark HW returns zero for PCI_ERR_ROOT_AER_IRQ, so use
                 * PCIe interrupt 0
                 */
                if (generic_handle_domain_irq(pcie->irq_domain, 0) == -EINVAL)
                        dev_err_ratelimited(&pcie->pdev->dev, "unhandled ERR IRQ\n");
        }

        /* Process MSI interrupts */
        if (isr0_status & PCIE_ISR0_MSI_INT_PENDING)
                advk_pcie_handle_msi(pcie);

        /* Process legacy interrupts */
        for (i = 0; i < PCI_NUM_INTX; i++) {
                if (!(isr1_status & PCIE_ISR1_INTX_ASSERT(i)))
                        continue;

                advk_writel(pcie, PCIE_ISR1_INTX_ASSERT(i),
                            PCIE_ISR1_REG);

                if (generic_handle_domain_irq(pcie->irq_domain, i) == -EINVAL)
                        dev_err_ratelimited(&pcie->pdev->dev, "unexpected INT%c IRQ\n",
                                            (char)i + 'A');
        }
}
static irqreturn_t advk_pcie_irq_handler(int irq, void *arg)
{
        struct advk_pcie *pcie = arg;
        u32 status;

        status = advk_readl(pcie, HOST_CTRL_INT_STATUS_REG);
        if (!(status & PCIE_IRQ_CORE_INT))
                return IRQ_NONE;

        advk_pcie_handle_int(pcie);

        /* Clear interrupt */
        advk_writel(pcie, PCIE_IRQ_CORE_INT, HOST_CTRL_INT_STATUS_REG);

        return IRQ_HANDLED;
}
static void __maybe_unused advk_pcie_disable_phy(struct advk_pcie *pcie)
{
        phy_power_off(pcie->phy);
        phy_exit(pcie->phy);
}

static int advk_pcie_enable_phy(struct advk_pcie *pcie)
{
        int ret;

        if (!pcie->phy)
                return 0;

        ret = phy_init(pcie->phy);
        if (ret)
                return ret;

        ret = phy_set_mode(pcie->phy, PHY_MODE_PCIE);
        if (ret) {
                phy_exit(pcie->phy);
                return ret;
        }

        ret = phy_power_on(pcie->phy);
        if (ret == -EOPNOTSUPP) {
                dev_warn(&pcie->pdev->dev, "PHY unsupported by firmware\n");
        } else if (ret) {
                phy_exit(pcie->phy);
                return ret;
        }

        return 0;
}
static int advk_pcie_setup_phy(struct advk_pcie *pcie)
{
        struct device *dev = &pcie->pdev->dev;
        struct device_node *node = dev->of_node;
        int ret = 0;

        pcie->phy = devm_of_phy_get(dev, node, NULL);
        if (IS_ERR(pcie->phy) && (PTR_ERR(pcie->phy) == -EPROBE_DEFER))
                return PTR_ERR(pcie->phy);

        /* Old bindings miss the PHY handle */
        if (IS_ERR(pcie->phy)) {
                dev_warn(dev, "PHY unavailable (%ld)\n", PTR_ERR(pcie->phy));
                pcie->phy = NULL;
                return 0;
        }

        ret = advk_pcie_enable_phy(pcie);
        if (ret)
                dev_err(dev, "Failed to initialize PHY (%d)\n", ret);

        return ret;
}
static int advk_pcie_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        struct advk_pcie *pcie;
        struct pci_host_bridge *bridge;
        struct resource_entry *entry;
        int ret, irq;

        bridge = devm_pci_alloc_host_bridge(dev, sizeof(struct advk_pcie));
        if (!bridge)
                return -ENOMEM;

        pcie = pci_host_bridge_priv(bridge);
        pcie->pdev = pdev;
        platform_set_drvdata(pdev, pcie);

        resource_list_for_each_entry(entry, &bridge->windows) {
                resource_size_t start = entry->res->start;
                resource_size_t size = resource_size(entry->res);
                unsigned long type = resource_type(entry->res);
                u64 win_size;

                /*
                 * Aardvark hardware allows to configure also PCIe window
                 * for config type 0 and type 1 mapping, but driver uses
                 * only PIO for issuing configuration transfers which does
                 * not use PCIe window configuration.
                 */
                if (type != IORESOURCE_MEM && type != IORESOURCE_IO)
                        continue;

                /*
                 * Skip transparent memory resources. Default outbound access
                 * configuration is set to transparent memory access so it
                 * does not need window configuration.
                 */
                if (type == IORESOURCE_MEM && entry->offset == 0)
                        continue;

                /*
                 * The n-th PCIe window is configured by tuple (match, remap, mask)
                 * and an access to address A uses this window if A matches the
                 * match with given mask.
                 * So every PCIe window size must be a power of two and every start
                 * address must be aligned to window size. Minimal size is 64 KiB
                 * because lower 16 bits of mask must be zero. Remapped address
                 * may have set only bits from the mask.
                 */
                while (pcie->wins_count < OB_WIN_COUNT && size > 0) {
                        /* Calculate the largest aligned window size */
                        win_size = (1ULL << (fls64(size)-1)) |
                                   (start ? (1ULL << __ffs64(start)) : 0);
                        win_size = 1ULL << __ffs64(win_size);
                        if (win_size < 0x10000)
                                break;

                        dev_dbg(dev,
                                "Configuring PCIe window %d: [0x%llx-0x%llx] as %lu\n",
                                pcie->wins_count, (unsigned long long)start,
                                (unsigned long long)start + win_size, type);

                        if (type == IORESOURCE_IO) {
                                pcie->wins[pcie->wins_count].actions = OB_WIN_TYPE_IO;
                                pcie->wins[pcie->wins_count].match = pci_pio_to_address(start);
                        } else {
                                pcie->wins[pcie->wins_count].actions = OB_WIN_TYPE_MEM;
                                pcie->wins[pcie->wins_count].match = start;
                        }
                        pcie->wins[pcie->wins_count].remap = start - entry->offset;
                        pcie->wins[pcie->wins_count].mask = ~(win_size - 1);

                        if (pcie->wins[pcie->wins_count].remap & (win_size - 1))
                                break;

                        start += win_size;
                        size -= win_size;
                        pcie->wins_count++;
                }

                if (size > 0) {
                        dev_err(&pcie->pdev->dev,
                                "Invalid PCIe region [0x%llx-0x%llx]\n",
                                (unsigned long long)entry->res->start,
                                (unsigned long long)entry->res->end + 1);
                        return -EINVAL;
                }
        }

        pcie->base = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(pcie->base))
                return PTR_ERR(pcie->base);

        irq = platform_get_irq(pdev, 0);
        if (irq < 0)
                return irq;

        ret = devm_request_irq(dev, irq, advk_pcie_irq_handler,
                               IRQF_SHARED | IRQF_NO_THREAD, "advk-pcie",
                               pcie);
        if (ret) {
                dev_err(dev, "Failed to register interrupt\n");
                return ret;
        }

        pcie->reset_gpio = devm_gpiod_get_from_of_node(dev, dev->of_node,
                                                       "reset-gpios", 0,
                                                       GPIOD_OUT_LOW,
                                                       "pcie1-reset");
        ret = PTR_ERR_OR_ZERO(pcie->reset_gpio);
        if (ret) {
                if (ret == -ENOENT) {
                        pcie->reset_gpio = NULL;
                } else {
                        if (ret != -EPROBE_DEFER)
                                dev_err(dev, "Failed to get reset-gpio: %i\n",
                                        ret);
                        return ret;
                }
        }

        ret = of_pci_get_max_link_speed(dev->of_node);
        if (ret <= 0 || ret > 3)
                pcie->link_gen = 3;
        else
                pcie->link_gen = ret;

        ret = advk_pcie_setup_phy(pcie);
        if (ret)
                return ret;

        advk_pcie_setup_hw(pcie);

        ret = advk_sw_pci_bridge_init(pcie);
        if (ret) {
                dev_err(dev, "Failed to register emulated root PCI bridge\n");
                return ret;
        }

        ret = advk_pcie_init_irq_domain(pcie);
        if (ret) {
                dev_err(dev, "Failed to initialize irq\n");
                return ret;
        }

        ret = advk_pcie_init_msi_irq_domain(pcie);
        if (ret) {
                dev_err(dev, "Failed to initialize irq\n");
                advk_pcie_remove_irq_domain(pcie);
                return ret;
        }

        bridge->sysdata = pcie;
        bridge->ops = &advk_pcie_ops;

        ret = pci_host_probe(bridge);
        if (ret < 0) {
                advk_pcie_remove_msi_irq_domain(pcie);
                advk_pcie_remove_irq_domain(pcie);
                return ret;
        }

        return 0;
}
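/*
 * Worked example for the window-splitting loop in advk_pcie_probe() above
 * (illustrative addresses): a non-transparent 3 MiB region starting at
 * 0x80000000 cannot be covered by one window because 3 MiB is not a power
 * of two. The first pass picks win_size = 2 MiB (the largest power of two
 * that fits the remaining size and respects the start alignment), the
 * second pass covers the remaining 1 MiB, consuming two of the eight
 * OB_WIN_COUNT windows.
 */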
static int advk_pcie_remove(struct platform_device *pdev)
{
        struct advk_pcie *pcie = platform_get_drvdata(pdev);
        struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
        u32 val;
        int i;

        /* Remove PCI bus with all devices */
        pci_lock_rescan_remove();
        pci_stop_root_bus(bridge->bus);
        pci_remove_root_bus(bridge->bus);
        pci_unlock_rescan_remove();

        /* Disable Root Bridge I/O space, memory space and bus mastering */
        val = advk_readl(pcie, PCIE_CORE_CMD_STATUS_REG);
        val &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
        advk_writel(pcie, val, PCIE_CORE_CMD_STATUS_REG);

        /* Disable MSI */
        val = advk_readl(pcie, PCIE_CORE_CTRL2_REG);
        val &= ~PCIE_CORE_CTRL2_MSI_ENABLE;
        advk_writel(pcie, val, PCIE_CORE_CTRL2_REG);

        /* Clear MSI address */
        advk_writel(pcie, 0, PCIE_MSI_ADDR_LOW_REG);
        advk_writel(pcie, 0, PCIE_MSI_ADDR_HIGH_REG);

        /* Mask all interrupts */
        advk_writel(pcie, PCIE_MSI_ALL_MASK, PCIE_MSI_MASK_REG);
        advk_writel(pcie, PCIE_ISR0_ALL_MASK, PCIE_ISR0_MASK_REG);
        advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_MASK_REG);
        advk_writel(pcie, PCIE_IRQ_ALL_MASK, HOST_CTRL_INT_MASK_REG);

        /* Clear all interrupts */
        advk_writel(pcie, PCIE_MSI_ALL_MASK, PCIE_MSI_STATUS_REG);
        advk_writel(pcie, PCIE_ISR0_ALL_MASK, PCIE_ISR0_REG);
        advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_REG);
        advk_writel(pcie, PCIE_IRQ_ALL_MASK, HOST_CTRL_INT_STATUS_REG);

        /* Remove IRQ domains */
        advk_pcie_remove_msi_irq_domain(pcie);
        advk_pcie_remove_irq_domain(pcie);

        /* Free config space for emulated root bridge */
        pci_bridge_emul_cleanup(&pcie->bridge);

        /* Assert PERST# signal which prepares PCIe card for power down */
        if (pcie->reset_gpio)
                gpiod_set_value_cansleep(pcie->reset_gpio, 1);

        /* Disable link training */
        val = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
        val &= ~LINK_TRAINING_EN;
        advk_writel(pcie, val, PCIE_CORE_CTRL0_REG);

        /* Disable outbound address windows mapping */
        for (i = 0; i < OB_WIN_COUNT; i++)
                advk_pcie_disable_ob_win(pcie, i);

        /* Disable phy */
        advk_pcie_disable_phy(pcie);

        return 0;
}
static const struct of_device_id advk_pcie_of_match_table[] = {
        { .compatible = "marvell,armada-3700-pcie", },
        {},
};
MODULE_DEVICE_TABLE(of, advk_pcie_of_match_table);

static struct platform_driver advk_pcie_driver = {
        .driver = {
                .name = "advk-pcie",
                .of_match_table = advk_pcie_of_match_table,
        },
        .probe = advk_pcie_probe,
        .remove = advk_pcie_remove,
};
module_platform_driver(advk_pcie_driver);

MODULE_DESCRIPTION("Aardvark PCIe controller");
MODULE_LICENSE("GPL v2");