/*
 * Synopsys DesignWare PCIe host controller driver
 *
 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * Author: Jingoo Han <jg1.han@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/delay.h>
#include <linux/of.h>
#include <linux/types.h>

#include "pcie-designware.h"

/* PCIe Port Logic registers */
#define PLR_OFFSET			0x700
#define PCIE_PHY_DEBUG_R1		(PLR_OFFSET + 0x2c)
#define PCIE_PHY_DEBUG_R1_LINK_UP		(0x1 << 4)
#define PCIE_PHY_DEBUG_R1_LINK_IN_TRAINING	(0x1 << 29)

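/*
 * dw_pcie_read - size-checked MMIO read helper.
 *
 * Rejects accesses that are not naturally aligned for @size, or whose size
 * is not 1, 2 or 4 bytes, returning PCIBIOS_BAD_REGISTER_NUMBER.
 */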
int dw_pcie_read(void __iomem *addr, int size, u32 *val)
{
	if ((uintptr_t)addr & (size - 1)) {
		*val = 0;
		return PCIBIOS_BAD_REGISTER_NUMBER;
	}

	if (size == 4) {
		*val = readl(addr);
	} else if (size == 2) {
		*val = readw(addr);
	} else if (size == 1) {
		*val = readb(addr);
	} else {
		*val = 0;
		return PCIBIOS_BAD_REGISTER_NUMBER;
	}

	return PCIBIOS_SUCCESSFUL;
}

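/* dw_pcie_write - size-checked MMIO write helper, mirroring dw_pcie_read(). */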
int dw_pcie_write(void __iomem *addr, int size, u32 val)
{
	if ((uintptr_t)addr & (size - 1))
		return PCIBIOS_BAD_REGISTER_NUMBER;

	if (size == 4)
		writel(val, addr);
	else if (size == 2)
		writew(val, addr);
	else if (size == 1)
		writeb(val, addr);
	else
		return PCIBIOS_BAD_REGISTER_NUMBER;

	return PCIBIOS_SUCCESSFUL;
}

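/*
 * DBI register read. Platform drivers can override the access method via
 * pci->ops->read_dbi; otherwise a plain MMIO read of @base + @reg is done.
 */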
u32 __dw_pcie_read_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg,
		       size_t size)
{
	int ret;
	u32 val;

	if (pci->ops->read_dbi)
		return pci->ops->read_dbi(pci, base, reg, size);

	ret = dw_pcie_read(base + reg, size, &val);
	if (ret)
		dev_err(pci->dev, "read DBI address failed\n");

	return val;
}

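/* DBI register write counterpart of __dw_pcie_read_dbi(). */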
void __dw_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg,
			 size_t size, u32 val)
{
	int ret;

	if (pci->ops->write_dbi) {
		pci->ops->write_dbi(pci, base, reg, size, val);
		return;
	}

	ret = dw_pcie_write(base + reg, size, val);
	if (ret)
		dev_err(pci->dev, "write DBI address failed\n");
}

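/*
 * In iATU "unroll" mode the ATU registers are not selected through the
 * viewport; each region has its own register block at a per-index offset,
 * computed here by PCIE_GET_ATU_OUTB_UNR_REG_OFFSET().
 */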
static u32 dw_pcie_readl_ob_unroll(struct dw_pcie *pci, u32 index, u32 reg)
{
	u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);

	return dw_pcie_readl_dbi(pci, offset + reg);
}

static void dw_pcie_writel_ob_unroll(struct dw_pcie *pci, u32 index, u32 reg,
				     u32 val)
{
	u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);

	dw_pcie_writel_dbi(pci, offset + reg, val);
}

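/* Program and enable an outbound iATU region using the unrolled layout. */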
static void dw_pcie_prog_outbound_atu_unroll(struct dw_pcie *pci, int index,
					     int type, u64 cpu_addr,
					     u64 pci_addr, u32 size)
{
	u32 retries, val;

	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_BASE,
				 lower_32_bits(cpu_addr));
	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_BASE,
				 upper_32_bits(cpu_addr));
	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LIMIT,
				 lower_32_bits(cpu_addr + size - 1));
	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_TARGET,
				 lower_32_bits(pci_addr));
	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_TARGET,
				 upper_32_bits(pci_addr));
	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL1,
				 type);
	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2,
				 PCIE_ATU_ENABLE);

	/*
	 * Make sure ATU enable takes effect before any subsequent config
	 * and I/O accesses.
	 */
	for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
		val = dw_pcie_readl_ob_unroll(pci, index,
					      PCIE_ATU_UNR_REGION_CTRL2);
		if (val & PCIE_ATU_ENABLE)
			return;

		usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX);
	}
	dev_err(pci->dev, "outbound iATU is not being enabled\n");
}

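/*
 * Program an outbound iATU region that translates the CPU address range
 * [cpu_addr, cpu_addr + size - 1] to pci_addr with the given TLP type,
 * using either the unrolled layout or the viewport, then poll until the
 * enable bit reads back as set.
 */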
void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type,
			       u64 cpu_addr, u64 pci_addr, u32 size)
{
	u32 retries, val;

	if (pci->ops->cpu_addr_fixup)
		cpu_addr = pci->ops->cpu_addr_fixup(cpu_addr);

	if (pci->iatu_unroll_enabled) {
		dw_pcie_prog_outbound_atu_unroll(pci, index, type, cpu_addr,
						 pci_addr, size);
		return;
	}

	dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT,
			   PCIE_ATU_REGION_OUTBOUND | index);
	dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_BASE,
			   lower_32_bits(cpu_addr));
	dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_BASE,
			   upper_32_bits(cpu_addr));
	dw_pcie_writel_dbi(pci, PCIE_ATU_LIMIT,
			   lower_32_bits(cpu_addr + size - 1));
	dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET,
			   lower_32_bits(pci_addr));
	dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_TARGET,
			   upper_32_bits(pci_addr));
	dw_pcie_writel_dbi(pci, PCIE_ATU_CR1, type);
	dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, PCIE_ATU_ENABLE);

	/*
	 * Make sure ATU enable takes effect before any subsequent config
	 * and I/O accesses.
	 */
	for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
		val = dw_pcie_readl_dbi(pci, PCIE_ATU_CR2);
		if (val & PCIE_ATU_ENABLE)
			return;

		usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX);
	}
	dev_err(pci->dev, "outbound iATU is not being enabled\n");
}

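/* Inbound counterparts of the unrolled outbound iATU register helpers. */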
static u32 dw_pcie_readl_ib_unroll(struct dw_pcie *pci, u32 index, u32 reg)
{
	u32 offset = PCIE_GET_ATU_INB_UNR_REG_OFFSET(index);

	return dw_pcie_readl_dbi(pci, offset + reg);
}

static void dw_pcie_writel_ib_unroll(struct dw_pcie *pci, u32 index, u32 reg,
				     u32 val)
{
	u32 offset = PCIE_GET_ATU_INB_UNR_REG_OFFSET(index);

	dw_pcie_writel_dbi(pci, offset + reg, val);
}

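/*
 * Program an inbound iATU region in BAR-match mode using the unrolled
 * layout: accesses hitting @bar are translated to @cpu_addr.
 */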
static int dw_pcie_prog_inbound_atu_unroll(struct dw_pcie *pci, int index,
					   int bar, u64 cpu_addr,
					   enum dw_pcie_as_type as_type)
{
	int type;
	u32 retries, val;

	dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_LOWER_TARGET,
				 lower_32_bits(cpu_addr));
	dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_UPPER_TARGET,
				 upper_32_bits(cpu_addr));

	switch (as_type) {
	case DW_PCIE_AS_MEM:
		type = PCIE_ATU_TYPE_MEM;
		break;
	case DW_PCIE_AS_IO:
		type = PCIE_ATU_TYPE_IO;
		break;
	default:
		return -EINVAL;
	}

	dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL1, type);
	dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2,
				 PCIE_ATU_ENABLE |
				 PCIE_ATU_BAR_MODE_ENABLE | (bar << 8));

	/*
	 * Make sure ATU enable takes effect before any subsequent config
	 * and I/O accesses.
	 */
	for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
		val = dw_pcie_readl_ib_unroll(pci, index,
					      PCIE_ATU_UNR_REGION_CTRL2);
		if (val & PCIE_ATU_ENABLE)
			return 0;

		usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX);
	}
	dev_err(pci->dev, "inbound iATU is not being enabled\n");

	return -EBUSY;
}

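/*
 * Viewport/unroll-agnostic entry point for inbound iATU programming in
 * BAR-match mode. Returns 0 on success, -EINVAL for an unknown address
 * space type and -EBUSY if the enable bit never reads back as set.
 */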
int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int bar,
			     u64 cpu_addr, enum dw_pcie_as_type as_type)
{
	int type;
	u32 retries, val;

	if (pci->iatu_unroll_enabled)
		return dw_pcie_prog_inbound_atu_unroll(pci, index, bar,
						       cpu_addr, as_type);

	dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, PCIE_ATU_REGION_INBOUND |
			   index);
	dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET, lower_32_bits(cpu_addr));
	dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_TARGET, upper_32_bits(cpu_addr));

	switch (as_type) {
	case DW_PCIE_AS_MEM:
		type = PCIE_ATU_TYPE_MEM;
		break;
	case DW_PCIE_AS_IO:
		type = PCIE_ATU_TYPE_IO;
		break;
	default:
		return -EINVAL;
	}

	dw_pcie_writel_dbi(pci, PCIE_ATU_CR1, type);
	dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, PCIE_ATU_ENABLE
			   | PCIE_ATU_BAR_MODE_ENABLE | (bar << 8));

	/*
	 * Make sure ATU enable takes effect before any subsequent config
	 * and I/O accesses.
	 */
	for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
		val = dw_pcie_readl_dbi(pci, PCIE_ATU_CR2);
		if (val & PCIE_ATU_ENABLE)
			return 0;

		usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX);
	}
	dev_err(pci->dev, "inbound iATU is not being enabled\n");

	return -EBUSY;
}

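/* Disable an inbound or outbound iATU region through the viewport. */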
void dw_pcie_disable_atu(struct dw_pcie *pci, int index,
			 enum dw_pcie_region_type type)
{
	int region;

	switch (type) {
	case DW_PCIE_REGION_INBOUND:
		region = PCIE_ATU_REGION_INBOUND;
		break;
	case DW_PCIE_REGION_OUTBOUND:
		region = PCIE_ATU_REGION_OUTBOUND;
		break;
	default:
		return;
	}

	dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, region | index);
	dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, ~PCIE_ATU_ENABLE);
}

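/* Poll for link-up, sleeping between attempts; returns -ETIMEDOUT on failure. */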
int dw_pcie_wait_for_link(struct dw_pcie *pci)
{
	int retries;

	/* check if the link is up or not */
	for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
		if (dw_pcie_link_up(pci)) {
			dev_info(pci->dev, "link up\n");
			return 0;
		}
		usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);
	}

	dev_err(pci->dev, "phy link never came up\n");

	return -ETIMEDOUT;
}

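/*
 * Report link state. A platform-specific pci->ops->link_up() hook takes
 * precedence; the generic fallback checks the PHY debug register for
 * "link up" with link training no longer in progress.
 */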
int dw_pcie_link_up(struct dw_pcie *pci)
{
	u32 val;

	if (pci->ops->link_up)
		return pci->ops->link_up(pci);

	val = readl(pci->dbi_base + PCIE_PHY_DEBUG_R1);
	return ((val & PCIE_PHY_DEBUG_R1_LINK_UP) &&
		(!(val & PCIE_PHY_DEBUG_R1_LINK_IN_TRAINING)));
}

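/*
 * Common link setup: read the optional "num-lanes" DT property and program
 * the port link control and link width/speed control registers accordingly.
 */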
void dw_pcie_setup(struct dw_pcie *pci)
{
	int ret;
	u32 val;
	u32 lanes;
	struct device *dev = pci->dev;
	struct device_node *np = dev->of_node;

	ret = of_property_read_u32(np, "num-lanes", &lanes);
	if (ret)
		lanes = 0;

	/* set the number of lanes */
	val = dw_pcie_readl_dbi(pci, PCIE_PORT_LINK_CONTROL);
	val &= ~PORT_LINK_MODE_MASK;
	switch (lanes) {
	case 1:
		val |= PORT_LINK_MODE_1_LANES;
		break;
	case 2:
		val |= PORT_LINK_MODE_2_LANES;
		break;
	case 4:
		val |= PORT_LINK_MODE_4_LANES;
		break;
	case 8:
		val |= PORT_LINK_MODE_8_LANES;
		break;
	default:
		dev_err(pci->dev, "num-lanes %u: invalid value\n", lanes);
		return;
	}
	dw_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, val);

	/* set link width speed control register */
	val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
	val &= ~PORT_LOGIC_LINK_WIDTH_MASK;
	switch (lanes) {
	case 1:
		val |= PORT_LOGIC_LINK_WIDTH_1_LANES;
		break;
	case 2:
		val |= PORT_LOGIC_LINK_WIDTH_2_LANES;
		break;
	case 4:
		val |= PORT_LOGIC_LINK_WIDTH_4_LANES;
		break;
	case 8:
		val |= PORT_LOGIC_LINK_WIDTH_8_LANES;
		break;
	}
	dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
}