/*
 * Synopsys DesignWare PCIe host controller driver
 *
 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * Author: Jingoo Han <jg1.han@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/delay.h>
#include <linux/of.h>
#include <linux/types.h>

#include "pcie-designware.h"

/* PCIe Port Logic registers */
#define PLR_OFFSET			0x700
#define PCIE_PHY_DEBUG_R1		(PLR_OFFSET + 0x2c)
#define PCIE_PHY_DEBUG_R1_LINK_UP	(0x1 << 4)
#define PCIE_PHY_DEBUG_R1_LINK_IN_TRAINING	(0x1 << 29)

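/*
 * Read a naturally aligned 1/2/4-byte quantity from a memory-mapped
 * register.  Misaligned addresses and unsupported sizes report
 * PCIBIOS_BAD_REGISTER_NUMBER and return an all-zero value.
 */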
int dw_pcie_read(void __iomem *addr, int size, u32 *val)
{
	if ((uintptr_t)addr & (size - 1)) {
		*val = 0;
		return PCIBIOS_BAD_REGISTER_NUMBER;
	}

	if (size == 4) {
		*val = readl(addr);
	} else if (size == 2) {
		*val = readw(addr);
	} else if (size == 1) {
		*val = readb(addr);
	} else {
		*val = 0;
		return PCIBIOS_BAD_REGISTER_NUMBER;
	}

	return PCIBIOS_SUCCESSFUL;
}

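/*
 * Write a naturally aligned 1/2/4-byte quantity to a memory-mapped
 * register.  Misaligned addresses and unsupported sizes report
 * PCIBIOS_BAD_REGISTER_NUMBER without touching the hardware.
 */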
int dw_pcie_write(void __iomem *addr, int size, u32 val)
{
	if ((uintptr_t)addr & (size - 1))
		return PCIBIOS_BAD_REGISTER_NUMBER;

	if (size == 4)
		writel(val, addr);
	else if (size == 2)
		writew(val, addr);
	else if (size == 1)
		writeb(val, addr);
	else
		return PCIBIOS_BAD_REGISTER_NUMBER;

	return PCIBIOS_SUCCESSFUL;
}

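/*
 * Read from the DBI register space.  Platform drivers that need a special
 * access sequence can override this via ops->read_dbi; otherwise the
 * register is read directly through dw_pcie_read().
 */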
u32 __dw_pcie_read_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg,
		       size_t size)
{
	int ret;
	u32 val;

	if (pci->ops->read_dbi)
		return pci->ops->read_dbi(pci, base, reg, size);

	ret = dw_pcie_read(base + reg, size, &val);
	if (ret)
		dev_err(pci->dev, "read DBI address failed\n");

	return val;
}

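/*
 * Write to the DBI register space, using the platform's ops->write_dbi
 * callback when one is provided and a plain dw_pcie_write() otherwise.
 */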
void __dw_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg,
			 size_t size, u32 val)
{
	int ret;

	if (pci->ops->write_dbi) {
		pci->ops->write_dbi(pci, base, reg, size, val);
		return;
	}

	ret = dw_pcie_write(base + reg, size, val);
	if (ret)
		dev_err(pci->dev, "write DBI address failed\n");
}

static u32 dw_pcie_readl_ob_unroll(struct dw_pcie *pci, u32 index, u32 reg)
{
	u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);

	return dw_pcie_readl_dbi(pci, offset + reg);
}

static void dw_pcie_writel_ob_unroll(struct dw_pcie *pci, u32 index, u32 reg,
				     u32 val)
{
	u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);

	dw_pcie_writel_dbi(pci, offset + reg, val);
}

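/*
 * Program an outbound iATU region using the "unrolled" register layout,
 * where each region has its own register block instead of being selected
 * through the viewport register.
 */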
void dw_pcie_prog_outbound_atu_unroll(struct dw_pcie *pci, int index, int type,
				      u64 cpu_addr, u64 pci_addr, u32 size)
{
	u32 retries, val;

	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_BASE,
				 lower_32_bits(cpu_addr));
	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_BASE,
				 upper_32_bits(cpu_addr));
	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LIMIT,
				 lower_32_bits(cpu_addr + size - 1));
	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_TARGET,
				 lower_32_bits(pci_addr));
	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_TARGET,
				 upper_32_bits(pci_addr));
	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL1,
				 type);
	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2,
				 PCIE_ATU_ENABLE);

	/*
	 * Make sure ATU enable takes effect before any subsequent config
	 * and I/O accesses.
	 */
	for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
		val = dw_pcie_readl_ob_unroll(pci, index,
					      PCIE_ATU_UNR_REGION_CTRL2);
		if (val & PCIE_ATU_ENABLE)
			return;

		usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX);
	}
	dev_err(pci->dev, "outbound iATU is not being enabled\n");
}

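/*
 * Program an outbound iATU region so that CPU accesses to the window of
 * the given size at cpu_addr are translated to PCI addresses starting at
 * pci_addr, using the requested TLP type.
 */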
void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type,
			       u64 cpu_addr, u64 pci_addr, u32 size)
{
	u32 retries, val;

	if (pci->ops->cpu_addr_fixup)
		cpu_addr = pci->ops->cpu_addr_fixup(cpu_addr);

	if (pci->iatu_unroll_enabled) {
		dw_pcie_prog_outbound_atu_unroll(pci, index, type, cpu_addr,
						 pci_addr, size);
		return;
	}

	dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT,
			   PCIE_ATU_REGION_OUTBOUND | index);
	dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_BASE,
			   lower_32_bits(cpu_addr));
	dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_BASE,
			   upper_32_bits(cpu_addr));
	dw_pcie_writel_dbi(pci, PCIE_ATU_LIMIT,
			   lower_32_bits(cpu_addr + size - 1));
	dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET,
			   lower_32_bits(pci_addr));
	dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_TARGET,
			   upper_32_bits(pci_addr));
	dw_pcie_writel_dbi(pci, PCIE_ATU_CR1, type);
	dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, PCIE_ATU_ENABLE);

	/*
	 * Make sure ATU enable takes effect before any subsequent config
	 * and I/O accesses.
	 */
	for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
		val = dw_pcie_readl_dbi(pci, PCIE_ATU_CR2);
		if (val == PCIE_ATU_ENABLE)
			return;

		usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX);
	}
	dev_err(pci->dev, "outbound iATU is not being enabled\n");
}

static u32 dw_pcie_readl_ib_unroll(struct dw_pcie *pci, u32 index, u32 reg)
{
	u32 offset = PCIE_GET_ATU_INB_UNR_REG_OFFSET(index);

	return dw_pcie_readl_dbi(pci, offset + reg);
}

static void dw_pcie_writel_ib_unroll(struct dw_pcie *pci, u32 index, u32 reg,
				     u32 val)
{
	u32 offset = PCIE_GET_ATU_INB_UNR_REG_OFFSET(index);

	dw_pcie_writel_dbi(pci, offset + reg, val);
}

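/*
 * Program an inbound iATU region in BAR-match mode using the unrolled
 * register layout: accesses hitting the given BAR are translated to
 * cpu_addr in the selected address space (memory or I/O).
 */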
int dw_pcie_prog_inbound_atu_unroll(struct dw_pcie *pci, int index, int bar,
				    u64 cpu_addr, enum dw_pcie_as_type as_type)
{
	int type;
	u32 retries, val;

	dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_LOWER_TARGET,
				 lower_32_bits(cpu_addr));
	dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_UPPER_TARGET,
				 upper_32_bits(cpu_addr));

	switch (as_type) {
	case DW_PCIE_AS_MEM:
		type = PCIE_ATU_TYPE_MEM;
		break;
	case DW_PCIE_AS_IO:
		type = PCIE_ATU_TYPE_IO;
		break;
	default:
		return -EINVAL;
	}

	dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL1, type);
	dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2,
				 PCIE_ATU_ENABLE |
				 PCIE_ATU_BAR_MODE_ENABLE | (bar << 8));

	/*
	 * Make sure ATU enable takes effect before any subsequent config
	 * and I/O accesses.
	 */
	for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
		val = dw_pcie_readl_ib_unroll(pci, index,
					      PCIE_ATU_UNR_REGION_CTRL2);
		if (val & PCIE_ATU_ENABLE)
			return 0;

		usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX);
	}
	dev_err(pci->dev, "inbound iATU is not being enabled\n");

	return -EBUSY;
}

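/*
 * Program an inbound iATU region in BAR-match mode through the viewport
 * registers, or via the unrolled layout when the core uses it.
 */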
int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int bar,
			     u64 cpu_addr, enum dw_pcie_as_type as_type)
{
	int type;
	u32 retries, val;

	if (pci->iatu_unroll_enabled)
		return dw_pcie_prog_inbound_atu_unroll(pci, index, bar,
						       cpu_addr, as_type);

	dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, PCIE_ATU_REGION_INBOUND |
			   index);
	dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET, lower_32_bits(cpu_addr));
	dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_TARGET, upper_32_bits(cpu_addr));

	switch (as_type) {
	case DW_PCIE_AS_MEM:
		type = PCIE_ATU_TYPE_MEM;
		break;
	case DW_PCIE_AS_IO:
		type = PCIE_ATU_TYPE_IO;
		break;
	default:
		return -EINVAL;
	}

	dw_pcie_writel_dbi(pci, PCIE_ATU_CR1, type);
	dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, PCIE_ATU_ENABLE
			   | PCIE_ATU_BAR_MODE_ENABLE | (bar << 8));

	/*
	 * Make sure ATU enable takes effect before any subsequent config
	 * and I/O accesses.
	 */
	for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
		val = dw_pcie_readl_dbi(pci, PCIE_ATU_CR2);
		if (val & PCIE_ATU_ENABLE)
			return 0;

		usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX);
	}
	dev_err(pci->dev, "inbound iATU is not being enabled\n");

	return -EBUSY;
}

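/*
 * Disable a previously programmed inbound or outbound iATU region by
 * clearing its enable bit.
 */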
void dw_pcie_disable_atu(struct dw_pcie *pci, int index,
			 enum dw_pcie_region_type type)
{
	int region;

	switch (type) {
	case DW_PCIE_REGION_INBOUND:
		region = PCIE_ATU_REGION_INBOUND;
		break;
	case DW_PCIE_REGION_OUTBOUND:
		region = PCIE_ATU_REGION_OUTBOUND;
		break;
	default:
		return;
	}

	dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, region | index);
	dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, ~PCIE_ATU_ENABLE);
}

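/*
 * Poll for link-up, sleeping between attempts, and time out if the link
 * never comes up.
 */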
int dw_pcie_wait_for_link(struct dw_pcie *pci)
{
	int retries;

	/* check if the link is up or not */
	for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
		if (dw_pcie_link_up(pci)) {
			dev_info(pci->dev, "link up\n");
			return 0;
		}
		usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);
	}

	dev_err(pci->dev, "phy link never came up\n");

	return -ETIMEDOUT;
}

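/*
 * Report whether the link is up.  Platform drivers may supply their own
 * ops->link_up; the default reads the PHY debug register and requires
 * link-up to be set with link training no longer in progress.
 */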
int dw_pcie_link_up(struct dw_pcie *pci)
{
	u32 val;

	if (pci->ops->link_up)
		return pci->ops->link_up(pci);

	val = readl(pci->dbi_base + PCIE_PHY_DEBUG_R1);
	return ((val & PCIE_PHY_DEBUG_R1_LINK_UP) &&
		(!(val & PCIE_PHY_DEBUG_R1_LINK_IN_TRAINING)));
}

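/*
 * Apply the common core setup: read the optional "num-lanes" DT property
 * and program the port link control and link width/speed control
 * registers to match.
 */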
void dw_pcie_setup(struct dw_pcie *pci)
{
	int ret;
	u32 val;
	u32 lanes;
	struct device *dev = pci->dev;
	struct device_node *np = dev->of_node;

	ret = of_property_read_u32(np, "num-lanes", &lanes);
	if (ret)
		lanes = 0;

	/* set the number of lanes */
	val = dw_pcie_readl_dbi(pci, PCIE_PORT_LINK_CONTROL);
	val &= ~PORT_LINK_MODE_MASK;
	switch (lanes) {
	case 1:
		val |= PORT_LINK_MODE_1_LANES;
		break;
	case 2:
		val |= PORT_LINK_MODE_2_LANES;
		break;
	case 4:
		val |= PORT_LINK_MODE_4_LANES;
		break;
	case 8:
		val |= PORT_LINK_MODE_8_LANES;
		break;
	default:
		dev_err(pci->dev, "num-lanes %u: invalid value\n", lanes);
		return;
	}
	dw_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, val);

	/* set link width speed control register */
	val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
	val &= ~PORT_LOGIC_LINK_WIDTH_MASK;
	switch (lanes) {
	case 1:
		val |= PORT_LOGIC_LINK_WIDTH_1_LANES;
		break;
	case 2:
		val |= PORT_LOGIC_LINK_WIDTH_2_LANES;
		break;
	case 4:
		val |= PORT_LOGIC_LINK_WIDTH_4_LANES;
		break;
	case 8:
		val |= PORT_LOGIC_LINK_WIDTH_8_LANES;
		break;
	}
	dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
}