// SPDX-License-Identifier: GPL-2.0
/*
 * Synopsys DesignWare PCIe Endpoint controller driver
 *
 * Copyright (C) 2017 Texas Instruments
 * Author: Kishon Vijay Abraham I <kishon@ti.com>
 */
#include <linux/of.h>
#include <linux/pci-epc.h>
#include <linux/pci-epf.h>

#include "pcie-designware.h"

15 | void dw_pcie_ep_linkup(struct dw_pcie_ep *ep) | |
16 | { | |
17 | struct pci_epc *epc = ep->epc; | |
18 | ||
19 | pci_epc_linkup(epc); | |
20 | } | |
21 | ||
/*
 * Clear a BAR: zero both the dbi2 shadow register (the BAR mask) and
 * the BAR register itself so the BAR reads back as disabled/size 0.
 * For a 64-bit memory BAR (per @flags) the upper half at reg + 4 is
 * cleared as well.
 */
static void __dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar,
				   int flags)
{
	u32 reg;

	reg = PCI_BASE_ADDRESS_0 + (4 * bar);
	/* BAR registers are read-only through dbi; unlock before writing */
	dw_pcie_dbi_ro_wr_en(pci);
	dw_pcie_writel_dbi2(pci, reg, 0x0);
	dw_pcie_writel_dbi(pci, reg, 0x0);
	if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64) {
		dw_pcie_writel_dbi2(pci, reg + 4, 0x0);
		dw_pcie_writel_dbi(pci, reg + 4, 0x0);
	}
	dw_pcie_dbi_ro_wr_dis(pci);
}
37 | ||
/* Public wrapper: reset @bar assuming no type flags (32-bit BAR). */
void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar)
{
	__dw_pcie_ep_reset_bar(pci, bar, 0);
}
42 | ||
beb4641a GP |
43 | static u8 __dw_pcie_ep_find_next_cap(struct dw_pcie *pci, u8 cap_ptr, |
44 | u8 cap) | |
45 | { | |
46 | u8 cap_id, next_cap_ptr; | |
47 | u16 reg; | |
48 | ||
49 | reg = dw_pcie_readw_dbi(pci, cap_ptr); | |
50 | next_cap_ptr = (reg & 0xff00) >> 8; | |
51 | cap_id = (reg & 0x00ff); | |
52 | ||
53 | if (!next_cap_ptr || cap_id > PCI_CAP_ID_MAX) | |
54 | return 0; | |
55 | ||
56 | if (cap_id == cap) | |
57 | return cap_ptr; | |
58 | ||
59 | return __dw_pcie_ep_find_next_cap(pci, next_cap_ptr, cap); | |
60 | } | |
61 | ||
62 | static u8 dw_pcie_ep_find_capability(struct dw_pcie *pci, u8 cap) | |
63 | { | |
64 | u8 next_cap_ptr; | |
65 | u16 reg; | |
66 | ||
67 | reg = dw_pcie_readw_dbi(pci, PCI_CAPABILITY_LIST); | |
68 | next_cap_ptr = (reg & 0x00ff); | |
69 | ||
70 | if (!next_cap_ptr) | |
71 | return 0; | |
72 | ||
73 | return __dw_pcie_ep_find_next_cap(pci, next_cap_ptr, cap); | |
74 | } | |
75 | ||
/*
 * pci_epc_ops::write_header callback: program the standard config-space
 * header fields (IDs, class codes, cache line size, interrupt pin)
 * from @hdr.  Always returns 0.
 *
 * NOTE(review): @func_no is ignored — only one function's header is
 * written through the dbi view; confirm multi-function expectations.
 */
static int dw_pcie_ep_write_header(struct pci_epc *epc, u8 func_no,
				   struct pci_epf_header *hdr)
{
	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);

	/* Header registers are read-only through dbi; unlock first */
	dw_pcie_dbi_ro_wr_en(pci);
	dw_pcie_writew_dbi(pci, PCI_VENDOR_ID, hdr->vendorid);
	dw_pcie_writew_dbi(pci, PCI_DEVICE_ID, hdr->deviceid);
	dw_pcie_writeb_dbi(pci, PCI_REVISION_ID, hdr->revid);
	dw_pcie_writeb_dbi(pci, PCI_CLASS_PROG, hdr->progif_code);
	/* Sub-class in the low byte, base class in the high byte */
	dw_pcie_writew_dbi(pci, PCI_CLASS_DEVICE,
			   hdr->subclass_code | hdr->baseclass_code << 8);
	dw_pcie_writeb_dbi(pci, PCI_CACHE_LINE_SIZE,
			   hdr->cache_line_size);
	dw_pcie_writew_dbi(pci, PCI_SUBSYSTEM_VENDOR_ID,
			   hdr->subsys_vendor_id);
	dw_pcie_writew_dbi(pci, PCI_SUBSYSTEM_ID, hdr->subsys_id);
	dw_pcie_writeb_dbi(pci, PCI_INTERRUPT_PIN,
			   hdr->interrupt_pin);
	dw_pcie_dbi_ro_wr_dis(pci);

	return 0;
}
100 | ||
101 | static int dw_pcie_ep_inbound_atu(struct dw_pcie_ep *ep, enum pci_barno bar, | |
102 | dma_addr_t cpu_addr, | |
103 | enum dw_pcie_as_type as_type) | |
104 | { | |
105 | int ret; | |
106 | u32 free_win; | |
107 | struct dw_pcie *pci = to_dw_pcie_from_ep(ep); | |
108 | ||
ad4a5bec | 109 | free_win = find_first_zero_bit(ep->ib_window_map, ep->num_ib_windows); |
f8aed6ec | 110 | if (free_win >= ep->num_ib_windows) { |
b4a8a51c | 111 | dev_err(pci->dev, "No free inbound window\n"); |
f8aed6ec KVA |
112 | return -EINVAL; |
113 | } | |
114 | ||
115 | ret = dw_pcie_prog_inbound_atu(pci, free_win, bar, cpu_addr, | |
116 | as_type); | |
117 | if (ret < 0) { | |
118 | dev_err(pci->dev, "Failed to program IB window\n"); | |
119 | return ret; | |
120 | } | |
121 | ||
122 | ep->bar_to_atu[bar] = free_win; | |
ad4a5bec | 123 | set_bit(free_win, ep->ib_window_map); |
f8aed6ec KVA |
124 | |
125 | return 0; | |
126 | } | |
127 | ||
128 | static int dw_pcie_ep_outbound_atu(struct dw_pcie_ep *ep, phys_addr_t phys_addr, | |
129 | u64 pci_addr, size_t size) | |
130 | { | |
131 | u32 free_win; | |
132 | struct dw_pcie *pci = to_dw_pcie_from_ep(ep); | |
133 | ||
ad4a5bec | 134 | free_win = find_first_zero_bit(ep->ob_window_map, ep->num_ob_windows); |
f8aed6ec | 135 | if (free_win >= ep->num_ob_windows) { |
b4a8a51c | 136 | dev_err(pci->dev, "No free outbound window\n"); |
f8aed6ec KVA |
137 | return -EINVAL; |
138 | } | |
139 | ||
140 | dw_pcie_prog_outbound_atu(pci, free_win, PCIE_ATU_TYPE_MEM, | |
141 | phys_addr, pci_addr, size); | |
142 | ||
ad4a5bec | 143 | set_bit(free_win, ep->ob_window_map); |
f8aed6ec KVA |
144 | ep->outbound_addr[free_win] = phys_addr; |
145 | ||
146 | return 0; | |
147 | } | |
148 | ||
4494738d | 149 | static void dw_pcie_ep_clear_bar(struct pci_epc *epc, u8 func_no, |
77d08dbd | 150 | struct pci_epf_bar *epf_bar) |
f8aed6ec KVA |
151 | { |
152 | struct dw_pcie_ep *ep = epc_get_drvdata(epc); | |
153 | struct dw_pcie *pci = to_dw_pcie_from_ep(ep); | |
77d08dbd | 154 | enum pci_barno bar = epf_bar->barno; |
f8aed6ec KVA |
155 | u32 atu_index = ep->bar_to_atu[bar]; |
156 | ||
77d08dbd | 157 | __dw_pcie_ep_reset_bar(pci, bar, epf_bar->flags); |
f8aed6ec KVA |
158 | |
159 | dw_pcie_disable_atu(pci, atu_index, DW_PCIE_REGION_INBOUND); | |
ad4a5bec | 160 | clear_bit(atu_index, ep->ib_window_map); |
f8aed6ec KVA |
161 | } |
162 | ||
/*
 * pci_epc_ops::set_bar callback: route host accesses hitting @epf_bar
 * to its backing memory via an inbound iATU window, then program the
 * BAR's size mask (through the dbi2 shadow registers) and flags.
 */
static int dw_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no,
			      struct pci_epf_bar *epf_bar)
{
	int ret;
	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	enum pci_barno bar = epf_bar->barno;
	size_t size = epf_bar->size;
	int flags = epf_bar->flags;
	enum dw_pcie_as_type as_type;
	u32 reg = PCI_BASE_ADDRESS_0 + (4 * bar);

	/* PCI_BASE_ADDRESS_SPACE clear = memory BAR, set = I/O BAR */
	if (!(flags & PCI_BASE_ADDRESS_SPACE))
		as_type = DW_PCIE_AS_MEM;
	else
		as_type = DW_PCIE_AS_IO;

	ret = dw_pcie_ep_inbound_atu(ep, bar, epf_bar->phys_addr, as_type);
	if (ret)
		return ret;

	dw_pcie_dbi_ro_wr_en(pci);

	/* dbi2 write sets the BAR mask (size - 1); dbi write sets flags */
	dw_pcie_writel_dbi2(pci, reg, lower_32_bits(size - 1));
	dw_pcie_writel_dbi(pci, reg, flags);

	if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64) {
		dw_pcie_writel_dbi2(pci, reg + 4, upper_32_bits(size - 1));
		dw_pcie_writel_dbi(pci, reg + 4, 0);
	}

	dw_pcie_dbi_ro_wr_dis(pci);

	return 0;
}
198 | ||
199 | static int dw_pcie_find_index(struct dw_pcie_ep *ep, phys_addr_t addr, | |
200 | u32 *atu_index) | |
201 | { | |
202 | u32 index; | |
203 | ||
204 | for (index = 0; index < ep->num_ob_windows; index++) { | |
205 | if (ep->outbound_addr[index] != addr) | |
206 | continue; | |
207 | *atu_index = index; | |
208 | return 0; | |
209 | } | |
210 | ||
211 | return -EINVAL; | |
212 | } | |
213 | ||
4494738d CP |
214 | static void dw_pcie_ep_unmap_addr(struct pci_epc *epc, u8 func_no, |
215 | phys_addr_t addr) | |
f8aed6ec KVA |
216 | { |
217 | int ret; | |
218 | u32 atu_index; | |
219 | struct dw_pcie_ep *ep = epc_get_drvdata(epc); | |
220 | struct dw_pcie *pci = to_dw_pcie_from_ep(ep); | |
221 | ||
222 | ret = dw_pcie_find_index(ep, addr, &atu_index); | |
223 | if (ret < 0) | |
224 | return; | |
225 | ||
226 | dw_pcie_disable_atu(pci, atu_index, DW_PCIE_REGION_OUTBOUND); | |
ad4a5bec | 227 | clear_bit(atu_index, ep->ob_window_map); |
f8aed6ec KVA |
228 | } |
229 | ||
4494738d CP |
230 | static int dw_pcie_ep_map_addr(struct pci_epc *epc, u8 func_no, |
231 | phys_addr_t addr, | |
f8aed6ec KVA |
232 | u64 pci_addr, size_t size) |
233 | { | |
234 | int ret; | |
235 | struct dw_pcie_ep *ep = epc_get_drvdata(epc); | |
236 | struct dw_pcie *pci = to_dw_pcie_from_ep(ep); | |
237 | ||
238 | ret = dw_pcie_ep_outbound_atu(ep, addr, pci_addr, size); | |
239 | if (ret) { | |
b4a8a51c | 240 | dev_err(pci->dev, "Failed to enable address\n"); |
f8aed6ec KVA |
241 | return ret; |
242 | } | |
243 | ||
244 | return 0; | |
245 | } | |
246 | ||
4494738d | 247 | static int dw_pcie_ep_get_msi(struct pci_epc *epc, u8 func_no) |
f8aed6ec | 248 | { |
f8aed6ec KVA |
249 | struct dw_pcie_ep *ep = epc_get_drvdata(epc); |
250 | struct dw_pcie *pci = to_dw_pcie_from_ep(ep); | |
3920a5d7 GP |
251 | u32 val, reg; |
252 | ||
253 | if (!ep->msi_cap) | |
254 | return -EINVAL; | |
f8aed6ec | 255 | |
3920a5d7 GP |
256 | reg = ep->msi_cap + PCI_MSI_FLAGS; |
257 | val = dw_pcie_readw_dbi(pci, reg); | |
258 | if (!(val & PCI_MSI_FLAGS_ENABLE)) | |
f8aed6ec KVA |
259 | return -EINVAL; |
260 | ||
3920a5d7 GP |
261 | val = (val & PCI_MSI_FLAGS_QSIZE) >> 4; |
262 | ||
f8aed6ec KVA |
263 | return val; |
264 | } | |
265 | ||
/*
 * pci_epc_ops::set_msi callback: store @interrupts into the Multiple
 * Message Capable field of the MSI capability, advertising how many
 * vectors this function requests.
 */
static int dw_pcie_ep_set_msi(struct pci_epc *epc, u8 func_no, u8 interrupts)
{
	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	u32 val, reg;

	if (!ep->msi_cap)
		return -EINVAL;

	reg = ep->msi_cap + PCI_MSI_FLAGS;
	val = dw_pcie_readw_dbi(pci, reg);
	val &= ~PCI_MSI_FLAGS_QMASK;
	/* QMASK (Multiple Message Capable) occupies bits 3:1, hence << 1 */
	val |= (interrupts << 1) & PCI_MSI_FLAGS_QMASK;
	/* The capability is read-only through dbi; unlock around the write */
	dw_pcie_dbi_ro_wr_en(pci);
	dw_pcie_writew_dbi(pci, reg, val);
	dw_pcie_dbi_ro_wr_dis(pci);

	return 0;
}
285 | ||
beb4641a GP |
286 | static int dw_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no) |
287 | { | |
288 | struct dw_pcie_ep *ep = epc_get_drvdata(epc); | |
289 | struct dw_pcie *pci = to_dw_pcie_from_ep(ep); | |
290 | u32 val, reg; | |
291 | ||
292 | if (!ep->msix_cap) | |
293 | return -EINVAL; | |
294 | ||
295 | reg = ep->msix_cap + PCI_MSIX_FLAGS; | |
296 | val = dw_pcie_readw_dbi(pci, reg); | |
297 | if (!(val & PCI_MSIX_FLAGS_ENABLE)) | |
298 | return -EINVAL; | |
299 | ||
300 | val &= PCI_MSIX_FLAGS_QSIZE; | |
301 | ||
302 | return val; | |
303 | } | |
304 | ||
/*
 * pci_epc_ops::set_msix callback: program the MSI-X Table Size field
 * in the capability.  NOTE(review): @interrupts is written verbatim —
 * the caller is presumably passing the (vectors - 1) spec encoding;
 * confirm against the EPC core.
 */
static int dw_pcie_ep_set_msix(struct pci_epc *epc, u8 func_no, u16 interrupts)
{
	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	u32 val, reg;

	if (!ep->msix_cap)
		return -EINVAL;

	reg = ep->msix_cap + PCI_MSIX_FLAGS;
	val = dw_pcie_readw_dbi(pci, reg);
	val &= ~PCI_MSIX_FLAGS_QSIZE;
	val |= interrupts;
	/* The capability is read-only through dbi; unlock around the write */
	dw_pcie_dbi_ro_wr_en(pci);
	dw_pcie_writew_dbi(pci, reg, val);
	dw_pcie_dbi_ro_wr_dis(pci);

	return 0;
}
324 | ||
4494738d | 325 | static int dw_pcie_ep_raise_irq(struct pci_epc *epc, u8 func_no, |
d3c70a98 | 326 | enum pci_epc_irq_type type, u16 interrupt_num) |
f8aed6ec KVA |
327 | { |
328 | struct dw_pcie_ep *ep = epc_get_drvdata(epc); | |
329 | ||
330 | if (!ep->ops->raise_irq) | |
331 | return -EINVAL; | |
332 | ||
16093362 | 333 | return ep->ops->raise_irq(ep, func_no, type, interrupt_num); |
f8aed6ec KVA |
334 | } |
335 | ||
336 | static void dw_pcie_ep_stop(struct pci_epc *epc) | |
337 | { | |
338 | struct dw_pcie_ep *ep = epc_get_drvdata(epc); | |
339 | struct dw_pcie *pci = to_dw_pcie_from_ep(ep); | |
340 | ||
341 | if (!pci->ops->stop_link) | |
342 | return; | |
343 | ||
344 | pci->ops->stop_link(pci); | |
345 | } | |
346 | ||
347 | static int dw_pcie_ep_start(struct pci_epc *epc) | |
348 | { | |
349 | struct dw_pcie_ep *ep = epc_get_drvdata(epc); | |
350 | struct dw_pcie *pci = to_dw_pcie_from_ep(ep); | |
351 | ||
352 | if (!pci->ops->start_link) | |
353 | return -EINVAL; | |
354 | ||
355 | return pci->ops->start_link(pci); | |
356 | } | |
357 | ||
/* pci_epc_ops implemented by this driver; registered in dw_pcie_ep_init() */
static const struct pci_epc_ops epc_ops = {
	.write_header = dw_pcie_ep_write_header,
	.set_bar = dw_pcie_ep_set_bar,
	.clear_bar = dw_pcie_ep_clear_bar,
	.map_addr = dw_pcie_ep_map_addr,
	.unmap_addr = dw_pcie_ep_unmap_addr,
	.set_msi = dw_pcie_ep_set_msi,
	.get_msi = dw_pcie_ep_get_msi,
	.set_msix = dw_pcie_ep_set_msix,
	.get_msix = dw_pcie_ep_get_msix,
	.raise_irq = dw_pcie_ep_raise_irq,
	.start = dw_pcie_ep_start,
	.stop = dw_pcie_ep_stop,
};
372 | ||
cb22d40b GP |
373 | int dw_pcie_ep_raise_legacy_irq(struct dw_pcie_ep *ep, u8 func_no) |
374 | { | |
375 | struct dw_pcie *pci = to_dw_pcie_from_ep(ep); | |
376 | struct device *dev = pci->dev; | |
377 | ||
378 | dev_err(dev, "EP cannot trigger legacy IRQs\n"); | |
379 | ||
380 | return -EINVAL; | |
381 | } | |
382 | ||
/*
 * Raise an MSI from the endpoint side: read the message address/data
 * the host programmed into our MSI capability, map that PCI address
 * through an outbound window onto the reserved msi_mem page, then
 * write the message data to it ourselves.
 */
int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
			     u8 interrupt_num)
{
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	struct pci_epc *epc = ep->epc;
	u16 msg_ctrl, msg_data;
	u32 msg_addr_lower, msg_addr_upper, reg;
	u64 msg_addr;
	bool has_upper;
	int ret;

	if (!ep->msi_cap)
		return -EINVAL;

	/* Raise MSI per the PCI Local Bus Specification Revision 3.0, 6.8.1. */
	reg = ep->msi_cap + PCI_MSI_FLAGS;
	msg_ctrl = dw_pcie_readw_dbi(pci, reg);
	/* 64-bit capable functions keep the data register at a later offset */
	has_upper = !!(msg_ctrl & PCI_MSI_FLAGS_64BIT);
	reg = ep->msi_cap + PCI_MSI_ADDRESS_LO;
	msg_addr_lower = dw_pcie_readl_dbi(pci, reg);
	if (has_upper) {
		reg = ep->msi_cap + PCI_MSI_ADDRESS_HI;
		msg_addr_upper = dw_pcie_readl_dbi(pci, reg);
		reg = ep->msi_cap + PCI_MSI_DATA_64;
		msg_data = dw_pcie_readw_dbi(pci, reg);
	} else {
		msg_addr_upper = 0;
		reg = ep->msi_cap + PCI_MSI_DATA_32;
		msg_data = dw_pcie_readw_dbi(pci, reg);
	}
	msg_addr = ((u64) msg_addr_upper) << 32 | msg_addr_lower;
	/* Map the host's MSI target address onto our reserved msi_mem page */
	ret = dw_pcie_ep_map_addr(epc, func_no, ep->msi_mem_phys, msg_addr,
				  epc->mem->page_size);
	if (ret)
		return ret;

	/* interrupt_num is 1-based; the payload encodes vector - 1 */
	writel(msg_data | (interrupt_num - 1), ep->msi_mem);

	dw_pcie_ep_unmap_addr(epc, func_no, ep->msi_mem_phys);

	return 0;
}
425 | ||
beb4641a GP |
426 | int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no, |
427 | u16 interrupt_num) | |
428 | { | |
429 | struct dw_pcie *pci = to_dw_pcie_from_ep(ep); | |
430 | struct pci_epc *epc = ep->epc; | |
431 | u16 tbl_offset, bir; | |
432 | u32 bar_addr_upper, bar_addr_lower; | |
433 | u32 msg_addr_upper, msg_addr_lower; | |
434 | u32 reg, msg_data, vec_ctrl; | |
435 | u64 tbl_addr, msg_addr, reg_u64; | |
436 | void __iomem *msix_tbl; | |
437 | int ret; | |
438 | ||
439 | reg = ep->msix_cap + PCI_MSIX_TABLE; | |
440 | tbl_offset = dw_pcie_readl_dbi(pci, reg); | |
441 | bir = (tbl_offset & PCI_MSIX_TABLE_BIR); | |
442 | tbl_offset &= PCI_MSIX_TABLE_OFFSET; | |
443 | tbl_offset >>= 3; | |
444 | ||
445 | reg = PCI_BASE_ADDRESS_0 + (4 * bir); | |
446 | bar_addr_upper = 0; | |
447 | bar_addr_lower = dw_pcie_readl_dbi(pci, reg); | |
448 | reg_u64 = (bar_addr_lower & PCI_BASE_ADDRESS_MEM_TYPE_MASK); | |
449 | if (reg_u64 == PCI_BASE_ADDRESS_MEM_TYPE_64) | |
450 | bar_addr_upper = dw_pcie_readl_dbi(pci, reg + 4); | |
451 | ||
452 | tbl_addr = ((u64) bar_addr_upper) << 32 | bar_addr_lower; | |
453 | tbl_addr += (tbl_offset + ((interrupt_num - 1) * PCI_MSIX_ENTRY_SIZE)); | |
454 | tbl_addr &= PCI_BASE_ADDRESS_MEM_MASK; | |
455 | ||
456 | msix_tbl = ioremap_nocache(ep->phys_base + tbl_addr, | |
457 | PCI_MSIX_ENTRY_SIZE); | |
458 | if (!msix_tbl) | |
459 | return -EINVAL; | |
460 | ||
461 | msg_addr_lower = readl(msix_tbl + PCI_MSIX_ENTRY_LOWER_ADDR); | |
462 | msg_addr_upper = readl(msix_tbl + PCI_MSIX_ENTRY_UPPER_ADDR); | |
463 | msg_addr = ((u64) msg_addr_upper) << 32 | msg_addr_lower; | |
464 | msg_data = readl(msix_tbl + PCI_MSIX_ENTRY_DATA); | |
465 | vec_ctrl = readl(msix_tbl + PCI_MSIX_ENTRY_VECTOR_CTRL); | |
466 | ||
467 | iounmap(msix_tbl); | |
468 | ||
469 | if (vec_ctrl & PCI_MSIX_ENTRY_CTRL_MASKBIT) | |
470 | return -EPERM; | |
471 | ||
472 | ret = dw_pcie_ep_map_addr(epc, func_no, ep->msi_mem_phys, msg_addr, | |
473 | epc->mem->page_size); | |
474 | if (ret) | |
475 | return ret; | |
476 | ||
477 | writel(msg_data, ep->msi_mem); | |
478 | ||
479 | dw_pcie_ep_unmap_addr(epc, func_no, ep->msi_mem_phys); | |
480 | ||
481 | return 0; | |
482 | } | |
483 | ||
/*
 * dw_pcie_ep_exit() - tear down the endpoint controller.
 *
 * Releases the page reserved for raising MSI/MSI-X interrupts and the
 * EPC outbound address-space bookkeeping set up by dw_pcie_ep_init().
 */
void dw_pcie_ep_exit(struct dw_pcie_ep *ep)
{
	struct pci_epc *epc = ep->epc;

	pci_epc_mem_free_addr(epc, ep->msi_mem_phys, ep->msi_mem,
			      epc->mem->page_size);

	pci_epc_mem_exit(epc);
}
493 | ||
494 | int dw_pcie_ep_init(struct dw_pcie_ep *ep) | |
495 | { | |
496 | int ret; | |
497 | void *addr; | |
f8aed6ec KVA |
498 | struct pci_epc *epc; |
499 | struct dw_pcie *pci = to_dw_pcie_from_ep(ep); | |
500 | struct device *dev = pci->dev; | |
501 | struct device_node *np = dev->of_node; | |
502 | ||
503 | if (!pci->dbi_base || !pci->dbi_base2) { | |
ae15d863 | 504 | dev_err(dev, "dbi_base/dbi_base2 is not populated\n"); |
f8aed6ec KVA |
505 | return -EINVAL; |
506 | } | |
6d6b05e3 SW |
507 | if (pci->iatu_unroll_enabled && !pci->atu_base) { |
508 | dev_err(dev, "atu_base is not populated\n"); | |
509 | return -EINVAL; | |
510 | } | |
f8aed6ec KVA |
511 | |
512 | ret = of_property_read_u32(np, "num-ib-windows", &ep->num_ib_windows); | |
513 | if (ret < 0) { | |
b4a8a51c | 514 | dev_err(dev, "Unable to read *num-ib-windows* property\n"); |
f8aed6ec KVA |
515 | return ret; |
516 | } | |
ad4a5bec | 517 | if (ep->num_ib_windows > MAX_IATU_IN) { |
b4a8a51c | 518 | dev_err(dev, "Invalid *num-ib-windows*\n"); |
ad4a5bec NC |
519 | return -EINVAL; |
520 | } | |
f8aed6ec KVA |
521 | |
522 | ret = of_property_read_u32(np, "num-ob-windows", &ep->num_ob_windows); | |
523 | if (ret < 0) { | |
b4a8a51c | 524 | dev_err(dev, "Unable to read *num-ob-windows* property\n"); |
f8aed6ec KVA |
525 | return ret; |
526 | } | |
ad4a5bec | 527 | if (ep->num_ob_windows > MAX_IATU_OUT) { |
b4a8a51c | 528 | dev_err(dev, "Invalid *num-ob-windows*\n"); |
ad4a5bec NC |
529 | return -EINVAL; |
530 | } | |
531 | ||
a86854d0 | 532 | ep->ib_window_map = devm_kcalloc(dev, |
ad4a5bec | 533 | BITS_TO_LONGS(ep->num_ib_windows), |
a86854d0 | 534 | sizeof(long), |
ad4a5bec NC |
535 | GFP_KERNEL); |
536 | if (!ep->ib_window_map) | |
537 | return -ENOMEM; | |
538 | ||
a86854d0 | 539 | ep->ob_window_map = devm_kcalloc(dev, |
ad4a5bec | 540 | BITS_TO_LONGS(ep->num_ob_windows), |
a86854d0 | 541 | sizeof(long), |
ad4a5bec NC |
542 | GFP_KERNEL); |
543 | if (!ep->ob_window_map) | |
544 | return -ENOMEM; | |
f8aed6ec | 545 | |
a86854d0 | 546 | addr = devm_kcalloc(dev, ep->num_ob_windows, sizeof(phys_addr_t), |
f8aed6ec KVA |
547 | GFP_KERNEL); |
548 | if (!addr) | |
549 | return -ENOMEM; | |
550 | ep->outbound_addr = addr; | |
551 | ||
f8aed6ec KVA |
552 | epc = devm_pci_epc_create(dev, &epc_ops); |
553 | if (IS_ERR(epc)) { | |
b4a8a51c | 554 | dev_err(dev, "Failed to create epc device\n"); |
f8aed6ec KVA |
555 | return PTR_ERR(epc); |
556 | } | |
557 | ||
4e965ede GP |
558 | ep->epc = epc; |
559 | epc_set_drvdata(epc, ep); | |
560 | ||
561 | if (ep->ops->ep_init) | |
562 | ep->ops->ep_init(ep); | |
563 | ||
f8aed6ec KVA |
564 | ret = of_property_read_u8(np, "max-functions", &epc->max_functions); |
565 | if (ret < 0) | |
566 | epc->max_functions = 1; | |
567 | ||
a937fe08 KVA |
568 | ret = __pci_epc_mem_init(epc, ep->phys_base, ep->addr_size, |
569 | ep->page_size); | |
f8aed6ec KVA |
570 | if (ret < 0) { |
571 | dev_err(dev, "Failed to initialize address space\n"); | |
572 | return ret; | |
573 | } | |
574 | ||
2fd0c9d9 NC |
575 | ep->msi_mem = pci_epc_mem_alloc_addr(epc, &ep->msi_mem_phys, |
576 | epc->mem->page_size); | |
577 | if (!ep->msi_mem) { | |
beb4641a | 578 | dev_err(dev, "Failed to reserve memory for MSI/MSI-X\n"); |
2fd0c9d9 NC |
579 | return -ENOMEM; |
580 | } | |
beb4641a GP |
581 | ep->msi_cap = dw_pcie_ep_find_capability(pci, PCI_CAP_ID_MSI); |
582 | ||
583 | ep->msix_cap = dw_pcie_ep_find_capability(pci, PCI_CAP_ID_MSIX); | |
2fd0c9d9 | 584 | |
f8aed6ec KVA |
585 | dw_pcie_setup(pci); |
586 | ||
587 | return 0; | |
588 | } |