1 | /* |
2 | * Copyright 2014 IBM Corp. | |
3 | * | |
4 | * This program is free software; you can redistribute it and/or | |
5 | * modify it under the terms of the GNU General Public License | |
6 | * as published by the Free Software Foundation; either version | |
7 | * 2 of the License, or (at your option) any later version. | |
8 | */ | |
9 | ||
10 | #include <linux/pci.h> | |
11 | #include <misc/cxl.h> | |
12 | #include "cxl.h" | |
13 | ||
14 | static int cxl_dma_set_mask(struct pci_dev *pdev, u64 dma_mask) | |
15 | { | |
16 | if (dma_mask < DMA_BIT_MASK(64)) { | |
17 | pr_info("%s only 64bit DMA supported on CXL", __func__); | |
18 | return -EIO; | |
19 | } | |
20 | ||
21 | *(pdev->dev.dma_mask) = dma_mask; | |
22 | return 0; | |
23 | } | |
24 | ||
/*
 * Devices on the cxl vPHB are probed the conventional way; no special
 * probe mode (e.g. device-tree only) is needed.
 */
static int cxl_pci_probe_mode(struct pci_bus *bus)
{
	return PCI_PROBE_NORMAL;
}
29 | ||
/*
 * MSI is not supported on the vPHB; always fail so the PCI core falls
 * back to legacy interrupts.
 */
static int cxl_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
{
	return -ENODEV;
}
34 | ||
static void cxl_teardown_msi_irqs(struct pci_dev *pdev)
{
	/*
	 * MSI should never be set up (cxl_setup_msi_irqs() always fails),
	 * but we still need to provide this callback.
	 */
}
42 | ||
43 | static bool cxl_pci_enable_device_hook(struct pci_dev *dev) | |
44 | { | |
45 | struct pci_controller *phb; | |
46 | struct cxl_afu *afu; | |
47 | struct cxl_context *ctx; | |
48 | ||
49 | phb = pci_bus_to_host(dev->bus); | |
50 | afu = (struct cxl_afu *)phb->private_data; | |
51 | set_dma_ops(&dev->dev, &dma_direct_ops); | |
52 | set_dma_offset(&dev->dev, PAGE_OFFSET); | |
53 | ||
54 | /* | |
55 | * Allocate a context to do cxl things too. If we eventually do real | |
56 | * DMA ops, we'll need a default context to attach them to | |
57 | */ | |
58 | ctx = cxl_dev_context_init(dev); | |
59 | if (!ctx) | |
60 | return false; | |
61 | dev->dev.archdata.cxl_ctx = ctx; | |
62 | ||
63 | return (cxl_afu_check_and_enable(afu) == 0); | |
64 | } | |
65 | ||
66 | static void cxl_pci_disable_device(struct pci_dev *dev) | |
67 | { | |
68 | struct cxl_context *ctx = cxl_get_context(dev); | |
69 | ||
70 | if (ctx) { | |
71 | if (ctx->status == STARTED) { | |
72 | dev_err(&dev->dev, "Default context started\n"); | |
73 | return; | |
74 | } | |
f67b4938 | 75 | dev->dev.archdata.cxl_ctx = NULL; |
6f7f0b3d MN |
76 | cxl_release_context(ctx); |
77 | } | |
78 | } | |
79 | ||
/*
 * Report an alignment of 1, i.e. no extra alignment constraint on
 * bridge windows for the vPHB.
 */
static resource_size_t cxl_pci_window_alignment(struct pci_bus *bus,
						unsigned long type)
{
	return 1;
}
85 | ||
/* Secondary-bus reset hook: currently a no-op. */
static void cxl_pci_reset_secondary_bus(struct pci_dev *dev)
{
	/* Should we do an AFU reset here ? */
}
90 | ||
91 | static int cxl_pcie_cfg_record(u8 bus, u8 devfn) | |
92 | { | |
93 | return (bus << 8) + devfn; | |
94 | } | |
95 | ||
96 | static unsigned long cxl_pcie_cfg_addr(struct pci_controller* phb, | |
97 | u8 bus, u8 devfn, int offset) | |
98 | { | |
99 | int record = cxl_pcie_cfg_record(bus, devfn); | |
100 | ||
101 | return (unsigned long)phb->cfg_addr + ((unsigned long)phb->cfg_data * record) + offset; | |
102 | } | |
103 | ||
104 | ||
105 | static int cxl_pcie_config_info(struct pci_bus *bus, unsigned int devfn, | |
106 | int offset, int len, | |
107 | volatile void __iomem **ioaddr, | |
108 | u32 *mask, int *shift) | |
109 | { | |
110 | struct pci_controller *phb; | |
111 | struct cxl_afu *afu; | |
112 | unsigned long addr; | |
113 | ||
114 | phb = pci_bus_to_host(bus); | |
6f7f0b3d MN |
115 | if (phb == NULL) |
116 | return PCIBIOS_DEVICE_NOT_FOUND; | |
14f21189 MS |
117 | afu = (struct cxl_afu *)phb->private_data; |
118 | ||
6f7f0b3d MN |
119 | if (cxl_pcie_cfg_record(bus->number, devfn) > afu->crs_num) |
120 | return PCIBIOS_DEVICE_NOT_FOUND; | |
121 | if (offset >= (unsigned long)phb->cfg_data) | |
122 | return PCIBIOS_BAD_REGISTER_NUMBER; | |
123 | addr = cxl_pcie_cfg_addr(phb, bus->number, devfn, offset); | |
124 | ||
125 | *ioaddr = (void *)(addr & ~0x3ULL); | |
126 | *shift = ((addr & 0x3) * 8); | |
127 | switch (len) { | |
128 | case 1: | |
129 | *mask = 0xff; | |
130 | break; | |
131 | case 2: | |
132 | *mask = 0xffff; | |
133 | break; | |
134 | default: | |
135 | *mask = 0xffffffff; | |
136 | break; | |
137 | } | |
138 | return 0; | |
139 | } | |
140 | ||
0b3f9c75 DA |
141 | |
142 | static inline bool cxl_config_link_ok(struct pci_bus *bus) | |
143 | { | |
144 | struct pci_controller *phb; | |
145 | struct cxl_afu *afu; | |
146 | ||
147 | /* Config space IO is based on phb->cfg_addr, which is based on | |
148 | * afu_desc_mmio. This isn't safe to read/write when the link | |
149 | * goes down, as EEH tears down MMIO space. | |
150 | * | |
151 | * Check if the link is OK before proceeding. | |
152 | */ | |
153 | ||
154 | phb = pci_bus_to_host(bus); | |
155 | if (phb == NULL) | |
156 | return false; | |
157 | afu = (struct cxl_afu *)phb->private_data; | |
158 | return cxl_adapter_link_ok(afu->adapter); | |
159 | } | |
160 | ||
6f7f0b3d MN |
161 | static int cxl_pcie_read_config(struct pci_bus *bus, unsigned int devfn, |
162 | int offset, int len, u32 *val) | |
163 | { | |
164 | volatile void __iomem *ioaddr; | |
165 | int shift, rc; | |
166 | u32 mask; | |
167 | ||
168 | rc = cxl_pcie_config_info(bus, devfn, offset, len, &ioaddr, | |
169 | &mask, &shift); | |
170 | if (rc) | |
171 | return rc; | |
172 | ||
0b3f9c75 DA |
173 | if (!cxl_config_link_ok(bus)) |
174 | return PCIBIOS_DEVICE_NOT_FOUND; | |
175 | ||
6f7f0b3d MN |
176 | /* Can only read 32 bits */ |
177 | *val = (in_le32(ioaddr) >> shift) & mask; | |
178 | return PCIBIOS_SUCCESSFUL; | |
179 | } | |
180 | ||
181 | static int cxl_pcie_write_config(struct pci_bus *bus, unsigned int devfn, | |
182 | int offset, int len, u32 val) | |
183 | { | |
184 | volatile void __iomem *ioaddr; | |
185 | u32 v, mask; | |
186 | int shift, rc; | |
187 | ||
188 | rc = cxl_pcie_config_info(bus, devfn, offset, len, &ioaddr, | |
189 | &mask, &shift); | |
190 | if (rc) | |
191 | return rc; | |
192 | ||
0b3f9c75 DA |
193 | if (!cxl_config_link_ok(bus)) |
194 | return PCIBIOS_DEVICE_NOT_FOUND; | |
195 | ||
6f7f0b3d MN |
196 | /* Can only write 32 bits so do read-modify-write */ |
197 | mask <<= shift; | |
198 | val <<= shift; | |
199 | ||
200 | v = (in_le32(ioaddr) & ~mask) || (val & mask); | |
201 | ||
202 | out_le32(ioaddr, v); | |
203 | return PCIBIOS_SUCCESSFUL; | |
204 | } | |
205 | ||
/* Config-space accessors used by the PCI core for devices on the vPHB. */
static struct pci_ops cxl_pcie_pci_ops =
{
	.read = cxl_pcie_read_config,
	.write = cxl_pcie_write_config,
};
211 | ||
212 | ||
/*
 * Controller callbacks wired into the powerpc PCI core for the vPHB.
 * Note that release_device reuses cxl_pci_disable_device, so the
 * default context is torn down on device release as well.
 */
static struct pci_controller_ops cxl_pci_controller_ops =
{
	.probe_mode = cxl_pci_probe_mode,
	.enable_device_hook = cxl_pci_enable_device_hook,
	.disable_device = cxl_pci_disable_device,
	.release_device = cxl_pci_disable_device,
	.window_alignment = cxl_pci_window_alignment,
	.reset_secondary_bus = cxl_pci_reset_secondary_bus,
	.setup_msi_irqs = cxl_setup_msi_irqs,
	.teardown_msi_irqs = cxl_teardown_msi_irqs,
	.dma_set_mask = cxl_dma_set_mask,
};
225 | ||
/*
 * Create and scan a virtual PHB exposing the AFU's configuration
 * records as PCI devices. Returns 0 on success, -ENODEV if the PHB
 * cannot be allocated, -ENXIO if the bus scan fails.
 */
int cxl_pci_vphb_add(struct cxl_afu *afu)
{
	struct pci_dev *phys_dev;
	struct pci_controller *phb, *phys_phb;

	/* The adapter's parent device is the physical CAPI PCI function. */
	phys_dev = to_pci_dev(afu->adapter->dev.parent);
	phys_phb = pci_bus_to_host(phys_dev->bus);

	/* Alloc and setup PHB data structure */
	phb = pcibios_alloc_controller(phys_phb->dn);

	if (!phb)
		return -ENODEV;

	/* Setup parent in sysfs */
	phb->parent = &phys_dev->dev;

	/* Setup the PHB using arch provided callback */
	phb->ops = &cxl_pcie_pci_ops;
	/*
	 * cfg_addr/cfg_data are repurposed for the vPHB: cfg_addr is the
	 * base of the AFU config-record area, cfg_data carries the record
	 * length (used by cxl_pcie_cfg_addr/cxl_pcie_config_info as the
	 * per-record stride and offset bound).
	 */
	phb->cfg_addr = afu->afu_desc_mmio + afu->crs_offset;
	phb->cfg_data = (void *)(u64)afu->crs_len;
	phb->private_data = afu;
	phb->controller_ops = cxl_pci_controller_ops;

	/* Scan the bus */
	pcibios_scan_phb(phb);
	if (phb->bus == NULL)
		return -ENXIO; /* NOTE(review): phb is not freed on this path -- leak? confirm */

	/* Claim resources. This might need some rework as well depending
	 * whether we are doing probe-only or not, like assigning unassigned
	 * resources etc...
	 */
	pcibios_claim_one_bus(phb->bus);

	/* Add probed PCI devices to the device model */
	pci_bus_add_devices(phb->bus);

	afu->phb = phb;

	return 0;
}
268 | ||
void cxl_pci_vphb_reconfigure(struct cxl_afu *afu)
{
	/* When we are reconfigured, the AFU's MMIO space is unmapped
	 * and remapped. We need to reflect this in the PHB's view of
	 * the world.
	 *
	 * NOTE(review): assumes afu->phb is non-NULL here, i.e. callers
	 * only reconfigure AFUs that went through cxl_pci_vphb_add() --
	 * confirm against callers.
	 */
	afu->phb->cfg_addr = afu->afu_desc_mmio + afu->crs_offset;
}
6f7f0b3d MN |
277 | |
278 | void cxl_pci_vphb_remove(struct cxl_afu *afu) | |
279 | { | |
280 | struct pci_controller *phb; | |
281 | ||
282 | /* If there is no configuration record we won't have one of these */ | |
283 | if (!afu || !afu->phb) | |
284 | return; | |
285 | ||
286 | phb = afu->phb; | |
287 | ||
288 | pci_remove_root_bus(phb->bus); | |
289 | } | |
290 | ||
291 | struct cxl_afu *cxl_pci_to_afu(struct pci_dev *dev) | |
292 | { | |
293 | struct pci_controller *phb; | |
294 | ||
295 | phb = pci_bus_to_host(dev->bus); | |
296 | ||
297 | return (struct cxl_afu *)phb->private_data; | |
298 | } | |
299 | EXPORT_SYMBOL_GPL(cxl_pci_to_afu); | |
300 | ||
301 | unsigned int cxl_pci_to_cfg_record(struct pci_dev *dev) | |
302 | { | |
303 | return cxl_pcie_cfg_record(dev->bus->number, dev->devfn); | |
304 | } | |
305 | EXPORT_SYMBOL_GPL(cxl_pci_to_cfg_record); |