powerpc/powernv/npu: Add set/unset window helpers
arch/powerpc/platforms/powernv/npu-dma.c

/*
 * This file implements the DMA operations for NVLink devices. The NPU
 * devices all point to the same iommu table as the parent PCI device.
 *
 * Copyright Alistair Popple, IBM Corporation 2015.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */

#include <linux/export.h>
#include <linux/pci.h>
#include <linux/memblock.h>

#include <asm/iommu.h>
#include <asm/pnv-pci.h>
#include <asm/msi_bitmap.h>
#include <asm/opal.h>

#include "powernv.h"
#include "pci.h"

/*
 * Other types of TCE cache invalidation are not functional in the
 * hardware.
 */
static struct pci_dev *get_pci_dev(struct device_node *dn)
{
	return PCI_DN(dn)->pcidev;
}

/* Given an NPU device get the associated PCI device. */
struct pci_dev *pnv_pci_get_gpu_dev(struct pci_dev *npdev)
{
	struct device_node *dn;
	struct pci_dev *gpdev;

	/* Get associated PCI device */
	dn = of_parse_phandle(npdev->dev.of_node, "ibm,gpu", 0);
	if (!dn)
		return NULL;

	gpdev = get_pci_dev(dn);
	of_node_put(dn);

	return gpdev;
}
EXPORT_SYMBOL(pnv_pci_get_gpu_dev);

/* Given the real PCI device get a linked NPU device. */
struct pci_dev *pnv_pci_get_npu_dev(struct pci_dev *gpdev, int index)
{
	struct device_node *dn;
	struct pci_dev *npdev;

	/* Get associated PCI device */
	dn = of_parse_phandle(gpdev->dev.of_node, "ibm,npu", index);
	if (!dn)
		return NULL;

	npdev = get_pci_dev(dn);
	of_node_put(dn);

	return npdev;
}
EXPORT_SYMBOL(pnv_pci_get_npu_dev);
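
/*
 * Illustrative sketch (not part of the original file): a GPU may have
 * several NVLinks, so a caller can probe "ibm,npu" phandles until the
 * lookup fails, mirroring the loop in pnv_npu_try_dma_set_bypass()
 * below:
 *
 *	int i;
 *	struct pci_dev *npdev;
 *
 *	for (i = 0; (npdev = pnv_pci_get_npu_dev(gpdev, i)); i++)
 *		dev_info(&npdev->dev, "NPU link %d for %s\n",
 *			 i, pci_name(gpdev));
 */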

#define NPU_DMA_OP_UNSUPPORTED()	\
	dev_err_once(dev, "%s operation unsupported for NVLink devices\n", \
		__func__)

static void *dma_npu_alloc(struct device *dev, size_t size,
			   dma_addr_t *dma_handle, gfp_t flag,
			   struct dma_attrs *attrs)
{
	NPU_DMA_OP_UNSUPPORTED();
	return NULL;
}

static void dma_npu_free(struct device *dev, size_t size,
			 void *vaddr, dma_addr_t dma_handle,
			 struct dma_attrs *attrs)
{
	NPU_DMA_OP_UNSUPPORTED();
}

static dma_addr_t dma_npu_map_page(struct device *dev, struct page *page,
				   unsigned long offset, size_t size,
				   enum dma_data_direction direction,
				   struct dma_attrs *attrs)
{
	NPU_DMA_OP_UNSUPPORTED();
	return 0;
}

static int dma_npu_map_sg(struct device *dev, struct scatterlist *sglist,
			  int nelems, enum dma_data_direction direction,
			  struct dma_attrs *attrs)
{
	NPU_DMA_OP_UNSUPPORTED();
	return 0;
}

static int dma_npu_dma_supported(struct device *dev, u64 mask)
{
	NPU_DMA_OP_UNSUPPORTED();
	return 0;
}

static u64 dma_npu_get_required_mask(struct device *dev)
{
	NPU_DMA_OP_UNSUPPORTED();
	return 0;
}

struct dma_map_ops dma_npu_ops = {
	.map_page		= dma_npu_map_page,
	.map_sg			= dma_npu_map_sg,
	.alloc			= dma_npu_alloc,
	.free			= dma_npu_free,
	.dma_supported		= dma_npu_dma_supported,
	.get_required_mask	= dma_npu_get_required_mask,
};
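
/*
 * Editorial note (grounded in this file): the ops above all fail on
 * purpose; real DMA setup happens through the linked GPU device, which
 * shares its iommu table with the NPU. The stubs are attached in
 * pnv_npu_dma_set_32() below via:
 *
 *	set_dma_ops(&npe->pdev->dev, &dma_npu_ops);
 */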

/*
 * Returns the PE associated with the PCI device of the given
 * NPU. Returns the linked pci device if gpdev != NULL.
 */
static struct pnv_ioda_pe *get_gpu_pci_dev_and_pe(struct pnv_ioda_pe *npe,
						  struct pci_dev **gpdev)
{
	struct pnv_phb *phb;
	struct pci_controller *hose;
	struct pci_dev *pdev;
	struct pnv_ioda_pe *pe;
	struct pci_dn *pdn;

	if (npe->flags & PNV_IODA_PE_PEER) {
		pe = npe->peers[0];
		pdev = pe->pdev;
	} else {
		pdev = pnv_pci_get_gpu_dev(npe->pdev);
		if (!pdev)
			return NULL;

		pdn = pci_get_pdn(pdev);
		if (WARN_ON(!pdn || pdn->pe_number == IODA_INVALID_PE))
			return NULL;

		hose = pci_bus_to_host(pdev->bus);
		phb = hose->private_data;
		pe = &phb->ioda.pe_array[pdn->pe_number];
	}

	if (gpdev)
		*gpdev = pdev;

	return pe;
}

static long pnv_npu_set_window(struct pnv_ioda_pe *npe,
		struct iommu_table *tbl)
{
	struct pnv_phb *phb = npe->phb;
	int64_t rc;
	const unsigned long size = tbl->it_indirect_levels ?
		tbl->it_level_size : tbl->it_size;
	const __u64 start_addr = tbl->it_offset << tbl->it_page_shift;
	const __u64 win_size = tbl->it_size << tbl->it_page_shift;

	pe_info(npe, "Setting up window %llx..%llx pg=%lx\n",
			start_addr, start_addr + win_size - 1,
			IOMMU_PAGE_SIZE(tbl));

	rc = opal_pci_map_pe_dma_window(phb->opal_id,
			npe->pe_number,
			npe->pe_number,
			tbl->it_indirect_levels + 1,
			__pa(tbl->it_base),
			size << 3,	/* each TCE entry is 8 bytes */
			IOMMU_PAGE_SIZE(tbl));
	if (rc) {
		pe_err(npe, "Failed to configure TCE table, err %lld\n", rc);
		return rc;
	}
	pnv_pci_ioda2_tce_invalidate_entire(phb, false);

	return 0;
}
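
/*
 * Illustrative sketch (not part of the original file): callers are
 * expected to check the OPAL return code; the 32-bit path below reuses
 * the GPU PE's first TCE table for the NPU window:
 *
 *	rc = pnv_npu_set_window(npe, gpe->table_group.tables[0]);
 *	if (rc != OPAL_SUCCESS)
 *		return;
 */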

static long pnv_npu_unset_window(struct pnv_ioda_pe *npe)
{
	struct pnv_phb *phb = npe->phb;
	int64_t rc;

	pe_info(npe, "Removing DMA window\n");

	rc = opal_pci_map_pe_dma_window(phb->opal_id, npe->pe_number,
			npe->pe_number,
			0/* levels */, 0/* table address */,
			0/* table size */, 0/* page size */);
	if (rc) {
		pe_err(npe, "Unmapping failed, ret = %lld\n", rc);
		return rc;
	}
	pnv_pci_ioda2_tce_invalidate_entire(phb, false);

	return 0;
}
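
/*
 * Illustrative sketch (not part of the original file): because the NPU
 * only supports one window per link, switching to bypass first tears
 * down any existing TCE window, exactly as pnv_npu_dma_set_bypass()
 * does below:
 *
 *	rc = pnv_npu_unset_window(npe);
 *	if (rc != OPAL_SUCCESS)
 *		return rc;
 *	rc = opal_pci_map_pe_dma_window_real(phb->opal_id,
 *			npe->pe_number, npe->pe_number,
 *			0, top);
 */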

void pnv_npu_init_dma_pe(struct pnv_ioda_pe *npe)
{
	struct pnv_ioda_pe *gpe;
	struct pci_dev *gpdev;
	int i, avail = -1;

	if (!npe->pdev || !(npe->flags & PNV_IODA_PE_DEV))
		return;

	gpe = get_gpu_pci_dev_and_pe(npe, &gpdev);
	if (!gpe)
		return;

	for (i = 0; i < PNV_IODA_MAX_PEER_PES; i++) {
		/* Nothing to do if the PE is already connected. */
		if (gpe->peers[i] == npe)
			return;

		if (!gpe->peers[i])
			avail = i;
	}

	if (WARN_ON(avail < 0))
		return;

	gpe->peers[avail] = npe;
	gpe->flags |= PNV_IODA_PE_PEER;

	/*
	 * We assume that the NPU devices only have a single peer PE
	 * (the GPU PCIe device PE).
	 */
	npe->peers[0] = gpe;
	npe->flags |= PNV_IODA_PE_PEER;
}
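
/*
 * Illustrative note (not part of the original file): after the linking
 * above, the two PEs point at each other through their peers[] arrays,
 * i.e. gpe->peers[avail] == npe and npe->peers[0] == gpe, and both have
 * PNV_IODA_PE_PEER set in their flags.
 */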

/*
 * Enables 32-bit DMA on the NPU.
 */
static void pnv_npu_dma_set_32(struct pnv_ioda_pe *npe)
{
	struct pci_dev *gpdev;
	struct pnv_ioda_pe *gpe;
	int64_t rc;

	/*
	 * Find the associated PCI devices and get the dma window
	 * information from there.
	 */
	if (!npe->pdev || !(npe->flags & PNV_IODA_PE_DEV))
		return;

	gpe = get_gpu_pci_dev_and_pe(npe, &gpdev);
	if (!gpe)
		return;

	rc = pnv_npu_set_window(npe, gpe->table_group.tables[0]);
	if (rc != OPAL_SUCCESS)
		return;

	/*
	 * We don't initialise npu_pe->tce32_table as we always use
	 * dma_npu_ops, which all just report an error.
	 */
	set_dma_ops(&npe->pdev->dev, &dma_npu_ops);
}

/*
 * Enables bypass mode on the NPU. The NPU only supports one
 * window per link, so bypass needs to be explicitly enabled or
 * disabled. Unlike for a PHB3, bypass and non-bypass modes can't be
 * active at the same time.
 */
static int pnv_npu_dma_set_bypass(struct pnv_ioda_pe *npe)
{
	struct pnv_phb *phb = npe->phb;
	int64_t rc = 0;
	phys_addr_t top = memblock_end_of_DRAM();

	if (phb->type != PNV_PHB_NPU || !npe->pdev)
		return -EINVAL;

	rc = pnv_npu_unset_window(npe);
	if (rc != OPAL_SUCCESS)
		return rc;

	/* Enable the bypass window */

	top = roundup_pow_of_two(top);
	dev_info(&npe->pdev->dev, "Enabling bypass for PE %d\n",
			npe->pe_number);
	rc = opal_pci_map_pe_dma_window_real(phb->opal_id,
			npe->pe_number, npe->pe_number,
			0 /* bypass base */, top);

	return rc;
}

void pnv_npu_try_dma_set_bypass(struct pci_dev *gpdev, bool bypass)
{
	int i;
	struct pnv_phb *phb;
	struct pci_dn *pdn;
	struct pnv_ioda_pe *npe;
	struct pci_dev *npdev;

	for (i = 0; ; ++i) {
		npdev = pnv_pci_get_npu_dev(gpdev, i);

		if (!npdev)
			break;

		pdn = pci_get_pdn(npdev);
		if (WARN_ON(!pdn || pdn->pe_number == IODA_INVALID_PE))
			return;

		phb = pci_bus_to_host(npdev->bus)->private_data;

		/* We only do bypass if it's enabled on the linked device */
		npe = &phb->ioda.pe_array[pdn->pe_number];

		if (bypass) {
			dev_info(&npdev->dev,
					"Using 64-bit DMA iommu bypass\n");
			pnv_npu_dma_set_bypass(npe);
		} else {
			dev_info(&npdev->dev, "Using 32-bit DMA via iommu\n");
			pnv_npu_dma_set_32(npe);
		}
	}
}
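
/*
 * Illustrative sketch (not part of the original file; the caller shown
 * is an assumption): the PHB's dma_set_mask handling for the GPU is
 * expected to invoke this helper so that every NPU link follows the
 * GPU's addressing mode, along the lines of:
 *
 *	bool bypass = (dma_mask == DMA_BIT_MASK(64));
 *
 *	pnv_npu_try_dma_set_bypass(gpdev, bypass);
 */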