/*
 * This file implements the DMA operations for NVLink devices. The NPU
 * devices all point to the same iommu table as the parent PCI device.
 *
 * Copyright Alistair Popple, IBM Corporation 2015.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */

#include <linux/slab.h>
#include <linux/mmu_notifier.h>
#include <linux/mmu_context.h>
#include <linux/of.h>
#include <linux/export.h>
#include <linux/pci.h>
#include <linux/memblock.h>
#include <linux/iommu.h>

#include <asm/tlb.h>
#include <asm/powernv.h>
#include <asm/reg.h>
#include <asm/opal.h>
#include <asm/io.h>
#include <asm/iommu.h>
#include <asm/pnv-pci.h>
#include <asm/msi_bitmap.h>
#include <asm/opal.h>

#include "powernv.h"
#include "pci.h"

#define npu_to_phb(x) container_of(x, struct pnv_phb, npu)

/*
 * Other types of TCE cache invalidation are not functional in the
 * hardware.
 */
static struct pci_dev *get_pci_dev(struct device_node *dn)
{
	return PCI_DN(dn)->pcidev;
}

/* Given an NPU device, get the associated PCI device. */
struct pci_dev *pnv_pci_get_gpu_dev(struct pci_dev *npdev)
{
	struct device_node *dn;
	struct pci_dev *gpdev;

	if (WARN_ON(!npdev))
		return NULL;

	if (WARN_ON(!npdev->dev.of_node))
		return NULL;

	/* Get the associated PCI device */
	dn = of_parse_phandle(npdev->dev.of_node, "ibm,gpu", 0);
	if (!dn)
		return NULL;

	gpdev = get_pci_dev(dn);
	of_node_put(dn);

	return gpdev;
}
EXPORT_SYMBOL(pnv_pci_get_gpu_dev);

/* Given the real PCI device, get a linked NPU device. */
struct pci_dev *pnv_pci_get_npu_dev(struct pci_dev *gpdev, int index)
{
	struct device_node *dn;
	struct pci_dev *npdev;

	if (WARN_ON(!gpdev))
		return NULL;

	if (WARN_ON(!gpdev->dev.of_node))
		return NULL;

	/* Get the associated NPU device */
	dn = of_parse_phandle(gpdev->dev.of_node, "ibm,npu", index);
	if (!dn)
		return NULL;

	npdev = get_pci_dev(dn);
	of_node_put(dn);

	return npdev;
}
EXPORT_SYMBOL(pnv_pci_get_npu_dev);

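/*
 * NPU devices never do DMA on their own behalf; they share the iommu table
 * of the linked GPU device (see the file header). The DMA ops below are
 * deliberate stubs so that any direct DMA attempt on an NPU device logs a
 * one-time "unsupported" warning rather than silently doing nothing.
 */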
#define NPU_DMA_OP_UNSUPPORTED()	\
	dev_err_once(dev, "%s operation unsupported for NVLink devices\n", \
		__func__)

static void *dma_npu_alloc(struct device *dev, size_t size,
			   dma_addr_t *dma_handle, gfp_t flag,
			   unsigned long attrs)
{
	NPU_DMA_OP_UNSUPPORTED();
	return NULL;
}

static void dma_npu_free(struct device *dev, size_t size,
			 void *vaddr, dma_addr_t dma_handle,
			 unsigned long attrs)
{
	NPU_DMA_OP_UNSUPPORTED();
}

static dma_addr_t dma_npu_map_page(struct device *dev, struct page *page,
				   unsigned long offset, size_t size,
				   enum dma_data_direction direction,
				   unsigned long attrs)
{
	NPU_DMA_OP_UNSUPPORTED();
	return 0;
}

static int dma_npu_map_sg(struct device *dev, struct scatterlist *sglist,
			  int nelems, enum dma_data_direction direction,
			  unsigned long attrs)
{
	NPU_DMA_OP_UNSUPPORTED();
	return 0;
}

static int dma_npu_dma_supported(struct device *dev, u64 mask)
{
	NPU_DMA_OP_UNSUPPORTED();
	return 0;
}

static u64 dma_npu_get_required_mask(struct device *dev)
{
	NPU_DMA_OP_UNSUPPORTED();
	return 0;
}

static const struct dma_map_ops dma_npu_ops = {
	.map_page = dma_npu_map_page,
	.map_sg = dma_npu_map_sg,
	.alloc = dma_npu_alloc,
	.free = dma_npu_free,
	.dma_supported = dma_npu_dma_supported,
	.get_required_mask = dma_npu_get_required_mask,
};

/*
 * Returns the PE associated with the PCI device of the given
 * NPU. Also returns the linked GPU pci device if gpdev != NULL.
 */
static struct pnv_ioda_pe *get_gpu_pci_dev_and_pe(struct pnv_ioda_pe *npe,
						  struct pci_dev **gpdev)
{
	struct pnv_phb *phb;
	struct pci_controller *hose;
	struct pci_dev *pdev;
	struct pnv_ioda_pe *pe;
	struct pci_dn *pdn;

	pdev = pnv_pci_get_gpu_dev(npe->pdev);
	if (!pdev)
		return NULL;

	pdn = pci_get_pdn(pdev);
	if (WARN_ON(!pdn || pdn->pe_number == IODA_INVALID_PE))
		return NULL;

	hose = pci_bus_to_host(pdev->bus);
	phb = hose->private_data;
	pe = &phb->ioda.pe_array[pdn->pe_number];

	if (gpdev)
		*gpdev = pdev;

	return pe;
}

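/*
 * Program the given TCE table into the NPU PE's DMA window via OPAL and
 * link the table into the PE's table group so that subsequent TCE
 * invalidations also reach the NPU's cache.
 */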
long pnv_npu_set_window(struct pnv_ioda_pe *npe, int num,
		struct iommu_table *tbl)
{
	struct pnv_phb *phb = npe->phb;
	int64_t rc;
	const unsigned long size = tbl->it_indirect_levels ?
		tbl->it_level_size : tbl->it_size;
	const __u64 start_addr = tbl->it_offset << tbl->it_page_shift;
	const __u64 win_size = tbl->it_size << tbl->it_page_shift;

	pe_info(npe, "Setting up window %llx..%llx pg=%lx\n",
			start_addr, start_addr + win_size - 1,
			IOMMU_PAGE_SIZE(tbl));

	rc = opal_pci_map_pe_dma_window(phb->opal_id,
			npe->pe_number,
			npe->pe_number,
			tbl->it_indirect_levels + 1,
			__pa(tbl->it_base),
			size << 3,
			IOMMU_PAGE_SIZE(tbl));
	if (rc) {
		pe_err(npe, "Failed to configure TCE table, err %lld\n", rc);
		return rc;
	}
	pnv_pci_ioda2_tce_invalidate_entire(phb, false);

	/* Add the table to the list so its TCE cache will get invalidated */
	pnv_pci_link_table_and_group(phb->hose->node, num,
			tbl, &npe->table_group);

	return 0;
}

long pnv_npu_unset_window(struct pnv_ioda_pe *npe, int num)
{
	struct pnv_phb *phb = npe->phb;
	int64_t rc;

	pe_info(npe, "Removing DMA window\n");

	rc = opal_pci_map_pe_dma_window(phb->opal_id, npe->pe_number,
			npe->pe_number,
			0/* levels */, 0/* table address */,
			0/* table size */, 0/* page size */);
	if (rc) {
		pe_err(npe, "Unmapping failed, ret = %lld\n", rc);
		return rc;
	}
	pnv_pci_ioda2_tce_invalidate_entire(phb, false);

	pnv_pci_unlink_table_and_group(npe->table_group.tables[num],
			&npe->table_group);

	return 0;
}

/*
 * Enables 32 bit DMA on the NPU.
 */
static void pnv_npu_dma_set_32(struct pnv_ioda_pe *npe)
{
	struct pci_dev *gpdev;
	struct pnv_ioda_pe *gpe;
	int64_t rc;

	/*
	 * Find the associated PCI devices and get the dma window
	 * information from there.
	 */
	if (!npe->pdev || !(npe->flags & PNV_IODA_PE_DEV))
		return;

	gpe = get_gpu_pci_dev_and_pe(npe, &gpdev);
	if (!gpe)
		return;

	rc = pnv_npu_set_window(npe, 0, gpe->table_group.tables[0]);

	/*
	 * We don't initialise npu_pe->tce32_table as we always use
	 * dma_npu_ops which are nops.
	 */
	set_dma_ops(&npe->pdev->dev, &dma_npu_ops);
}

/*
 * Enables bypass mode on the NPU. The NPU only supports one
 * window per link, so bypass needs to be explicitly enabled or
 * disabled. Unlike for a PHB3, bypass and non-bypass modes can't be
 * active at the same time.
 */
static int pnv_npu_dma_set_bypass(struct pnv_ioda_pe *npe)
{
	struct pnv_phb *phb = npe->phb;
	int64_t rc = 0;
	phys_addr_t top = memblock_end_of_DRAM();

	if (phb->type != PNV_PHB_NPU || !npe->pdev)
		return -EINVAL;

	rc = pnv_npu_unset_window(npe, 0);
	if (rc != OPAL_SUCCESS)
		return rc;

	/* Enable the bypass window */

	top = roundup_pow_of_two(top);
	dev_info(&npe->pdev->dev, "Enabling bypass for PE %x\n",
			npe->pe_number);
	rc = opal_pci_map_pe_dma_window_real(phb->opal_id,
			npe->pe_number, npe->pe_number,
			0 /* bypass base */, top);

	if (rc == OPAL_SUCCESS)
		pnv_pci_ioda2_tce_invalidate_entire(phb, false);

	return rc;
}

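/*
 * Walk every NPU device linked to the given GPU and put it into either DMA
 * bypass mode or the 32-bit TCE window, matching the DMA mode selected for
 * the GPU itself.
 */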
void pnv_npu_try_dma_set_bypass(struct pci_dev *gpdev, bool bypass)
{
	int i;
	struct pnv_phb *phb;
	struct pci_dn *pdn;
	struct pnv_ioda_pe *npe;
	struct pci_dev *npdev;

	for (i = 0; ; ++i) {
		npdev = pnv_pci_get_npu_dev(gpdev, i);

		if (!npdev)
			break;

		pdn = pci_get_pdn(npdev);
		if (WARN_ON(!pdn || pdn->pe_number == IODA_INVALID_PE))
			return;

		phb = pci_bus_to_host(npdev->bus)->private_data;

		/* We only do bypass if it's enabled on the linked device */
		npe = &phb->ioda.pe_array[pdn->pe_number];

		if (bypass) {
			dev_info(&npdev->dev,
					"Using 64-bit DMA iommu bypass\n");
			pnv_npu_dma_set_bypass(npe);
		} else {
			dev_info(&npdev->dev, "Using 32-bit DMA via iommu\n");
			pnv_npu_dma_set_32(npe);
		}
	}
}

/* Switch ownership from platform code to external user (e.g. VFIO) */
void pnv_npu_take_ownership(struct pnv_ioda_pe *npe)
{
	struct pnv_phb *phb = npe->phb;
	int64_t rc;

	/*
	 * Note: NPU has just a single TVE in the hardware which means that
	 * while used by the kernel, it can have either a 32bit window or
	 * DMA bypass but never both. So we deconfigure the 32bit window
	 * only if it was enabled at the moment of ownership change.
	 */
	if (npe->table_group.tables[0]) {
		pnv_npu_unset_window(npe, 0);
		return;
	}

	/* Disable bypass */
	rc = opal_pci_map_pe_dma_window_real(phb->opal_id,
			npe->pe_number, npe->pe_number,
			0 /* bypass base */, 0);
	if (rc) {
		pe_err(npe, "Failed to disable bypass, err %lld\n", rc);
		return;
	}
	pnv_pci_ioda2_tce_invalidate_entire(npe->phb, false);
}

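/*
 * Add every NPU device that links to the same GPU as this NPU PE to the
 * GPU PE's IOMMU group and return that GPU PE.
 */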
struct pnv_ioda_pe *pnv_pci_npu_setup_iommu(struct pnv_ioda_pe *npe)
{
	struct pnv_phb *phb = npe->phb;
	struct pci_bus *pbus = phb->hose->bus;
	struct pci_dev *npdev, *gpdev = NULL, *gptmp;
	struct pnv_ioda_pe *gpe = get_gpu_pci_dev_and_pe(npe, &gpdev);

	if (!gpe || !gpdev)
		return NULL;

	list_for_each_entry(npdev, &pbus->devices, bus_list) {
		gptmp = pnv_pci_get_gpu_dev(npdev);

		if (gptmp != gpdev)
			continue;

		pe_info(gpe, "Attached NPU %s\n", dev_name(&npdev->dev));
		iommu_group_add_device(gpe->table_group.group, &npdev->dev);
	}

	return gpe;
}

/* Maximum number of nvlinks per npu */
#define NV_MAX_LINKS 6

/* Maximum index of npu2 hosts in the system. Always < NV_MAX_NPUS */
static int max_npu2_index;

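/*
 * Per-mm state shared by all NPUs the process is active on. npdev[][] is
 * indexed by NPU index and link index; mmio_invalidate() walks it to work
 * out which NPUs need an ATSD launched.
 */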
struct npu_context {
	struct mm_struct *mm;
	struct pci_dev *npdev[NV_MAX_NPUS][NV_MAX_LINKS];
	struct mmu_notifier mn;
	struct kref kref;

	/* Callback to stop translation requests on a given GPU */
	struct npu_context *(*release_cb)(struct npu_context *, void *);

	/*
	 * Private pointer passed to the above callback for usage by
	 * device drivers.
	 */
	void *priv;
};

/*
 * Find a free MMIO ATSD register and mark it in use. Return -ENOSPC
 * if none are available.
 */
static int get_mmio_atsd_reg(struct npu *npu)
{
	int i;

	for (i = 0; i < npu->mmio_atsd_count; i++) {
		if (!test_and_set_bit(i, &npu->mmio_atsd_usage))
			return i;
	}

	return -ENOSPC;
}

static void put_mmio_atsd_reg(struct npu *npu, int reg)
{
	clear_bit(reg, &npu->mmio_atsd_usage);
}

/* MMIO ATSD register offsets */
#define XTS_ATSD_AVA 1
#define XTS_ATSD_STAT 2

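/*
 * Spin until a free MMIO ATSD register is available, write the address to
 * invalidate (if any) followed by the launch word to kick off the shootdown,
 * then return the register index. The caller polls XTS_ATSD_STAT and
 * releases the register with put_mmio_atsd_reg() once the shootdown is done.
 */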
static int mmio_launch_invalidate(struct npu *npu, unsigned long launch,
			unsigned long va)
{
	int mmio_atsd_reg;

	do {
		mmio_atsd_reg = get_mmio_atsd_reg(npu);
		cpu_relax();
	} while (mmio_atsd_reg < 0);

	__raw_writeq(cpu_to_be64(va),
		npu->mmio_atsd_regs[mmio_atsd_reg] + XTS_ATSD_AVA);
	eieio();
	__raw_writeq(cpu_to_be64(launch), npu->mmio_atsd_regs[mmio_atsd_reg]);

	return mmio_atsd_reg;
}

static int mmio_invalidate_pid(struct npu *npu, unsigned long pid)
{
	unsigned long launch;

	/* IS set to invalidate matching PID */
	launch = PPC_BIT(12);

	/* PRS set to process-scoped */
	launch |= PPC_BIT(13);

	/* AP */
	launch |= (u64) mmu_get_ap(mmu_virtual_psize) << PPC_BITLSHIFT(17);

	/* PID */
	launch |= pid << PPC_BITLSHIFT(38);

	/* Invalidating the entire process doesn't use a va */
	return mmio_launch_invalidate(npu, launch, 0);
}

static int mmio_invalidate_va(struct npu *npu, unsigned long va,
			unsigned long pid)
{
	unsigned long launch;

	/* IS set to invalidate target VA */
	launch = 0;

	/* PRS set to process scoped */
	launch |= PPC_BIT(13);

	/* AP */
	launch |= (u64) mmu_get_ap(mmu_virtual_psize) << PPC_BITLSHIFT(17);

	/* PID */
	launch |= pid << PPC_BITLSHIFT(38);

	return mmio_launch_invalidate(npu, launch, va);
}

#define mn_to_npu_context(x) container_of(x, struct npu_context, mn)

/*
 * Invalidate either a single address or an entire PID depending on
 * the value of va.
 */
static void mmio_invalidate(struct npu_context *npu_context, int va,
			unsigned long address)
{
	int i, j, reg;
	struct npu *npu;
	struct pnv_phb *nphb;
	struct pci_dev *npdev;
	struct {
		struct npu *npu;
		int reg;
	} mmio_atsd_reg[NV_MAX_NPUS];
	unsigned long pid = npu_context->mm->context.id;

	/*
	 * Loop over all the NPUs this process is active on and launch
	 * an invalidate.
	 */
	for (i = 0; i <= max_npu2_index; i++) {
		mmio_atsd_reg[i].reg = -1;
		for (j = 0; j < NV_MAX_LINKS; j++) {
			npdev = npu_context->npdev[i][j];
			if (!npdev)
				continue;

			nphb = pci_bus_to_host(npdev->bus)->private_data;
			npu = &nphb->npu;
			mmio_atsd_reg[i].npu = npu;

			if (va)
				mmio_atsd_reg[i].reg =
					mmio_invalidate_va(npu, address, pid);
			else
				mmio_atsd_reg[i].reg =
					mmio_invalidate_pid(npu, pid);

			/*
			 * The NPU hardware forwards the shootdown to all GPUs
			 * so we only have to launch one shootdown per NPU.
			 */
			break;
		}
	}

	/*
	 * Unfortunately the nest mmu does not support flushing specific
	 * addresses so we have to flush the whole mm.
	 */
	flush_tlb_mm(npu_context->mm);

	/* Wait for all invalidations to complete */
	for (i = 0; i <= max_npu2_index; i++) {
		if (mmio_atsd_reg[i].reg < 0)
			continue;

		/* Wait for completion */
		npu = mmio_atsd_reg[i].npu;
		reg = mmio_atsd_reg[i].reg;
		while (__raw_readq(npu->mmio_atsd_regs[reg] + XTS_ATSD_STAT))
			cpu_relax();
		put_mmio_atsd_reg(npu, reg);
	}
}

static void pnv_npu2_mn_release(struct mmu_notifier *mn,
				struct mm_struct *mm)
{
	struct npu_context *npu_context = mn_to_npu_context(mn);

	/* Call into device driver to stop requests to the NMMU */
	if (npu_context->release_cb)
		npu_context->release_cb(npu_context, npu_context->priv);

	/*
	 * There should be no more translation requests for this PID, but we
	 * need to ensure any entries for it are removed from the TLB.
	 */
	mmio_invalidate(npu_context, 0, 0);
}

static void pnv_npu2_mn_change_pte(struct mmu_notifier *mn,
				struct mm_struct *mm,
				unsigned long address,
				pte_t pte)
{
	struct npu_context *npu_context = mn_to_npu_context(mn);

	mmio_invalidate(npu_context, 1, address);
}

static void pnv_npu2_mn_invalidate_page(struct mmu_notifier *mn,
					struct mm_struct *mm,
					unsigned long address)
{
	struct npu_context *npu_context = mn_to_npu_context(mn);

	mmio_invalidate(npu_context, 1, address);
}

static void pnv_npu2_mn_invalidate_range(struct mmu_notifier *mn,
					struct mm_struct *mm,
					unsigned long start, unsigned long end)
{
	struct npu_context *npu_context = mn_to_npu_context(mn);
	unsigned long address;

	for (address = start; address <= end; address += PAGE_SIZE)
		mmio_invalidate(npu_context, 1, address);
}

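/*
 * MMU notifier ops used to mirror host-side invalidations of the mm into
 * the NPU/GPU TLBs via ATSDs.
 */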
static const struct mmu_notifier_ops nv_nmmu_notifier_ops = {
	.release = pnv_npu2_mn_release,
	.change_pte = pnv_npu2_mn_change_pte,
	.invalidate_page = pnv_npu2_mn_invalidate_page,
	.invalidate_range = pnv_npu2_mn_invalidate_range,
};

/*
 * Call into OPAL to setup the nmmu context for the current task in
 * the NPU. This must be called to setup the context tables before the
 * GPU issues ATRs. gpdev should be a pointer to the PCIe GPU device.
 *
 * A release callback should be registered to allow a device driver to
 * be notified that it should not launch any new translation requests
 * as the final TLB invalidate is about to occur.
 *
 * Returns an error if no contexts are currently available, or an
 * npu_context which should be passed to pnv_npu2_handle_fault().
 *
 * mmap_sem must be held in write mode.
 */
struct npu_context *pnv_npu2_init_context(struct pci_dev *gpdev,
			unsigned long flags,
			struct npu_context *(*cb)(struct npu_context *, void *),
			void *priv)
{
	int rc;
	u32 nvlink_index;
	struct device_node *nvlink_dn;
	struct mm_struct *mm = current->mm;
	struct pnv_phb *nphb;
	struct npu *npu;
	struct npu_context *npu_context;

	/*
	 * At present we don't support GPUs connected to multiple NPUs and I'm
	 * not sure the hardware does either.
	 */
	struct pci_dev *npdev = pnv_pci_get_npu_dev(gpdev, 0);

	if (!firmware_has_feature(FW_FEATURE_OPAL))
		return ERR_PTR(-ENODEV);

	if (!npdev)
		/* No nvlink associated with this GPU device */
		return ERR_PTR(-ENODEV);

	if (!mm) {
		/* kernel thread contexts are not supported */
		return ERR_PTR(-EINVAL);
	}

	nphb = pci_bus_to_host(npdev->bus)->private_data;
	npu = &nphb->npu;

	/*
	 * Setup the NPU context table for a particular GPU. These need to be
	 * per-GPU as we need the tables to filter ATSDs when there are no
	 * active contexts on a particular GPU.
	 */
	rc = opal_npu_init_context(nphb->opal_id, mm->context.id, flags,
				PCI_DEVID(gpdev->bus->number, gpdev->devfn));
	if (rc < 0)
		return ERR_PTR(-ENOSPC);

	/*
	 * We store the npu pci device so we can more easily get at the
	 * associated npus.
	 */
	npu_context = mm->context.npu_context;
	if (!npu_context) {
		npu_context = kzalloc(sizeof(struct npu_context), GFP_KERNEL);
		if (!npu_context)
			return ERR_PTR(-ENOMEM);

		mm->context.npu_context = npu_context;
		npu_context->mm = mm;
		npu_context->mn.ops = &nv_nmmu_notifier_ops;
		__mmu_notifier_register(&npu_context->mn, mm);
		kref_init(&npu_context->kref);
	} else {
		kref_get(&npu_context->kref);
	}

	npu_context->release_cb = cb;
	npu_context->priv = priv;
	nvlink_dn = of_parse_phandle(npdev->dev.of_node, "ibm,nvlink", 0);
	if (WARN_ON(of_property_read_u32(nvlink_dn, "ibm,npu-link-index",
							&nvlink_index)))
		return ERR_PTR(-ENODEV);
	npu_context->npdev[npu->index][nvlink_index] = npdev;

	return npu_context;
}
EXPORT_SYMBOL(pnv_npu2_init_context);
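
/*
 * A minimal usage sketch for a GPU device driver (hypothetical driver code;
 * the callback name, drvdata pointer and flags value are illustrative only,
 * not taken from any real driver):
 *
 *	struct npu_context *ctx;
 *
 *	down_write(&current->mm->mmap_sem);
 *	ctx = pnv_npu2_init_context(gpdev, 0, my_release_cb, drvdata);
 *	up_write(&current->mm->mmap_sem);
 *	if (IS_ERR(ctx))
 *		return PTR_ERR(ctx);
 *	...
 *	pnv_npu2_destroy_context(ctx, gpdev);
 *
 * my_release_cb() must match the release_cb prototype above and stop the
 * GPU issuing new translation requests before returning.
 */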

static void pnv_npu2_release_context(struct kref *kref)
{
	struct npu_context *npu_context =
		container_of(kref, struct npu_context, kref);

	npu_context->mm->context.npu_context = NULL;
	mmu_notifier_unregister(&npu_context->mn,
				npu_context->mm);

	kfree(npu_context);
}

void pnv_npu2_destroy_context(struct npu_context *npu_context,
			struct pci_dev *gpdev)
{
	struct pnv_phb *nphb;
	struct npu *npu;
	struct pci_dev *npdev = pnv_pci_get_npu_dev(gpdev, 0);
	struct device_node *nvlink_dn;
	u32 nvlink_index;

	if (WARN_ON(!npdev))
		return;

	if (!firmware_has_feature(FW_FEATURE_OPAL))
		return;

	nphb = pci_bus_to_host(npdev->bus)->private_data;
	npu = &nphb->npu;
	nvlink_dn = of_parse_phandle(npdev->dev.of_node, "ibm,nvlink", 0);
	if (WARN_ON(of_property_read_u32(nvlink_dn, "ibm,npu-link-index",
							&nvlink_index)))
		return;
	npu_context->npdev[npu->index][nvlink_index] = NULL;
	opal_npu_destroy_context(nphb->opal_id, npu_context->mm->context.id,
				PCI_DEVID(gpdev->bus->number, gpdev->devfn));
	kref_put(&npu_context->kref, pnv_npu2_release_context);
}
EXPORT_SYMBOL(pnv_npu2_destroy_context);

/*
 * Assumes mmap_sem is held for the context's associated mm.
 */
int pnv_npu2_handle_fault(struct npu_context *context, uintptr_t *ea,
			unsigned long *flags, unsigned long *status, int count)
{
	u64 rc = 0, result = 0;
	int i, is_write;
	struct page *page[1];

	/* mmap_sem should be held so the mm_struct must be present */
	struct mm_struct *mm = context->mm;

	if (!firmware_has_feature(FW_FEATURE_OPAL))
		return -ENODEV;

	WARN_ON(!rwsem_is_locked(&mm->mmap_sem));

	for (i = 0; i < count; i++) {
		is_write = flags[i] & NPU2_WRITE;
		rc = get_user_pages_remote(NULL, mm, ea[i], 1,
					is_write ? FOLL_WRITE : 0,
					page, NULL, NULL);

		/*
		 * To support virtualised environments we will have to do an
		 * access to the page to ensure it gets faulted into the
		 * hypervisor. For the moment virtualisation is not supported
		 * in other areas so leave the access out.
		 */
		if (rc != 1) {
			status[i] = rc;
			result = -EFAULT;
			continue;
		}

		status[i] = 0;
		put_page(page[0]);
	}

	return result;
}
EXPORT_SYMBOL(pnv_npu2_handle_fault);

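/*
 * Per-PHB NPU2 setup: map each GPU behind this PHB to the LPAR, ioremap the
 * MMIO ATSD registers described by the "ibm,mmio-atsd" property, and record
 * the NPU index used to index npu_context->npdev[].
 */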
int pnv_npu2_init(struct pnv_phb *phb)
{
	unsigned int i;
	u64 mmio_atsd;
	struct device_node *dn;
	struct pci_dev *gpdev;
	static int npu_index;
	uint64_t rc = 0;

	for_each_child_of_node(phb->hose->dn, dn) {
		gpdev = pnv_pci_get_gpu_dev(get_pci_dev(dn));
		if (gpdev) {
			rc = opal_npu_map_lpar(phb->opal_id,
				PCI_DEVID(gpdev->bus->number, gpdev->devfn),
				0, 0);
			if (rc)
				dev_err(&gpdev->dev,
					"Error %lld mapping device to LPAR\n",
					rc);
		}
	}

	for (i = 0; !of_property_read_u64_index(phb->hose->dn, "ibm,mmio-atsd",
							i, &mmio_atsd); i++)
		phb->npu.mmio_atsd_regs[i] = ioremap(mmio_atsd, 32);

	pr_info("NPU%lld: Found %d MMIO ATSD registers\n", phb->opal_id, i);
	phb->npu.mmio_atsd_count = i;
	phb->npu.mmio_atsd_usage = 0;
	npu_index++;
	if (WARN_ON(npu_index >= NV_MAX_NPUS))
		return -ENOSPC;
	max_npu2_index = npu_index;
	phb->npu.index = npu_index;

	return 0;
}