/*
 * This file implements the DMA operations for NVLink devices. The NPU
 * devices all point to the same iommu table as the parent PCI device.
 *
 * Copyright Alistair Popple, IBM Corporation 2015.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
#include <linux/slab.h>
#include <linux/mmu_notifier.h>
#include <linux/mmu_context.h>
#include <linux/of.h>
#include <linux/export.h>
#include <linux/pci.h>
#include <linux/memblock.h>
#include <linux/iommu.h>

#include <asm/tlb.h>
#include <asm/powernv.h>
#include <asm/opal.h>
#include <asm/iommu.h>
#include <asm/pnv-pci.h>
#include <asm/msi_bitmap.h>

#include "powernv.h"
#include "pci.h"
#define npu_to_phb(x) container_of(x, struct pnv_phb, npu)
/*
 * Other types of TCE cache invalidation are not functional in the
 * kernel.
 */
static struct pci_dev *get_pci_dev(struct device_node *dn)
{
        return PCI_DN(dn)->pcidev;
}
/* Given a NPU device get the associated PCI device. */
struct pci_dev *pnv_pci_get_gpu_dev(struct pci_dev *npdev)
{
        struct device_node *dn;
        struct pci_dev *gpdev;

        if (WARN_ON(!npdev))
                return NULL;

        if (WARN_ON(!npdev->dev.of_node))
                return NULL;

        /* Get associated PCI device */
        dn = of_parse_phandle(npdev->dev.of_node, "ibm,gpu", 0);
        if (!dn)
                return NULL;

        gpdev = get_pci_dev(dn);
        of_node_put(dn);

        return gpdev;
}
EXPORT_SYMBOL(pnv_pci_get_gpu_dev);
/* Given the real PCI device get a linked NPU device. */
struct pci_dev *pnv_pci_get_npu_dev(struct pci_dev *gpdev, int index)
{
        struct device_node *dn;
        struct pci_dev *npdev;

        if (WARN_ON(!gpdev))
                return NULL;

        if (WARN_ON(!gpdev->dev.of_node))
                return NULL;

        /* Get associated PCI device */
        dn = of_parse_phandle(gpdev->dev.of_node, "ibm,npu", index);
        if (!dn)
                return NULL;

        npdev = get_pci_dev(dn);
        of_node_put(dn);

        return npdev;
}
EXPORT_SYMBOL(pnv_pci_get_npu_dev);
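
/*
 * Illustrative usage sketch (not part of the original source): a GPU
 * may be linked to several NPU devices, so callers typically walk the
 * link index until the lookup fails, e.g.:
 *
 *      int i;
 *      struct pci_dev *npdev;
 *
 *      for (i = 0; (npdev = pnv_pci_get_npu_dev(gpdev, i)); i++) {
 *              // ... operate on each linked NPU device ...
 *      }
 *
 * pnv_npu_try_dma_set_bypass() below uses this pattern.
 */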
#define NPU_DMA_OP_UNSUPPORTED()        \
        dev_err_once(dev, "%s operation unsupported for NVLink devices\n", \
                __func__)
static void *dma_npu_alloc(struct device *dev, size_t size,
                           dma_addr_t *dma_handle, gfp_t flag,
                           unsigned long attrs)
{
        NPU_DMA_OP_UNSUPPORTED();
        return NULL;
}
static void dma_npu_free(struct device *dev, size_t size,
                         void *vaddr, dma_addr_t dma_handle,
                         unsigned long attrs)
{
        NPU_DMA_OP_UNSUPPORTED();
}
static dma_addr_t dma_npu_map_page(struct device *dev, struct page *page,
                                   unsigned long offset, size_t size,
                                   enum dma_data_direction direction,
                                   unsigned long attrs)
{
        NPU_DMA_OP_UNSUPPORTED();
        return 0;
}
static int dma_npu_map_sg(struct device *dev, struct scatterlist *sglist,
                          int nelems, enum dma_data_direction direction,
                          unsigned long attrs)
{
        NPU_DMA_OP_UNSUPPORTED();
        return 0;
}
static int dma_npu_dma_supported(struct device *dev, u64 mask)
{
        NPU_DMA_OP_UNSUPPORTED();
        return 0;
}
static u64 dma_npu_get_required_mask(struct device *dev)
{
        NPU_DMA_OP_UNSUPPORTED();
        return 0;
}
static const struct dma_map_ops dma_npu_ops = {
        .map_page               = dma_npu_map_page,
        .map_sg                 = dma_npu_map_sg,
        .alloc                  = dma_npu_alloc,
        .free                   = dma_npu_free,
        .dma_supported          = dma_npu_dma_supported,
        .get_required_mask      = dma_npu_get_required_mask,
};
/*
 * Returns the PE associated with the PCI device of the given
 * NPU. Also returns the linked pci device in *gpdev if gpdev != NULL.
 */
static struct pnv_ioda_pe *get_gpu_pci_dev_and_pe(struct pnv_ioda_pe *npe,
                                                  struct pci_dev **gpdev)
{
        struct pnv_phb *phb;
        struct pci_controller *hose;
        struct pci_dev *pdev;
        struct pnv_ioda_pe *pe;
        struct pci_dn *pdn;

        pdev = pnv_pci_get_gpu_dev(npe->pdev);
        if (!pdev)
                return NULL;

        pdn = pci_get_pdn(pdev);
        if (WARN_ON(!pdn || pdn->pe_number == IODA_INVALID_PE))
                return NULL;

        hose = pci_bus_to_host(pdev->bus);
        phb = hose->private_data;
        pe = &phb->ioda.pe_array[pdn->pe_number];

        if (gpdev)
                *gpdev = pdev;

        return pe;
}
long pnv_npu_set_window(struct pnv_ioda_pe *npe, int num,
                struct iommu_table *tbl)
{
        struct pnv_phb *phb = npe->phb;
        int64_t rc;
        const unsigned long size = tbl->it_indirect_levels ?
                tbl->it_level_size : tbl->it_size;
        const __u64 start_addr = tbl->it_offset << tbl->it_page_shift;
        const __u64 win_size = tbl->it_size << tbl->it_page_shift;

        pe_info(npe, "Setting up window %llx..%llx pg=%lx\n",
                        start_addr, start_addr + win_size - 1,
                        IOMMU_PAGE_SIZE(tbl));

        rc = opal_pci_map_pe_dma_window(phb->opal_id,
                        npe->pe_number,
                        npe->pe_number,
                        tbl->it_indirect_levels + 1,
                        __pa(tbl->it_base),
                        size << 3,
                        IOMMU_PAGE_SIZE(tbl));
        if (rc) {
                pe_err(npe, "Failed to configure TCE table, err %lld\n", rc);
                return rc;
        }
        pnv_pci_ioda2_tce_invalidate_entire(phb, false);

        /* Add the table to the list so its TCE cache will get invalidated */
        pnv_pci_link_table_and_group(phb->hose->node, num,
                        tbl, &npe->table_group);

        return 0;
}
long pnv_npu_unset_window(struct pnv_ioda_pe *npe, int num)
{
        struct pnv_phb *phb = npe->phb;
        int64_t rc;

        pe_info(npe, "Removing DMA window\n");

        rc = opal_pci_map_pe_dma_window(phb->opal_id, npe->pe_number,
                        npe->pe_number,
                        0/* levels */, 0/* table address */,
                        0/* table size */, 0/* page size */);
        if (rc) {
                pe_err(npe, "Unmapping failed, ret = %lld\n", rc);
                return rc;
        }
        pnv_pci_ioda2_tce_invalidate_entire(phb, false);

        pnv_pci_unlink_table_and_group(npe->table_group.tables[num],
                        &npe->table_group);

        return 0;
}
/*
 * Enables 32 bit DMA on NPU.
 */
static void pnv_npu_dma_set_32(struct pnv_ioda_pe *npe)
{
        struct pci_dev *gpdev;
        struct pnv_ioda_pe *gpe;
        int64_t rc;

        /*
         * Find the associated PCI devices and get the dma window
         * information from there.
         */
        if (!npe->pdev || !(npe->flags & PNV_IODA_PE_DEV))
                return;

        gpe = get_gpu_pci_dev_and_pe(npe, &gpdev);
        if (!gpe)
                return;

        rc = pnv_npu_set_window(npe, 0, gpe->table_group.tables[0]);

        /*
         * We don't initialise npu_pe->tce32_table as we always use
         * dma_npu_ops which are nops.
         */
        set_dma_ops(&npe->pdev->dev, &dma_npu_ops);
}
/*
 * Enables bypass mode on the NPU. The NPU only supports one
 * window per link, so bypass needs to be explicitly enabled or
 * disabled. Unlike for a PHB3, bypass and non-bypass modes can't be
 * active at the same time.
 */
static int pnv_npu_dma_set_bypass(struct pnv_ioda_pe *npe)
{
        struct pnv_phb *phb = npe->phb;
        int64_t rc = 0;
        phys_addr_t top = memblock_end_of_DRAM();

        if (phb->type != PNV_PHB_NPU || !npe->pdev)
                return -EINVAL;

        rc = pnv_npu_unset_window(npe, 0);
        if (rc != OPAL_SUCCESS)
                return rc;

        /* Enable the bypass window */

        top = roundup_pow_of_two(top);
        dev_info(&npe->pdev->dev, "Enabling bypass for PE %x\n",
                        npe->pe_number);
        rc = opal_pci_map_pe_dma_window_real(phb->opal_id,
                        npe->pe_number, npe->pe_number,
                        0 /* bypass base */, top);

        if (rc == OPAL_SUCCESS)
                pnv_pci_ioda2_tce_invalidate_entire(phb, false);

        return rc;
}
void pnv_npu_try_dma_set_bypass(struct pci_dev *gpdev, bool bypass)
{
        int i;
        struct pnv_phb *phb;
        struct pci_dn *pdn;
        struct pnv_ioda_pe *npe;
        struct pci_dev *npdev;

        for (i = 0; ; ++i) {
                npdev = pnv_pci_get_npu_dev(gpdev, i);

                if (!npdev)
                        break;

                pdn = pci_get_pdn(npdev);
                if (WARN_ON(!pdn || pdn->pe_number == IODA_INVALID_PE))
                        return;

                phb = pci_bus_to_host(npdev->bus)->private_data;

                /* We only do bypass if it's enabled on the linked device */
                npe = &phb->ioda.pe_array[pdn->pe_number];

                if (bypass) {
                        dev_info(&npdev->dev,
                                        "Using 64-bit DMA iommu bypass\n");
                        pnv_npu_dma_set_bypass(npe);
                } else {
                        dev_info(&npdev->dev, "Using 32-bit DMA via iommu\n");
                        pnv_npu_dma_set_32(npe);
                }
        }
}
/* Switch ownership from platform code to external user (e.g. VFIO) */
void pnv_npu_take_ownership(struct pnv_ioda_pe *npe)
{
        struct pnv_phb *phb = npe->phb;
        int64_t rc;

        /*
         * Note: NPU has just a single TVE in the hardware which means that
         * while used by the kernel, it can have either 32bit window or
         * DMA bypass but never both. So we deconfigure 32bit window only
         * if it was enabled at the moment of ownership change.
         */
        if (npe->table_group.tables[0]) {
                pnv_npu_unset_window(npe, 0);
                return;
        }

        /* Disable bypass */
        rc = opal_pci_map_pe_dma_window_real(phb->opal_id,
                        npe->pe_number, npe->pe_number,
                        0 /* bypass base */, 0);
        if (rc) {
                pe_err(npe, "Failed to disable bypass, err %lld\n", rc);
                return;
        }
        pnv_pci_ioda2_tce_invalidate_entire(npe->phb, false);
}
struct pnv_ioda_pe *pnv_pci_npu_setup_iommu(struct pnv_ioda_pe *npe)
{
        struct pnv_phb *phb = npe->phb;
        struct pci_bus *pbus = phb->hose->bus;
        struct pci_dev *npdev, *gpdev = NULL, *gptmp;
        struct pnv_ioda_pe *gpe = get_gpu_pci_dev_and_pe(npe, &gpdev);

        if (!gpe || !gpdev)
                return NULL;

        list_for_each_entry(npdev, &pbus->devices, bus_list) {
                gptmp = pnv_pci_get_gpu_dev(npdev);

                if (gptmp != gpdev)
                        continue;

                pe_info(gpe, "Attached NPU %s\n", dev_name(&npdev->dev));
                iommu_group_add_device(gpe->table_group.group, &npdev->dev);
        }

        return gpe;
}
/* Maximum number of nvlinks per npu */
#define NV_MAX_LINKS 6

/* Maximum index of npu2 hosts in the system. Always < NV_MAX_NPUS */
static int max_npu2_index;
struct npu_context {
        struct mm_struct *mm;
        struct pci_dev *npdev[NV_MAX_NPUS][NV_MAX_LINKS];
        struct mmu_notifier mn;
        struct kref kref;

        /* Callback to stop translation requests on a given GPU */
        struct npu_context *(*release_cb)(struct npu_context *, void *);

        /*
         * Private pointer passed to the above callback for usage by
         * device drivers.
         */
        void *priv;
};
/*
 * Find a free MMIO ATSD register and mark it in use. Return -ENOSPC
 * if none are available.
 */
static int get_mmio_atsd_reg(struct npu *npu)
{
        int i;

        for (i = 0; i < npu->mmio_atsd_count; i++) {
                if (!test_and_set_bit(i, &npu->mmio_atsd_usage))
                        return i;
        }

        return -ENOSPC;
}
static void put_mmio_atsd_reg(struct npu *npu, int reg)
{
        clear_bit(reg, &npu->mmio_atsd_usage);
}
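
/*
 * Illustrative sketch (not part of the original source): the ATSD
 * registers follow an allocate -> program -> poll -> release
 * lifecycle:
 *
 *      int reg;
 *
 *      do {
 *              reg = get_mmio_atsd_reg(npu);   // -ENOSPC while all busy
 *      } while (reg < 0);
 *      // ... write the AVA and launch words, then poll XTS_ATSD_STAT ...
 *      put_mmio_atsd_reg(npu, reg);            // mark the register free
 *
 * mmio_launch_invalidate() below implements the allocate and program
 * steps; mmio_invalidate() performs the completion poll and release.
 */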
/* MMIO ATSD register offsets */
#define XTS_ATSD_AVA  1
#define XTS_ATSD_STAT 2
static int mmio_launch_invalidate(struct npu *npu, unsigned long launch,
                                unsigned long va)
{
        int mmio_atsd_reg;

        do {
                mmio_atsd_reg = get_mmio_atsd_reg(npu);
                cpu_relax();
        } while (mmio_atsd_reg < 0);

        __raw_writeq(cpu_to_be64(va),
                npu->mmio_atsd_regs[mmio_atsd_reg] + XTS_ATSD_AVA);
        eieio();
        __raw_writeq(cpu_to_be64(launch), npu->mmio_atsd_regs[mmio_atsd_reg]);

        return mmio_atsd_reg;
}
static int mmio_invalidate_pid(struct npu *npu, unsigned long pid)
{
        unsigned long launch;

        /* IS set to invalidate matching PID */
        launch = PPC_BIT(12);

        /* PRS set to process-scoped */
        launch |= PPC_BIT(13);

        /* AP */
        launch |= (u64) mmu_get_ap(mmu_virtual_psize) << PPC_BITLSHIFT(17);

        /* PID */
        launch |= pid << PPC_BITLSHIFT(38);

        /* Invalidating the entire process doesn't use a va */
        return mmio_launch_invalidate(npu, launch, 0);
}
static int mmio_invalidate_va(struct npu *npu, unsigned long va,
                        unsigned long pid)
{
        unsigned long launch;

        /* IS set to invalidate target VA */
        launch = 0;

        /* PRS set to process scoped */
        launch |= PPC_BIT(13);

        /* AP */
        launch |= (u64) mmu_get_ap(mmu_virtual_psize) << PPC_BITLSHIFT(17);

        /* PID */
        launch |= pid << PPC_BITLSHIFT(38);

        return mmio_launch_invalidate(npu, launch, va);
}
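
/*
 * Reading aid (not part of the original source): the launch word
 * assembled by the two helpers above packs, in PPC_BIT big-endian
 * numbering:
 *
 *      bit 12                 IS   - set to invalidate by matching PID,
 *                                    clear to invalidate a single VA
 *      bit 13                 PRS  - process-scoped translation
 *      field ending at bit 17 AP   - actual page size from mmu_get_ap()
 *      field ending at bit 38 PID  - the process id being invalidated
 */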
#define mn_to_npu_context(x) container_of(x, struct npu_context, mn)
/*
 * Invalidate either a single address or an entire PID depending on
 * the value of va.
 */
static void mmio_invalidate(struct npu_context *npu_context, int va,
                        unsigned long address)
{
        int i, j, reg;
        struct npu *npu;
        struct pnv_phb *nphb;
        struct pci_dev *npdev;
        struct {
                struct npu *npu;
                int reg;
        } mmio_atsd_reg[NV_MAX_NPUS];
        unsigned long pid = npu_context->mm->context.id;

        /*
         * Loop over all the NPUs this process is active on and launch
         * an invalidate.
         */
        for (i = 0; i <= max_npu2_index; i++) {
                mmio_atsd_reg[i].reg = -1;
                for (j = 0; j < NV_MAX_LINKS; j++) {
                        npdev = npu_context->npdev[i][j];
                        if (!npdev)
                                continue;

                        nphb = pci_bus_to_host(npdev->bus)->private_data;
                        npu = &nphb->npu;
                        mmio_atsd_reg[i].npu = npu;

                        if (va)
                                mmio_atsd_reg[i].reg =
                                        mmio_invalidate_va(npu, address, pid);
                        else
                                mmio_atsd_reg[i].reg =
                                        mmio_invalidate_pid(npu, pid);

                        /*
                         * The NPU hardware forwards the shootdown to all GPUs
                         * so we only have to launch one shootdown per NPU.
                         */
                        break;
                }
        }

        /*
         * Unfortunately the nest mmu does not support flushing specific
         * addresses so we have to flush the whole mm.
         */
        flush_tlb_mm(npu_context->mm);

        /* Wait for all invalidations to complete */
        for (i = 0; i <= max_npu2_index; i++) {
                if (mmio_atsd_reg[i].reg < 0)
                        continue;

                /* Wait for completion */
                npu = mmio_atsd_reg[i].npu;
                reg = mmio_atsd_reg[i].reg;
                while (__raw_readq(npu->mmio_atsd_regs[reg] + XTS_ATSD_STAT))
                        cpu_relax();

                put_mmio_atsd_reg(npu, reg);
        }
}
static void pnv_npu2_mn_release(struct mmu_notifier *mn,
                                struct mm_struct *mm)
{
        struct npu_context *npu_context = mn_to_npu_context(mn);

        /* Call into device driver to stop requests to the NMMU */
        if (npu_context->release_cb)
                npu_context->release_cb(npu_context, npu_context->priv);

        /*
         * There should be no more translation requests for this PID, but we
         * need to ensure any entries for it are removed from the TLB.
         */
        mmio_invalidate(npu_context, 0, 0);
}
static void pnv_npu2_mn_change_pte(struct mmu_notifier *mn,
                                struct mm_struct *mm,
                                unsigned long address,
                                pte_t pte)
{
        struct npu_context *npu_context = mn_to_npu_context(mn);

        mmio_invalidate(npu_context, 1, address);
}
static void pnv_npu2_mn_invalidate_page(struct mmu_notifier *mn,
                                        struct mm_struct *mm,
                                        unsigned long address)
{
        struct npu_context *npu_context = mn_to_npu_context(mn);

        mmio_invalidate(npu_context, 1, address);
}
static void pnv_npu2_mn_invalidate_range(struct mmu_notifier *mn,
                                        struct mm_struct *mm,
                                        unsigned long start, unsigned long end)
{
        struct npu_context *npu_context = mn_to_npu_context(mn);
        unsigned long address;

        for (address = start; address <= end; address += PAGE_SIZE)
                mmio_invalidate(npu_context, 1, address);
}
static const struct mmu_notifier_ops nv_nmmu_notifier_ops = {
        .release = pnv_npu2_mn_release,
        .change_pte = pnv_npu2_mn_change_pte,
        .invalidate_page = pnv_npu2_mn_invalidate_page,
        .invalidate_range = pnv_npu2_mn_invalidate_range,
};
/*
 * Call into OPAL to setup the nmmu context for the current task in
 * the NPU. This must be called to setup the context tables before the
 * GPU issues ATRs. gpdev should be a pointer to the PCIe GPU device.
 *
 * A release callback should be registered to allow a device driver to
 * be notified that it should not launch any new translation requests
 * as the final TLB invalidate is about to occur.
 *
 * Returns an error if no contexts are currently available, or an
 * npu_context which should be passed to pnv_npu2_handle_fault().
 *
 * mmap_sem must be held in write mode.
 */
struct npu_context *pnv_npu2_init_context(struct pci_dev *gpdev,
                        unsigned long flags,
                        struct npu_context *(*cb)(struct npu_context *, void *),
                        void *priv)
{
        int rc;
        u32 nvlink_index;
        struct device_node *nvlink_dn;
        struct mm_struct *mm = current->mm;
        struct pnv_phb *nphb;
        struct npu *npu;
        struct npu_context *npu_context;

        /*
         * At present we don't support GPUs connected to multiple NPUs and I'm
         * not sure the hardware does either.
         */
        struct pci_dev *npdev = pnv_pci_get_npu_dev(gpdev, 0);

        if (!firmware_has_feature(FW_FEATURE_OPAL))
                return ERR_PTR(-ENODEV);

        if (!npdev)
                /* No nvlink associated with this GPU device */
                return ERR_PTR(-ENODEV);

        if (!mm)
                /* kernel thread contexts are not supported */
                return ERR_PTR(-EINVAL);

        nphb = pci_bus_to_host(npdev->bus)->private_data;
        npu = &nphb->npu;

        /*
         * Setup the NPU context table for a particular GPU. These need to be
         * per-GPU as we need the tables to filter ATSDs when there are no
         * active contexts on a particular GPU.
         */
        rc = opal_npu_init_context(nphb->opal_id, mm->context.id, flags,
                                PCI_DEVID(gpdev->bus->number, gpdev->devfn));
        if (rc < 0)
                return ERR_PTR(-ENOSPC);

        /*
         * We store the npu pci device so we can more easily get at the
         * associated npus.
         */
        npu_context = mm->context.npu_context;
        if (!npu_context) {
                npu_context = kzalloc(sizeof(struct npu_context), GFP_KERNEL);
                if (!npu_context)
                        return ERR_PTR(-ENOMEM);

                mm->context.npu_context = npu_context;
                npu_context->mm = mm;
                npu_context->mn.ops = &nv_nmmu_notifier_ops;
                __mmu_notifier_register(&npu_context->mn, mm);
                kref_init(&npu_context->kref);
        } else {
                kref_get(&npu_context->kref);
        }

        npu_context->release_cb = cb;
        npu_context->priv = priv;
        nvlink_dn = of_parse_phandle(npdev->dev.of_node, "ibm,nvlink", 0);
        if (WARN_ON(of_property_read_u32(nvlink_dn, "ibm,npu-link-index",
                                                        &nvlink_index)))
                return ERR_PTR(-ENODEV);
        npu_context->npdev[npu->index][nvlink_index] = npdev;

        return npu_context;
}
EXPORT_SYMBOL(pnv_npu2_init_context);
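
/*
 * Illustrative usage sketch (not part of the original source; the
 * driver-side names are hypothetical). A GPU driver would typically
 * pair the init and destroy calls, holding mmap_sem for writing
 * around the init as required above:
 *
 *      static struct npu_context *gpu_release_cb(struct npu_context *ctx,
 *                                                void *priv)
 *      {
 *              // stop the GPU issuing new translation requests
 *              return ctx;
 *      }
 *
 *      ctx = pnv_npu2_init_context(gpdev, flags, gpu_release_cb, gpu_priv);
 *      if (IS_ERR(ctx))
 *              return PTR_ERR(ctx);
 *      // ... service faults with pnv_npu2_handle_fault() ...
 *      pnv_npu2_destroy_context(ctx, gpdev);
 */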
static void pnv_npu2_release_context(struct kref *kref)
{
        struct npu_context *npu_context =
                container_of(kref, struct npu_context, kref);

        npu_context->mm->context.npu_context = NULL;
        mmu_notifier_unregister(&npu_context->mn,
                                npu_context->mm);

        kfree(npu_context);
}
void pnv_npu2_destroy_context(struct npu_context *npu_context,
                        struct pci_dev *gpdev)
{
        struct pnv_phb *nphb, *phb;
        struct npu *npu;
        struct pci_dev *npdev = pnv_pci_get_npu_dev(gpdev, 0);
        struct device_node *nvlink_dn;
        u32 nvlink_index;

        if (WARN_ON(!npdev))
                return;

        if (!firmware_has_feature(FW_FEATURE_OPAL))
                return;

        nphb = pci_bus_to_host(npdev->bus)->private_data;
        npu = &nphb->npu;
        phb = pci_bus_to_host(gpdev->bus)->private_data;
        nvlink_dn = of_parse_phandle(npdev->dev.of_node, "ibm,nvlink", 0);
        if (WARN_ON(of_property_read_u32(nvlink_dn, "ibm,npu-link-index",
                                                        &nvlink_index)))
                return;
        npu_context->npdev[npu->index][nvlink_index] = NULL;
        opal_npu_destroy_context(phb->opal_id, npu_context->mm->context.id,
                                PCI_DEVID(gpdev->bus->number, gpdev->devfn));
        kref_put(&npu_context->kref, pnv_npu2_release_context);
}
EXPORT_SYMBOL(pnv_npu2_destroy_context);
/*
 * Assumes mmap_sem is held for the context's associated mm.
 */
int pnv_npu2_handle_fault(struct npu_context *context, uintptr_t *ea,
                        unsigned long *flags, unsigned long *status, int count)
{
        u64 rc = 0, result = 0;
        int i, is_write;
        struct page *page[1];

        /* mmap_sem should be held so the struct_mm must be present */
        struct mm_struct *mm = context->mm;

        if (!firmware_has_feature(FW_FEATURE_OPAL))
                return -ENODEV;

        WARN_ON(!rwsem_is_locked(&mm->mmap_sem));

        for (i = 0; i < count; i++) {
                is_write = flags[i] & NPU2_WRITE;
                rc = get_user_pages_remote(NULL, mm, ea[i], 1,
                                        is_write ? FOLL_WRITE : 0,
                                        page, NULL, NULL);

                /*
                 * To support virtualised environments we will have to do an
                 * access to the page to ensure it gets faulted into the
                 * hypervisor. For the moment virtualisation is not supported
                 * in other areas so leave the access out.
                 */
                if (rc != 1) {
                        status[i] = rc;
                        result = -EFAULT;
                        continue;
                }

                status[i] = 0;
                put_page(page[0]);
        }

        return result;
}
EXPORT_SYMBOL(pnv_npu2_handle_fault);
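
/*
 * Illustrative usage sketch (not part of the original source; the
 * array names are hypothetical). A driver servicing a batch of GPU
 * translation faults would call, with mmap_sem held:
 *
 *      down_read(&context->mm->mmap_sem);
 *      ret = pnv_npu2_handle_fault(context, ea_list, flag_list,
 *                                  status_list, nfaults);
 *      up_read(&context->mm->mmap_sem);
 *
 * On return, status_list[i] is 0 for each address that was faulted in
 * successfully and the get_user_pages_remote() result otherwise.
 */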
int pnv_npu2_init(struct pnv_phb *phb)
{
        unsigned int i;
        u64 mmio_atsd;
        struct device_node *dn;
        struct pci_dev *gpdev;
        static int npu_index;
        uint64_t rc = 0;

        for_each_child_of_node(phb->hose->dn, dn) {
                gpdev = pnv_pci_get_gpu_dev(get_pci_dev(dn));
                if (gpdev) {
                        rc = opal_npu_map_lpar(phb->opal_id,
                                PCI_DEVID(gpdev->bus->number, gpdev->devfn),
                                0, 0);
                        if (rc)
                                dev_err(&gpdev->dev,
                                        "Error %lld mapping device to LPAR\n",
                                        rc);
                }
        }

        for (i = 0; !of_property_read_u64_index(phb->hose->dn, "ibm,mmio-atsd",
                                                        i, &mmio_atsd); i++)
                phb->npu.mmio_atsd_regs[i] = ioremap(mmio_atsd, 32);

        pr_info("NPU%lld: Found %d MMIO ATSD registers", phb->opal_id, i);
        phb->npu.mmio_atsd_count = i;
        phb->npu.mmio_atsd_usage = 0;
        npu_index++;
        if (WARN_ON(npu_index >= NV_MAX_NPUS))
                return -ENOSPC;
        max_npu2_index = npu_index;
        phb->npu.index = npu_index;

        return 0;
}