/*
 * This file implements the DMA operations for NVLink devices. The NPU
 * devices all point to the same iommu table as the parent PCI device.
 *
 * Copyright Alistair Popple, IBM Corporation 2015.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */

#include <linux/slab.h>
#include <linux/mmu_notifier.h>
#include <linux/mmu_context.h>
#include <linux/of.h>
#include <linux/export.h>
#include <linux/pci.h>
#include <linux/memblock.h>
#include <linux/iommu.h>

#include <asm/tlb.h>
#include <asm/powernv.h>
#include <asm/opal.h>
#include <asm/io.h>
#include <asm/iommu.h>
#include <asm/pnv-pci.h>
#include <asm/msi_bitmap.h>

#include "powernv.h"
#include "pci.h"

#define npu_to_phb(x) container_of(x, struct pnv_phb, npu)

/*
 * Other types of TCE cache invalidation are not functional in the
 * kernel.
 */
static struct pci_dev *get_pci_dev(struct device_node *dn)
{
	return PCI_DN(dn)->pcidev;
}

/* Given a NPU device get the associated PCI device. */
struct pci_dev *pnv_pci_get_gpu_dev(struct pci_dev *npdev)
{
	struct device_node *dn;
	struct pci_dev *gpdev;

	if (WARN_ON(!npdev->dev.of_node))
		return NULL;

	/* Get associated PCI device */
	dn = of_parse_phandle(npdev->dev.of_node, "ibm,gpu", 0);
	if (!dn)
		return NULL;

	gpdev = get_pci_dev(dn);
	of_node_put(dn);

	return gpdev;
}
EXPORT_SYMBOL(pnv_pci_get_gpu_dev);

/* Given the real PCI device get a linked NPU device. */
struct pci_dev *pnv_pci_get_npu_dev(struct pci_dev *gpdev, int index)
{
	struct device_node *dn;
	struct pci_dev *npdev;

	/* Not all PCI devices have device-tree nodes */
	if (!gpdev->dev.of_node)
		return NULL;

	/* Get associated PCI device */
	dn = of_parse_phandle(gpdev->dev.of_node, "ibm,npu", index);
	if (!dn)
		return NULL;

	npdev = get_pci_dev(dn);
	of_node_put(dn);

	return npdev;
}
EXPORT_SYMBOL(pnv_pci_get_npu_dev);
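
/*
 * Illustrative sketch: how a caller might walk every NVLink attached to a
 * GPU with pnv_pci_get_npu_dev(). The helper name example_for_each_npu_dev
 * is hypothetical; the in-tree user of this pattern is
 * pnv_npu_try_dma_set_bypass() below.
 */
#if 0
static void example_for_each_npu_dev(struct pci_dev *gpdev)
{
	struct pci_dev *npdev;
	int i;

	for (i = 0; ; i++) {
		/* Index i selects the i-th "ibm,npu" phandle of the GPU node */
		npdev = pnv_pci_get_npu_dev(gpdev, i);
		if (!npdev)
			break;

		dev_info(&npdev->dev, "NVLink %d of %s\n", i, pci_name(gpdev));
	}
}
#endif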

#define NPU_DMA_OP_UNSUPPORTED()	\
	dev_err_once(dev, "%s operation unsupported for NVLink devices\n", \
		__func__)

static void *dma_npu_alloc(struct device *dev, size_t size,
			   dma_addr_t *dma_handle, gfp_t flag,
			   unsigned long attrs)
{
	NPU_DMA_OP_UNSUPPORTED();
	return NULL;
}

static void dma_npu_free(struct device *dev, size_t size,
			 void *vaddr, dma_addr_t dma_handle,
			 unsigned long attrs)
{
	NPU_DMA_OP_UNSUPPORTED();
}

static dma_addr_t dma_npu_map_page(struct device *dev, struct page *page,
				   unsigned long offset, size_t size,
				   enum dma_data_direction direction,
				   unsigned long attrs)
{
	NPU_DMA_OP_UNSUPPORTED();
	return 0;
}

static int dma_npu_map_sg(struct device *dev, struct scatterlist *sglist,
			  int nelems, enum dma_data_direction direction,
			  unsigned long attrs)
{
	NPU_DMA_OP_UNSUPPORTED();
	return 0;
}

static int dma_npu_dma_supported(struct device *dev, u64 mask)
{
	NPU_DMA_OP_UNSUPPORTED();
	return 0;
}

static u64 dma_npu_get_required_mask(struct device *dev)
{
	NPU_DMA_OP_UNSUPPORTED();
	return 0;
}

static const struct dma_map_ops dma_npu_ops = {
	.map_page = dma_npu_map_page,
	.map_sg = dma_npu_map_sg,
	.alloc = dma_npu_alloc,
	.free = dma_npu_free,
	.dma_supported = dma_npu_dma_supported,
	.get_required_mask = dma_npu_get_required_mask,
};

/*
 * Returns the PE associated with the PCI device of the given
 * NPU. Also returns the linked pci device in *gpdev if gpdev != NULL.
 */
static struct pnv_ioda_pe *get_gpu_pci_dev_and_pe(struct pnv_ioda_pe *npe,
						  struct pci_dev **gpdev)
{
	struct pnv_phb *phb;
	struct pci_controller *hose;
	struct pci_dev *pdev;
	struct pnv_ioda_pe *pe;
	struct pci_dn *pdn;

	pdev = pnv_pci_get_gpu_dev(npe->pdev);
	if (!pdev)
		return NULL;

	pdn = pci_get_pdn(pdev);
	if (WARN_ON(!pdn || pdn->pe_number == IODA_INVALID_PE))
		return NULL;

	hose = pci_bus_to_host(pdev->bus);
	phb = hose->private_data;
	pe = &phb->ioda.pe_array[pdn->pe_number];

	if (gpdev)
		*gpdev = pdev;

	return pe;
}

long pnv_npu_set_window(struct pnv_ioda_pe *npe, int num,
		struct iommu_table *tbl)
{
	struct pnv_phb *phb = npe->phb;
	int64_t rc;
	const unsigned long size = tbl->it_indirect_levels ?
		tbl->it_level_size : tbl->it_size;
	const __u64 start_addr = tbl->it_offset << tbl->it_page_shift;
	const __u64 win_size = tbl->it_size << tbl->it_page_shift;

	pe_info(npe, "Setting up window %llx..%llx pg=%lx\n",
			start_addr, start_addr + win_size - 1,
			IOMMU_PAGE_SIZE(tbl));

	rc = opal_pci_map_pe_dma_window(phb->opal_id,
			npe->pe_number,
			npe->pe_number,
			tbl->it_indirect_levels + 1,
			__pa(tbl->it_base),
			size << 3,
			IOMMU_PAGE_SIZE(tbl));
	if (rc) {
		pe_err(npe, "Failed to configure TCE table, err %lld\n", rc);
		return rc;
	}
	pnv_pci_ioda2_tce_invalidate_entire(phb, false);

	/* Add the table to the list so its TCE cache will get invalidated */
	pnv_pci_link_table_and_group(phb->hose->node, num,
			tbl, &npe->table_group);

	return 0;
}

long pnv_npu_unset_window(struct pnv_ioda_pe *npe, int num)
{
	struct pnv_phb *phb = npe->phb;
	int64_t rc;

	pe_info(npe, "Removing DMA window\n");

	rc = opal_pci_map_pe_dma_window(phb->opal_id, npe->pe_number,
			npe->pe_number,
			0/* levels */, 0/* table address */,
			0/* table size */, 0/* page size */);
	if (rc) {
		pe_err(npe, "Unmapping failed, ret = %lld\n", rc);
		return rc;
	}
	pnv_pci_ioda2_tce_invalidate_entire(phb, false);

	pnv_pci_unlink_table_and_group(npe->table_group.tables[num],
			&npe->table_group);

	return 0;
}

/*
 * Enables 32 bit DMA on NPU.
 */
static void pnv_npu_dma_set_32(struct pnv_ioda_pe *npe)
{
	struct pci_dev *gpdev;
	struct pnv_ioda_pe *gpe;
	int64_t rc;

	/*
	 * Find the associated PCI devices and get the dma window
	 * information from there.
	 */
	if (!npe->pdev || !(npe->flags & PNV_IODA_PE_DEV))
		return;

	gpe = get_gpu_pci_dev_and_pe(npe, &gpdev);
	if (!gpe)
		return;

	rc = pnv_npu_set_window(npe, 0, gpe->table_group.tables[0]);

	/*
	 * We don't initialise npu_pe->tce32_table as we always use
	 * dma_npu_ops which are nops.
	 */
	set_dma_ops(&npe->pdev->dev, &dma_npu_ops);
}

/*
 * Enables bypass mode on the NPU. The NPU only supports one
 * window per link, so bypass needs to be explicitly enabled or
 * disabled. Unlike for a PHB3, bypass and non-bypass modes can't be
 * active at the same time.
 */
static int pnv_npu_dma_set_bypass(struct pnv_ioda_pe *npe)
{
	struct pnv_phb *phb = npe->phb;
	int64_t rc = 0;
	phys_addr_t top = memblock_end_of_DRAM();

	if (phb->type != PNV_PHB_NPU || !npe->pdev)
		return -EINVAL;

	rc = pnv_npu_unset_window(npe, 0);
	if (rc != OPAL_SUCCESS)
		return rc;

	/* Enable the bypass window */

	top = roundup_pow_of_two(top);
	dev_info(&npe->pdev->dev, "Enabling bypass for PE %x\n",
			npe->pe_number);
	rc = opal_pci_map_pe_dma_window_real(phb->opal_id,
			npe->pe_number, npe->pe_number,
			0 /* bypass base */, top);

	if (rc == OPAL_SUCCESS)
		pnv_pci_ioda2_tce_invalidate_entire(phb, false);

	return rc;
}

void pnv_npu_try_dma_set_bypass(struct pci_dev *gpdev, bool bypass)
{
	int i;
	struct pnv_phb *phb;
	struct pci_dn *pdn;
	struct pnv_ioda_pe *npe;
	struct pci_dev *npdev;

	for (i = 0; ; ++i) {
		npdev = pnv_pci_get_npu_dev(gpdev, i);
		if (!npdev)
			break;

		pdn = pci_get_pdn(npdev);
		if (WARN_ON(!pdn || pdn->pe_number == IODA_INVALID_PE))
			return;

		phb = pci_bus_to_host(npdev->bus)->private_data;

		/* We only do bypass if it's enabled on the linked device */
		npe = &phb->ioda.pe_array[pdn->pe_number];

		if (bypass) {
			dev_info(&npdev->dev,
					"Using 64-bit DMA iommu bypass\n");
			pnv_npu_dma_set_bypass(npe);
		} else {
			dev_info(&npdev->dev, "Using 32-bit DMA via iommu\n");
			pnv_npu_dma_set_32(npe);
		}
	}
}

/* Switch ownership from platform code to external user (e.g. VFIO) */
void pnv_npu_take_ownership(struct pnv_ioda_pe *npe)
{
	struct pnv_phb *phb = npe->phb;
	int64_t rc;

	/*
	 * Note: NPU has just a single TVE in the hardware which means that
	 * while used by the kernel, it can have either 32bit window or
	 * DMA bypass but never both. So we deconfigure 32bit window only
	 * if it was enabled at the moment of ownership change.
	 */
	if (npe->table_group.tables[0]) {
		pnv_npu_unset_window(npe, 0);
		return;
	}

	/* Disable bypass */
	rc = opal_pci_map_pe_dma_window_real(phb->opal_id,
			npe->pe_number, npe->pe_number,
			0 /* bypass base */, 0);
	if (rc) {
		pe_err(npe, "Failed to disable bypass, err %lld\n", rc);
		return;
	}
	pnv_pci_ioda2_tce_invalidate_entire(npe->phb, false);
}

struct pnv_ioda_pe *pnv_pci_npu_setup_iommu(struct pnv_ioda_pe *npe)
{
	struct pnv_phb *phb = npe->phb;
	struct pci_bus *pbus = phb->hose->bus;
	struct pci_dev *npdev, *gpdev = NULL, *gptmp;
	struct pnv_ioda_pe *gpe = get_gpu_pci_dev_and_pe(npe, &gpdev);

	if (!gpe || !gpdev)
		return NULL;

	list_for_each_entry(npdev, &pbus->devices, bus_list) {
		gptmp = pnv_pci_get_gpu_dev(npdev);

		if (gptmp != gpdev)
			continue;

		pe_info(gpe, "Attached NPU %s\n", dev_name(&npdev->dev));
		iommu_group_add_device(gpe->table_group.group, &npdev->dev);
	}

	return gpe;
}

/* Maximum number of nvlinks per npu */
#define NV_MAX_LINKS 6

/* Maximum index of npu2 hosts in the system. Always < NV_MAX_NPUS */
static int max_npu2_index;

struct npu_context {
	struct mm_struct *mm;
	struct pci_dev *npdev[NV_MAX_NPUS][NV_MAX_LINKS];
	struct mmu_notifier mn;
	struct kref kref;

	/* Callback to stop translation requests on a given GPU */
	struct npu_context *(*release_cb)(struct npu_context *, void *);

	/*
	 * Private pointer passed to the above callback for usage by
	 * the device driver.
	 */
	void *priv;
};

/*
 * Find a free MMIO ATSD register and mark it in use. Return -ENOSPC
 * if none are available.
 */
static int get_mmio_atsd_reg(struct npu *npu)
{
	int i;

	for (i = 0; i < npu->mmio_atsd_count; i++) {
		if (!test_and_set_bit(i, &npu->mmio_atsd_usage))
			return i;
	}

	return -ENOSPC;
}

static void put_mmio_atsd_reg(struct npu *npu, int reg)
{
	clear_bit(reg, &npu->mmio_atsd_usage);
}

/* MMIO ATSD register offsets */
#define XTS_ATSD_AVA  1
#define XTS_ATSD_STAT 2

static int mmio_launch_invalidate(struct npu *npu, unsigned long launch,
				unsigned long va)
{
	int mmio_atsd_reg;

	do {
		mmio_atsd_reg = get_mmio_atsd_reg(npu);
		cpu_relax();
	} while (mmio_atsd_reg < 0);

	__raw_writeq(cpu_to_be64(va),
			npu->mmio_atsd_regs[mmio_atsd_reg] + XTS_ATSD_AVA);
	eieio();
	__raw_writeq(cpu_to_be64(launch), npu->mmio_atsd_regs[mmio_atsd_reg]);

	return mmio_atsd_reg;
}

static int mmio_invalidate_pid(struct npu *npu, unsigned long pid, bool flush)
{
	unsigned long launch;

	/* IS set to invalidate matching PID */
	launch = PPC_BIT(12);

	/* PRS set to process-scoped */
	launch |= PPC_BIT(13);

	/* AP */
	launch |= (u64) mmu_get_ap(mmu_virtual_psize) << PPC_BITLSHIFT(17);

	/* PID */
	launch |= pid << PPC_BITLSHIFT(38);

	/* No flush */
	launch |= !flush << PPC_BITLSHIFT(39);

	/* Invalidating the entire process doesn't use a va */
	return mmio_launch_invalidate(npu, launch, 0);
}

static int mmio_invalidate_va(struct npu *npu, unsigned long va,
			unsigned long pid, bool flush)
{
	unsigned long launch;

	/* IS set to invalidate target VA */
	launch = 0;

	/* PRS set to process scoped */
	launch |= PPC_BIT(13);

	/* AP */
	launch |= (u64) mmu_get_ap(mmu_virtual_psize) << PPC_BITLSHIFT(17);

	/* PID */
	launch |= pid << PPC_BITLSHIFT(38);

	/* No flush */
	launch |= !flush << PPC_BITLSHIFT(39);

	return mmio_launch_invalidate(npu, launch, va);
}

#define mn_to_npu_context(x) container_of(x, struct npu_context, mn)

struct mmio_atsd_reg {
	struct npu *npu;
	int reg;
};

static void mmio_invalidate_wait(
	struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS], bool flush)
{
	struct npu *npu;
	int i, reg;

	/* Wait for all invalidations to complete */
	for (i = 0; i <= max_npu2_index; i++) {
		if (mmio_atsd_reg[i].reg < 0)
			continue;

		/* Wait for completion */
		npu = mmio_atsd_reg[i].npu;
		reg = mmio_atsd_reg[i].reg;
		while (__raw_readq(npu->mmio_atsd_regs[reg] + XTS_ATSD_STAT))
			cpu_relax();

		put_mmio_atsd_reg(npu, reg);

		/*
		 * The GPU requires two flush ATSDs to ensure all entries have
		 * been flushed. We use PID 0 as it will never be used for a
		 * process on the GPU.
		 */
		if (flush)
			mmio_invalidate_pid(npu, 0, true);
	}
}

/*
 * Invalidate either a single address or an entire PID depending on
 * the value of va.
 */
static void mmio_invalidate(struct npu_context *npu_context, int va,
			unsigned long address, bool flush)
{
	int i, j;
	struct npu *npu;
	struct pnv_phb *nphb;
	struct pci_dev *npdev;
	struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS];
	unsigned long pid = npu_context->mm->context.id;

	/*
	 * Loop over all the NPUs this process is active on and launch
	 * the invalidation.
	 */
	for (i = 0; i <= max_npu2_index; i++) {
		mmio_atsd_reg[i].reg = -1;
		for (j = 0; j < NV_MAX_LINKS; j++) {
			npdev = npu_context->npdev[i][j];
			if (!npdev)
				continue;

			nphb = pci_bus_to_host(npdev->bus)->private_data;
			npu = &nphb->npu;
			mmio_atsd_reg[i].npu = npu;

			if (va)
				mmio_atsd_reg[i].reg =
					mmio_invalidate_va(npu, address, pid,
							flush);
			else
				mmio_atsd_reg[i].reg =
					mmio_invalidate_pid(npu, pid, flush);

			/*
			 * The NPU hardware forwards the shootdown to all GPUs
			 * so we only have to launch one shootdown per NPU.
			 */
			break;
		}
	}

	/*
	 * Unfortunately the nest mmu does not support flushing specific
	 * addresses so we have to flush the whole mm.
	 */
	flush_tlb_mm(npu_context->mm);

	mmio_invalidate_wait(mmio_atsd_reg, flush);
	if (flush)
		/* Wait for the flush to complete */
		mmio_invalidate_wait(mmio_atsd_reg, false);
}

static void pnv_npu2_mn_release(struct mmu_notifier *mn,
				struct mm_struct *mm)
{
	struct npu_context *npu_context = mn_to_npu_context(mn);

	/* Call into device driver to stop requests to the NMMU */
	if (npu_context->release_cb)
		npu_context->release_cb(npu_context, npu_context->priv);

	/*
	 * There should be no more translation requests for this PID, but we
	 * need to ensure any entries for it are removed from the TLB.
	 */
	mmio_invalidate(npu_context, 0, 0, true);
}

static void pnv_npu2_mn_change_pte(struct mmu_notifier *mn,
				struct mm_struct *mm,
				unsigned long address,
				pte_t pte)
{
	struct npu_context *npu_context = mn_to_npu_context(mn);

	mmio_invalidate(npu_context, 1, address, true);
}

static void pnv_npu2_mn_invalidate_page(struct mmu_notifier *mn,
					struct mm_struct *mm,
					unsigned long address)
{
	struct npu_context *npu_context = mn_to_npu_context(mn);

	mmio_invalidate(npu_context, 1, address, true);
}

static void pnv_npu2_mn_invalidate_range(struct mmu_notifier *mn,
					struct mm_struct *mm,
					unsigned long start, unsigned long end)
{
	struct npu_context *npu_context = mn_to_npu_context(mn);
	unsigned long address;

	for (address = start; address < end; address += PAGE_SIZE)
		mmio_invalidate(npu_context, 1, address, false);

	/* Do the flush only on the final address == end */
	mmio_invalidate(npu_context, 1, address, true);
}

static const struct mmu_notifier_ops nv_nmmu_notifier_ops = {
	.release = pnv_npu2_mn_release,
	.change_pte = pnv_npu2_mn_change_pte,
	.invalidate_page = pnv_npu2_mn_invalidate_page,
	.invalidate_range = pnv_npu2_mn_invalidate_range,
};

/*
 * Call into OPAL to setup the nmmu context for the current task in
 * the NPU. This must be called to setup the context tables before the
 * GPU issues ATRs. gpdev should be a pointer to the PCIe GPU device.
 *
 * A release callback should be registered to allow a device driver to
 * be notified that it should not launch any new translation requests
 * as the final TLB invalidate is about to occur.
 *
 * Returns an error if no contexts are currently available, or an
 * npu_context which should be passed to pnv_npu2_handle_fault().
 *
 * mmap_sem must be held in write mode.
 */
struct npu_context *pnv_npu2_init_context(struct pci_dev *gpdev,
			unsigned long flags,
			struct npu_context *(*cb)(struct npu_context *, void *),
			void *priv)
{
	int rc;
	u32 nvlink_index;
	struct device_node *nvlink_dn;
	struct mm_struct *mm = current->mm;
	struct pnv_phb *nphb;
	struct npu *npu;
	struct npu_context *npu_context;

	/*
	 * At present we don't support GPUs connected to multiple NPUs and I'm
	 * not sure the hardware does either.
	 */
	struct pci_dev *npdev = pnv_pci_get_npu_dev(gpdev, 0);

	if (!firmware_has_feature(FW_FEATURE_OPAL))
		return ERR_PTR(-ENODEV);

	if (!npdev)
		/* No nvlink associated with this GPU device */
		return ERR_PTR(-ENODEV);

	if (!mm || mm->context.id == 0) {
		/*
		 * Kernel thread contexts are not supported and context id 0 is
		 * reserved on the GPU.
		 */
		return ERR_PTR(-EINVAL);
	}

	nphb = pci_bus_to_host(npdev->bus)->private_data;
	npu = &nphb->npu;

	/*
	 * Setup the NPU context table for a particular GPU. These need to be
	 * per-GPU as we need the tables to filter ATSDs when there are no
	 * active contexts on a particular GPU.
	 */
	rc = opal_npu_init_context(nphb->opal_id, mm->context.id, flags,
				PCI_DEVID(gpdev->bus->number, gpdev->devfn));
	if (rc < 0)
		return ERR_PTR(-ENOSPC);

	/*
	 * We store the npu pci device so we can more easily get at the
	 * associated npus.
	 */
	npu_context = mm->context.npu_context;
	if (!npu_context) {
		npu_context = kzalloc(sizeof(struct npu_context), GFP_KERNEL);
		if (!npu_context)
			return ERR_PTR(-ENOMEM);

		mm->context.npu_context = npu_context;
		npu_context->mm = mm;
		npu_context->mn.ops = &nv_nmmu_notifier_ops;
		__mmu_notifier_register(&npu_context->mn, mm);
		kref_init(&npu_context->kref);
	} else {
		kref_get(&npu_context->kref);
	}

	npu_context->release_cb = cb;
	npu_context->priv = priv;
	nvlink_dn = of_parse_phandle(npdev->dev.of_node, "ibm,nvlink", 0);
	if (WARN_ON(of_property_read_u32(nvlink_dn, "ibm,npu-link-index",
							&nvlink_index)))
		return ERR_PTR(-ENODEV);
	npu_context->npdev[npu->index][nvlink_index] = npdev;

	return npu_context;
}
EXPORT_SYMBOL(pnv_npu2_init_context);
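
/*
 * Illustrative sketch: roughly how a GPU driver might pair
 * pnv_npu2_init_context() with pnv_npu2_destroy_context(). Names prefixed
 * example_ are hypothetical and the flags value is left as 0 for
 * simplicity.
 */
#if 0
static struct npu_context *example_release_cb(struct npu_context *context,
					      void *priv)
{
	/* Stop the GPU from issuing new translation requests here. */
	return context;
}

static int example_attach_gpu(struct pci_dev *gpdev)
{
	struct npu_context *context;

	/* pnv_npu2_init_context() requires mmap_sem held in write mode */
	down_write(&current->mm->mmap_sem);
	context = pnv_npu2_init_context(gpdev, 0 /* flags */,
					example_release_cb, NULL);
	up_write(&current->mm->mmap_sem);

	if (IS_ERR(context))
		return PTR_ERR(context);

	/* ... GPU work using the shared address space ... */

	pnv_npu2_destroy_context(context, gpdev);
	return 0;
}
#endif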

static void pnv_npu2_release_context(struct kref *kref)
{
	struct npu_context *npu_context =
		container_of(kref, struct npu_context, kref);

	npu_context->mm->context.npu_context = NULL;
	mmu_notifier_unregister(&npu_context->mn,
				npu_context->mm);

	kfree(npu_context);
}

void pnv_npu2_destroy_context(struct npu_context *npu_context,
			struct pci_dev *gpdev)
{
	struct pnv_phb *nphb;
	struct npu *npu;
	struct pci_dev *npdev = pnv_pci_get_npu_dev(gpdev, 0);
	struct device_node *nvlink_dn;
	u32 nvlink_index;

	if (WARN_ON(!npdev))
		return;

	if (!firmware_has_feature(FW_FEATURE_OPAL))
		return;

	nphb = pci_bus_to_host(npdev->bus)->private_data;
	npu = &nphb->npu;
	nvlink_dn = of_parse_phandle(npdev->dev.of_node, "ibm,nvlink", 0);
	if (WARN_ON(of_property_read_u32(nvlink_dn, "ibm,npu-link-index",
							&nvlink_index)))
		return;
	npu_context->npdev[npu->index][nvlink_index] = NULL;
	opal_npu_destroy_context(nphb->opal_id, npu_context->mm->context.id,
				PCI_DEVID(gpdev->bus->number, gpdev->devfn));
	kref_put(&npu_context->kref, pnv_npu2_release_context);
}
EXPORT_SYMBOL(pnv_npu2_destroy_context);

/*
 * Assumes mmap_sem is held for the context's associated mm.
 */
int pnv_npu2_handle_fault(struct npu_context *context, uintptr_t *ea,
			unsigned long *flags, unsigned long *status, int count)
{
	u64 rc = 0, result = 0;
	int i, is_write;
	struct page *page[1];

	/* mmap_sem should be held so the struct_mm must be present */
	struct mm_struct *mm = context->mm;

	if (!firmware_has_feature(FW_FEATURE_OPAL))
		return -ENODEV;

	WARN_ON(!rwsem_is_locked(&mm->mmap_sem));

	for (i = 0; i < count; i++) {
		is_write = flags[i] & NPU2_WRITE;
		rc = get_user_pages_remote(NULL, mm, ea[i], 1,
					is_write ? FOLL_WRITE : 0,
					page, NULL, NULL);

		/*
		 * To support virtualised environments we will have to do an
		 * access to the page to ensure it gets faulted into the
		 * hypervisor. For the moment virtualisation is not supported in
		 * other areas so leave the access out.
		 */
		if (rc != 1) {
			status[i] = rc;
			result = -EFAULT;
			continue;
		}

		status[i] = 0;
		put_page(page[0]);
	}

	return result;
}
EXPORT_SYMBOL(pnv_npu2_handle_fault);
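
/*
 * Illustrative sketch: how a hypothetical GPU driver fault handler might
 * hand a batch of faulting effective addresses to pnv_npu2_handle_fault().
 * Array sizes and names prefixed example_ are made up; mmap_sem must be
 * held for the context's mm across the call, as noted above.
 */
#if 0
static int example_service_faults(struct npu_context *context,
				  uintptr_t *ea, unsigned long *flags,
				  int count)
{
	unsigned long status[16];
	int rc;

	if (WARN_ON(count > ARRAY_SIZE(status)))
		return -EINVAL;

	down_read(&current->mm->mmap_sem);
	rc = pnv_npu2_handle_fault(context, ea, flags, status, count);
	up_read(&current->mm->mmap_sem);

	/* On -EFAULT, status[i] holds the per-address result */
	return rc;
}
#endif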

int pnv_npu2_init(struct pnv_phb *phb)
{
	unsigned int i;
	u64 mmio_atsd;
	struct device_node *dn;
	struct pci_dev *gpdev;
	static int npu_index;
	uint64_t rc = 0;

	for_each_child_of_node(phb->hose->dn, dn) {
		gpdev = pnv_pci_get_gpu_dev(get_pci_dev(dn));
		if (gpdev) {
			rc = opal_npu_map_lpar(phb->opal_id,
				PCI_DEVID(gpdev->bus->number, gpdev->devfn),
				0, 0);
			if (rc)
				dev_err(&gpdev->dev,
					"Error %lld mapping device to LPAR\n",
					rc);
		}
	}

	for (i = 0; !of_property_read_u64_index(phb->hose->dn, "ibm,mmio-atsd",
							i, &mmio_atsd); i++)
		phb->npu.mmio_atsd_regs[i] = ioremap(mmio_atsd, 32);

	pr_info("NPU%lld: Found %d MMIO ATSD registers", phb->opal_id, i);
	phb->npu.mmio_atsd_count = i;
	phb->npu.mmio_atsd_usage = 0;
	npu_index++;
	if (WARN_ON(npu_index >= NV_MAX_NPUS))
		return -ENOSPC;
	max_npu2_index = npu_index;
	phb->npu.index = npu_index;

	return 0;
}