drivers/misc/ocxl/link.c
// SPDX-License-Identifier: GPL-2.0+
// Copyright 2017 IBM Corp.
#include <linux/sched/mm.h>
#include <linux/mutex.h>
#include <linux/mmu_context.h>
#include <asm/copro.h>
#include <asm/pnv-ocxl.h>
#include "ocxl_internal.h"
#define SPA_PASID_BITS		15
#define SPA_PASID_MAX		((1 << SPA_PASID_BITS) - 1)
#define SPA_PE_MASK		SPA_PASID_MAX
#define SPA_SPA_SIZE_LOG	22 /* Each SPA is 4 MB */
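/*
 * Sizing note: each process element is 128 bytes (see the
 * BUILD_BUG_ON() in ocxl_link_add_pe() below), so a 4 MB SPA holds
 * 2^22 / 2^7 = 2^15 entries, one per PASID, matching SPA_PASID_BITS.
 */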
#define SPA_CFG_SF		(1ull << (63-0))
#define SPA_CFG_TA		(1ull << (63-1))
#define SPA_CFG_HV		(1ull << (63-3))
#define SPA_CFG_UV		(1ull << (63-4))
#define SPA_CFG_XLAT_hpt	(0ull << (63-6)) /* Hashed page table (HPT) mode */
#define SPA_CFG_XLAT_roh	(2ull << (63-6)) /* Radix on HPT mode */
#define SPA_CFG_XLAT_ror	(3ull << (63-6)) /* Radix on Radix mode */
#define SPA_CFG_PR		(1ull << (63-49))
#define SPA_CFG_TC		(1ull << (63-54))
#define SPA_CFG_DR		(1ull << (63-59))
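/*
 * The (63-n) shifts above follow the Power ISA MSB-0 convention used
 * in the hardware documentation: bit 0 is the most significant bit of
 * the 64-bit register, so "bit n" translates to 1ull << (63 - n).
 */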
#define SPA_XSL_TF		(1ull << (63-3))  /* Translation fault */
#define SPA_XSL_S		(1ull << (63-38)) /* Store operation */

#define SPA_PE_VALID		0x80000000
struct pe_data {
        struct mm_struct *mm;
        /* callback to trigger when a translation fault occurs */
        void (*xsl_err_cb)(void *data, u64 addr, u64 dsisr);
        /* opaque pointer to be passed to the above callback */
        void *xsl_err_data;
        struct rcu_head rcu;
};
struct spa {
        struct ocxl_process_element *spa_mem;
        int spa_order;
        struct mutex spa_lock;
        struct radix_tree_root pe_tree; /* Maps PE handles to pe_data */
        char *irq_name;
        int virq;
        void __iomem *reg_dsisr;
        void __iomem *reg_dar;
        void __iomem *reg_tfc;
        void __iomem *reg_pe_handle;
        /*
         * The following fields are used by the memory fault
         * interrupt handler. We can only have one interrupt at a
         * time. The NPU won't raise another interrupt until the
         * previous one has been ack'd by writing to the TFC register
         */
        struct xsl_fault {
                struct work_struct fault_work;
                u64 pe;
                u64 dar;
                u64 dsisr;
                struct pe_data pe_data;
        } xsl_fault;
};
/*
 * An opencapi link can be used by several PCI functions. We have
 * one link per device slot.
 *
 * A linked list of opencapi links should suffice, as there's a
 * limited number of opencapi slots on a system and lookup is only
 * done when the device is probed
 */
struct link {
        struct list_head list;
        struct kref ref;
        int domain;
        int bus;
        int dev;
        atomic_t irq_available;
        struct spa *spa;
        void *platform_data;
};
static struct list_head links_list = LIST_HEAD_INIT(links_list);
static DEFINE_MUTEX(links_list_lock);

enum xsl_response {
        CONTINUE,
        ADDRESS_ERROR,
        RESTART,
};
static void read_irq(struct spa *spa, u64 *dsisr, u64 *dar, u64 *pe)
{
        u64 reg;

        *dsisr = in_be64(spa->reg_dsisr);
        *dar = in_be64(spa->reg_dar);
        reg = in_be64(spa->reg_pe_handle);
        *pe = reg & SPA_PE_MASK;
}
static void ack_irq(struct spa *spa, enum xsl_response r)
{
        u64 reg = 0;

        /* continue is not supported */
        if (r == RESTART)
                reg = PPC_BIT(31);
        else if (r == ADDRESS_ERROR)
                reg = PPC_BIT(30);
        else
                WARN(1, "Invalid irq response %d\n", r);

        if (reg)
                out_be64(spa->reg_tfc, reg);
}
static void xsl_fault_handler_bh(struct work_struct *fault_work)
{
        unsigned int flt = 0;
        unsigned long access, flags, inv_flags = 0;
        enum xsl_response r;
        struct xsl_fault *fault = container_of(fault_work, struct xsl_fault,
                                        fault_work);
        struct spa *spa = container_of(fault, struct spa, xsl_fault);
        int rc;

        /*
         * We need to release a reference on the mm whenever exiting this
         * function (taken in the memory fault interrupt handler)
         */
        rc = copro_handle_mm_fault(fault->pe_data.mm, fault->dar, fault->dsisr,
                                &flt);
        if (rc) {
                pr_debug("copro_handle_mm_fault failed: %d\n", rc);
                if (fault->pe_data.xsl_err_cb) {
                        fault->pe_data.xsl_err_cb(
                                fault->pe_data.xsl_err_data,
                                fault->dar, fault->dsisr);
                }
                r = ADDRESS_ERROR;
                goto ack;
        }

        if (!radix_enabled()) {
                /*
                 * update_mmu_cache() will not have loaded the hash
                 * since current->trap is not a 0x400 or 0x300, so
                 * just call hash_page_mm() here.
                 */
                access = _PAGE_PRESENT | _PAGE_READ;
                if (fault->dsisr & SPA_XSL_S)
                        access |= _PAGE_WRITE;

                if (REGION_ID(fault->dar) != USER_REGION_ID)
                        access |= _PAGE_PRIVILEGED;

                local_irq_save(flags);
                hash_page_mm(fault->pe_data.mm, fault->dar, access, 0x300,
                        inv_flags);
                local_irq_restore(flags);
        }
        r = RESTART;
ack:
        mmdrop(fault->pe_data.mm);
        ack_irq(spa, r);
}
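/*
 * Top half of the translation fault interrupt. It runs in hard irq
 * context, so it only captures the fault state, takes a reference on
 * the mm and defers the actual (possibly sleeping) fault resolution
 * to xsl_fault_handler_bh() above.
 */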
static irqreturn_t xsl_fault_handler(int irq, void *data)
{
        struct link *link = (struct link *) data;
        struct spa *spa = link->spa;
        u64 dsisr, dar, pe_handle;
        struct pe_data *pe_data;
        struct ocxl_process_element *pe;
        int lpid, pid, tid;

        read_irq(spa, &dsisr, &dar, &pe_handle);

        WARN_ON(pe_handle > SPA_PE_MASK);
        pe = spa->spa_mem + pe_handle;
        lpid = be32_to_cpu(pe->lpid);
        pid = be32_to_cpu(pe->pid);
        tid = be32_to_cpu(pe->tid);
        /* We could be reading all null values here if the PE is being
         * removed while an interrupt kicks in. It's not supposed to
         * happen if the driver notified the AFU to terminate the
         * PASID, and the AFU waited for pending operations before
         * acknowledging. But even if it happens, we won't find a
         * memory context below and fail silently, so it should be ok.
         */
        if (!(dsisr & SPA_XSL_TF)) {
                WARN(1, "Invalid xsl interrupt fault register %#llx\n", dsisr);
                ack_irq(spa, ADDRESS_ERROR);
                return IRQ_HANDLED;
        }

        rcu_read_lock();
        pe_data = radix_tree_lookup(&spa->pe_tree, pe_handle);
        if (!pe_data) {
                /*
                 * Could only happen if the driver didn't notify the
                 * AFU about PASID termination before removing the PE,
                 * or the AFU didn't wait for all memory access to
                 * have completed.
                 *
                 * Either way, we fail early, but we shouldn't log an
                 * error message, as it is a valid (if unexpected)
                 * scenario
                 */
                rcu_read_unlock();
                pr_debug("Unknown mm context for xsl interrupt\n");
                ack_irq(spa, ADDRESS_ERROR);
                return IRQ_HANDLED;
        }
        WARN_ON(pe_data->mm->context.id != pid);

        spa->xsl_fault.pe = pe_handle;
        spa->xsl_fault.dar = dar;
        spa->xsl_fault.dsisr = dsisr;
        spa->xsl_fault.pe_data = *pe_data;
        mmgrab(pe_data->mm); /* mm count is released by bottom half */

        rcu_read_unlock();
        schedule_work(&spa->xsl_fault.fault_work);
        return IRQ_HANDLED;
}
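/*
 * Note on the lookup above: pe_data entries are freed with kfree_rcu()
 * in ocxl_link_remove_pe(), so holding rcu_read_lock() across the
 * radix tree lookup is enough to keep the entry alive while we copy it
 * into spa->xsl_fault.
 */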
static void unmap_irq_registers(struct spa *spa)
{
        pnv_ocxl_unmap_xsl_regs(spa->reg_dsisr, spa->reg_dar, spa->reg_tfc,
                                spa->reg_pe_handle);
}
static int map_irq_registers(struct pci_dev *dev, struct spa *spa)
{
        return pnv_ocxl_map_xsl_regs(dev, &spa->reg_dsisr, &spa->reg_dar,
                                &spa->reg_tfc, &spa->reg_pe_handle);
}
static int setup_xsl_irq(struct pci_dev *dev, struct link *link)
{
        struct spa *spa = link->spa;
        int rc;
        int hwirq;

        rc = pnv_ocxl_get_xsl_irq(dev, &hwirq);
        if (rc)
                return rc;

        rc = map_irq_registers(dev, spa);
        if (rc)
                return rc;

        spa->irq_name = kasprintf(GFP_KERNEL, "ocxl-xsl-%x-%x-%x",
                                link->domain, link->bus, link->dev);
        if (!spa->irq_name) {
                unmap_irq_registers(spa);
                dev_err(&dev->dev, "Can't allocate name for xsl interrupt\n");
                return -ENOMEM;
        }
        /*
         * At some point, we'll need to look into allowing a higher
         * number of interrupts. Could we have an IRQ domain per link?
         */
        spa->virq = irq_create_mapping(NULL, hwirq);
        if (!spa->virq) {
                kfree(spa->irq_name);
                unmap_irq_registers(spa);
                dev_err(&dev->dev,
                        "irq_create_mapping failed for translation interrupt\n");
                return -EINVAL;
        }

        dev_dbg(&dev->dev, "hwirq %d mapped to virq %d\n", hwirq, spa->virq);

        rc = request_irq(spa->virq, xsl_fault_handler, 0, spa->irq_name,
                        link);
        if (rc) {
                irq_dispose_mapping(spa->virq);
                kfree(spa->irq_name);
                unmap_irq_registers(spa);
                dev_err(&dev->dev,
                        "request_irq failed for translation interrupt: %d\n",
                        rc);
                return -EINVAL;
        }
        return 0;
}
static void release_xsl_irq(struct link *link)
{
        struct spa *spa = link->spa;

        if (spa->virq) {
                free_irq(spa->virq, link);
                irq_dispose_mapping(spa->virq);
        }
        kfree(spa->irq_name);
        unmap_irq_registers(spa);
}
static int alloc_spa(struct pci_dev *dev, struct link *link)
{
        struct spa *spa;

        spa = kzalloc(sizeof(struct spa), GFP_KERNEL);
        if (!spa)
                return -ENOMEM;

        mutex_init(&spa->spa_lock);
        INIT_RADIX_TREE(&spa->pe_tree, GFP_KERNEL);
        INIT_WORK(&spa->xsl_fault.fault_work, xsl_fault_handler_bh);

        spa->spa_order = SPA_SPA_SIZE_LOG - PAGE_SHIFT;
        spa->spa_mem = (struct ocxl_process_element *)
                __get_free_pages(GFP_KERNEL | __GFP_ZERO, spa->spa_order);
        if (!spa->spa_mem) {
                dev_err(&dev->dev, "Can't allocate Shared Process Area\n");
                kfree(spa);
                return -ENOMEM;
        }
        pr_debug("Allocated SPA for %x:%x:%x at %p\n", link->domain, link->bus,
                link->dev, spa->spa_mem);

        link->spa = spa;
        return 0;
}
static void free_spa(struct link *link)
{
        struct spa *spa = link->spa;

        pr_debug("Freeing SPA for %x:%x:%x\n", link->domain, link->bus,
                link->dev);

        if (spa && spa->spa_mem) {
                free_pages((unsigned long) spa->spa_mem, spa->spa_order);
                kfree(spa);
                link->spa = NULL;
        }
}
static int alloc_link(struct pci_dev *dev, int PE_mask, struct link **out_link)
{
        struct link *link;
        int rc;

        link = kzalloc(sizeof(struct link), GFP_KERNEL);
        if (!link)
                return -ENOMEM;

        kref_init(&link->ref);
        link->domain = pci_domain_nr(dev->bus);
        link->bus = dev->bus->number;
        link->dev = PCI_SLOT(dev->devfn);
        atomic_set(&link->irq_available, MAX_IRQ_PER_LINK);

        rc = alloc_spa(dev, link);
        if (rc)
                goto err_free;

        rc = setup_xsl_irq(dev, link);
        if (rc)
                goto err_spa;

        /* platform specific hook */
        rc = pnv_ocxl_spa_setup(dev, link->spa->spa_mem, PE_mask,
                                &link->platform_data);
        if (rc)
                goto err_xsl_irq;

        *out_link = link;
        return 0;

err_xsl_irq:
        release_xsl_irq(link);
err_spa:
        free_spa(link);
err_free:
        kfree(link);
        return rc;
}
static void free_link(struct link *link)
{
        release_xsl_irq(link);
        free_spa(link);
        kfree(link);
}
int ocxl_link_setup(struct pci_dev *dev, int PE_mask, void **link_handle)
{
        int rc = 0;
        struct link *link;

        mutex_lock(&links_list_lock);
        list_for_each_entry(link, &links_list, list) {
                /* The functions of a device all share the same link */
                if (link->domain == pci_domain_nr(dev->bus) &&
                        link->bus == dev->bus->number &&
                        link->dev == PCI_SLOT(dev->devfn)) {
                        kref_get(&link->ref);
                        *link_handle = link;
                        goto unlock;
                }
        }
        rc = alloc_link(dev, PE_mask, &link);
        if (rc)
                goto unlock;

        list_add(&link->list, &links_list);
        *link_handle = link;
unlock:
        mutex_unlock(&links_list_lock);
        return rc;
}
EXPORT_SYMBOL_GPL(ocxl_link_setup);
static void release_xsl(struct kref *ref)
{
        struct link *link = container_of(ref, struct link, ref);

        list_del(&link->list);
        /* call platform code before releasing data */
        pnv_ocxl_spa_release(link->platform_data);
        free_link(link);
}
void ocxl_link_release(struct pci_dev *dev, void *link_handle)
{
        struct link *link = (struct link *) link_handle;

        mutex_lock(&links_list_lock);
        kref_put(&link->ref, release_xsl);
        mutex_unlock(&links_list_lock);
}
EXPORT_SYMBOL_GPL(ocxl_link_release);
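/*
 * Compute the config_state word of a process element.
 * ocxl_link_add_pe() below calls this with "pidr == 0": a null PID
 * identifies a kernel context, for which the 64-bit flag (SPA_CFG_SF)
 * is taken from the MSR instead of the task's TIF_32BIT flag.
 */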
static u64 calculate_cfg_state(bool kernel)
{
        u64 state;

        state = SPA_CFG_DR;
        if (mfspr(SPRN_LPCR) & LPCR_TC)
                state |= SPA_CFG_TC;
        if (radix_enabled())
                state |= SPA_CFG_XLAT_ror;
        else
                state |= SPA_CFG_XLAT_hpt;
        state |= SPA_CFG_HV;
        if (kernel) {
                if (mfmsr() & MSR_SF)
                        state |= SPA_CFG_SF;
        } else {
                state |= SPA_CFG_PR;
                if (!test_tsk_thread_flag(current, TIF_32BIT))
                        state |= SPA_CFG_SF;
        }
        return state;
}
int ocxl_link_add_pe(void *link_handle, int pasid, u32 pidr, u32 tidr,
                u64 amr, struct mm_struct *mm,
                void (*xsl_err_cb)(void *data, u64 addr, u64 dsisr),
                void *xsl_err_data)
{
        struct link *link = (struct link *) link_handle;
        struct spa *spa = link->spa;
        struct ocxl_process_element *pe;
        int pe_handle, rc = 0;
        struct pe_data *pe_data;

        BUILD_BUG_ON(sizeof(struct ocxl_process_element) != 128);
        if (pasid > SPA_PASID_MAX)
                return -EINVAL;

        mutex_lock(&spa->spa_lock);
        pe_handle = pasid & SPA_PE_MASK;
        pe = spa->spa_mem + pe_handle;

        if (pe->software_state) {
                rc = -EBUSY;
                goto unlock;
        }

        pe_data = kmalloc(sizeof(*pe_data), GFP_KERNEL);
        if (!pe_data) {
                rc = -ENOMEM;
                goto unlock;
        }

        pe_data->mm = mm;
        pe_data->xsl_err_cb = xsl_err_cb;
        pe_data->xsl_err_data = xsl_err_data;

        memset(pe, 0, sizeof(struct ocxl_process_element));
        pe->config_state = cpu_to_be64(calculate_cfg_state(pidr == 0));
        pe->lpid = cpu_to_be32(mfspr(SPRN_LPID));
        pe->pid = cpu_to_be32(pidr);
        pe->tid = cpu_to_be32(tidr);
        pe->amr = cpu_to_be64(amr);
        pe->software_state = cpu_to_be32(SPA_PE_VALID);

        mm_context_add_copro(mm);
        /*
         * Barrier is to make sure PE is visible in the SPA before it
         * is used by the device. It also helps with the global TLBI
         * invalidation
         */
        mb();
        radix_tree_insert(&spa->pe_tree, pe_handle, pe_data);

        /*
         * The mm must stay valid for as long as the device uses it. We
         * lower the count when the context is removed from the SPA.
         *
         * We grab mm_count (and not mm_users), as we don't want to
         * end up in a circular dependency if a process mmaps its
         * mmio, therefore incrementing the file ref count when
         * calling mmap(), and forgets to unmap before exiting. In
         * that scenario, when the kernel handles the death of the
         * process, the file is not cleaned because unmap was not
         * called, and the mm wouldn't be freed because we would still
         * have a reference on mm_users. Incrementing mm_count solves
         * the problem.
         */
        mmgrab(mm);
unlock:
        mutex_unlock(&spa->spa_lock);
        return rc;
}
EXPORT_SYMBOL_GPL(ocxl_link_add_pe);
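/*
 * Reference counting recap: mmgrab() pins the mm_struct itself
 * (mm_count) without keeping the address space populated (mm_users),
 * which is exactly the behaviour the comment above relies on. The
 * matching mmdrop() calls are in ocxl_link_remove_pe() and in the
 * fault bottom half.
 */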
int ocxl_link_remove_pe(void *link_handle, int pasid)
{
        struct link *link = (struct link *) link_handle;
        struct spa *spa = link->spa;
        struct ocxl_process_element *pe;
        struct pe_data *pe_data;
        int pe_handle, rc;

        if (pasid > SPA_PASID_MAX)
                return -EINVAL;

        /*
         * About synchronization with our memory fault handler:
         *
         * Before removing the PE, the driver is supposed to have
         * notified the AFU, which should have cleaned up and made
         * sure the PASID is no longer in use, including pending
         * interrupts. However, there's no way to be sure...
         *
         * We clear the PE and remove the context from our radix
         * tree. From that point on, any new interrupt for that
         * context will fail silently, which is ok. As mentioned
         * above, that's not expected, but it could happen if the
         * driver or AFU didn't do the right thing.
         *
         * There could still be a bottom half running, but we don't
         * need to wait/flush, as it is managing a reference count on
         * the mm it reads from the radix tree.
         */

        pe_handle = pasid & SPA_PE_MASK;
        pe = spa->spa_mem + pe_handle;

        mutex_lock(&spa->spa_lock);

        if (!(be32_to_cpu(pe->software_state) & SPA_PE_VALID)) {
                rc = -EINVAL;
                goto unlock;
        }

        memset(pe, 0, sizeof(struct ocxl_process_element));
        /*
         * The barrier makes sure the PE is removed from the SPA
         * before we clear the NPU context cache below, so that the
         * old PE cannot be reloaded erroneously.
         */
        mb();

        /*
         * hook to platform code
         * On powerpc, the entry needs to be cleared from the context
         * cache of the NPU.
         */
        rc = pnv_ocxl_spa_remove_pe(link->platform_data, pe_handle);
        WARN_ON(rc);

        pe_data = radix_tree_delete(&spa->pe_tree, pe_handle);
        if (!pe_data) {
                WARN(1, "Couldn't find pe data when removing PE\n");
        } else {
                mm_context_remove_copro(pe_data->mm);
                mmdrop(pe_data->mm);
                kfree_rcu(pe_data, rcu);
        }
unlock:
        mutex_unlock(&spa->spa_lock);
        return rc;
}
EXPORT_SYMBOL_GPL(ocxl_link_remove_pe);
int ocxl_link_irq_alloc(void *link_handle, int *hw_irq, u64 *trigger_addr)
{
        struct link *link = (struct link *) link_handle;
        int rc, irq;
        u64 addr;

        if (atomic_dec_if_positive(&link->irq_available) < 0)
                return -ENOSPC;

        rc = pnv_ocxl_alloc_xive_irq(&irq, &addr);
        if (rc) {
                atomic_inc(&link->irq_available);
                return rc;
        }

        *hw_irq = irq;
        *trigger_addr = addr;
        return 0;
}
EXPORT_SYMBOL_GPL(ocxl_link_irq_alloc);
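/*
 * irq_available acts as a per-link quota (initialized to
 * MAX_IRQ_PER_LINK in alloc_link()): atomic_dec_if_positive() either
 * claims a slot or fails without going negative, and every failure or
 * free path returns the slot with atomic_inc().
 */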
void ocxl_link_free_irq(void *link_handle, int hw_irq)
{
        struct link *link = (struct link *) link_handle;

        pnv_ocxl_free_xive_irq(hw_irq);
        atomic_inc(&link->irq_available);
}
EXPORT_SYMBOL_GPL(ocxl_link_free_irq);