/*
 * Copyright 2014 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bitmap.h>
#include <linux/sched.h>
#include <linux/pid.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <asm/cputable.h>
#include <asm/current.h>
#include <asm/copro.h>

#include "cxl.h"
/*
 * Allocates space for a CXL context.
 */
struct cxl_context *cxl_context_alloc(void)
{
	return kzalloc(sizeof(struct cxl_context), GFP_KERNEL);
}
35 * Initialises a CXL context.
37 int cxl_context_init(struct cxl_context
*ctx
, struct cxl_afu
*afu
, bool master
,
38 struct address_space
*mapping
)
42 spin_lock_init(&ctx
->sste_lock
);
45 ctx
->pid
= ctx
->glpid
= NULL
; /* Set in start work ioctl */
46 mutex_init(&ctx
->mapping_lock
);
47 ctx
->mapping
= mapping
;
	/*
	 * Allocate the segment table before we put it in the IDR so that we
	 * can always access it when dereferenced from IDR. For the same
	 * reason, the segment table is only destroyed after the context is
	 * removed from the IDR. Access to this in the IOCTL is protected by
	 * Linux filesystem semantics (can't IOCTL until open is complete).
	 */
	i = cxl_alloc_sst(ctx);
	if (i)
		return i;
	INIT_WORK(&ctx->fault_work, cxl_handle_fault);

	init_waitqueue_head(&ctx->wq);
	spin_lock_init(&ctx->lock);

	ctx->irq_bitmap = NULL;
	ctx->pending_irq = false;
	ctx->pending_fault = false;
	ctx->pending_afu_err = false;

	INIT_LIST_HEAD(&ctx->irq_names);
	INIT_LIST_HEAD(&ctx->extra_irq_contexts);
	/*
	 * When we have to destroy all contexts in cxl_context_detach_all() we
	 * end up with afu_release_irqs() called from inside a
	 * idr_for_each_entry(). Hence we need to make sure that anything
	 * dereferenced from this IDR is ok before we allocate the IDR here.
	 * This clears out the IRQ ranges to ensure this.
	 */
	for (i = 0; i < CXL_IRQ_RANGES; i++)
		ctx->irqs.range[i] = 0;

	mutex_init(&ctx->status_mutex);

	ctx->status = OPENED;
	/*
	 * Allocating IDR! We better make sure everything's setup that
	 * dereferences from it.
	 */
	mutex_lock(&afu->contexts_lock);
	idr_preload(GFP_KERNEL);
	/* GFP_NOWAIT is safe here: the allocation draws on the preloaded pool */
	i = idr_alloc(&ctx->afu->contexts_idr, ctx, 0,
		      ctx->afu->num_procs, GFP_NOWAIT);
	idr_preload_end();
	mutex_unlock(&afu->contexts_lock);
	if (i < 0)
		return i;

	ctx->pe = i;
	if (cpu_has_feature(CPU_FTR_HVMODE)) {
		ctx->elem = &ctx->afu->native->spa[i];
		ctx->external_pe = ctx->pe;
	} else {
		ctx->external_pe = -1; /* assigned when attaching */
	}
	ctx->pe_inserted = false;
	/*
	 * Take a ref on the afu so that it stays alive at least until
	 * this context is reclaimed inside reclaim_ctx.
	 */
	cxl_afu_get(afu);
	return 0;
}
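/*
 * Illustrative only: a typical caller pairs cxl_context_alloc() with
 * cxl_context_init() and backs out with kfree() on init failure. The
 * helper below is a hypothetical sketch (not part of this driver),
 * compiled out, showing the expected error handling around the pair.
 */
#if 0
static struct cxl_context *example_context_create(struct cxl_afu *afu)
{
	struct cxl_context *ctx;
	int rc;

	ctx = cxl_context_alloc();
	if (!ctx)
		return ERR_PTR(-ENOMEM);

	/* No address_space yet; a real caller supplies the file's mapping */
	rc = cxl_context_init(ctx, afu, false, NULL);
	if (rc) {
		kfree(ctx);
		return ERR_PTR(rc);
	}

	return ctx;
}
#endif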
static int cxl_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct cxl_context *ctx = vma->vm_file->private_data;
	unsigned long address = (unsigned long)vmf->virtual_address;
	u64 area, offset;

	offset = vmf->pgoff << PAGE_SHIFT;
	pr_devel("%s: pe: %i address: 0x%lx offset: 0x%llx\n",
			__func__, ctx->pe, address, offset);

	if (ctx->afu->current_mode == CXL_MODE_DEDICATED) {
		area = ctx->afu->psn_phys;
		if (offset >= ctx->afu->adapter->ps_size)
			return VM_FAULT_SIGBUS;
	} else {
		area = ctx->psn_phys;
		if (offset >= ctx->psn_size)
			return VM_FAULT_SIGBUS;
	}
	mutex_lock(&ctx->status_mutex);

	if (ctx->status != STARTED) {
		mutex_unlock(&ctx->status_mutex);
		pr_devel("%s: Context not started, failing problem state access\n", __func__);
		if (ctx->mmio_err_ff) {
			if (!ctx->ff_page) {
				ctx->ff_page = alloc_page(GFP_USER);
				if (!ctx->ff_page)
					return VM_FAULT_OOM;
				memset(page_address(ctx->ff_page), 0xff, PAGE_SIZE);
			}
			get_page(ctx->ff_page);
			vmf->page = ctx->ff_page;
			vma->vm_page_prot = pgprot_cached(vma->vm_page_prot);
			return 0;
		}
		return VM_FAULT_SIGBUS;
	}
	vm_insert_pfn(vma, address, (area + offset) >> PAGE_SHIFT);

	mutex_unlock(&ctx->status_mutex);

	return VM_FAULT_NOPAGE;
}
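/*
 * Design note: when the context is not STARTED, the fault handler above
 * can hand back a shared page filled with 0xff bytes instead of SIGBUS
 * (when mmio_err_ff is set). Reads of problem state then look like MMIO
 * reads from hardware that has stopped responding, rather than killing
 * the process outright.
 */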
static const struct vm_operations_struct cxl_mmap_vmops = {
	.fault = cxl_mmap_fault,
};
/*
 * Map a per-context mmio space into the given vma.
 */
int cxl_context_iomap(struct cxl_context *ctx, struct vm_area_struct *vma)
{
	u64 start = vma->vm_pgoff << PAGE_SHIFT;
	u64 len = vma->vm_end - vma->vm_start;
	if (ctx->afu->current_mode == CXL_MODE_DEDICATED) {
		if (start + len > ctx->afu->adapter->ps_size)
			return -EINVAL;
	} else {
		if (start + len > ctx->psn_size)
			return -EINVAL;
	}

	if (ctx->afu->current_mode != CXL_MODE_DEDICATED) {
		/* make sure there is a valid per process space for this AFU */
		if ((ctx->master && !ctx->afu->psa) || (!ctx->afu->pp_psa)) {
			pr_devel("AFU doesn't support mmio space\n");
			return -EINVAL;
		}

		/* Can't mmap until the AFU is enabled */
		if (!ctx->afu->enabled)
			return -EBUSY;
	}
	pr_devel("%s: mmio physical: %llx pe: %i master:%i\n", __func__,
		 ctx->psn_phys, ctx->pe, ctx->master);

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vma->vm_ops = &cxl_mmap_vmops;
	return 0;
}
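/*
 * For reference, userspace reaches cxl_context_iomap() through mmap() on
 * an open AFU file descriptor. A hypothetical sketch (the device path and
 * mapping size are illustrative assumptions, not taken from this file):
 *
 *	int fd = open("/dev/cxl/afu0.0s", O_RDWR);
 *	// ... CXL_IOCTL_START_WORK must succeed before faults are served ...
 *	void *psa = mmap(NULL, ps_size, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, 0);
 */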
/*
 * Detach a context from the hardware. This disables interrupts and doesn't
 * return until all outstanding interrupts for this context have completed. The
 * hardware should no longer access *ctx after this has returned.
 */
int __detach_context(struct cxl_context *ctx)
{
	enum cxl_context_status status;
	mutex_lock(&ctx->status_mutex);
	status = ctx->status;
	ctx->status = CLOSED;
	mutex_unlock(&ctx->status_mutex);
	if (status != STARTED)
		return -EBUSY;
	/* Only warn if we detached while the link was OK.
	 * If detach fails when hw is down, we don't care.
	 */
	WARN_ON(cxl_ops->detach_process(ctx) &&
		cxl_ops->link_ok(ctx->afu->adapter, ctx->afu));
	flush_work(&ctx->fault_work); /* Only needed for dedicated process */
	/*
	 * Wait until no further interrupts are presented by the PSL
	 * for this context.
	 */
	if (cxl_ops->irq_wait)
		cxl_ops->irq_wait(ctx);
	/* release the reference to the group leader and mm handling pid */
	put_pid(ctx->pid);
	put_pid(ctx->glpid);

	cxl_ctx_put();
	return 0;
}
/*
 * Detach the given context from the AFU. This doesn't actually
 * free the context but it should stop the context running in hardware
 * (ie. prevent this context from generating any further interrupts
 * so that it can be freed).
 */
void cxl_context_detach(struct cxl_context *ctx)
{
	int rc;
	rc = __detach_context(ctx);
	if (rc)
		return;

	afu_release_irqs(ctx, ctx);
	wake_up_all(&ctx->wq);
}
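/*
 * Note on the split between detach and free: __detach_context() only
 * quiesces the hardware side (detach the process element, drain
 * interrupts), while cxl_context_free() below removes the context from
 * the IDR and defers the actual memory release to RCU. This lets
 * cxl_context_detach_all() stop every context while walking the IDR
 * without freeing entries out from under the iteration.
 */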
/*
 * Detach all contexts on the given AFU.
 */
void cxl_context_detach_all(struct cxl_afu *afu)
{
	struct cxl_context *ctx;
	int tmp;

	mutex_lock(&afu->contexts_lock);
	idr_for_each_entry(&afu->contexts_idr, ctx, tmp) {
		/*
		 * Anything done in here needs to be set up before the IDR is
		 * created and torn down after the IDR is removed.
		 */
		cxl_context_detach(ctx);
		/*
		 * We are force detaching - remove any active PSA mappings so
		 * userspace cannot interfere with the card if it comes back.
		 * Easiest way to exercise this is to unbind and rebind the
		 * driver via sysfs while it is in use.
		 */
		mutex_lock(&ctx->mapping_lock);
		/* holelen of 0 means unmap from offset 0 to the end */
		if (ctx->mapping)
			unmap_mapping_range(ctx->mapping, 0, 0, 1);
		mutex_unlock(&ctx->mapping_lock);
	}
	mutex_unlock(&afu->contexts_lock);
}
static void reclaim_ctx(struct rcu_head *rcu)
{
	struct cxl_context *ctx = container_of(rcu, struct cxl_context, rcu);

	free_page((u64)ctx->sstp);
	if (ctx->ff_page)
		__free_page(ctx->ff_page);

	kfree(ctx->irq_bitmap);

	/* Drop ref to the afu device taken during cxl_context_init */
	cxl_afu_put(ctx->afu);

	kfree(ctx);
}
void cxl_context_free(struct cxl_context *ctx)
{
	mutex_lock(&ctx->afu->contexts_lock);
	idr_remove(&ctx->afu->contexts_idr, ctx->pe);
	mutex_unlock(&ctx->afu->contexts_lock);
	call_rcu(&ctx->rcu, reclaim_ctx);
}
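/*
 * Why call_rcu() rather than freeing directly: elsewhere in the driver,
 * contexts can be looked up in the IDR without holding contexts_lock
 * (under rcu_read_lock() in interrupt paths), so the memory must remain
 * valid until a grace period has elapsed after idr_remove(). reclaim_ctx()
 * then runs from RCU callback context, which is also why it only frees
 * memory and takes no locks.
 */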