/*
 * Copyright 2014 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bitmap.h>
#include <linux/sched.h>
#include <linux/pid.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/sched/mm.h>
#include <asm/cputable.h>
#include <asm/current.h>
#include <asm/copro.h>

#include "cxl.h"
/*
 * Allocates space for a CXL context.
 */
struct cxl_context *cxl_context_alloc(void)
{
	return kzalloc(sizeof(struct cxl_context), GFP_KERNEL);
}
/*
 * Initialises a CXL context.
 */
int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master)
{
	int i;

	ctx->afu = afu;
	ctx->master = master;
	ctx->pid = NULL; /* Set in start work ioctl */
	mutex_init(&ctx->mapping_lock);
	ctx->mapping = NULL;
	if (cxl_is_power8()) {
		spin_lock_init(&ctx->sste_lock);

		/*
		 * Allocate the segment table before we put it in the IDR so that we
		 * can always access it when dereferenced from IDR. For the same
		 * reason, the segment table is only destroyed after the context is
		 * removed from the IDR. Access to this in the IOCTL is protected by
		 * Linux filesystem semantics (can't IOCTL until open is complete).
		 */
		i = cxl_alloc_sst(ctx);
		if (i)
			return i;
	}
	INIT_WORK(&ctx->fault_work, cxl_handle_fault);

	init_waitqueue_head(&ctx->wq);
	spin_lock_init(&ctx->lock);

	ctx->irq_bitmap = NULL;
	ctx->pending_irq = false;
	ctx->pending_fault = false;
	ctx->pending_afu_err = false;

	INIT_LIST_HEAD(&ctx->irq_names);
	INIT_LIST_HEAD(&ctx->extra_irq_contexts);

	/*
	 * When we have to destroy all contexts in cxl_context_detach_all() we
	 * end up with afu_release_irqs() called from inside an
	 * idr_for_each_entry(). Hence we need to make sure that anything
	 * dereferenced from this IDR is ok before we allocate the IDR here.
	 * This clears out the IRQ ranges to ensure this.
	 */
	for (i = 0; i < CXL_IRQ_RANGES; i++)
		ctx->irqs.range[i] = 0;

	mutex_init(&ctx->status_mutex);
	ctx->status = OPENED;

	/*
	 * Allocating IDR! We better make sure everything's setup that
	 * dereferences from it.
	 */
	mutex_lock(&afu->contexts_lock);
	idr_preload(GFP_KERNEL);
	i = idr_alloc(&ctx->afu->contexts_idr, ctx, ctx->afu->adapter->min_pe,
		      ctx->afu->num_procs, GFP_NOWAIT);
	idr_preload_end();
	mutex_unlock(&afu->contexts_lock);
	if (i < 0)
		return i;

	ctx->pe = i;
	if (cpu_has_feature(CPU_FTR_HVMODE)) {
		ctx->elem = &ctx->afu->native->spa[i];
		ctx->external_pe = ctx->pe;
	} else {
		ctx->external_pe = -1; /* assigned when attaching */
	}
	ctx->pe_inserted = false;

	/*
	 * Take a ref on the afu so that it stays alive at least until
	 * this context is reclaimed inside reclaim_ctx.
	 */
	cxl_afu_get(afu);
	return 0;
}
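
/*
 * Record the struct address_space backing this context's file so that any
 * problem state mappings can be invalidated later (see
 * cxl_context_detach_all() and cxl_context_free()).
 */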
void cxl_context_set_mapping(struct cxl_context *ctx,
			struct address_space *mapping)
{
	mutex_lock(&ctx->mapping_lock);
	ctx->mapping = mapping;
	mutex_unlock(&ctx->mapping_lock);
}
static int cxl_mmap_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct cxl_context *ctx = vma->vm_file->private_data;
	u64 area, offset;

	offset = vmf->pgoff << PAGE_SHIFT;

	pr_devel("%s: pe: %i address: 0x%lx offset: 0x%llx\n",
			__func__, ctx->pe, vmf->address, offset);

	if (ctx->afu->current_mode == CXL_MODE_DEDICATED) {
		area = ctx->afu->psn_phys;
		if (offset >= ctx->afu->adapter->ps_size)
			return VM_FAULT_SIGBUS;
	} else {
		area = ctx->psn_phys;
		if (offset >= ctx->psn_size)
			return VM_FAULT_SIGBUS;
	}

	mutex_lock(&ctx->status_mutex);

	if (ctx->status != STARTED) {
		mutex_unlock(&ctx->status_mutex);
		pr_devel("%s: Context not started, failing problem state access\n", __func__);
		if (ctx->mmio_err_ff) {
			if (!ctx->ff_page) {
				ctx->ff_page = alloc_page(GFP_USER);
				if (!ctx->ff_page)
					return VM_FAULT_OOM;
				memset(page_address(ctx->ff_page), 0xff, PAGE_SIZE);
			}
			get_page(ctx->ff_page);
			vmf->page = ctx->ff_page;
			vma->vm_page_prot = pgprot_cached(vma->vm_page_prot);
			return 0;
		}
		return VM_FAULT_SIGBUS;
	}

	vm_insert_pfn(vma, vmf->address, (area + offset) >> PAGE_SHIFT);

	mutex_unlock(&ctx->status_mutex);

	return VM_FAULT_NOPAGE;
}
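
/* Only .fault is needed: pages are mapped on demand by cxl_mmap_fault() */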
static const struct vm_operations_struct cxl_mmap_vmops = {
	.fault = cxl_mmap_fault,
};
/*
 * Map a per-context mmio space into the given vma.
 */
int cxl_context_iomap(struct cxl_context *ctx, struct vm_area_struct *vma)
{
	u64 start = vma->vm_pgoff << PAGE_SHIFT;
	u64 len = vma->vm_end - vma->vm_start;

	if (ctx->afu->current_mode == CXL_MODE_DEDICATED) {
		if (start + len > ctx->afu->adapter->ps_size)
			return -EINVAL;

		if (cxl_is_power9()) {
			/*
			 * Make sure there is a valid problem state
			 * area space for this AFU.
			 */
			if (ctx->master && !ctx->afu->psa) {
				pr_devel("AFU doesn't support mmio space\n");
				return -EINVAL;
			}

			/* Can't mmap until the AFU is enabled */
			if (!ctx->afu->enabled)
				return -EBUSY;
		}
	} else {
		if (start + len > ctx->psn_size)
			return -EINVAL;

		/* Make sure there is a valid per process space for this AFU */
		if ((ctx->master && !ctx->afu->psa) || (!ctx->afu->pp_psa)) {
			pr_devel("AFU doesn't support mmio space\n");
			return -EINVAL;
		}

		/* Can't mmap until the AFU is enabled */
		if (!ctx->afu->enabled)
			return -EBUSY;
	}

	pr_devel("%s: mmio physical: %llx pe: %i master:%i\n", __func__,
		 ctx->psn_phys, ctx->pe, ctx->master);

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vma->vm_ops = &cxl_mmap_vmops;
	return 0;
}
/*
 * Detach a context from the hardware. This disables interrupts and doesn't
 * return until all outstanding interrupts for this context have completed. The
 * hardware should no longer access *ctx after this has returned.
 */
int __detach_context(struct cxl_context *ctx)
{
	enum cxl_context_status status;

	mutex_lock(&ctx->status_mutex);
	status = ctx->status;
	ctx->status = CLOSED;
	mutex_unlock(&ctx->status_mutex);
	if (status != STARTED)
		return -EBUSY;

	/* Only warn if we detached while the link was OK.
	 * If detach fails when hw is down, we don't care.
	 */
	WARN_ON(cxl_ops->detach_process(ctx) &&
		cxl_ops->link_ok(ctx->afu->adapter, ctx->afu));
	flush_work(&ctx->fault_work); /* Only needed for dedicated process */

	/*
	 * Wait until no further interrupts are presented by the PSL
	 * for this context.
	 */
	if (cxl_ops->irq_wait)
		cxl_ops->irq_wait(ctx);

	/* release the reference to the group leader and mm handling pid */
	put_pid(ctx->pid);

	cxl_ctx_put();

	/* Decrease the attached context count on the adapter */
	cxl_adapter_context_put(ctx->afu->adapter);

	/* Decrease the mm count on the context */
	cxl_context_mm_count_put(ctx);
	ctx->mm = NULL;

	return 0;
}
/*
 * Detach the given context from the AFU. This doesn't actually
 * free the context but it should stop the context running in hardware
 * (ie. prevent this context from generating any further interrupts
 * so that it can be freed).
 */
void cxl_context_detach(struct cxl_context *ctx)
{
	int rc;

	rc = __detach_context(ctx);
	if (rc)
		return;

	afu_release_irqs(ctx, ctx);
	wake_up_all(&ctx->wq);
}
/*
 * Detach all contexts on the given AFU.
 */
void cxl_context_detach_all(struct cxl_afu *afu)
{
	struct cxl_context *ctx;
	int tmp;

	mutex_lock(&afu->contexts_lock);
	idr_for_each_entry(&afu->contexts_idr, ctx, tmp) {
		/*
		 * Anything done in here needs to be setup before the IDR is
		 * created and torn down after the IDR is removed.
		 */
		cxl_context_detach(ctx);

		/*
		 * We are force detaching - remove any active PSA mappings so
		 * userspace cannot interfere with the card if it comes back.
		 * Easiest way to exercise this is to unbind and rebind the
		 * driver via sysfs while it is in use.
		 */
		mutex_lock(&ctx->mapping_lock);
		if (ctx->mapping)
			unmap_mapping_range(ctx->mapping, 0, 0, 1);
		mutex_unlock(&ctx->mapping_lock);
	}
	mutex_unlock(&afu->contexts_lock);
}
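
/*
 * RCU callback that performs the final free of a context once it is no
 * longer reachable through the AFU's contexts IDR.
 */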
static void reclaim_ctx(struct rcu_head *rcu)
{
	struct cxl_context *ctx = container_of(rcu, struct cxl_context, rcu);

	if (cxl_is_power8())
		free_page((u64)ctx->sstp);
	if (ctx->ff_page)
		__free_page(ctx->ff_page);
	ctx->sstp = NULL;

	kfree(ctx->irq_bitmap);

	/* Drop ref to the afu device taken during cxl_context_init */
	cxl_afu_put(ctx->afu);

	kfree(ctx);
}
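
/*
 * Remove the context from the AFU's IDR and defer the actual free to RCU so
 * that concurrent IDR walkers never see a freed context.
 */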
void cxl_context_free(struct cxl_context *ctx)
{
	if (ctx->kernelapi && ctx->mapping)
		cxl_release_mapping(ctx);
	mutex_lock(&ctx->afu->contexts_lock);
	idr_remove(&ctx->afu->contexts_idr, ctx->pe);
	mutex_unlock(&ctx->afu->contexts_lock);
	call_rcu(&ctx->rcu, reclaim_ctx);
}
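
/*
 * Pin the mm_struct attached to this context (the structure itself, not its
 * address space) so it is not freed while the context still references it.
 */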
void cxl_context_mm_count_get(struct cxl_context *ctx)
{
	if (ctx->mm)
		atomic_inc(&ctx->mm->mm_count);
}
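
/* Drop the mm_struct reference taken by cxl_context_mm_count_get() */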
void cxl_context_mm_count_put(struct cxl_context *ctx)
{
	if (ctx->mm)
		mmdrop(ctx->mm);
}