/*
 * Copyright 2014 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bitmap.h>
#include <linux/sched.h>
#include <linux/pid.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/sched/mm.h>
#include <linux/mmu_context.h>
#include <asm/cputable.h>
#include <asm/current.h>
#include <asm/copro.h>

#include "cxl.h"
/*
 * Allocates space for a CXL context.
 */
struct cxl_context *cxl_context_alloc(void)
{
	return kzalloc(sizeof(struct cxl_context), GFP_KERNEL);
}
/*
 * Initialises a CXL context.
 */
int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master)
{
	int i;

	ctx->afu = afu;
	ctx->master = master;
	ctx->pid = NULL; /* Set in start work ioctl */
	mutex_init(&ctx->mapping_lock);
	ctx->mapping = NULL;

	if (cxl_is_power8()) {
		spin_lock_init(&ctx->sste_lock);

		/*
		 * Allocate the segment table before we put it in the IDR so that we
		 * can always access it when dereferenced from IDR. For the same
		 * reason, the segment table is only destroyed after the context is
		 * removed from the IDR. Access to this in the IOCTL is protected by
		 * Linux filesystem semantics (can't IOCTL until open is complete).
		 */
		i = cxl_alloc_sst(ctx);
		if (i)
			return i;
	}
	INIT_WORK(&ctx->fault_work, cxl_handle_fault);

	init_waitqueue_head(&ctx->wq);
	spin_lock_init(&ctx->lock);

	ctx->irq_bitmap = NULL;
	ctx->pending_irq = false;
	ctx->pending_fault = false;
	ctx->pending_afu_err = false;

	INIT_LIST_HEAD(&ctx->irq_names);
	INIT_LIST_HEAD(&ctx->extra_irq_contexts);

	/*
	 * When we have to destroy all contexts in cxl_context_detach_all() we
	 * end up with afu_release_irqs() called from inside an
	 * idr_for_each_entry(). Hence we need to make sure that anything
	 * dereferenced from this IDR is ok before we allocate the IDR here.
	 * This clears out the IRQ ranges to ensure this.
	 */
	for (i = 0; i < CXL_IRQ_RANGES; i++)
		ctx->irqs.range[i] = 0;

	mutex_init(&ctx->status_mutex);

	ctx->status = OPENED;
	/*
	 * Allocating IDR! We better make sure everything's setup that
	 * dereferences from it.
	 */
	mutex_lock(&afu->contexts_lock);
	idr_preload(GFP_KERNEL);
	i = idr_alloc(&ctx->afu->contexts_idr, ctx, ctx->afu->adapter->min_pe,
		      ctx->afu->num_procs, GFP_NOWAIT);
	idr_preload_end();
	mutex_unlock(&afu->contexts_lock);
	if (i < 0)
		return i;
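
	/*
	 * Illustrative note (not one of the original comments): the
	 * idr_preload()/GFP_NOWAIT pairing above is the standard pattern for
	 * allocating an IDR entry while a lock is held -- the preload stage
	 * may sleep and pre-allocates the tree nodes, so the idr_alloc()
	 * under contexts_lock never has to.
	 */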
	ctx->pe = i;
	if (cpu_has_feature(CPU_FTR_HVMODE)) {
		ctx->elem = &ctx->afu->native->spa[i];
		ctx->external_pe = ctx->pe;
	} else {
		ctx->external_pe = -1; /* assigned when attaching */
	}
	ctx->pe_inserted = false;

	/*
	 * Take a ref on the afu so that it stays alive at least till
	 * this context is reclaimed inside reclaim_ctx.
	 */
	cxl_afu_get(afu);

	return 0;
}
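
/*
 * Illustrative only (not part of this file): a caller such as the driver's
 * char-dev open path in file.c is expected to pair these helpers roughly as
 * follows -- allocate, then initialise, then attach the address_space for
 * user mappings:
 *
 *	ctx = cxl_context_alloc();
 *	if (!ctx)
 *		return -ENOMEM;
 *	rc = cxl_context_init(ctx, afu, master);
 *	if (rc)
 *		kfree(ctx);
 *	else
 *		cxl_context_set_mapping(ctx, inode->i_mapping);
 */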
void cxl_context_set_mapping(struct cxl_context *ctx,
			struct address_space *mapping)
{
	mutex_lock(&ctx->mapping_lock);
	ctx->mapping = mapping;
	mutex_unlock(&ctx->mapping_lock);
}
static int cxl_mmap_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct cxl_context *ctx = vma->vm_file->private_data;
	u64 area, offset;

	offset = vmf->pgoff << PAGE_SHIFT;

	pr_devel("%s: pe: %i address: 0x%lx offset: 0x%llx\n",
			__func__, ctx->pe, vmf->address, offset);

	if (ctx->afu->current_mode == CXL_MODE_DEDICATED) {
		area = ctx->afu->psn_phys;
		if (offset >= ctx->afu->adapter->ps_size)
			return VM_FAULT_SIGBUS;
	} else {
		area = ctx->psn_phys;
		if (offset >= ctx->psn_size)
			return VM_FAULT_SIGBUS;
	}

	mutex_lock(&ctx->status_mutex);

	if (ctx->status != STARTED) {
		mutex_unlock(&ctx->status_mutex);
		pr_devel("%s: Context not started, failing problem state access\n", __func__);
		if (ctx->mmio_err_ff) {
			if (!ctx->ff_page) {
				ctx->ff_page = alloc_page(GFP_USER);
				if (!ctx->ff_page)
					return VM_FAULT_OOM;
				memset(page_address(ctx->ff_page), 0xff, PAGE_SIZE);
			}
			get_page(ctx->ff_page);
			vmf->page = ctx->ff_page;
			vma->vm_page_prot = pgprot_cached(vma->vm_page_prot);
			return 0;
		}
		return VM_FAULT_SIGBUS;
	}

	vm_insert_pfn(vma, vmf->address, (area + offset) >> PAGE_SHIFT);

	mutex_unlock(&ctx->status_mutex);

	return VM_FAULT_NOPAGE;
}
static const struct vm_operations_struct cxl_mmap_vmops = {
	.fault = cxl_mmap_fault,
};
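
/*
 * Note on the fault handler above: once the context has started, the PTE is
 * installed directly with vm_insert_pfn() and VM_FAULT_NOPAGE tells the core
 * fault path no struct page is involved. If the context is not started and
 * mmio_err_ff is set, a cached page filled with 0xff is handed back instead,
 * which resembles what reads from dead PCI hardware return.
 */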
/*
 * Map a per-context mmio space into the given vma.
 */
int cxl_context_iomap(struct cxl_context *ctx, struct vm_area_struct *vma)
{
	u64 start = vma->vm_pgoff << PAGE_SHIFT;
	u64 len = vma->vm_end - vma->vm_start;

	if (ctx->afu->current_mode == CXL_MODE_DEDICATED) {
		if (start + len > ctx->afu->adapter->ps_size)
			return -EINVAL;

		if (cxl_is_power9()) {
			/*
			 * Make sure there is a valid problem state
			 * area space for this AFU.
			 */
			if (ctx->master && !ctx->afu->psa) {
				pr_devel("AFU doesn't support mmio space\n");
				return -EINVAL;
			}

			/* Can't mmap until the AFU is enabled */
			if (!ctx->afu->enabled)
				return -EBUSY;
		}
	} else {
		if (start + len > ctx->psn_size)
			return -EINVAL;

		/* Make sure there is a valid per process space for this AFU */
		if ((ctx->master && !ctx->afu->psa) || (!ctx->afu->pp_psa)) {
			pr_devel("AFU doesn't support mmio space\n");
			return -EINVAL;
		}

		/* Can't mmap until the AFU is enabled */
		if (!ctx->afu->enabled)
			return -EBUSY;
	}

	pr_devel("%s: mmio physical: %llx pe: %i master:%i\n", __func__,
		 ctx->psn_phys, ctx->pe, ctx->master);

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vma->vm_ops = &cxl_mmap_vmops;
	return 0;
}
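
/*
 * Illustrative only: a file_operations .mmap handler wired to this helper
 * would look roughly like the sketch below (afu_mmap in the driver's file.c
 * plays this role):
 *
 *	static int example_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct cxl_context *ctx = file->private_data;
 *
 *		return cxl_context_iomap(ctx, vma);
 *	}
 */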
/*
 * Detach a context from the hardware. This disables interrupts and doesn't
 * return until all outstanding interrupts for this context have completed. The
 * hardware should no longer access *ctx after this has returned.
 */
int __detach_context(struct cxl_context *ctx)
{
	enum cxl_context_status status;

	mutex_lock(&ctx->status_mutex);
	status = ctx->status;
	ctx->status = CLOSED;
	mutex_unlock(&ctx->status_mutex);
	if (status != STARTED)
		return -EBUSY;

	/* Only warn if we detached while the link was OK.
	 * If detach fails when hw is down, we don't care.
	 */
	WARN_ON(cxl_ops->detach_process(ctx) &&
		cxl_ops->link_ok(ctx->afu->adapter, ctx->afu));
	flush_work(&ctx->fault_work); /* Only needed for dedicated process */

	/*
	 * Wait until no further interrupts are presented by the PSL
	 * for this context.
	 */
	if (cxl_ops->irq_wait)
		cxl_ops->irq_wait(ctx);

	/* release the reference to the group leader and mm handling pid */
	put_pid(ctx->pid);

	cxl_ctx_put();

	/* Decrease the attached context count on the adapter */
	cxl_adapter_context_put(ctx->afu->adapter);

	/* Decrease the mm count on the context */
	cxl_context_mm_count_put(ctx);
	if (ctx->mm)
		mm_context_remove_copro(ctx->mm);
	ctx->mm = NULL;

	return 0;
}
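
/*
 * Note on ordering: flipping ctx->status to CLOSED under status_mutex means
 * cxl_mmap_fault(), which checks the status under the same mutex, can no
 * longer hand out live problem-state mappings once a detach has begun.
 */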
/*
 * Detach the given context from the AFU. This doesn't actually
 * free the context but it should stop the context running in hardware
 * (ie. prevent this context from generating any further interrupts
 * so that it can be freed).
 */
void cxl_context_detach(struct cxl_context *ctx)
{
	int rc;

	rc = __detach_context(ctx);
	if (rc)
		return;

	afu_release_irqs(ctx, ctx);
	wake_up_all(&ctx->wq);
}
/*
 * Detach all contexts on the given AFU.
 */
void cxl_context_detach_all(struct cxl_afu *afu)
{
	struct cxl_context *ctx;
	int tmp;

	mutex_lock(&afu->contexts_lock);
	idr_for_each_entry(&afu->contexts_idr, ctx, tmp) {
		/*
		 * Anything done in here needs to be setup before the IDR is
		 * created and torn down after the IDR is removed.
		 */
		cxl_context_detach(ctx);

		/*
		 * We are force detaching - remove any active PSA mappings so
		 * userspace cannot interfere with the card if it comes back.
		 * Easiest way to exercise this is to unbind and rebind the
		 * driver via sysfs while it is in use.
		 */
		mutex_lock(&ctx->mapping_lock);
		if (ctx->mapping)
			unmap_mapping_range(ctx->mapping, 0, 0, 1);
		mutex_unlock(&ctx->mapping_lock);
	}
	mutex_unlock(&afu->contexts_lock);
}
static void reclaim_ctx(struct rcu_head *rcu)
{
	struct cxl_context *ctx = container_of(rcu, struct cxl_context, rcu);

	if (cxl_is_power8())
		free_page((u64)ctx->sstp);
	if (ctx->ff_page)
		__free_page(ctx->ff_page);
	ctx->sstp = NULL;

	kfree(ctx->irq_bitmap);

	/* Drop ref to the afu device taken during cxl_context_init */
	cxl_afu_put(ctx->afu);

	kfree(ctx);
}
void cxl_context_free(struct cxl_context *ctx)
{
	if (ctx->kernelapi && ctx->mapping)
		cxl_release_mapping(ctx);
	mutex_lock(&ctx->afu->contexts_lock);
	idr_remove(&ctx->afu->contexts_idr, ctx->pe);
	mutex_unlock(&ctx->afu->contexts_lock);
	call_rcu(&ctx->rcu, reclaim_ctx);
}
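
/*
 * The context is freed via call_rcu() rather than immediately because the
 * interrupt demux paths look contexts up in afu->contexts_idr under
 * rcu_read_lock() without taking contexts_lock; deferring the free to
 * reclaim_ctx() after a grace period keeps those lockless readers safe.
 */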
void cxl_context_mm_count_get(struct cxl_context *ctx)
{
	if (ctx->mm)
		atomic_inc(&ctx->mm->mm_count);
}

void cxl_context_mm_count_put(struct cxl_context *ctx)
{
	if (ctx->mm)
		mmdrop(ctx->mm);
}
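
/*
 * Note: the get/put pair above pins the mm_struct itself (mm_count), not the
 * address space (mm_users), so holding it keeps the struct valid without
 * blocking exit_mmap(). On kernels that provide it, mmgrab(ctx->mm) is the
 * idiomatic spelling of the atomic_inc() above, paired with mmdrop().
 */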