/*
 * Copyright 2014 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/workqueue.h>
#include <linux/sched.h>
#include <linux/pid.h>
#include <linux/mm.h>
#include <linux/moduleparam.h>

#undef MODULE_PARAM_PREFIX
#define MODULE_PARAM_PREFIX "cxl" "."
#include <asm/current.h>
#include <asm/copro.h>
#include <asm/mmu.h>

#include "cxl.h"
24 /* This finds a free SSTE for the given SLB */
25 static struct cxl_sste
* find_free_sste(struct cxl_context
*ctx
,
26 struct copro_slb
*slb
)
28 struct cxl_sste
*primary
, *sste
;
29 unsigned int mask
= (ctx
->sst_size
>> 7) - 1; /* SSTP0[SegTableSize] */
33 if (slb
->vsid
& SLB_VSID_B_1T
)
34 hash
= (slb
->esid
>> SID_SHIFT_1T
) & mask
;
36 hash
= (slb
->esid
>> SID_SHIFT
) & mask
;
38 primary
= ctx
->sstp
+ (hash
<< 3);
40 for (entry
= 0, sste
= primary
; entry
< 8; entry
++, sste
++) {
41 if (!(be64_to_cpu(sste
->esid_data
) & SLB_ESID_V
))
45 /* Nothing free, select an entry to cast out */
46 sste
= primary
+ ctx
->sst_lru
;
47 ctx
->sst_lru
= (ctx
->sst_lru
+ 1) & 0x7;
52 static void cxl_load_segment(struct cxl_context
*ctx
, struct copro_slb
*slb
)
54 /* mask is the group index, we search primary and secondary here. */
55 struct cxl_sste
*sste
;
58 spin_lock_irqsave(&ctx
->sste_lock
, flags
);
59 sste
= find_free_sste(ctx
, slb
);
61 pr_devel("CXL Populating SST[%li]: %#llx %#llx\n",
62 sste
- ctx
->sstp
, slb
->vsid
, slb
->esid
);
64 sste
->vsid_data
= cpu_to_be64(slb
->vsid
);
65 sste
->esid_data
= cpu_to_be64(slb
->esid
);
66 spin_unlock_irqrestore(&ctx
->sste_lock
, flags
);
69 static int cxl_fault_segment(struct cxl_context
*ctx
, struct mm_struct
*mm
,
72 struct copro_slb slb
= {0,0};
75 if (!(rc
= copro_calculate_slb(mm
, ea
, &slb
))) {
76 cxl_load_segment(ctx
, &slb
);
82 static void cxl_ack_ae(struct cxl_context
*ctx
)
86 cxl_ack_irq(ctx
, CXL_PSL_TFC_An_AE
, 0);
88 spin_lock_irqsave(&ctx
->lock
, flags
);
89 ctx
->pending_fault
= true;
90 ctx
->fault_addr
= ctx
->dar
;
91 ctx
->fault_dsisr
= ctx
->dsisr
;
92 spin_unlock_irqrestore(&ctx
->lock
, flags
);
94 wake_up_all(&ctx
->wq
);
97 static int cxl_handle_segment_miss(struct cxl_context
*ctx
,
98 struct mm_struct
*mm
, u64 ea
)
102 pr_devel("CXL interrupt: Segment fault pe: %i ea: %#llx\n", ctx
->pe
, ea
);
104 if ((rc
= cxl_fault_segment(ctx
, mm
, ea
)))
108 mb(); /* Order seg table write to TFC MMIO write */
109 cxl_ack_irq(ctx
, CXL_PSL_TFC_An_R
, 0);
115 static void cxl_handle_page_fault(struct cxl_context
*ctx
,
116 struct mm_struct
*mm
, u64 dsisr
, u64 dar
)
120 unsigned long access
, flags
;
122 if ((result
= copro_handle_mm_fault(mm
, dar
, dsisr
, &flt
))) {
123 pr_devel("copro_handle_mm_fault failed: %#x\n", result
);
124 return cxl_ack_ae(ctx
);
128 * update_mmu_cache() will not have loaded the hash since current->trap
129 * is not a 0x400 or 0x300, so just call hash_page_mm() here.
131 access
= _PAGE_PRESENT
;
132 if (dsisr
& CXL_PSL_DSISR_An_S
)
134 if ((!ctx
->kernel
) || ~(dar
& (1ULL << 63)))
135 access
|= _PAGE_USER
;
136 local_irq_save(flags
);
137 hash_page_mm(mm
, dar
, access
, 0x300);
138 local_irq_restore(flags
);
140 pr_devel("Page fault successfully handled for pe: %i!\n", ctx
->pe
);
141 cxl_ack_irq(ctx
, CXL_PSL_TFC_An_R
, 0);
144 void cxl_handle_fault(struct work_struct
*fault_work
)
146 struct cxl_context
*ctx
=
147 container_of(fault_work
, struct cxl_context
, fault_work
);
148 u64 dsisr
= ctx
->dsisr
;
150 struct task_struct
*task
;
151 struct mm_struct
*mm
;
153 if (cxl_p2n_read(ctx
->afu
, CXL_PSL_DSISR_An
) != dsisr
||
154 cxl_p2n_read(ctx
->afu
, CXL_PSL_DAR_An
) != dar
||
155 cxl_p2n_read(ctx
->afu
, CXL_PSL_PEHandle_An
) != ctx
->pe
) {
156 /* Most likely explanation is harmless - a dedicated process
157 * has detached and these were cleared by the PSL purge, but
158 * warn about it just in case */
159 dev_notice(&ctx
->afu
->dev
, "cxl_handle_fault: Translation fault regs changed\n");
163 pr_devel("CXL BOTTOM HALF handling fault for afu pe: %i. "
164 "DSISR: %#llx DAR: %#llx\n", ctx
->pe
, dsisr
, dar
);
166 if (!(task
= get_pid_task(ctx
->pid
, PIDTYPE_PID
))) {
167 pr_devel("cxl_handle_fault unable to get task %i\n",
172 if (!(mm
= get_task_mm(task
))) {
173 pr_devel("cxl_handle_fault unable to get mm %i\n",
179 if (dsisr
& CXL_PSL_DSISR_An_DS
)
180 cxl_handle_segment_miss(ctx
, mm
, dar
);
181 else if (dsisr
& CXL_PSL_DSISR_An_DM
)
182 cxl_handle_page_fault(ctx
, mm
, dsisr
, dar
);
184 WARN(1, "cxl_handle_fault has nothing to handle\n");
188 put_task_struct(task
);
191 static void cxl_prefault_one(struct cxl_context
*ctx
, u64 ea
)
194 struct task_struct
*task
;
195 struct mm_struct
*mm
;
197 if (!(task
= get_pid_task(ctx
->pid
, PIDTYPE_PID
))) {
198 pr_devel("cxl_prefault_one unable to get task %i\n",
202 if (!(mm
= get_task_mm(task
))) {
203 pr_devel("cxl_prefault_one unable to get mm %i\n",
205 put_task_struct(task
);
209 rc
= cxl_fault_segment(ctx
, mm
, ea
);
212 put_task_struct(task
);
/*
 * Return the first effective address of the segment following the one
 * containing @ea: segments are 1TB when the VSID has the B=1T class bit
 * set, 256MB otherwise.
 */
static u64 next_segment(u64 ea, u64 vsid)
{
	if (vsid & SLB_VSID_B_1T)
		ea |= (1ULL << 40) - 1;
	else
		ea |= (1ULL << 28) - 1;

	return ea + 1;
}
225 static void cxl_prefault_vma(struct cxl_context
*ctx
)
227 u64 ea
, last_esid
= 0;
228 struct copro_slb slb
;
229 struct vm_area_struct
*vma
;
231 struct task_struct
*task
;
232 struct mm_struct
*mm
;
234 if (!(task
= get_pid_task(ctx
->pid
, PIDTYPE_PID
))) {
235 pr_devel("cxl_prefault_vma unable to get task %i\n",
239 if (!(mm
= get_task_mm(task
))) {
240 pr_devel("cxl_prefault_vm unable to get mm %i\n",
245 down_read(&mm
->mmap_sem
);
246 for (vma
= mm
->mmap
; vma
; vma
= vma
->vm_next
) {
247 for (ea
= vma
->vm_start
; ea
< vma
->vm_end
;
248 ea
= next_segment(ea
, slb
.vsid
)) {
249 rc
= copro_calculate_slb(mm
, ea
, &slb
);
253 if (last_esid
== slb
.esid
)
256 cxl_load_segment(ctx
, &slb
);
257 last_esid
= slb
.esid
;
260 up_read(&mm
->mmap_sem
);
264 put_task_struct(task
);
267 void cxl_prefault(struct cxl_context
*ctx
, u64 wed
)
269 switch (ctx
->afu
->prefault_mode
) {
270 case CXL_PREFAULT_WED
:
271 cxl_prefault_one(ctx
, wed
);
273 case CXL_PREFAULT_ALL
:
274 cxl_prefault_vma(ctx
);