/*
 * Copyright 2014 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/workqueue.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/pid.h>
#include <linux/mm.h>
#include <linux/moduleparam.h>

#undef MODULE_PARAM_PREFIX
#define MODULE_PARAM_PREFIX "cxl" "."
#include <asm/current.h>
#include <asm/copro.h>
#include <asm/mmu.h>

#include "cxl.h"
#include "trace.h"
static bool sste_matches(struct cxl_sste *sste, struct copro_slb *slb)
{
        return ((sste->vsid_data == cpu_to_be64(slb->vsid)) &&
                (sste->esid_data == cpu_to_be64(slb->esid)));
}
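
/*
 * Note: segment table entries are stored big-endian, so the comparison is
 * done on cpu_to_be64-converted values rather than byte-swapping each
 * table entry back to host order.
 */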
/*
 * This finds a free SSTE for the given SLB, or returns NULL if it's already in
 * the segment table.
 */
static struct cxl_sste *find_free_sste(struct cxl_context *ctx,
                                       struct copro_slb *slb)
{
        struct cxl_sste *primary, *sste, *ret = NULL;
        unsigned int mask = (ctx->sst_size >> 7) - 1; /* SSTP0[SegTableSize] */
        unsigned int entry, hash;

        if (slb->vsid & SLB_VSID_B_1T)
                hash = (slb->esid >> SID_SHIFT_1T) & mask;
        else /* 256M */
                hash = (slb->esid >> SID_SHIFT) & mask;

        primary = ctx->sstp + (hash << 3);

        for (entry = 0, sste = primary; entry < 8; entry++, sste++) {
                if (!ret && !(be64_to_cpu(sste->esid_data) & SLB_ESID_V))
                        ret = sste;
                if (sste_matches(sste, slb))
                        return NULL;
        }
        if (ret)
                return ret;

        /* Nothing free, select an entry to cast out */
        ret = primary + ctx->sst_lru;
        ctx->sst_lru = (ctx->sst_lru + 1) & 0x7;

        return ret;
}
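
/*
 * Worked example of the indexing above, assuming 16-byte SSTEs (which the
 * ">> 7" implies): a group of 8 entries is 128 bytes, so ctx->sst_size >> 7
 * is the number of groups and "hash << 3" turns a group number into an
 * entry index. For a hypothetical 4K segment table that gives 32 groups
 * and mask = 0x1f. When all 8 entries in a group are valid and none match,
 * ctx->sst_lru picks the cast-out victim round-robin within the group.
 */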
static void cxl_load_segment(struct cxl_context *ctx, struct copro_slb *slb)
{
        /* mask is the group index, we search primary and secondary here. */
        struct cxl_sste *sste;
        unsigned long flags;

        spin_lock_irqsave(&ctx->sste_lock, flags);
        sste = find_free_sste(ctx, slb);
        if (!sste)
                goto out_unlock;

        pr_devel("CXL Populating SST[%li]: %#llx %#llx\n",
                        sste - ctx->sstp, slb->vsid, slb->esid);
        trace_cxl_ste_write(ctx, sste - ctx->sstp, slb->esid, slb->vsid);

        sste->vsid_data = cpu_to_be64(slb->vsid);
        sste->esid_data = cpu_to_be64(slb->esid);
out_unlock:
        spin_unlock_irqrestore(&ctx->sste_lock, flags);
}
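
/*
 * All segment table updates are serialised by ctx->sste_lock (taken above
 * with interrupts disabled); a NULL return from find_free_sste() means the
 * segment is already present and the write is skipped.
 */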
static int cxl_fault_segment(struct cxl_context *ctx, struct mm_struct *mm,
                             u64 ea)
{
        struct copro_slb slb = {0,0};
        int rc;

        if (!(rc = copro_calculate_slb(mm, ea, &slb))) {
                cxl_load_segment(ctx, &slb);
        }

        return rc;
}
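
/*
 * copro_calculate_slb() derives the esid/vsid pair for ea from the mm's
 * segment mappings; a non-zero return (no valid translation) is propagated
 * to the caller instead of loading an entry.
 */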
static void cxl_ack_ae(struct cxl_context *ctx)
{
        unsigned long flags;

        cxl_ops->ack_irq(ctx, CXL_PSL_TFC_An_AE, 0);

        spin_lock_irqsave(&ctx->lock, flags);
        ctx->pending_fault = true;
        ctx->fault_addr = ctx->dar;
        ctx->fault_dsisr = ctx->dsisr;
        spin_unlock_irqrestore(&ctx->lock, flags);

        wake_up_all(&ctx->wq);
}
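
/*
 * Acking with CXL_PSL_TFC_An_AE signals an address error back to the AFU.
 * The fault details are stashed on the context and the waitqueue woken,
 * presumably so that a userspace waiter can observe the failed translation.
 */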
static int cxl_handle_segment_miss(struct cxl_context *ctx,
                                   struct mm_struct *mm, u64 ea)
{
        int rc;

        pr_devel("CXL interrupt: Segment fault pe: %i ea: %#llx\n", ctx->pe, ea);
        trace_cxl_ste_miss(ctx, ea);

        if ((rc = cxl_fault_segment(ctx, mm, ea)))
                cxl_ack_ae(ctx);
        else {
                mb(); /* Order seg table write to TFC MMIO write */
                cxl_ops->ack_irq(ctx, CXL_PSL_TFC_An_R, 0);
        }

        return IRQ_HANDLED;
}
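
/*
 * The mb() above ensures the PSL cannot restart translation (triggered by
 * the CXL_PSL_TFC_An_R MMIO ack) before the new SSTE is visible in memory.
 */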
static void cxl_handle_page_fault(struct cxl_context *ctx,
                                  struct mm_struct *mm, u64 dsisr, u64 dar)
{
        unsigned flt = 0;
        int result;
        unsigned long access, flags, inv_flags = 0;

        trace_cxl_pte_miss(ctx, dsisr, dar);

        if ((result = copro_handle_mm_fault(mm, dar, dsisr, &flt))) {
                pr_devel("copro_handle_mm_fault failed: %#x\n", result);
                return cxl_ack_ae(ctx);
        }

        if (!radix_enabled()) {
                /*
                 * update_mmu_cache() will not have loaded the hash since current->trap
                 * is not a 0x400 or 0x300, so just call hash_page_mm() here.
                 */
                access = _PAGE_PRESENT | _PAGE_READ;
                if (dsisr & CXL_PSL_DSISR_An_S)
                        access |= _PAGE_WRITE;

                access |= _PAGE_PRIVILEGED;
                if ((!ctx->kernel) || (REGION_ID(dar) == USER_REGION_ID))
                        access &= ~_PAGE_PRIVILEGED;

                if (dsisr & DSISR_NOHPTE)
                        inv_flags |= HPTE_NOHPTE_UPDATE;

                local_irq_save(flags);
                hash_page_mm(mm, dar, access, 0x300, inv_flags);
                local_irq_restore(flags);
        }
        pr_devel("Page fault successfully handled for pe: %i!\n", ctx->pe);
        cxl_ops->ack_irq(ctx, CXL_PSL_TFC_An_R, 0);
}
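
/*
 * On hash MMUs the HPTE has to be inserted by hand here because this fault
 * did not arrive via the usual 0x300/0x400 exception path that
 * update_mmu_cache() keys off. Under radix, copro_handle_mm_fault() alone
 * suffices since the hardware can translate through the process page tables.
 */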
/*
 * Returns the mm_struct corresponding to the context ctx.
 * If mm_users == 0, the context may be in the process of being closed.
 */
static struct mm_struct *get_mem_context(struct cxl_context *ctx)
{
        if (ctx->mm == NULL)
                return NULL;

        if (!atomic_inc_not_zero(&ctx->mm->mm_users))
                return NULL;

        return ctx->mm;
}
static bool cxl_is_segment_miss(struct cxl_context *ctx, u64 dsisr)
{
        if ((cxl_is_power8() && (dsisr & CXL_PSL_DSISR_An_DS)))
                return true;

        return false;
}
static bool cxl_is_page_fault(struct cxl_context *ctx, u64 dsisr)
{
        u64 crs; /* Translation Checkout Response Status */

        if ((cxl_is_power8()) && (dsisr & CXL_PSL_DSISR_An_DM))
                return true;

        if (cxl_is_power9()) {
                crs = (dsisr & CXL_PSL9_DSISR_An_CO_MASK);
                if ((crs == CXL_PSL9_DSISR_An_PF_SLR) ||
                    (crs == CXL_PSL9_DSISR_An_PF_RGC) ||
                    (crs == CXL_PSL9_DSISR_An_PF_RGP) ||
                    (crs == CXL_PSL9_DSISR_An_PF_HRH) ||
                    (crs == CXL_PSL9_DSISR_An_PF_STEG) ||
                    (crs == CXL_PSL9_DSISR_An_URTCH)) {
                        return true;
                }
        }

        return false;
}
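
/*
 * On POWER9 the PSL reports the fault class in the Translation Checkout
 * Response Status field of the DSISR; the codes checked above all denote
 * conditions resolvable by handling an ordinary page fault.
 */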
void cxl_handle_fault(struct work_struct *fault_work)
{
        struct cxl_context *ctx =
                container_of(fault_work, struct cxl_context, fault_work);
        u64 dsisr = ctx->dsisr;
        u64 dar = ctx->dar;
        struct mm_struct *mm = NULL;

        if (cpu_has_feature(CPU_FTR_HVMODE)) {
                if (cxl_p2n_read(ctx->afu, CXL_PSL_DSISR_An) != dsisr ||
                    cxl_p2n_read(ctx->afu, CXL_PSL_DAR_An) != dar ||
                    cxl_p2n_read(ctx->afu, CXL_PSL_PEHandle_An) != ctx->pe) {
                        /* Most likely explanation is harmless - a dedicated
                         * process has detached and these were cleared by the
                         * PSL purge, but warn about it just in case
                         */
                        dev_notice(&ctx->afu->dev, "cxl_handle_fault: Translation fault regs changed\n");
                        return;
                }
        }

        /* Early return if the context is being / has been detached */
        if (ctx->status == CLOSED) {
                cxl_ack_ae(ctx);
                return;
        }

        pr_devel("CXL BOTTOM HALF handling fault for afu pe: %i. "
                "DSISR: %#llx DAR: %#llx\n", ctx->pe, dsisr, dar);

        if (!ctx->kernel) {
                mm = get_mem_context(ctx);
                if (mm == NULL) {
                        pr_devel("%s: unable to get mm for pe=%d pid=%i\n",
                                 __func__, ctx->pe, pid_nr(ctx->pid));
                        cxl_ack_ae(ctx);
                        return;
                } else {
                        pr_devel("Handling page fault for pe=%d pid=%i\n",
                                 ctx->pe, pid_nr(ctx->pid));
                }
        }

        if (cxl_is_segment_miss(ctx, dsisr))
                cxl_handle_segment_miss(ctx, mm, dar);
        else if (cxl_is_page_fault(ctx, dsisr))
                cxl_handle_page_fault(ctx, mm, dsisr, dar);
        else
                WARN(1, "cxl_handle_fault has nothing to handle\n");

        if (mm)
                mmput(mm);
}
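
/*
 * This is the workqueue bottom half scheduled from the PSL interrupt
 * handler: revalidate the fault registers (bare-metal only), take a
 * reference on the mm for user contexts, dispatch to the segment-miss or
 * page-fault path, then drop the mm reference.
 */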
static void cxl_prefault_one(struct cxl_context *ctx, u64 ea)
{
        struct mm_struct *mm;

        mm = get_mem_context(ctx);
        if (mm == NULL) {
                pr_devel("cxl_prefault_one unable to get mm %i\n",
                         pid_nr(ctx->pid));
                return;
        }

        cxl_fault_segment(ctx, mm, ea);

        mmput(mm);
}
static u64 next_segment(u64 ea, u64 vsid)
{
        if (vsid & SLB_VSID_B_1T)
                ea |= (1ULL << 40) - 1;
        else
                ea |= (1ULL << 28) - 1;

        return ea + 1;
}
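
/*
 * Example: for a 256M segment, "ea |= (1ULL << 28) - 1" sets the low
 * 28 bits, so ea + 1 is the base of the following 256M segment
 * (0x12340000 -> 0x20000000); 1T segments round to the next 1 << 40
 * boundary the same way.
 */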
static void cxl_prefault_vma(struct cxl_context *ctx)
{
        u64 ea, last_esid = 0;
        struct copro_slb slb;
        struct vm_area_struct *vma;
        int rc;
        struct mm_struct *mm;

        mm = get_mem_context(ctx);
        if (mm == NULL) {
                pr_devel("cxl_prefault_vm unable to get mm %i\n",
                         pid_nr(ctx->pid));
                return;
        }

        down_read(&mm->mmap_sem);
        for (vma = mm->mmap; vma; vma = vma->vm_next) {
                for (ea = vma->vm_start; ea < vma->vm_end;
                                ea = next_segment(ea, slb.vsid)) {
                        rc = copro_calculate_slb(mm, ea, &slb);
                        if (rc)
                                continue;

                        if (last_esid == slb.esid)
                                continue;

                        cxl_load_segment(ctx, &slb);
                        last_esid = slb.esid;
                }
        }
        up_read(&mm->mmap_sem);

        mmput(mm);
}
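
/*
 * The walk above holds mmap_sem for read; the last_esid check skips
 * consecutive addresses that fall in the same segment, so each segment is
 * loaded at most once as the scan advances.
 */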
void cxl_prefault(struct cxl_context *ctx, u64 wed)
{
        switch (ctx->afu->prefault_mode) {
        case CXL_PREFAULT_WED:
                cxl_prefault_one(ctx, wed);
                break;
        case CXL_PREFAULT_ALL:
                cxl_prefault_vma(ctx);
                break;
        default:
                break;
        }
}