/*
 * Copyright 2014 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/workqueue.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/pid.h>
#include <linux/mm.h>
#include <linux/moduleparam.h>

#undef MODULE_PARAM_PREFIX
#define MODULE_PARAM_PREFIX "cxl" "."
#include <asm/current.h>
#include <asm/copro.h>
#include <asm/mmu.h>

#include "cxl.h"
#include "trace.h"

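/* Return true if this SST entry already maps the given SLB. */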
static bool sste_matches(struct cxl_sste *sste, struct copro_slb *slb)
{
	return ((sste->vsid_data == cpu_to_be64(slb->vsid)) &&
		(sste->esid_data == cpu_to_be64(slb->esid)));
}

/*
 * This finds a free SSTE for the given SLB, or returns NULL if it's already in
 * the segment table.
 */
static struct cxl_sste *find_free_sste(struct cxl_context *ctx,
				       struct copro_slb *slb)
{
	struct cxl_sste *primary, *sste, *ret = NULL;
	unsigned int mask = (ctx->sst_size >> 7) - 1; /* SSTP0[SegTableSize] */
	unsigned int entry;
	unsigned int hash;

	if (slb->vsid & SLB_VSID_B_1T)
		hash = (slb->esid >> SID_SHIFT_1T) & mask;
	else /* 256M */
		hash = (slb->esid >> SID_SHIFT) & mask;

	primary = ctx->sstp + (hash << 3);

	for (entry = 0, sste = primary; entry < 8; entry++, sste++) {
		if (!ret && !(be64_to_cpu(sste->esid_data) & SLB_ESID_V))
			ret = sste;
		if (sste_matches(sste, slb))
			return NULL;
	}
	if (ret)
		return ret;

	/* Nothing free, select an entry to cast out */
	ret = primary + ctx->sst_lru;
	ctx->sst_lru = (ctx->sst_lru + 1) & 0x7;

	return ret;
}

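/*
 * Insert the given SLB into the context's segment table, taking sste_lock to
 * serialise updates. If the mapping is already present this is a no-op.
 */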
static void cxl_load_segment(struct cxl_context *ctx, struct copro_slb *slb)
{
	/* mask is the group index, we search primary and secondary here. */
	struct cxl_sste *sste;
	unsigned long flags;

	spin_lock_irqsave(&ctx->sste_lock, flags);
	sste = find_free_sste(ctx, slb);
	if (!sste)
		goto out_unlock;

	pr_devel("CXL Populating SST[%li]: %#llx %#llx\n",
			sste - ctx->sstp, slb->vsid, slb->esid);
	trace_cxl_ste_write(ctx, sste - ctx->sstp, slb->esid, slb->vsid);

	sste->vsid_data = cpu_to_be64(slb->vsid);
	sste->esid_data = cpu_to_be64(slb->esid);
out_unlock:
	spin_unlock_irqrestore(&ctx->sste_lock, flags);
}

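/*
 * Resolve the SLB for the given effective address and, if that succeeds,
 * load it into the segment table. Returns the copro_calculate_slb() result.
 */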
static int cxl_fault_segment(struct cxl_context *ctx, struct mm_struct *mm,
			     u64 ea)
{
	struct copro_slb slb = {0,0};
	int rc;

	if (!(rc = copro_calculate_slb(mm, ea, &slb))) {
		cxl_load_segment(ctx, &slb);
	}

	return rc;
}

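/*
 * Acknowledge the translation fault with an address error and record it on
 * the context so that waiters on ctx->wq can report it.
 */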
static void cxl_ack_ae(struct cxl_context *ctx)
{
	unsigned long flags;

	cxl_ops->ack_irq(ctx, CXL_PSL_TFC_An_AE, 0);

	spin_lock_irqsave(&ctx->lock, flags);
	ctx->pending_fault = true;
	ctx->fault_addr = ctx->dar;
	ctx->fault_dsisr = ctx->dsisr;
	spin_unlock_irqrestore(&ctx->lock, flags);

	wake_up_all(&ctx->wq);
}

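/*
 * Handle a segment (SLB) miss: populate the segment table for the faulting
 * address and restart the translation, or raise an address error if the
 * address cannot be mapped.
 */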
static int cxl_handle_segment_miss(struct cxl_context *ctx,
				   struct mm_struct *mm, u64 ea)
{
	int rc;

	pr_devel("CXL interrupt: Segment fault pe: %i ea: %#llx\n", ctx->pe, ea);
	trace_cxl_ste_miss(ctx, ea);

	if ((rc = cxl_fault_segment(ctx, mm, ea)))
		cxl_ack_ae(ctx);
	else {
		mb(); /* Order seg table write to TFC MMIO write */
		cxl_ops->ack_irq(ctx, CXL_PSL_TFC_An_R, 0);
	}

	return IRQ_HANDLED;
}

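/*
 * Handle a page (PTE) miss: fault the page in on behalf of the AFU, insert
 * the translation into the hash page table and restart the transaction.
 */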
static void cxl_handle_page_fault(struct cxl_context *ctx,
				  struct mm_struct *mm, u64 dsisr, u64 dar)
{
	unsigned flt = 0;
	int result;
	unsigned long access, flags, inv_flags = 0;

	trace_cxl_pte_miss(ctx, dsisr, dar);

	if ((result = copro_handle_mm_fault(mm, dar, dsisr, &flt))) {
		pr_devel("copro_handle_mm_fault failed: %#x\n", result);
		return cxl_ack_ae(ctx);
	}

	/*
	 * update_mmu_cache() will not have loaded the hash since current->trap
	 * is not a 0x400 or 0x300, so just call hash_page_mm() here.
	 */
	access = _PAGE_PRESENT | _PAGE_READ;
	if (dsisr & CXL_PSL_DSISR_An_S)
		access |= _PAGE_WRITE;

	access |= _PAGE_PRIVILEGED;
	if ((!ctx->kernel) || (REGION_ID(dar) == USER_REGION_ID))
		access &= ~_PAGE_PRIVILEGED;

	if (dsisr & DSISR_NOHPTE)
		inv_flags |= HPTE_NOHPTE_UPDATE;

	local_irq_save(flags);
	hash_page_mm(mm, dar, access, 0x300, inv_flags);
	local_irq_restore(flags);

	pr_devel("Page fault successfully handled for pe: %i!\n", ctx->pe);
	cxl_ops->ack_irq(ctx, CXL_PSL_TFC_An_R, 0);
}

/*
 * Returns the mm_struct corresponding to the context ctx via ctx->pid.
 * If that task has exited, we use the task group leader accessible via
 * ctx->glpid to find the next task in the thread group that has a valid
 * mm_struct associated with it. If such a task is found, ctx->pid is
 * updated to refer to it for subsequent translations. If no valid
 * mm_struct is found in the task group to service the fault, NULL is
 * returned.
 */
static struct mm_struct *get_mem_context(struct cxl_context *ctx)
{
	struct task_struct *task = NULL;
	struct mm_struct *mm = NULL;
	struct pid *old_pid = ctx->pid;

	if (old_pid == NULL) {
		pr_warn("%s: Invalid context for pe=%d\n",
			__func__, ctx->pe);
		return NULL;
	}

	task = get_pid_task(old_pid, PIDTYPE_PID);

	/*
	 * pid_alive may look racy but it saves us a costly get_task_mm when
	 * the task is a zombie. In the worst case we may think a task that
	 * is about to die is still alive, but get_task_mm will then return
	 * NULL.
	 */
	if (task != NULL && pid_alive(task))
		mm = get_task_mm(task);

	/* release the task struct that was taken earlier */
	if (task)
		put_task_struct(task);
	else
		pr_devel("%s: Context owning pid=%i for pe=%i dead\n",
			 __func__, pid_nr(old_pid), ctx->pe);

	/*
	 * If we couldn't find the mm context then use the group
	 * leader to iterate over the task group and find a task
	 * that gives us an mm_struct.
	 */
	if (unlikely(mm == NULL && ctx->glpid != NULL)) {

		rcu_read_lock();
		task = pid_task(ctx->glpid, PIDTYPE_PID);
		if (task)
			do {
				mm = get_task_mm(task);
				if (mm) {
					ctx->pid = get_task_pid(task,
								PIDTYPE_PID);
					break;
				}
				task = next_thread(task);
			} while (task && !thread_group_leader(task));
		rcu_read_unlock();

		/* check if we switched pid */
		if (ctx->pid != old_pid) {
			if (mm)
				pr_devel("%s:pe=%i switch pid %i->%i\n",
					 __func__, ctx->pe, pid_nr(old_pid),
					 pid_nr(ctx->pid));
			else
				pr_devel("%s:Cannot find mm for pid=%i\n",
					 __func__, pid_nr(old_pid));

			/* drop the reference to older pid */
			put_pid(old_pid);
		}
	}

	return mm;
}

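/*
 * Bottom half of the translation fault interrupt: runs from the context's
 * fault_work workqueue item and dispatches to the segment or page fault
 * handlers above.
 */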
void cxl_handle_fault(struct work_struct *fault_work)
{
	struct cxl_context *ctx =
		container_of(fault_work, struct cxl_context, fault_work);
	u64 dsisr = ctx->dsisr;
	u64 dar = ctx->dar;
	struct mm_struct *mm = NULL;

	if (cpu_has_feature(CPU_FTR_HVMODE)) {
		if (cxl_p2n_read(ctx->afu, CXL_PSL_DSISR_An) != dsisr ||
		    cxl_p2n_read(ctx->afu, CXL_PSL_DAR_An) != dar ||
		    cxl_p2n_read(ctx->afu, CXL_PSL_PEHandle_An) != ctx->pe) {
			/* Most likely explanation is harmless - a dedicated
			 * process has detached and these were cleared by the
			 * PSL purge, but warn about it just in case
			 */
			dev_notice(&ctx->afu->dev, "cxl_handle_fault: Translation fault regs changed\n");
			return;
		}
	}

	/* Early return if the context is being / has been detached */
	if (ctx->status == CLOSED) {
		cxl_ack_ae(ctx);
		return;
	}

	pr_devel("CXL BOTTOM HALF handling fault for afu pe: %i. "
		"DSISR: %#llx DAR: %#llx\n", ctx->pe, dsisr, dar);

	if (!ctx->kernel) {
		mm = get_mem_context(ctx);
		/* indicates all the threads in the task group have exited */
		if (mm == NULL) {
			pr_devel("%s: unable to get mm for pe=%d pid=%i\n",
				 __func__, ctx->pe, pid_nr(ctx->pid));
			cxl_ack_ae(ctx);
			return;
		} else {
			pr_devel("Handling page fault for pe=%d pid=%i\n",
				 ctx->pe, pid_nr(ctx->pid));
		}
	}

	if (dsisr & CXL_PSL_DSISR_An_DS)
		cxl_handle_segment_miss(ctx, mm, dar);
	else if (dsisr & CXL_PSL_DSISR_An_DM)
		cxl_handle_page_fault(ctx, mm, dsisr, dar);
	else
		WARN(1, "cxl_handle_fault has nothing to handle\n");

	if (mm)
		mmput(mm);
}

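/* Pre-load the segment table entry covering a single effective address. */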
static void cxl_prefault_one(struct cxl_context *ctx, u64 ea)
{
	struct mm_struct *mm;

	mm = get_mem_context(ctx);
	if (mm == NULL) {
		pr_devel("cxl_prefault_one unable to get mm %i\n",
			 pid_nr(ctx->pid));
		return;
	}

	cxl_fault_segment(ctx, mm, ea);

	mmput(mm);
}

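/*
 * Return the first effective address of the segment following the one that
 * contains ea (1TB or 256MB segments, depending on the VSID).
 */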
static u64 next_segment(u64 ea, u64 vsid)
{
	if (vsid & SLB_VSID_B_1T)
		ea |= (1ULL << 40) - 1;
	else
		ea |= (1ULL << 28) - 1;

	return ea + 1;
}

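/*
 * Walk every VMA in the context's address space and pre-load a segment table
 * entry for each segment it touches.
 */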
static void cxl_prefault_vma(struct cxl_context *ctx)
{
	u64 ea, last_esid = 0;
	struct copro_slb slb;
	struct vm_area_struct *vma;
	int rc;
	struct mm_struct *mm;

	mm = get_mem_context(ctx);
	if (mm == NULL) {
		pr_devel("cxl_prefault_vm unable to get mm %i\n",
			 pid_nr(ctx->pid));
		return;
	}

	down_read(&mm->mmap_sem);
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		for (ea = vma->vm_start; ea < vma->vm_end;
				ea = next_segment(ea, slb.vsid)) {
			rc = copro_calculate_slb(mm, ea, &slb);
			if (rc)
				continue;

			if (last_esid == slb.esid)
				continue;

			cxl_load_segment(ctx, &slb);
			last_esid = slb.esid;
		}
	}
	up_read(&mm->mmap_sem);

	mmput(mm);
}

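/*
 * Optionally pre-load translations for a context according to the AFU's
 * prefault_mode: either just the segment containing the effective address
 * given by the WED, or every segment mapped by the process.
 */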
void cxl_prefault(struct cxl_context *ctx, u64 wed)
{
	switch (ctx->afu->prefault_mode) {
	case CXL_PREFAULT_WED:
		cxl_prefault_one(ctx, wed);
		break;
	case CXL_PREFAULT_ALL:
		cxl_prefault_vma(ctx);
		break;
	default:
		break;
	}
}