/*
 * Copyright 2014 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/workqueue.h>
#include <linux/sched.h>
#include <linux/pid.h>
#include <linux/mm.h>
#include <linux/moduleparam.h>

#undef MODULE_PARAM_PREFIX
#define MODULE_PARAM_PREFIX "cxl" "."
#include <asm/current.h>
#include <asm/copro.h>
#include <asm/mmu.h>

#include "cxl.h"
b03a7f57 IM |
24 | /* This finds a free SSTE for the given SLB */ |
25 | static struct cxl_sste* find_free_sste(struct cxl_context *ctx, | |
26 | struct copro_slb *slb) | |
f204e0b8 | 27 | { |
b03a7f57 IM |
28 | struct cxl_sste *primary, *sste; |
29 | unsigned int mask = (ctx->sst_size >> 7) - 1; /* SSTP0[SegTableSize] */ | |
5100a9d6 | 30 | unsigned int entry; |
b03a7f57 IM |
31 | unsigned int hash; |
32 | ||
33 | if (slb->vsid & SLB_VSID_B_1T) | |
34 | hash = (slb->esid >> SID_SHIFT_1T) & mask; | |
35 | else /* 256M */ | |
36 | hash = (slb->esid >> SID_SHIFT) & mask; | |
f204e0b8 | 37 | |
b03a7f57 IM |
38 | primary = ctx->sstp + (hash << 3); |
39 | ||
40 | for (entry = 0, sste = primary; entry < 8; entry++, sste++) { | |
5100a9d6 IM |
41 | if (!(be64_to_cpu(sste->esid_data) & SLB_ESID_V)) |
42 | return sste; | |
f204e0b8 | 43 | } |
b03a7f57 | 44 | |
f204e0b8 | 45 | /* Nothing free, select an entry to cast out */ |
b03a7f57 IM |
46 | sste = primary + ctx->sst_lru; |
47 | ctx->sst_lru = (ctx->sst_lru + 1) & 0x7; | |
f204e0b8 IM |
48 | |
49 | return sste; | |
50 | } | |
51 | ||
52 | static void cxl_load_segment(struct cxl_context *ctx, struct copro_slb *slb) | |
53 | { | |
54 | /* mask is the group index, we search primary and secondary here. */ | |
f204e0b8 | 55 | struct cxl_sste *sste; |
f204e0b8 IM |
56 | unsigned long flags; |
57 | ||
f204e0b8 | 58 | spin_lock_irqsave(&ctx->sste_lock, flags); |
b03a7f57 | 59 | sste = find_free_sste(ctx, slb); |
f204e0b8 IM |
60 | |
61 | pr_devel("CXL Populating SST[%li]: %#llx %#llx\n", | |
62 | sste - ctx->sstp, slb->vsid, slb->esid); | |
63 | ||
64 | sste->vsid_data = cpu_to_be64(slb->vsid); | |
65 | sste->esid_data = cpu_to_be64(slb->esid); | |
66 | spin_unlock_irqrestore(&ctx->sste_lock, flags); | |
67 | } | |
68 | ||
69 | static int cxl_fault_segment(struct cxl_context *ctx, struct mm_struct *mm, | |
70 | u64 ea) | |
71 | { | |
72 | struct copro_slb slb = {0,0}; | |
73 | int rc; | |
74 | ||
75 | if (!(rc = copro_calculate_slb(mm, ea, &slb))) { | |
76 | cxl_load_segment(ctx, &slb); | |
77 | } | |
78 | ||
79 | return rc; | |
80 | } | |
81 | ||
/*
 * Acknowledge the current translation fault to the PSL with an address
 * error (AE), record the faulting state on the context, and wake any
 * waiters (e.g. userspace polling for fault information).
 */
static void cxl_ack_ae(struct cxl_context *ctx)
{
	unsigned long flags;

	/* Tell the PSL the translation could not be resolved */
	cxl_ack_irq(ctx, CXL_PSL_TFC_An_AE, 0);

	/* Publish the fault details under ctx->lock for readers */
	spin_lock_irqsave(&ctx->lock, flags);
	ctx->pending_fault = true;
	ctx->fault_addr = ctx->dar;
	ctx->fault_dsisr = ctx->dsisr;
	spin_unlock_irqrestore(&ctx->lock, flags);

	wake_up_all(&ctx->wq);
}

97 | static int cxl_handle_segment_miss(struct cxl_context *ctx, | |
98 | struct mm_struct *mm, u64 ea) | |
99 | { | |
100 | int rc; | |
101 | ||
102 | pr_devel("CXL interrupt: Segment fault pe: %i ea: %#llx\n", ctx->pe, ea); | |
103 | ||
104 | if ((rc = cxl_fault_segment(ctx, mm, ea))) | |
105 | cxl_ack_ae(ctx); | |
106 | else { | |
107 | ||
108 | mb(); /* Order seg table write to TFC MMIO write */ | |
109 | cxl_ack_irq(ctx, CXL_PSL_TFC_An_R, 0); | |
110 | } | |
111 | ||
112 | return IRQ_HANDLED; | |
113 | } | |
114 | ||
115 | static void cxl_handle_page_fault(struct cxl_context *ctx, | |
116 | struct mm_struct *mm, u64 dsisr, u64 dar) | |
117 | { | |
118 | unsigned flt = 0; | |
119 | int result; | |
120 | unsigned long access, flags; | |
121 | ||
122 | if ((result = copro_handle_mm_fault(mm, dar, dsisr, &flt))) { | |
123 | pr_devel("copro_handle_mm_fault failed: %#x\n", result); | |
124 | return cxl_ack_ae(ctx); | |
125 | } | |
126 | ||
127 | /* | |
128 | * update_mmu_cache() will not have loaded the hash since current->trap | |
129 | * is not a 0x400 or 0x300, so just call hash_page_mm() here. | |
130 | */ | |
131 | access = _PAGE_PRESENT; | |
132 | if (dsisr & CXL_PSL_DSISR_An_S) | |
133 | access |= _PAGE_RW; | |
134 | if ((!ctx->kernel) || ~(dar & (1ULL << 63))) | |
135 | access |= _PAGE_USER; | |
136 | local_irq_save(flags); | |
137 | hash_page_mm(mm, dar, access, 0x300); | |
138 | local_irq_restore(flags); | |
139 | ||
140 | pr_devel("Page fault successfully handled for pe: %i!\n", ctx->pe); | |
141 | cxl_ack_irq(ctx, CXL_PSL_TFC_An_R, 0); | |
142 | } | |
143 | ||
144 | void cxl_handle_fault(struct work_struct *fault_work) | |
145 | { | |
146 | struct cxl_context *ctx = | |
147 | container_of(fault_work, struct cxl_context, fault_work); | |
148 | u64 dsisr = ctx->dsisr; | |
149 | u64 dar = ctx->dar; | |
150 | struct task_struct *task; | |
151 | struct mm_struct *mm; | |
152 | ||
153 | if (cxl_p2n_read(ctx->afu, CXL_PSL_DSISR_An) != dsisr || | |
154 | cxl_p2n_read(ctx->afu, CXL_PSL_DAR_An) != dar || | |
155 | cxl_p2n_read(ctx->afu, CXL_PSL_PEHandle_An) != ctx->pe) { | |
156 | /* Most likely explanation is harmless - a dedicated process | |
157 | * has detached and these were cleared by the PSL purge, but | |
158 | * warn about it just in case */ | |
159 | dev_notice(&ctx->afu->dev, "cxl_handle_fault: Translation fault regs changed\n"); | |
160 | return; | |
161 | } | |
162 | ||
163 | pr_devel("CXL BOTTOM HALF handling fault for afu pe: %i. " | |
164 | "DSISR: %#llx DAR: %#llx\n", ctx->pe, dsisr, dar); | |
165 | ||
166 | if (!(task = get_pid_task(ctx->pid, PIDTYPE_PID))) { | |
167 | pr_devel("cxl_handle_fault unable to get task %i\n", | |
168 | pid_nr(ctx->pid)); | |
169 | cxl_ack_ae(ctx); | |
170 | return; | |
171 | } | |
172 | if (!(mm = get_task_mm(task))) { | |
173 | pr_devel("cxl_handle_fault unable to get mm %i\n", | |
174 | pid_nr(ctx->pid)); | |
175 | cxl_ack_ae(ctx); | |
176 | goto out; | |
177 | } | |
178 | ||
179 | if (dsisr & CXL_PSL_DSISR_An_DS) | |
180 | cxl_handle_segment_miss(ctx, mm, dar); | |
181 | else if (dsisr & CXL_PSL_DSISR_An_DM) | |
182 | cxl_handle_page_fault(ctx, mm, dsisr, dar); | |
183 | else | |
184 | WARN(1, "cxl_handle_fault has nothing to handle\n"); | |
185 | ||
186 | mmput(mm); | |
187 | out: | |
188 | put_task_struct(task); | |
189 | } | |
190 | ||
191 | static void cxl_prefault_one(struct cxl_context *ctx, u64 ea) | |
192 | { | |
193 | int rc; | |
194 | struct task_struct *task; | |
195 | struct mm_struct *mm; | |
196 | ||
197 | if (!(task = get_pid_task(ctx->pid, PIDTYPE_PID))) { | |
198 | pr_devel("cxl_prefault_one unable to get task %i\n", | |
199 | pid_nr(ctx->pid)); | |
200 | return; | |
201 | } | |
202 | if (!(mm = get_task_mm(task))) { | |
203 | pr_devel("cxl_prefault_one unable to get mm %i\n", | |
204 | pid_nr(ctx->pid)); | |
205 | put_task_struct(task); | |
206 | return; | |
207 | } | |
208 | ||
209 | rc = cxl_fault_segment(ctx, mm, ea); | |
210 | ||
211 | mmput(mm); | |
212 | put_task_struct(task); | |
213 | } | |
214 | ||
215 | static u64 next_segment(u64 ea, u64 vsid) | |
216 | { | |
217 | if (vsid & SLB_VSID_B_1T) | |
218 | ea |= (1ULL << 40) - 1; | |
219 | else | |
220 | ea |= (1ULL << 28) - 1; | |
221 | ||
222 | return ea + 1; | |
223 | } | |
224 | ||
225 | static void cxl_prefault_vma(struct cxl_context *ctx) | |
226 | { | |
227 | u64 ea, last_esid = 0; | |
228 | struct copro_slb slb; | |
229 | struct vm_area_struct *vma; | |
230 | int rc; | |
231 | struct task_struct *task; | |
232 | struct mm_struct *mm; | |
233 | ||
234 | if (!(task = get_pid_task(ctx->pid, PIDTYPE_PID))) { | |
235 | pr_devel("cxl_prefault_vma unable to get task %i\n", | |
236 | pid_nr(ctx->pid)); | |
237 | return; | |
238 | } | |
239 | if (!(mm = get_task_mm(task))) { | |
240 | pr_devel("cxl_prefault_vm unable to get mm %i\n", | |
241 | pid_nr(ctx->pid)); | |
242 | goto out1; | |
243 | } | |
244 | ||
245 | down_read(&mm->mmap_sem); | |
246 | for (vma = mm->mmap; vma; vma = vma->vm_next) { | |
247 | for (ea = vma->vm_start; ea < vma->vm_end; | |
248 | ea = next_segment(ea, slb.vsid)) { | |
249 | rc = copro_calculate_slb(mm, ea, &slb); | |
250 | if (rc) | |
251 | continue; | |
252 | ||
253 | if (last_esid == slb.esid) | |
254 | continue; | |
255 | ||
256 | cxl_load_segment(ctx, &slb); | |
257 | last_esid = slb.esid; | |
258 | } | |
259 | } | |
260 | up_read(&mm->mmap_sem); | |
261 | ||
262 | mmput(mm); | |
263 | out1: | |
264 | put_task_struct(task); | |
265 | } | |
266 | ||
267 | void cxl_prefault(struct cxl_context *ctx, u64 wed) | |
268 | { | |
269 | switch (ctx->afu->prefault_mode) { | |
270 | case CXL_PREFAULT_WED: | |
271 | cxl_prefault_one(ctx, wed); | |
272 | break; | |
273 | case CXL_PREFAULT_ALL: | |
274 | cxl_prefault_vma(ctx); | |
275 | break; | |
276 | default: | |
277 | break; | |
278 | } | |
279 | } |