drivers/misc/cxl/irq.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2014 IBM Corp.
 */

#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/pid.h>
#include <asm/cputable.h>
#include <misc/cxl-base.h>

#include "cxl.h"
#include "trace.h"

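/*
 * AFU interrupts start at range 1 on bare-metal (HV mode), where range 0
 * only holds the multiplexed PSL interrupt; in a guest they start at
 * range 0, which also carries the per-context PSL interrupt (see the
 * comments in cxl_irq_afu() and afu_allocate_irqs() below).
 */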
static int afu_irq_range_start(void)
{
        if (cpu_has_feature(CPU_FTR_HVMODE))
                return 1;
        return 0;
}

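/*
 * Record the fault state in the context and defer the actual handling to
 * process context via the context's fault_work item; resolving the fault
 * needs the task's mm, which cannot safely be taken from interrupt
 * context (see the CXL_PSL_DSISR_An_DS comment below).
 */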
static irqreturn_t schedule_cxl_fault(struct cxl_context *ctx, u64 dsisr, u64 dar)
{
        ctx->dsisr = dsisr;
        ctx->dar = dar;
        schedule_work(&ctx->fault_work);
        return IRQ_HANDLED;
}

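/*
 * Top-half PSL interrupt handler for PSL9-based cards; the caller has
 * already snapshotted DSISR/DAR and the error state into irq_info.
 */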
irqreturn_t cxl_irq_psl9(int irq, struct cxl_context *ctx, struct cxl_irq_info *irq_info)
{
        u64 dsisr, dar;

        dsisr = irq_info->dsisr;
        dar = irq_info->dar;

        trace_cxl_psl9_irq(ctx, irq, dsisr, dar);

        pr_devel("CXL interrupt %i for afu pe: %i DSISR: %#llx DAR: %#llx\n", irq, ctx->pe, dsisr, dar);

        if (dsisr & CXL_PSL9_DSISR_An_TF) {
                pr_devel("CXL interrupt: Scheduling translation fault handling for later (pe: %i)\n", ctx->pe);
                return schedule_cxl_fault(ctx, dsisr, dar);
        }

        if (dsisr & CXL_PSL9_DSISR_An_PE)
                return cxl_ops->handle_psl_slice_error(ctx, dsisr,
                                                       irq_info->errstat);
        if (dsisr & CXL_PSL9_DSISR_An_AE) {
                pr_devel("CXL interrupt: AFU Error 0x%016llx\n", irq_info->afu_err);

                if (ctx->pending_afu_err) {
                        /*
                         * This shouldn't happen - the PSL treats these errors
                         * as fatal and will have reset the AFU, so there's not
                         * much point buffering multiple AFU errors.
                         * OTOH if we DO ever see a storm of these come in it's
                         * probably best that we log them somewhere:
                         */
                        dev_err_ratelimited(&ctx->afu->dev, "CXL AFU Error undelivered to pe %i: 0x%016llx\n",
                                            ctx->pe, irq_info->afu_err);
                } else {
                        spin_lock(&ctx->lock);
                        ctx->afu_err = irq_info->afu_err;
                        ctx->pending_afu_err = true;
                        spin_unlock(&ctx->lock);

                        wake_up_all(&ctx->wq);
                }

                cxl_ops->ack_irq(ctx, CXL_PSL_TFC_An_A, 0);
                return IRQ_HANDLED;
        }
        if (dsisr & CXL_PSL9_DSISR_An_OC)
                pr_devel("CXL interrupt: OS Context Warning\n");

        WARN(1, "Unhandled CXL PSL IRQ\n");
        return IRQ_HANDLED;
}

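/*
 * PSL8 counterpart of the handler above. In addition to the cases
 * handled for PSL9, it decodes the segment-miss (DS) and page-fault
 * (DM) conditions of the PSL8 MMU model, both of which are punted to
 * the fault work item.
 */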
irqreturn_t cxl_irq_psl8(int irq, struct cxl_context *ctx, struct cxl_irq_info *irq_info)
{
        u64 dsisr, dar;

        dsisr = irq_info->dsisr;
        dar = irq_info->dar;

        trace_cxl_psl_irq(ctx, irq, dsisr, dar);

        pr_devel("CXL interrupt %i for afu pe: %i DSISR: %#llx DAR: %#llx\n", irq, ctx->pe, dsisr, dar);

        if (dsisr & CXL_PSL_DSISR_An_DS) {
                /*
                 * We don't inherently need to sleep to handle this, but we do
                 * need to get a ref to the task's mm, which we can't do from
                 * irq context without the potential for a deadlock since it
                 * takes the task_lock. An alternate option would be to keep a
                 * reference to the task's mm the entire time it has cxl open,
                 * but to do that we need to solve the issue where we hold a
                 * ref to the mm, but the mm can hold a ref to the fd after an
                 * mmap preventing anything from being cleaned up.
                 */
                pr_devel("Scheduling segment miss handling for later pe: %i\n", ctx->pe);
                return schedule_cxl_fault(ctx, dsisr, dar);
        }

        if (dsisr & CXL_PSL_DSISR_An_M)
                pr_devel("CXL interrupt: PTE not found\n");
        if (dsisr & CXL_PSL_DSISR_An_P)
                pr_devel("CXL interrupt: Storage protection violation\n");
        if (dsisr & CXL_PSL_DSISR_An_A)
                pr_devel("CXL interrupt: AFU lock access to write through or cache inhibited storage\n");
        if (dsisr & CXL_PSL_DSISR_An_S)
                pr_devel("CXL interrupt: Access was afu_wr or afu_zero\n");
        if (dsisr & CXL_PSL_DSISR_An_K)
                pr_devel("CXL interrupt: Access not permitted by virtual page class key protection\n");

        if (dsisr & CXL_PSL_DSISR_An_DM) {
                /*
                 * In some cases we might be able to handle the fault
                 * immediately if hash_page would succeed, but we still need
                 * the task's mm, which as above we can't get without a lock
                 */
                pr_devel("Scheduling page fault handling for later pe: %i\n", ctx->pe);
                return schedule_cxl_fault(ctx, dsisr, dar);
        }
        if (dsisr & CXL_PSL_DSISR_An_ST)
                WARN(1, "CXL interrupt: Segment Table PTE not found\n");
        if (dsisr & CXL_PSL_DSISR_An_UR)
                pr_devel("CXL interrupt: AURP PTE not found\n");
        if (dsisr & CXL_PSL_DSISR_An_PE)
                return cxl_ops->handle_psl_slice_error(ctx, dsisr,
                                                       irq_info->errstat);
        if (dsisr & CXL_PSL_DSISR_An_AE) {
                pr_devel("CXL interrupt: AFU Error 0x%016llx\n", irq_info->afu_err);

                if (ctx->pending_afu_err) {
                        /*
                         * This shouldn't happen - the PSL treats these errors
                         * as fatal and will have reset the AFU, so there's not
                         * much point buffering multiple AFU errors.
                         * OTOH if we DO ever see a storm of these come in it's
                         * probably best that we log them somewhere:
                         */
                        dev_err_ratelimited(&ctx->afu->dev, "CXL AFU Error undelivered to pe %i: 0x%016llx\n",
                                            ctx->pe, irq_info->afu_err);
                } else {
                        spin_lock(&ctx->lock);
                        ctx->afu_err = irq_info->afu_err;
                        ctx->pending_afu_err = true;
                        spin_unlock(&ctx->lock);

                        wake_up_all(&ctx->wq);
                }

                cxl_ops->ack_irq(ctx, CXL_PSL_TFC_An_A, 0);
                return IRQ_HANDLED;
        }
        if (dsisr & CXL_PSL_DSISR_An_OC)
                pr_devel("CXL interrupt: OS Context Warning\n");

        WARN(1, "Unhandled CXL PSL IRQ\n");
        return IRQ_HANDLED;
}

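/*
 * Handler attached (by afu_register_hwirqs() below) to every AFU
 * interrupt: it translates the hardware IRQ back into the per-context
 * AFU interrupt number, records it in the context's bitmap and wakes
 * any waiters.
 */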
static irqreturn_t cxl_irq_afu(int irq, void *data)
{
        struct cxl_context *ctx = data;
        irq_hw_number_t hwirq = irqd_to_hwirq(irq_get_irq_data(irq));
        int irq_off, afu_irq = 0;
        __u16 range;
        int r;

        /*
         * Look for the interrupt number.
         * On bare-metal, we know range 0 only contains the PSL
         * interrupt, so we could start counting at range 1 and initialize
         * afu_irq at 1.
         * In a guest, range 0 also contains AFU interrupts, so it must
         * be accounted for. Therefore we initialize afu_irq at 0 to take
         * the PSL interrupt into account.
         *
         * For code readability, it just seems easier to go over all
         * the ranges on both bare-metal and guest. The end result is the
         * same.
         */
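        /*
         * Illustrative example with hypothetical values: for
         * offset = {0x100, 0x200}, range = {1, 4} and hwirq = 0x202,
         * range 0 contributes 1 to afu_irq, then range 1 matches with
         * irq_off = 2, giving afu_irq = 3.
         */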
        for (r = 0; r < CXL_IRQ_RANGES; r++) {
                irq_off = hwirq - ctx->irqs.offset[r];
                range = ctx->irqs.range[r];
                if (irq_off >= 0 && irq_off < range) {
                        afu_irq += irq_off;
                        break;
                }
                afu_irq += range;
        }
        if (unlikely(r >= CXL_IRQ_RANGES)) {
                WARN(1, "Received AFU IRQ out of range for pe %i (virq %i hwirq %lx)\n",
                     ctx->pe, irq, hwirq);
                return IRQ_HANDLED;
        }

        trace_cxl_afu_irq(ctx, afu_irq, irq, hwirq);
        pr_devel("Received AFU interrupt %i for pe: %i (virq %i hwirq %lx)\n",
                 afu_irq, ctx->pe, irq, hwirq);

        if (unlikely(!ctx->irq_bitmap)) {
                WARN(1, "Received AFU IRQ for context with no IRQ bitmap\n");
                return IRQ_HANDLED;
        }
        spin_lock(&ctx->lock);
        set_bit(afu_irq - 1, ctx->irq_bitmap);
        ctx->pending_irq = true;
        spin_unlock(&ctx->lock);

        wake_up_all(&ctx->wq);

        return IRQ_HANDLED;
}

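/*
 * Map a hardware IRQ into the Linux virq space and install the given
 * handler on it. Returns the virq on success, 0 on failure.
 */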
unsigned int cxl_map_irq(struct cxl *adapter, irq_hw_number_t hwirq,
                         irq_handler_t handler, void *cookie, const char *name)
{
        unsigned int virq;
        int result;

        /* IRQ Domain? (a NULL domain selects the default IRQ domain) */
        virq = irq_create_mapping(NULL, hwirq);
        if (!virq) {
                dev_warn(&adapter->dev, "cxl_map_irq: irq_create_mapping failed\n");
                return 0;
        }

        if (cxl_ops->setup_irq)
                cxl_ops->setup_irq(adapter, hwirq, virq);

        pr_devel("hwirq %#lx mapped to virq %u\n", hwirq, virq);

        result = request_irq(virq, handler, 0, name, cookie);
        if (result) {
                dev_warn(&adapter->dev, "cxl_map_irq: request_irq failed: %i\n", result);
                return 0;
        }

        return virq;
}

void cxl_unmap_irq(unsigned int virq, void *cookie)
{
        free_irq(virq, cookie);
}

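/*
 * Allocate a single hardware IRQ from the adapter, map it and install
 * the handler; on success both the hwirq and the virq are handed back
 * to the caller.
 */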
int cxl_register_one_irq(struct cxl *adapter,
                         irq_handler_t handler,
                         void *cookie,
                         irq_hw_number_t *dest_hwirq,
                         unsigned int *dest_virq,
                         const char *name)
{
        int hwirq, virq;

        if ((hwirq = cxl_ops->alloc_one_irq(adapter)) < 0)
                return hwirq;

        if (!(virq = cxl_map_irq(adapter, hwirq, handler, cookie, name)))
                goto err;

        *dest_hwirq = hwirq;
        *dest_virq = virq;

        return 0;

err:
        cxl_ops->release_one_irq(adapter, hwirq);
        return -ENOMEM;
}

void afu_irq_name_free(struct cxl_context *ctx)
{
        struct cxl_irq_name *irq_name, *tmp;

        list_for_each_entry_safe(irq_name, tmp, &ctx->irq_names, list) {
                kfree(irq_name->name);
                list_del(&irq_name->list);
                kfree(irq_name);
        }
}

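/*
 * Allocate the per-context AFU interrupt ranges together with the
 * request_irq() names. The names follow the pattern
 * "cxl-<afu device>-pe<pe>-<n>", e.g. "cxl-afu0.0-pe8-1" for the
 * (hypothetical) first interrupt of PE 8 on afu0.0.
 */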
int afu_allocate_irqs(struct cxl_context *ctx, u32 count)
{
        int rc, r, i, j = 1;
        struct cxl_irq_name *irq_name;
        int alloc_count;

        /*
         * In native mode, range 0 is reserved for the multiplexed
         * PSL interrupt. It has been allocated when the AFU was initialized.
         *
         * In a guest, the PSL interrupt is not multiplexed, but per-context,
         * and is the first interrupt from range 0. It still needs to be
         * allocated, so bump the count by one.
         */
        if (cpu_has_feature(CPU_FTR_HVMODE))
                alloc_count = count;
        else
                alloc_count = count + 1;

        if ((rc = cxl_ops->alloc_irq_ranges(&ctx->irqs, ctx->afu->adapter,
                                            alloc_count)))
                return rc;

        if (cpu_has_feature(CPU_FTR_HVMODE)) {
                /* Multiplexed PSL Interrupt */
                ctx->irqs.offset[0] = ctx->afu->native->psl_hwirq;
                ctx->irqs.range[0] = 1;
        }

        ctx->irq_count = count;
        ctx->irq_bitmap = kcalloc(BITS_TO_LONGS(count),
                                  sizeof(*ctx->irq_bitmap), GFP_KERNEL);
        if (!ctx->irq_bitmap)
                goto out;

        /*
         * Allocate names first. If any fail, bail out before allocating
         * actual hardware IRQs.
         */
        for (r = afu_irq_range_start(); r < CXL_IRQ_RANGES; r++) {
                for (i = 0; i < ctx->irqs.range[r]; i++) {
                        irq_name = kmalloc(sizeof(struct cxl_irq_name),
                                           GFP_KERNEL);
                        if (!irq_name)
                                goto out;
                        irq_name->name = kasprintf(GFP_KERNEL, "cxl-%s-pe%i-%i",
                                                   dev_name(&ctx->afu->dev),
                                                   ctx->pe, j);
                        if (!irq_name->name) {
                                kfree(irq_name);
                                goto out;
                        }
                        /* Add to tail so the next loop gets the correct order */
                        list_add_tail(&irq_name->list, &ctx->irq_names);
                        j++;
                }
        }
        return 0;

out:
        cxl_ops->release_irq_ranges(&ctx->irqs, ctx->afu->adapter);
        afu_irq_name_free(ctx);
        return -ENOMEM;
}

static void afu_register_hwirqs(struct cxl_context *ctx)
{
        irq_hw_number_t hwirq;
        struct cxl_irq_name *irq_name;
        int r, i;
        irqreturn_t (*handler)(int irq, void *data);

        /* We've allocated all memory now, so let's do the irq allocations */
        irq_name = list_first_entry(&ctx->irq_names, struct cxl_irq_name, list);
        for (r = afu_irq_range_start(); r < CXL_IRQ_RANGES; r++) {
                hwirq = ctx->irqs.offset[r];
                for (i = 0; i < ctx->irqs.range[r]; hwirq++, i++) {
                        if (r == 0 && i == 0)
                                /*
                                 * The very first interrupt of range 0 is
                                 * always the PSL interrupt, but we only
                                 * need to connect a handler for guests,
                                 * because there's one PSL interrupt per
                                 * context.
                                 * On bare-metal, the PSL interrupt is
                                 * multiplexed and was set up when the AFU
                                 * was configured.
                                 */
                                handler = cxl_ops->psl_interrupt;
                        else
                                handler = cxl_irq_afu;
                        cxl_map_irq(ctx->afu->adapter, hwirq, handler, ctx,
                                    irq_name->name);
                        irq_name = list_next_entry(irq_name, list);
                }
        }
}

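/*
 * Public entry point: allocate and wire up "count" AFU interrupts for a
 * context. afu_release_irqs() below is the matching teardown.
 */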
int afu_register_irqs(struct cxl_context *ctx, u32 count)
{
        int rc;

        rc = afu_allocate_irqs(ctx, count);
        if (rc)
                return rc;

        afu_register_hwirqs(ctx);
        return 0;
}

void afu_release_irqs(struct cxl_context *ctx, void *cookie)
{
        irq_hw_number_t hwirq;
        unsigned int virq;
        int r, i;

        for (r = afu_irq_range_start(); r < CXL_IRQ_RANGES; r++) {
                hwirq = ctx->irqs.offset[r];
                for (i = 0; i < ctx->irqs.range[r]; hwirq++, i++) {
                        virq = irq_find_mapping(NULL, hwirq);
                        if (virq)
                                cxl_unmap_irq(virq, cookie);
                }
        }

        afu_irq_name_free(ctx);
        cxl_ops->release_irq_ranges(&ctx->irqs, ctx->afu->adapter);

        ctx->irq_count = 0;
}

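/*
 * Decode the PSL_SERR_An slice error register bit by bit and log each
 * asserted error condition at critical severity.
 */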
void cxl_afu_decode_psl_serr(struct cxl_afu *afu, u64 serr)
{
        dev_crit(&afu->dev,
                 "PSL Slice error received. Check AFU for root cause.\n");
        dev_crit(&afu->dev, "PSL_SERR_An: 0x%016llx\n", serr);
        if (serr & CXL_PSL_SERR_An_afuto)
                dev_crit(&afu->dev, "AFU MMIO Timeout\n");
        if (serr & CXL_PSL_SERR_An_afudis)
                dev_crit(&afu->dev,
                         "MMIO targeted Accelerator that was not enabled\n");
        if (serr & CXL_PSL_SERR_An_afuov)
                dev_crit(&afu->dev, "AFU CTAG Overflow\n");
        if (serr & CXL_PSL_SERR_An_badsrc)
                dev_crit(&afu->dev, "Bad Interrupt Source\n");
        if (serr & CXL_PSL_SERR_An_badctx)
                dev_crit(&afu->dev, "Bad Context Handle\n");
        if (serr & CXL_PSL_SERR_An_llcmdis)
                dev_crit(&afu->dev, "LLCMD to Disabled AFU\n");
        if (serr & CXL_PSL_SERR_An_llcmdto)
                dev_crit(&afu->dev, "LLCMD Timeout to AFU\n");
        if (serr & CXL_PSL_SERR_An_afupar)
                dev_crit(&afu->dev, "AFU MMIO Parity Error\n");
        if (serr & CXL_PSL_SERR_An_afudup)
                dev_crit(&afu->dev, "AFU MMIO Duplicate CTAG Error\n");
        if (serr & CXL_PSL_SERR_An_AE)
                dev_crit(&afu->dev,
                         "AFU asserted JDONE with JERROR in AFU Directed Mode\n");
}