/*
 * Copyright 2015 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/delay.h>

#include "cxl.h"
#include "hcalls.h"
#include "trace.h"

#define CXL_ERROR_DETECTED_EVENT	1
#define CXL_SLOT_RESET_EVENT		2
#define CXL_RESUME_EVENT		3

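/*
 * Walk the AFU's virtual PHB and, depending on the event, invoke the
 * error_detected, slot_reset or resume callback of each PCI driver
 * bound to an AFU device.
 */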
static void pci_error_handlers(struct cxl_afu *afu,
			       int bus_error_event,
			       pci_channel_state_t state)
{
	struct pci_dev *afu_dev;

	if (afu->phb == NULL)
		return;

	list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) {
		if (!afu_dev->driver)
			continue;

		switch (bus_error_event) {
		case CXL_ERROR_DETECTED_EVENT:
			afu_dev->error_state = state;

			if (afu_dev->driver->err_handler &&
			    afu_dev->driver->err_handler->error_detected)
				afu_dev->driver->err_handler->error_detected(afu_dev, state);
			break;
		case CXL_SLOT_RESET_EVENT:
			afu_dev->error_state = state;

			if (afu_dev->driver->err_handler &&
			    afu_dev->driver->err_handler->slot_reset)
				afu_dev->driver->err_handler->slot_reset(afu_dev);
			break;
		case CXL_RESUME_EVENT:
			if (afu_dev->driver->err_handler &&
			    afu_dev->driver->err_handler->resume)
				afu_dev->driver->err_handler->resume(afu_dev);
			break;
		}
	}
}

static irqreturn_t guest_handle_psl_slice_error(struct cxl_context *ctx, u64 dsisr,
						u64 errstat)
{
	pr_devel("in %s\n", __func__);
	dev_crit(&ctx->afu->dev, "PSL ERROR STATUS: 0x%.16llx\n", errstat);

	return cxl_ops->ack_irq(ctx, 0, errstat);
}

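/*
 * Collect adapter or AFU VPD through the hypervisor. The data is
 * returned via a scatter-gather list of zeroed pages, capped at
 * SG_MAX_ENTRIES entries, then copied back into the caller's buffer.
 * Returns the number of bytes copied, or a negative errno.
 */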
static ssize_t guest_collect_vpd(struct cxl *adapter, struct cxl_afu *afu,
				 void *buf, size_t len)
{
	unsigned int entries, mod;
	unsigned long **vpd_buf = NULL;
	struct sg_list *le;
	int rc = 0, i, tocopy;
	u64 out = 0;

	if (buf == NULL)
		return -EINVAL;

	/* number of entries in the list */
	entries = len / SG_BUFFER_SIZE;
	mod = len % SG_BUFFER_SIZE;
	if (mod)
		entries++;

	if (entries > SG_MAX_ENTRIES) {
		entries = SG_MAX_ENTRIES;
		len = SG_MAX_ENTRIES * SG_BUFFER_SIZE;
		mod = 0;
	}

	vpd_buf = kzalloc(entries * sizeof(unsigned long *), GFP_KERNEL);
	if (!vpd_buf)
		return -ENOMEM;

	le = (struct sg_list *)get_zeroed_page(GFP_KERNEL);
	if (!le) {
		rc = -ENOMEM;
		goto err1;
	}

	for (i = 0; i < entries; i++) {
		vpd_buf[i] = (unsigned long *)get_zeroed_page(GFP_KERNEL);
		if (!vpd_buf[i]) {
			rc = -ENOMEM;
			goto err2;
		}
		le[i].phys_addr = cpu_to_be64(virt_to_phys(vpd_buf[i]));
		le[i].len = cpu_to_be64(SG_BUFFER_SIZE);
		if ((i == (entries - 1)) && mod)
			le[i].len = cpu_to_be64(mod);
	}

	if (adapter)
		rc = cxl_h_collect_vpd_adapter(adapter->guest->handle,
					virt_to_phys(le), entries, &out);
	else
		rc = cxl_h_collect_vpd(afu->guest->handle, 0,
				virt_to_phys(le), entries, &out);
	pr_devel("length of available (entries: %i), vpd: %#llx\n",
		 entries, out);

	if (!rc) {
		/*
		 * hcall returns in 'out' the size of available VPDs.
		 * It fills the buffer with as much data as possible.
		 */
		if (out < len)
			len = out;
		rc = len;
		if (out) {
			for (i = 0; i < entries; i++) {
				if (len < SG_BUFFER_SIZE)
					tocopy = len;
				else
					tocopy = SG_BUFFER_SIZE;
				memcpy(buf, vpd_buf[i], tocopy);
				buf += tocopy;
				len -= tocopy;
			}
		}
	}
err2:
	for (i = 0; i < entries; i++) {
		if (vpd_buf[i])
			free_page((unsigned long) vpd_buf[i]);
	}
	free_page((unsigned long) le);
err1:
	kfree(vpd_buf);
	return rc;
}

static int guest_get_irq_info(struct cxl_context *ctx, struct cxl_irq_info *info)
{
	return cxl_h_collect_int_info(ctx->afu->guest->handle, ctx->process_token, info);
}

static irqreturn_t guest_psl_irq(int irq, void *data)
{
	struct cxl_context *ctx = data;
	struct cxl_irq_info irq_info;
	int rc;

	pr_devel("%d: received PSL interrupt %i\n", ctx->pe, irq);
	rc = guest_get_irq_info(ctx, &irq_info);
	if (rc) {
		WARN(1, "Unable to get IRQ info: %i\n", rc);
		return IRQ_HANDLED;
	}

	rc = cxl_irq(irq, ctx, &irq_info);
	return rc;
}

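/*
 * Read the AFU error state from the hypervisor. The returned state is
 * one of H_STATE_NORMAL, H_STATE_DISABLE, H_STATE_TEMP_UNAVAILABLE or
 * H_STATE_PERM_UNAVAILABLE.
 */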
static int afu_read_error_state(struct cxl_afu *afu, int *state_out)
{
	u64 state;
	int rc = 0;

	rc = cxl_h_read_error_state(afu->guest->handle, &state);
	if (!rc) {
		WARN_ON(state != H_STATE_NORMAL &&
			state != H_STATE_DISABLE &&
			state != H_STATE_TEMP_UNAVAILABLE &&
			state != H_STATE_PERM_UNAVAILABLE);
		*state_out = state & 0xffffffff;
	}
	return rc;
}

static irqreturn_t guest_slice_irq_err(int irq, void *data)
{
	struct cxl_afu *afu = data;
	int rc;
	u64 serr;

	WARN(irq, "CXL SLICE ERROR interrupt %i\n", irq);
	rc = cxl_h_get_fn_error_interrupt(afu->guest->handle, &serr);
	if (rc) {
		dev_crit(&afu->dev, "Couldn't read PSL_SERR_An: %d\n", rc);
		return IRQ_HANDLED;
	}
	dev_crit(&afu->dev, "PSL_SERR_An: 0x%.16llx\n", serr);

	rc = cxl_h_ack_fn_error_interrupt(afu->guest->handle, serr);
	if (rc)
		dev_crit(&afu->dev, "Couldn't ack slice error interrupt: %d\n",
			 rc);

	return IRQ_HANDLED;
}

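/*
 * Guest IRQ allocation: the hypervisor provisions one or more ranges
 * of hardware interrupts for the adapter. Allocation is tracked with
 * one bitmap per range, and a request is satisfied by the first range
 * with enough contiguous free bits.
 */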
static int irq_alloc_range(struct cxl *adapter, int len, int *irq)
{
	int i, n;
	struct irq_avail *cur;

	for (i = 0; i < adapter->guest->irq_nranges; i++) {
		cur = &adapter->guest->irq_avail[i];
		n = bitmap_find_next_zero_area(cur->bitmap, cur->range,
					       0, len, 0);
		if (n < cur->range) {
			bitmap_set(cur->bitmap, n, len);
			*irq = cur->offset + n;
			pr_devel("guest: allocate IRQs %#x->%#x\n",
				 *irq, *irq + len - 1);

			return 0;
		}
	}
	return -ENOSPC;
}

static int irq_free_range(struct cxl *adapter, int irq, int len)
{
	int i, n;
	struct irq_avail *cur;

	if (len == 0)
		return -ENOENT;

	for (i = 0; i < adapter->guest->irq_nranges; i++) {
		cur = &adapter->guest->irq_avail[i];
		if (irq >= cur->offset &&
		    (irq + len) <= (cur->offset + cur->range)) {
			n = irq - cur->offset;
			bitmap_clear(cur->bitmap, n, len);
			pr_devel("guest: release IRQs %#x->%#x\n",
				 irq, irq + len - 1);
			return 0;
		}
	}
	return -ENOENT;
}

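/*
 * Reset the adapter through the hypervisor. Contexts are detached and
 * the AFU drivers notified before the reset hcall; if the hcall
 * succeeds, the drivers are told about the slot reset and resumed.
 */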
static int guest_reset(struct cxl *adapter)
{
	struct cxl_afu *afu = NULL;
	int i, rc;

	pr_devel("Adapter reset request\n");
	for (i = 0; i < adapter->slices; i++) {
		if ((afu = adapter->afu[i])) {
			pci_error_handlers(afu, CXL_ERROR_DETECTED_EVENT,
					   pci_channel_io_frozen);
			cxl_context_detach_all(afu);
		}
	}

	rc = cxl_h_reset_adapter(adapter->guest->handle);
	for (i = 0; i < adapter->slices; i++) {
		if (!rc && (afu = adapter->afu[i])) {
			pci_error_handlers(afu, CXL_SLOT_RESET_EVENT,
					   pci_channel_io_normal);
			pci_error_handlers(afu, CXL_RESUME_EVENT, 0);
		}
	}
	return rc;
}

static int guest_alloc_one_irq(struct cxl *adapter)
{
	int irq;

	spin_lock(&adapter->guest->irq_alloc_lock);
	if (irq_alloc_range(adapter, 1, &irq))
		irq = -ENOSPC;
	spin_unlock(&adapter->guest->irq_alloc_lock);
	return irq;
}

static void guest_release_one_irq(struct cxl *adapter, int irq)
{
	spin_lock(&adapter->guest->irq_alloc_lock);
	irq_free_range(adapter, irq, 1);
	spin_unlock(&adapter->guest->irq_alloc_lock);
}

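/*
 * Allocate 'num' interrupts in at most CXL_IRQ_RANGES blocks. When no
 * contiguous block of the requested size is free, the request is
 * halved until a block can be allocated, and the remainder is tried
 * in the next block. Fails if the total cannot be satisfied.
 */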
static int guest_alloc_irq_ranges(struct cxl_irq_ranges *irqs,
				  struct cxl *adapter, unsigned int num)
{
	int i, try, irq;

	memset(irqs, 0, sizeof(struct cxl_irq_ranges));

	spin_lock(&adapter->guest->irq_alloc_lock);
	for (i = 0; i < CXL_IRQ_RANGES && num; i++) {
		try = num;
		while (try) {
			if (irq_alloc_range(adapter, try, &irq) == 0)
				break;
			try /= 2;
		}
		if (!try)
			goto error;
		irqs->offset[i] = irq;
		irqs->range[i] = try;
		num -= try;
	}
	if (num)
		goto error;
	spin_unlock(&adapter->guest->irq_alloc_lock);
	return 0;

error:
	for (i = 0; i < CXL_IRQ_RANGES; i++)
		irq_free_range(adapter, irqs->offset[i], irqs->range[i]);
	spin_unlock(&adapter->guest->irq_alloc_lock);
	return -ENOSPC;
}

static void guest_release_irq_ranges(struct cxl_irq_ranges *irqs,
				     struct cxl *adapter)
{
	int i;

	spin_lock(&adapter->guest->irq_alloc_lock);
	for (i = 0; i < CXL_IRQ_RANGES; i++)
		irq_free_range(adapter, irqs->offset[i], irqs->range[i]);
	spin_unlock(&adapter->guest->irq_alloc_lock);
}

static int guest_register_serr_irq(struct cxl_afu *afu)
{
	afu->err_irq_name = kasprintf(GFP_KERNEL, "cxl-%s-err",
				      dev_name(&afu->dev));
	if (!afu->err_irq_name)
		return -ENOMEM;

	if (!(afu->serr_virq = cxl_map_irq(afu->adapter, afu->serr_hwirq,
				guest_slice_irq_err, afu, afu->err_irq_name))) {
		kfree(afu->err_irq_name);
		afu->err_irq_name = NULL;
		return -ENOMEM;
	}

	return 0;
}

static void guest_release_serr_irq(struct cxl_afu *afu)
{
	cxl_unmap_irq(afu->serr_virq, afu);
	cxl_ops->release_one_irq(afu->adapter, afu->serr_hwirq);
	kfree(afu->err_irq_name);
}

static int guest_ack_irq(struct cxl_context *ctx, u64 tfc, u64 psl_reset_mask)
{
	return cxl_h_control_faults(ctx->afu->guest->handle, ctx->process_token,
				    tfc >> 32, (psl_reset_mask != 0));
}

static void disable_afu_irqs(struct cxl_context *ctx)
{
	irq_hw_number_t hwirq;
	unsigned int virq;
	int r, i;

	pr_devel("Disabling AFU(%d) interrupts\n", ctx->afu->slice);
	for (r = 0; r < CXL_IRQ_RANGES; r++) {
		hwirq = ctx->irqs.offset[r];
		for (i = 0; i < ctx->irqs.range[r]; hwirq++, i++) {
			virq = irq_find_mapping(NULL, hwirq);
			disable_irq(virq);
		}
	}
}

static void enable_afu_irqs(struct cxl_context *ctx)
{
	irq_hw_number_t hwirq;
	unsigned int virq;
	int r, i;

	pr_devel("Enabling AFU(%d) interrupts\n", ctx->afu->slice);
	for (r = 0; r < CXL_IRQ_RANGES; r++) {
		hwirq = ctx->irqs.offset[r];
		for (i = 0; i < ctx->irqs.range[r]; hwirq++, i++) {
			virq = irq_find_mapping(NULL, hwirq);
			enable_irq(virq);
		}
	}
}

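/*
 * Read 'sz' bytes from an AFU configuration record. The hypervisor
 * copies the data into a bounce page, from which the little-endian
 * value is extracted.
 */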
static int _guest_afu_cr_readXX(int sz, struct cxl_afu *afu, int cr_idx,
				u64 offset, u64 *val)
{
	unsigned long cr;
	char c;
	int rc = 0;

	if (afu->crs_len < sz)
		return -ENOENT;

	if (unlikely(offset >= afu->crs_len))
		return -ERANGE;

	cr = get_zeroed_page(GFP_KERNEL);
	if (!cr)
		return -ENOMEM;

	rc = cxl_h_get_config(afu->guest->handle, cr_idx, offset,
			      virt_to_phys((void *)cr), sz);
	if (rc)
		goto err;

	switch (sz) {
	case 1:
		c = *((char *) cr);
		*val = c;
		break;
	case 2:
		*val = in_le16((u16 *)cr);
		break;
	case 4:
		*val = in_le32((unsigned *)cr);
		break;
	case 8:
		*val = in_le64((u64 *)cr);
		break;
	default:
		WARN_ON(1);
	}
err:
	free_page(cr);
	return rc;
}

static int guest_afu_cr_read32(struct cxl_afu *afu, int cr_idx, u64 offset,
			       u32 *out)
{
	int rc;
	u64 val;

	rc = _guest_afu_cr_readXX(4, afu, cr_idx, offset, &val);
	if (!rc)
		*out = (u32) val;
	return rc;
}

static int guest_afu_cr_read16(struct cxl_afu *afu, int cr_idx, u64 offset,
			       u16 *out)
{
	int rc;
	u64 val;

	rc = _guest_afu_cr_readXX(2, afu, cr_idx, offset, &val);
	if (!rc)
		*out = (u16) val;
	return rc;
}

static int guest_afu_cr_read8(struct cxl_afu *afu, int cr_idx, u64 offset,
			      u8 *out)
{
	int rc;
	u64 val;

	rc = _guest_afu_cr_readXX(1, afu, cr_idx, offset, &val);
	if (!rc)
		*out = (u8) val;
	return rc;
}

static int guest_afu_cr_read64(struct cxl_afu *afu, int cr_idx, u64 offset,
			       u64 *out)
{
	return _guest_afu_cr_readXX(8, afu, cr_idx, offset, out);
}

static int guest_afu_cr_write32(struct cxl_afu *afu, int cr, u64 off, u32 in)
{
	/* config record is not writable from guest */
	return -EPERM;
}

static int guest_afu_cr_write16(struct cxl_afu *afu, int cr, u64 off, u16 in)
{
	/* config record is not writable from guest */
	return -EPERM;
}

static int guest_afu_cr_write8(struct cxl_afu *afu, int cr, u64 off, u8 in)
{
	/* config record is not writable from guest */
	return -EPERM;
}

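/*
 * Attach a context in AFU directed mode: fill in a process element
 * (flags, PID, segment table pointers, interrupt bitmap) and pass it
 * to the hypervisor, which returns the process token and the mapping
 * of the problem state area.
 */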
static int attach_afu_directed(struct cxl_context *ctx, u64 wed, u64 amr)
{
	struct cxl_process_element_hcall *elem;
	struct cxl *adapter = ctx->afu->adapter;
	const struct cred *cred;
	u32 pid, idx;
	int rc, r, i;
	u64 mmio_addr, mmio_size;
	u64 flags = 0;

	/* Must be 8 byte aligned and cannot cross a 4096 byte boundary */
	if (!(elem = (struct cxl_process_element_hcall *)
			get_zeroed_page(GFP_KERNEL)))
		return -ENOMEM;

	elem->version = cpu_to_be64(CXL_PROCESS_ELEMENT_VERSION);
	if (ctx->kernel) {
		pid = 0;
		flags |= CXL_PE_TRANSLATION_ENABLED;
		flags |= CXL_PE_PRIVILEGED_PROCESS;
		if (mfmsr() & MSR_SF)
			flags |= CXL_PE_64_BIT;
	} else {
		pid = current->pid;
		flags |= CXL_PE_PROBLEM_STATE;
		flags |= CXL_PE_TRANSLATION_ENABLED;
		if (!test_tsk_thread_flag(current, TIF_32BIT))
			flags |= CXL_PE_64_BIT;
		cred = get_current_cred();
		if (uid_eq(cred->euid, GLOBAL_ROOT_UID))
			flags |= CXL_PE_PRIVILEGED_PROCESS;
		put_cred(cred);
	}
	elem->flags = cpu_to_be64(flags);
	elem->common.tid = cpu_to_be32(0); /* Unused */
	elem->common.pid = cpu_to_be32(pid);
	elem->common.csrp = cpu_to_be64(0); /* disable */
	elem->common.aurp0 = cpu_to_be64(0); /* disable */
	elem->common.aurp1 = cpu_to_be64(0); /* disable */

	cxl_prefault(ctx, wed);

	elem->common.sstp0 = cpu_to_be64(ctx->sstp0);
	elem->common.sstp1 = cpu_to_be64(ctx->sstp1);

	/*
	 * Ensure we have at least one interrupt allocated to take faults for
	 * kernel contexts that may not have allocated any AFU IRQs at all:
	 */
	if (ctx->irqs.range[0] == 0) {
		rc = afu_register_irqs(ctx, 0);
		if (rc)
			goto out_free;
	}

	for (r = 0; r < CXL_IRQ_RANGES; r++) {
		for (i = 0; i < ctx->irqs.range[r]; i++) {
			if (r == 0 && i == 0) {
				elem->pslVirtualIsn = cpu_to_be32(ctx->irqs.offset[0]);
			} else {
				idx = ctx->irqs.offset[r] + i - adapter->guest->irq_base_offset;
				elem->applicationVirtualIsnBitmap[idx / 8] |= 0x80 >> (idx % 8);
			}
		}
	}
	elem->common.amr = cpu_to_be64(amr);
	elem->common.wed = cpu_to_be64(wed);

	disable_afu_irqs(ctx);

	rc = cxl_h_attach_process(ctx->afu->guest->handle, elem,
				  &ctx->process_token, &mmio_addr, &mmio_size);
	if (rc == H_SUCCESS) {
		if (ctx->master || !ctx->afu->pp_psa) {
			ctx->psn_phys = ctx->afu->psn_phys;
			ctx->psn_size = ctx->afu->adapter->ps_size;
		} else {
			ctx->psn_phys = mmio_addr;
			ctx->psn_size = mmio_size;
		}
		if (ctx->afu->pp_psa && mmio_size &&
		    ctx->afu->pp_size == 0) {
			/*
			 * There's no property in the device tree to read the
			 * pp_size. We only find out at the 1st attach.
			 * Compared to bare-metal, it is too late and we
			 * should really lock here. However, on powerVM,
			 * pp_size is really only used to display in /sys.
			 * Being discussed with pHyp for their next release.
			 */
			ctx->afu->pp_size = mmio_size;
		}
		/* from PAPR: process element is bytes 4-7 of process token */
		ctx->external_pe = ctx->process_token & 0xFFFFFFFF;
		pr_devel("CXL pe=%i is known as %i for pHyp, mmio_size=%#llx",
			 ctx->pe, ctx->external_pe, ctx->psn_size);
		ctx->pe_inserted = true;
		enable_afu_irqs(ctx);
	}

out_free:
	free_page((u64)elem);
	return rc;
}

static int guest_attach_process(struct cxl_context *ctx, bool kernel, u64 wed, u64 amr)
{
	pr_devel("in %s\n", __func__);

	ctx->kernel = kernel;
	if (ctx->afu->current_mode == CXL_MODE_DIRECTED)
		return attach_afu_directed(ctx, wed, amr);

	/* dedicated mode not supported on FW840 */

	return -EINVAL;
}

static int detach_afu_directed(struct cxl_context *ctx)
{
	if (!ctx->pe_inserted)
		return 0;
	if (cxl_h_detach_process(ctx->afu->guest->handle, ctx->process_token))
		return -1;
	return 0;
}

static int guest_detach_process(struct cxl_context *ctx)
{
	pr_devel("in %s\n", __func__);
	trace_cxl_detach(ctx);

	if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu))
		return -EIO;

	if (ctx->afu->current_mode == CXL_MODE_DIRECTED)
		return detach_afu_directed(ctx);

	return -EINVAL;
}

static void guest_release_afu(struct device *dev)
{
	struct cxl_afu *afu = to_cxl_afu(dev);

	pr_devel("%s\n", __func__);

	idr_destroy(&afu->contexts_idr);

	kfree(afu->guest);
	kfree(afu);
}

ssize_t cxl_guest_read_afu_vpd(struct cxl_afu *afu, void *buf, size_t len)
{
	return guest_collect_vpd(NULL, afu, buf, len);
}

#define ERR_BUFF_MAX_COPY_SIZE PAGE_SIZE
static ssize_t guest_afu_read_err_buffer(struct cxl_afu *afu, char *buf,
					 loff_t off, size_t count)
{
	void *tbuf = NULL;
	int rc = 0;

	tbuf = (void *) get_zeroed_page(GFP_KERNEL);
	if (!tbuf)
		return -ENOMEM;

	rc = cxl_h_get_afu_err(afu->guest->handle,
			       off & 0x7,
			       virt_to_phys(tbuf),
			       count);
	if (rc)
		goto err;

	if (count > ERR_BUFF_MAX_COPY_SIZE)
		count = ERR_BUFF_MAX_COPY_SIZE - (off & 0x7);
	memcpy(buf, tbuf, count);
err:
	free_page((u64)tbuf);

	return rc;
}

static int guest_afu_check_and_enable(struct cxl_afu *afu)
{
	return 0;
}

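/*
 * Not all sysfs attributes apply in a guest: adapter attributes tied
 * to the flash image and the AFU master's pp_mmio_off are not
 * supported and are filtered out here.
 */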
static bool guest_support_attributes(const char *attr_name,
				     enum cxl_attrs type)
{
	switch (type) {
	case CXL_ADAPTER_ATTRS:
		if ((strcmp(attr_name, "base_image") == 0) ||
		    (strcmp(attr_name, "load_image_on_perst") == 0) ||
		    (strcmp(attr_name, "perst_reloads_same_image") == 0) ||
		    (strcmp(attr_name, "image_loaded") == 0))
			return false;
		break;
	case CXL_AFU_MASTER_ATTRS:
		if ((strcmp(attr_name, "pp_mmio_off") == 0))
			return false;
		break;
	case CXL_AFU_ATTRS:
		break;
	default:
		break;
	}

	return true;
}

static int activate_afu_directed(struct cxl_afu *afu)
{
	int rc;

	dev_info(&afu->dev, "Activating AFU(%d) directed mode\n", afu->slice);

	afu->current_mode = CXL_MODE_DIRECTED;

	afu->num_procs = afu->max_procs_virtualised;

	if ((rc = cxl_chardev_m_afu_add(afu)))
		return rc;

	if ((rc = cxl_sysfs_afu_m_add(afu)))
		goto err;

	if ((rc = cxl_chardev_s_afu_add(afu)))
		goto err1;

	return 0;
err1:
	cxl_sysfs_afu_m_remove(afu);
err:
	cxl_chardev_afu_remove(afu);
	return rc;
}

static int guest_afu_activate_mode(struct cxl_afu *afu, int mode)
{
	if (!mode)
		return 0;
	if (!(mode & afu->modes_supported))
		return -EINVAL;

	if (mode == CXL_MODE_DIRECTED)
		return activate_afu_directed(afu);

	if (mode == CXL_MODE_DEDICATED)
		dev_err(&afu->dev, "Dedicated mode not supported\n");

	return -EINVAL;
}

static int deactivate_afu_directed(struct cxl_afu *afu)
{
	dev_info(&afu->dev, "Deactivating AFU(%d) directed mode\n", afu->slice);

	afu->current_mode = 0;
	afu->num_procs = 0;

	cxl_sysfs_afu_m_remove(afu);
	cxl_chardev_afu_remove(afu);

	cxl_ops->afu_reset(afu);

	return 0;
}

static int guest_afu_deactivate_mode(struct cxl_afu *afu, int mode)
{
	if (!mode)
		return 0;
	if (!(mode & afu->modes_supported))
		return -EINVAL;

	if (mode == CXL_MODE_DIRECTED)
		return deactivate_afu_directed(afu);
	return 0;
}

static int guest_afu_reset(struct cxl_afu *afu)
{
	pr_devel("AFU(%d) reset request\n", afu->slice);
	return cxl_h_reset_afu(afu->guest->handle);
}

static int guest_map_slice_regs(struct cxl_afu *afu)
{
	if (!(afu->p2n_mmio = ioremap(afu->guest->p2n_phys, afu->guest->p2n_size))) {
		dev_err(&afu->dev, "Error mapping AFU(%d) MMIO regions\n",
			afu->slice);
		return -ENOMEM;
	}
	return 0;
}

static void guest_unmap_slice_regs(struct cxl_afu *afu)
{
	if (afu->p2n_mmio)
		iounmap(afu->p2n_mmio);
}

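/*
 * Re-read the AFU error state and react to a change:
 * - H_STATE_NORMAL: back to normal, report success (1) to the caller;
 * - H_STATE_DISABLE: detach all contexts, reset the AFU and, if it
 *   comes back to normal, let the drivers resume;
 * - H_STATE_TEMP_UNAVAILABLE: remember the state and wait;
 * - H_STATE_PERM_UNAVAILABLE: notify drivers of a permanent failure.
 * Returns > 0 when the AFU has recovered.
 */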
static int afu_update_state(struct cxl_afu *afu)
{
	int rc, cur_state;

	rc = afu_read_error_state(afu, &cur_state);
	if (rc)
		return rc;

	if (afu->guest->previous_state == cur_state)
		return 0;

	pr_devel("AFU(%d) update state to %#x\n", afu->slice, cur_state);

	switch (cur_state) {
	case H_STATE_NORMAL:
		afu->guest->previous_state = cur_state;
		rc = 1;
		break;

	case H_STATE_DISABLE:
		pci_error_handlers(afu, CXL_ERROR_DETECTED_EVENT,
				   pci_channel_io_frozen);

		cxl_context_detach_all(afu);
		if ((rc = cxl_ops->afu_reset(afu)))
			pr_devel("reset hcall failed %d\n", rc);

		rc = afu_read_error_state(afu, &cur_state);
		if (!rc && cur_state == H_STATE_NORMAL) {
			pci_error_handlers(afu, CXL_SLOT_RESET_EVENT,
					   pci_channel_io_normal);
			pci_error_handlers(afu, CXL_RESUME_EVENT, 0);
			rc = 1;
		}
		afu->guest->previous_state = 0;
		break;

	case H_STATE_TEMP_UNAVAILABLE:
		afu->guest->previous_state = cur_state;
		break;

	case H_STATE_PERM_UNAVAILABLE:
		dev_err(&afu->dev, "AFU is in permanent error state\n");
		pci_error_handlers(afu, CXL_ERROR_DETECTED_EVENT,
				   pci_channel_io_perm_failure);
		afu->guest->previous_state = cur_state;
		break;

	default:
		pr_err("Unexpected AFU(%d) error state: %#x\n",
		       afu->slice, cur_state);
		return -EINVAL;
	}

	return rc;
}

static int afu_do_recovery(struct cxl_afu *afu)
{
	int rc;

	/*
	 * Many threads can arrive here, in case of detach_all for example.
	 * Only one needs to drive the recovery.
	 */
	if (mutex_trylock(&afu->guest->recovery_lock)) {
		rc = afu_update_state(afu);
		mutex_unlock(&afu->guest->recovery_lock);
		return rc;
	}
	return 0;
}

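/*
 * The link is considered up if the AFU error state reads back as
 * H_STATE_NORMAL. If it does not, attempt a recovery and check the
 * state again before declaring the link down.
 */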
static bool guest_link_ok(struct cxl *cxl, struct cxl_afu *afu)
{
	int state;

	if (afu) {
		if (afu_read_error_state(afu, &state) ||
		    state != H_STATE_NORMAL) {
			if (afu_do_recovery(afu) > 0) {
				/* check again in case we've just fixed it */
				if (!afu_read_error_state(afu, &state) &&
				    state == H_STATE_NORMAL)
					return true;
			}
			return false;
		}
	}

	return true;
}

static int afu_properties_look_ok(struct cxl_afu *afu)
{
	if (afu->pp_irqs < 0) {
		dev_err(&afu->dev, "Unexpected per-process minimum interrupt value\n");
		return -EINVAL;
	}

	if (afu->max_procs_virtualised < 1) {
		dev_err(&afu->dev, "Unexpected max number of processes virtualised value\n");
		return -EINVAL;
	}

	if (afu->crs_len < 0) {
		dev_err(&afu->dev, "Unexpected configuration record size value\n");
		return -EINVAL;
	}

	return 0;
}

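/*
 * Initialize one AFU slice from its device tree node: read the handle
 * and properties, map the per-process MMIO space, register the slice
 * error interrupt, create the device and sysfs entries, then select
 * and activate the best supported mode.
 */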
int cxl_guest_init_afu(struct cxl *adapter, int slice, struct device_node *afu_np)
{
	struct cxl_afu *afu;
	bool free = true;
	int rc;

	pr_devel("in %s - AFU(%d)\n", __func__, slice);
	if (!(afu = cxl_alloc_afu(adapter, slice)))
		return -ENOMEM;

	if (!(afu->guest = kzalloc(sizeof(struct cxl_afu_guest), GFP_KERNEL))) {
		kfree(afu);
		return -ENOMEM;
	}

	mutex_init(&afu->guest->recovery_lock);

	if ((rc = dev_set_name(&afu->dev, "afu%i.%i",
			       adapter->adapter_num,
			       slice)))
		goto err1;

	adapter->slices++;

	if ((rc = cxl_of_read_afu_handle(afu, afu_np)))
		goto err1;

	if ((rc = cxl_ops->afu_reset(afu)))
		goto err1;

	if ((rc = cxl_of_read_afu_properties(afu, afu_np)))
		goto err1;

	if ((rc = afu_properties_look_ok(afu)))
		goto err1;

	if ((rc = guest_map_slice_regs(afu)))
		goto err1;

	if ((rc = guest_register_serr_irq(afu)))
		goto err2;

	/*
	 * After we call this function we must not free the afu directly, even
	 * if it returns an error!
	 */
	if ((rc = cxl_register_afu(afu)))
		goto err_put1;

	if ((rc = cxl_sysfs_afu_add(afu)))
		goto err_put1;

	/*
	 * pHyp doesn't expose the programming models supported by the
	 * AFU. pHyp currently only supports directed mode. If it adds
	 * dedicated mode later, this version of cxl has no way to
	 * detect it. So we'll initialize the driver, but the first
	 * attach will fail.
	 * Being discussed with pHyp to do better (likely new property)
	 */
	if (afu->max_procs_virtualised == 1)
		afu->modes_supported = CXL_MODE_DEDICATED;
	else
		afu->modes_supported = CXL_MODE_DIRECTED;

	if ((rc = cxl_afu_select_best_mode(afu)))
		goto err_put2;

	adapter->afu[afu->slice] = afu;

	afu->enabled = true;

	if ((rc = cxl_pci_vphb_add(afu)))
		dev_info(&afu->dev, "Can't register vPHB\n");

	return 0;

err_put2:
	cxl_sysfs_afu_remove(afu);
err_put1:
	device_unregister(&afu->dev);
	free = false;
	guest_release_serr_irq(afu);
err2:
	guest_unmap_slice_regs(afu);
err1:
	if (free) {
		kfree(afu->guest);
		kfree(afu);
	}
	return rc;
}

void cxl_guest_remove_afu(struct cxl_afu *afu)
{
	/* check for NULL before dereferencing afu in the trace below */
	if (!afu)
		return;

	pr_devel("in %s - AFU(%d)\n", __func__, afu->slice);

	cxl_pci_vphb_remove(afu);
	cxl_sysfs_afu_remove(afu);

	spin_lock(&afu->adapter->afu_list_lock);
	afu->adapter->afu[afu->slice] = NULL;
	spin_unlock(&afu->adapter->afu_list_lock);

	cxl_context_detach_all(afu);
	cxl_ops->afu_deactivate_mode(afu, afu->current_mode);
	guest_release_serr_irq(afu);
	guest_unmap_slice_regs(afu);

	device_unregister(&afu->dev);
}

static void free_adapter(struct cxl *adapter)
{
	struct irq_avail *cur;
	int i;

	if (adapter->guest->irq_avail) {
		for (i = 0; i < adapter->guest->irq_nranges; i++) {
			cur = &adapter->guest->irq_avail[i];
			kfree(cur->bitmap);
		}
		kfree(adapter->guest->irq_avail);
	}
	kfree(adapter->guest->status);
	cxl_remove_adapter_nr(adapter);
	kfree(adapter->guest);
	kfree(adapter);
}

static int properties_look_ok(struct cxl *adapter)
{
	/*
	 * The absence of this property means that the operational
	 * status is unknown or okay.
	 */
	if (strlen(adapter->guest->status) &&
	    strcmp(adapter->guest->status, "okay")) {
		pr_err("ABORTING: Bad operational status of the device\n");
		return -EINVAL;
	}

	return 0;
}

ssize_t cxl_guest_read_adapter_vpd(struct cxl *adapter, void *buf, size_t len)
{
	return guest_collect_vpd(adapter, NULL, buf, len);
}

void cxl_guest_remove_adapter(struct cxl *adapter)
{
	pr_devel("in %s\n", __func__);

	cxl_sysfs_adapter_remove(adapter);

	cxl_guest_remove_chardev(adapter);
	device_unregister(&adapter->dev);
}

static void release_adapter(struct device *dev)
{
	free_adapter(to_cxl_adapter(dev));
}

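/*
 * Probe one adapter from its device tree node: allocate the cxl
 * structure, read the adapter handle and properties, create the
 * character device and register the adapter with sysfs.
 */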
struct cxl *cxl_guest_init_adapter(struct device_node *np, struct platform_device *pdev)
{
	struct cxl *adapter;
	bool free = true;
	int rc;

	if (!(adapter = cxl_alloc_adapter()))
		return ERR_PTR(-ENOMEM);

	if (!(adapter->guest = kzalloc(sizeof(struct cxl_guest), GFP_KERNEL))) {
		free_adapter(adapter);
		return ERR_PTR(-ENOMEM);
	}

	adapter->slices = 0;
	adapter->guest->pdev = pdev;
	adapter->dev.parent = &pdev->dev;
	adapter->dev.release = release_adapter;
	dev_set_drvdata(&pdev->dev, adapter);

	/*
	 * Hypervisor controls PSL timebase initialization (p1 register).
	 * On FW840, PSL is initialized.
	 */
	adapter->psl_timebase_synced = true;

	if ((rc = cxl_of_read_adapter_handle(adapter, np)))
		goto err1;

	if ((rc = cxl_of_read_adapter_properties(adapter, np)))
		goto err1;

	if ((rc = properties_look_ok(adapter)))
		goto err1;

	if ((rc = cxl_guest_add_chardev(adapter)))
		goto err1;

	/*
	 * After we call this function we must not free the adapter directly,
	 * even if it returns an error!
	 */
	if ((rc = cxl_register_adapter(adapter)))
		goto err_put1;

	if ((rc = cxl_sysfs_adapter_add(adapter)))
		goto err_put1;

	return adapter;

err_put1:
	device_unregister(&adapter->dev);
	free = false;
	cxl_guest_remove_chardev(adapter);
err1:
	if (free)
		free_adapter(adapter);
	return ERR_PTR(rc);
}

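/*
 * Tear the adapter down and probe its platform device again, so the
 * device is reinitialized from scratch.
 */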
void cxl_guest_reload_module(struct cxl *adapter)
{
	struct platform_device *pdev;

	pdev = adapter->guest->pdev;
	cxl_guest_remove_adapter(adapter);

	cxl_of_probe(pdev);
}

const struct cxl_backend_ops cxl_guest_ops = {
	.module = THIS_MODULE,
	.adapter_reset = guest_reset,
	.alloc_one_irq = guest_alloc_one_irq,
	.release_one_irq = guest_release_one_irq,
	.alloc_irq_ranges = guest_alloc_irq_ranges,
	.release_irq_ranges = guest_release_irq_ranges,
	.setup_irq = NULL,
	.handle_psl_slice_error = guest_handle_psl_slice_error,
	.psl_interrupt = guest_psl_irq,
	.ack_irq = guest_ack_irq,
	.attach_process = guest_attach_process,
	.detach_process = guest_detach_process,
	.support_attributes = guest_support_attributes,
	.link_ok = guest_link_ok,
	.release_afu = guest_release_afu,
	.afu_read_err_buffer = guest_afu_read_err_buffer,
	.afu_check_and_enable = guest_afu_check_and_enable,
	.afu_activate_mode = guest_afu_activate_mode,
	.afu_deactivate_mode = guest_afu_deactivate_mode,
	.afu_reset = guest_afu_reset,
	.afu_cr_read8 = guest_afu_cr_read8,
	.afu_cr_read16 = guest_afu_cr_read16,
	.afu_cr_read32 = guest_afu_cr_read32,
	.afu_cr_read64 = guest_afu_cr_read64,
	.afu_cr_write8 = guest_afu_cr_write8,
	.afu_cr_write16 = guest_afu_cr_write16,
	.afu_cr_write32 = guest_afu_cr_write32,
	.read_adapter_vpd = cxl_guest_read_adapter_vpd,
};