/*
 * Copyright 2014 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/delay.h>
#include <asm/synch.h>
#include <misc/cxl-base.h>

#include "cxl.h"
#include "trace.h"
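/*
 * afu_control() serialises AFU state changes: holding afu_cntl_lock, it
 * sets the bits in @command (and clears those in @clear) in the AFU
 * control register, then polls until the register matches @result under
 * @mask, bailing out on timeout or if the card link goes down. The
 * enable/disable/reset entry points below are thin wrappers that pass in
 * the appropriate bit patterns.
 */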
static int afu_control(struct cxl_afu *afu, u64 command, u64 clear,
		       u64 result, u64 mask, bool enabled)
{
	u64 AFU_Cntl;
	unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);
	int rc = 0;

	spin_lock(&afu->afu_cntl_lock);
	pr_devel("AFU command starting: %llx\n", command);

	trace_cxl_afu_ctrl(afu, command);

	AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
	cxl_p2n_write(afu, CXL_AFU_Cntl_An, (AFU_Cntl & ~clear) | command);

	AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
	while ((AFU_Cntl & mask) != result) {
		if (time_after_eq(jiffies, timeout)) {
			dev_warn(&afu->dev, "WARNING: AFU control timed out!\n");
			rc = -EBUSY;
			goto out;
		}

		if (!cxl_ops->link_ok(afu->adapter, afu)) {
			afu->enabled = enabled;
			rc = -EIO;
			goto out;
		}

		pr_devel_ratelimited("AFU control... (0x%016llx)\n",
				     AFU_Cntl | command);
		cpu_relax();
		AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
	}

	if (AFU_Cntl & CXL_AFU_Cntl_An_RA) {
		/*
		 * Workaround for a bug in the XSL used in the Mellanox CX4
		 * that fails to clear the RA bit after an AFU reset,
		 * preventing subsequent AFU resets from working.
		 */
		cxl_p2n_write(afu, CXL_AFU_Cntl_An, AFU_Cntl & ~CXL_AFU_Cntl_An_RA);
	}

	pr_devel("AFU command complete: %llx\n", command);
	afu->enabled = enabled;
out:
	trace_cxl_afu_ctrl_done(afu, command, rc);
	spin_unlock(&afu->afu_cntl_lock);

	return rc;
}
static int afu_enable(struct cxl_afu *afu)
{
	pr_devel("AFU enable request\n");

	return afu_control(afu, CXL_AFU_Cntl_An_E, 0,
			   CXL_AFU_Cntl_An_ES_Enabled,
			   CXL_AFU_Cntl_An_ES_MASK, true);
}
int cxl_afu_disable(struct cxl_afu *afu)
{
	pr_devel("AFU disable request\n");

	return afu_control(afu, 0, CXL_AFU_Cntl_An_E,
			   CXL_AFU_Cntl_An_ES_Disabled,
			   CXL_AFU_Cntl_An_ES_MASK, false);
}
/* This will disable as well as reset */
static int native_afu_reset(struct cxl_afu *afu)
{
	pr_devel("AFU reset request\n");

	return afu_control(afu, CXL_AFU_Cntl_An_RA, 0,
			   CXL_AFU_Cntl_An_RS_Complete | CXL_AFU_Cntl_An_ES_Disabled,
			   CXL_AFU_Cntl_An_RS_MASK | CXL_AFU_Cntl_An_ES_MASK,
			   false);
}
static int native_afu_check_and_enable(struct cxl_afu *afu)
{
	if (!cxl_ops->link_ok(afu->adapter, afu)) {
		WARN(1, "Refusing to enable afu while link down!\n");
		return -EIO;
	}
	if (afu->enabled)
		return 0;
	return afu_enable(afu);
}
int cxl_psl_purge(struct cxl_afu *afu)
{
	u64 PSL_CNTL = cxl_p1n_read(afu, CXL_PSL_SCNTL_An);
	u64 AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
	u64 dsisr, dar;
	u64 start, end;
	unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);
	int rc = 0;

	trace_cxl_psl_ctrl(afu, CXL_PSL_SCNTL_An_Pc);

	pr_devel("PSL purge request\n");

	if (!cxl_ops->link_ok(afu->adapter, afu)) {
		dev_warn(&afu->dev, "PSL Purge called with link down, ignoring\n");
		rc = -EIO;
		goto out;
	}

	if ((AFU_Cntl & CXL_AFU_Cntl_An_ES_MASK) != CXL_AFU_Cntl_An_ES_Disabled) {
		WARN(1, "psl_purge request while AFU not disabled!\n");
		cxl_afu_disable(afu);
	}

	cxl_p1n_write(afu, CXL_PSL_SCNTL_An,
		      PSL_CNTL | CXL_PSL_SCNTL_An_Pc);
	start = local_clock();
	PSL_CNTL = cxl_p1n_read(afu, CXL_PSL_SCNTL_An);
	while ((PSL_CNTL & CXL_PSL_SCNTL_An_Ps_MASK)
			== CXL_PSL_SCNTL_An_Ps_Pending) {
		if (time_after_eq(jiffies, timeout)) {
			dev_warn(&afu->dev, "WARNING: PSL Purge timed out!\n");
			rc = -EBUSY;
			goto out;
		}
		if (!cxl_ops->link_ok(afu->adapter, afu)) {
			rc = -EIO;
			goto out;
		}

		dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
		pr_devel_ratelimited("PSL purging... PSL_CNTL: 0x%016llx PSL_DSISR: 0x%016llx\n", PSL_CNTL, dsisr);
		if (dsisr & CXL_PSL_DSISR_TRANS) {
			dar = cxl_p2n_read(afu, CXL_PSL_DAR_An);
			dev_notice(&afu->dev, "PSL purge terminating pending translation, DSISR: 0x%016llx, DAR: 0x%016llx\n", dsisr, dar);
			cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_AE);
		} else if (dsisr) {
			dev_notice(&afu->dev, "PSL purge acknowledging pending non-translation fault, DSISR: 0x%016llx\n", dsisr);
			cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_A);
		} else {
			cpu_relax();
		}
		PSL_CNTL = cxl_p1n_read(afu, CXL_PSL_SCNTL_An);
	}
	end = local_clock();
	pr_devel("PSL purged in %lld ns\n", end - start);

	cxl_p1n_write(afu, CXL_PSL_SCNTL_An,
		      PSL_CNTL & ~CXL_PSL_SCNTL_An_Pc);
out:
	trace_cxl_psl_ctrl_done(afu, CXL_PSL_SCNTL_An_Pc, rc);
	return rc;
}
static int spa_max_procs(int spa_size)
{
	/*
	 * From the CAIA:
	 *    end_of_SPA_area = SPA_Base + ((n+4) * 128) + (( ((n*8) + 127) >> 7) * 128) + 255
	 * Most of that junk is really just an overly-complicated way of saying
	 * the last 256 bytes are __aligned(128), so it's really:
	 *    end_of_SPA_area = end_of_PSL_queue_area + __aligned(128) 255
	 * and
	 *    end_of_PSL_queue_area = SPA_Base + ((n+4) * 128) + (n*8) - 1
	 * so
	 *    sizeof(SPA) = ((n+4) * 128) + (n*8) + __aligned(128) 256
	 * Ignore the alignment (which is safe in this case as long as we are
	 * careful with our rounding) and solve for n:
	 */
	return ((spa_size / 8) - 96) / 17;
}
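/*
 * For example, a single 4K page gives n = ((4096 / 8) - 96) / 17 = 24
 * processes; substituting n = 24 back into sizeof(SPA) above gives
 * ((24 + 4) * 128) + (24 * 8) + 256 = 4032 bytes, which fits in the page,
 * while n = 25 would need 4168 bytes and would not.
 */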
int cxl_alloc_spa(struct cxl_afu *afu)
{
	unsigned spa_size;

	/* Work out how many pages to allocate */
	afu->native->spa_order = -1;
	do {
		afu->native->spa_order++;
		spa_size = (1 << afu->native->spa_order) * PAGE_SIZE;

		if (spa_size > 0x100000) {
			dev_warn(&afu->dev, "num_of_processes too large for the SPA, limiting to %i (0x%x)\n",
					afu->native->spa_max_procs, afu->native->spa_size);
			afu->num_procs = afu->native->spa_max_procs;
			break;
		}

		afu->native->spa_size = spa_size;
		afu->native->spa_max_procs = spa_max_procs(afu->native->spa_size);
	} while (afu->native->spa_max_procs < afu->num_procs);

	if (!(afu->native->spa = (struct cxl_process_element *)
	      __get_free_pages(GFP_KERNEL | __GFP_ZERO, afu->native->spa_order))) {
		pr_err("cxl_alloc_spa: Unable to allocate scheduled process area\n");
		return -ENOMEM;
	}
	pr_devel("spa pages: %i afu->spa_max_procs: %i afu->num_procs: %i\n",
		 1<<afu->native->spa_order, afu->native->spa_max_procs, afu->num_procs);

	return 0;
}
static void attach_spa(struct cxl_afu *afu)
{
	u64 spap;

	afu->native->sw_command_status = (__be64 *)((char *)afu->native->spa +
					 ((afu->native->spa_max_procs + 3) * 128));

	spap = virt_to_phys(afu->native->spa) & CXL_PSL_SPAP_Addr;
	spap |= ((afu->native->spa_size >> (12 - CXL_PSL_SPAP_Size_Shift)) - 1) & CXL_PSL_SPAP_Size;
	spap |= CXL_PSL_SPAP_V;
	pr_devel("cxl: SPA allocated at 0x%p. Max processes: %i, sw_command_status: 0x%p CXL_PSL_SPAP_An=0x%016llx\n",
		 afu->native->spa, afu->native->spa_max_procs,
		 afu->native->sw_command_status, spap);
	cxl_p1n_write(afu, CXL_PSL_SPAP_An, spap);
}
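/*
 * The SPAP register thus carries the SPA's physical address, an encoding of
 * its size, and the valid bit in a single doubleword; detach_spa() below
 * simply clears the whole register, dropping the valid bit.
 */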
static inline void detach_spa(struct cxl_afu *afu)
{
	cxl_p1n_write(afu, CXL_PSL_SPAP_An, 0);
}
void cxl_release_spa(struct cxl_afu *afu)
{
	if (afu->native->spa) {
		free_pages((unsigned long) afu->native->spa,
			   afu->native->spa_order);
		afu->native->spa = NULL;
	}
}
int cxl_tlb_slb_invalidate(struct cxl *adapter)
{
	unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);

	pr_devel("CXL adapter wide TLBIA & SLBIA\n");

	cxl_p1_write(adapter, CXL_PSL_AFUSEL, CXL_PSL_AFUSEL_A);

	cxl_p1_write(adapter, CXL_PSL_TLBIA, CXL_TLB_SLB_IQ_ALL);
	while (cxl_p1_read(adapter, CXL_PSL_TLBIA) & CXL_TLB_SLB_P) {
		if (time_after_eq(jiffies, timeout)) {
			dev_warn(&adapter->dev, "WARNING: CXL adapter wide TLBIA timed out!\n");
			return -EBUSY;
		}
		if (!cxl_ops->link_ok(adapter, NULL))
			return -EIO;
		cpu_relax();
	}

	cxl_p1_write(adapter, CXL_PSL_SLBIA, CXL_TLB_SLB_IQ_ALL);
	while (cxl_p1_read(adapter, CXL_PSL_SLBIA) & CXL_TLB_SLB_P) {
		if (time_after_eq(jiffies, timeout)) {
			dev_warn(&adapter->dev, "WARNING: CXL adapter wide SLBIA timed out!\n");
			return -EBUSY;
		}
		if (!cxl_ops->link_ok(adapter, NULL))
			return -EIO;
		cpu_relax();
	}
	return 0;
}
int cxl_data_cache_flush(struct cxl *adapter)
{
	u64 reg;
	unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);

	pr_devel("Flushing data cache\n");

	reg = cxl_p1_read(adapter, CXL_PSL_Control);
	reg |= CXL_PSL_Control_Fr;
	cxl_p1_write(adapter, CXL_PSL_Control, reg);

	reg = cxl_p1_read(adapter, CXL_PSL_Control);
	while ((reg & CXL_PSL_Control_Fs_MASK) != CXL_PSL_Control_Fs_Complete) {
		if (time_after_eq(jiffies, timeout)) {
			dev_warn(&adapter->dev, "WARNING: cache flush timed out!\n");
			return -EBUSY;
		}

		if (!cxl_ops->link_ok(adapter, NULL)) {
			dev_warn(&adapter->dev, "WARNING: link down when flushing cache\n");
			return -EIO;
		}
		cpu_relax();
		reg = cxl_p1_read(adapter, CXL_PSL_Control);
	}

	reg &= ~CXL_PSL_Control_Fr;
	cxl_p1_write(adapter, CXL_PSL_Control, reg);
	return 0;
}
static int cxl_write_sstp(struct cxl_afu *afu, u64 sstp0, u64 sstp1)
{
	int rc;

	/* 1. Disable SSTP by writing 0 to SSTP1[V] */
	cxl_p2n_write(afu, CXL_SSTP1_An, 0);

	/* 2. Invalidate all SLB entries */
	if ((rc = cxl_afu_slbia(afu)))
		return rc;

	/* 3. Set SSTP0_An */
	cxl_p2n_write(afu, CXL_SSTP0_An, sstp0);

	/* 4. Set SSTP1_An */
	cxl_p2n_write(afu, CXL_SSTP1_An, sstp1);

	return 0;
}
/* Using per slice version may improve performance here. (ie. SLBIA_An) */
static void slb_invalid(struct cxl_context *ctx)
{
	struct cxl *adapter = ctx->afu->adapter;
	u64 slbia;

	WARN_ON(!mutex_is_locked(&ctx->afu->native->spa_mutex));

	cxl_p1_write(adapter, CXL_PSL_LBISEL,
			((u64)be32_to_cpu(ctx->elem->common.pid) << 32) |
			be32_to_cpu(ctx->elem->lpid));
	cxl_p1_write(adapter, CXL_PSL_SLBIA, CXL_TLB_SLB_IQ_LPIDPID);

	while (1) {
		if (!cxl_ops->link_ok(adapter, NULL))
			break;
		slbia = cxl_p1_read(adapter, CXL_PSL_SLBIA);
		if (!(slbia & CXL_TLB_SLB_P))
			break;
		cpu_relax();
	}
}
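/*
 * Process element commands are a handshake through the SPA: the command and
 * PE handle are written both to the software command/status doubleword and
 * to the PSL_LLCMD register, and sw_command_status is then polled until the
 * PSL echoes back the command, the expected software state and the PE
 * handle (~0ULL indicates an error).
 */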
static int do_process_element_cmd(struct cxl_context *ctx,
				  u64 cmd, u64 pe_state)
{
	u64 state;
	unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);
	int rc = 0;

	trace_cxl_llcmd(ctx, cmd);

	WARN_ON(!ctx->afu->enabled);

	ctx->elem->software_state = cpu_to_be32(pe_state);
	smp_wmb();
	*(ctx->afu->native->sw_command_status) = cpu_to_be64(cmd | 0 | ctx->pe);
	smp_mb();
	cxl_p1n_write(ctx->afu, CXL_PSL_LLCMD_An, cmd | ctx->pe);
	while (1) {
		if (time_after_eq(jiffies, timeout)) {
			dev_warn(&ctx->afu->dev, "WARNING: Process Element Command timed out!\n");
			rc = -EBUSY;
			goto out;
		}
		if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu)) {
			dev_warn(&ctx->afu->dev, "WARNING: Device link down, aborting Process Element Command!\n");
			rc = -EIO;
			goto out;
		}
		state = be64_to_cpup(ctx->afu->native->sw_command_status);
		if (state == ~0ULL) {
			pr_err("cxl: Error adding process element to AFU\n");
			rc = -1;
			goto out;
		}
		if ((state & (CXL_SPA_SW_CMD_MASK | CXL_SPA_SW_STATE_MASK | CXL_SPA_SW_LINK_MASK)) ==
		    (cmd | (cmd >> 16) | ctx->pe))
			break;
		/*
		 * The command won't finish in the PSL if there are
		 * outstanding DSIs. Hence we need to yield here in
		 * case there are outstanding DSIs that we need to
		 * service. Tuning possibility: we could wait for a
		 * while before scheduling.
		 */
		schedule();
	}
out:
	trace_cxl_llcmd_done(ctx, cmd, rc);
	return rc;
}
static int add_process_element(struct cxl_context *ctx)
{
	int rc = 0;

	mutex_lock(&ctx->afu->native->spa_mutex);
	pr_devel("%s Adding pe: %i started\n", __func__, ctx->pe);
	if (!(rc = do_process_element_cmd(ctx, CXL_SPA_SW_CMD_ADD, CXL_PE_SOFTWARE_STATE_V)))
		ctx->pe_inserted = true;
	pr_devel("%s Adding pe: %i finished\n", __func__, ctx->pe);
	mutex_unlock(&ctx->afu->native->spa_mutex);
	return rc;
}
static int terminate_process_element(struct cxl_context *ctx)
{
	int rc = 0;

	/* fast path terminate if it's already invalid */
	if (!(ctx->elem->software_state & cpu_to_be32(CXL_PE_SOFTWARE_STATE_V)))
		return rc;

	mutex_lock(&ctx->afu->native->spa_mutex);
	pr_devel("%s Terminate pe: %i started\n", __func__, ctx->pe);
	/* We could be asked to terminate when the hw is down. That
	 * should always succeed: it's not running if the hw has gone
	 * away and is being reset.
	 */
	if (cxl_ops->link_ok(ctx->afu->adapter, ctx->afu))
		rc = do_process_element_cmd(ctx, CXL_SPA_SW_CMD_TERMINATE,
					    CXL_PE_SOFTWARE_STATE_V | CXL_PE_SOFTWARE_STATE_T);
	ctx->elem->software_state = 0;	/* Remove Valid bit */
	pr_devel("%s Terminate pe: %i finished\n", __func__, ctx->pe);
	mutex_unlock(&ctx->afu->native->spa_mutex);
	return rc;
}
static int remove_process_element(struct cxl_context *ctx)
{
	int rc = 0;

	mutex_lock(&ctx->afu->native->spa_mutex);
	pr_devel("%s Remove pe: %i started\n", __func__, ctx->pe);

	/* We could be asked to remove when the hw is down. Again, if
	 * the hw is down, the PE is gone, so we succeed.
	 */
	if (cxl_ops->link_ok(ctx->afu->adapter, ctx->afu))
		rc = do_process_element_cmd(ctx, CXL_SPA_SW_CMD_REMOVE, 0);

	if (!rc)
		ctx->pe_inserted = false;

	pr_devel("%s Remove pe: %i finished\n", __func__, ctx->pe);
	mutex_unlock(&ctx->afu->native->spa_mutex);

	return rc;
}
void cxl_assign_psn_space(struct cxl_context *ctx)
{
	if (!ctx->afu->pp_size || ctx->master) {
		ctx->psn_phys = ctx->afu->psn_phys;
		ctx->psn_size = ctx->afu->adapter->ps_size;
	} else {
		ctx->psn_phys = ctx->afu->psn_phys +
			(ctx->afu->native->pp_offset + ctx->afu->pp_size * ctx->pe);
		ctx->psn_size = ctx->afu->pp_size;
	}
}
static int activate_afu_directed(struct cxl_afu *afu)
{
	int rc;

	dev_info(&afu->dev, "Activating AFU directed mode\n");

	afu->num_procs = afu->max_procs_virtualised;
	if (afu->native->spa == NULL) {
		if (cxl_alloc_spa(afu))
			return -ENOMEM;
	}
	attach_spa(afu);

	cxl_p1n_write(afu, CXL_PSL_SCNTL_An, CXL_PSL_SCNTL_An_PM_AFU);
	cxl_p1n_write(afu, CXL_PSL_AMOR_An, 0xFFFFFFFFFFFFFFFFULL);
	cxl_p1n_write(afu, CXL_PSL_ID_An, CXL_PSL_ID_An_F | CXL_PSL_ID_An_L);

	afu->current_mode = CXL_MODE_DIRECTED;

	if ((rc = cxl_chardev_m_afu_add(afu)))
		return rc;

	if ((rc = cxl_sysfs_afu_m_add(afu)))
		goto err;

	if ((rc = cxl_chardev_s_afu_add(afu)))
		goto err1;

	return 0;
err1:
	cxl_sysfs_afu_m_remove(afu);
err:
	cxl_chardev_afu_remove(afu);
	return rc;
}
#ifdef CONFIG_CPU_LITTLE_ENDIAN
#define set_endian(sr) ((sr) |= CXL_PSL_SR_An_LE)
#else
#define set_endian(sr) ((sr) &= ~(CXL_PSL_SR_An_LE))
#endif

static u64 calculate_sr(struct cxl_context *ctx)
{
	u64 sr = 0;

	set_endian(sr);
	if (ctx->master)
		sr |= CXL_PSL_SR_An_MP;
	if (mfspr(SPRN_LPCR) & LPCR_TC)
		sr |= CXL_PSL_SR_An_TC;
	if (ctx->kernel) {
		if (!ctx->real_mode)
			sr |= CXL_PSL_SR_An_R;
		sr |= (mfmsr() & MSR_SF) | CXL_PSL_SR_An_HV;
	} else {
		sr |= CXL_PSL_SR_An_PR | CXL_PSL_SR_An_R;
		sr &= ~(CXL_PSL_SR_An_HV);
		if (!test_tsk_thread_flag(current, TIF_32BIT))
			sr |= CXL_PSL_SR_An_SF;
	}
	return sr;
}
static void update_ivtes_directed(struct cxl_context *ctx)
{
	bool need_update = (ctx->status == STARTED);
	int r;

	if (need_update) {
		WARN_ON(terminate_process_element(ctx));
		WARN_ON(remove_process_element(ctx));
	}

	for (r = 0; r < CXL_IRQ_RANGES; r++) {
		ctx->elem->ivte_offsets[r] = cpu_to_be16(ctx->irqs.offset[r]);
		ctx->elem->ivte_ranges[r] = cpu_to_be16(ctx->irqs.range[r]);
	}

	/*
	 * Theoretically we could use the update llcmd, instead of a
	 * terminate/remove/add (or if an atomic update was required we could
	 * do a suspend/update/resume), however it seems there might be issues
	 * with the update llcmd on some cards (including those using an XSL on
	 * an ASIC) so for now it's safest to go with the commands that are
	 * known to work. In the future if we come across a situation where the
	 * card may be performing transactions using the same PE while we are
	 * doing this update we might need to revisit this.
	 */
	if (need_update)
		WARN_ON(add_process_element(ctx));
}
static int attach_afu_directed(struct cxl_context *ctx, u64 wed, u64 amr)
{
	u32 pid;
	int result;

	cxl_assign_psn_space(ctx);

	ctx->elem->ctxtime = 0; /* disable */
	ctx->elem->lpid = cpu_to_be32(mfspr(SPRN_LPID));
	ctx->elem->haurp = 0; /* disable */
	ctx->elem->sdr = cpu_to_be64(mfspr(SPRN_SDR1));

	pid = current->pid;
	if (ctx->kernel)
		pid = 0;
	ctx->elem->common.tid = 0;
	ctx->elem->common.pid = cpu_to_be32(pid);

	ctx->elem->sr = cpu_to_be64(calculate_sr(ctx));

	ctx->elem->common.csrp = 0; /* disable */
	ctx->elem->common.aurp0 = 0; /* disable */
	ctx->elem->common.aurp1 = 0; /* disable */

	cxl_prefault(ctx, wed);

	ctx->elem->common.sstp0 = cpu_to_be64(ctx->sstp0);
	ctx->elem->common.sstp1 = cpu_to_be64(ctx->sstp1);

	/*
	 * Ensure we have the multiplexed PSL interrupt set up to take faults
	 * for kernel contexts that may not have allocated any AFU IRQs at all:
	 */
	if (ctx->irqs.range[0] == 0) {
		ctx->irqs.offset[0] = ctx->afu->native->psl_hwirq;
		ctx->irqs.range[0] = 1;
	}

	update_ivtes_directed(ctx);

	ctx->elem->common.amr = cpu_to_be64(amr);
	ctx->elem->common.wed = cpu_to_be64(wed);

	/* first guy needs to enable */
	if ((result = cxl_ops->afu_check_and_enable(ctx->afu)))
		return result;

	return add_process_element(ctx);
}
static int deactivate_afu_directed(struct cxl_afu *afu)
{
	dev_info(&afu->dev, "Deactivating AFU directed mode\n");

	afu->current_mode = 0;
	afu->num_procs = 0;

	cxl_sysfs_afu_m_remove(afu);
	cxl_chardev_afu_remove(afu);

	/*
	 * The CAIA section 2.2.1 indicates that the procedure for starting and
	 * stopping an AFU in AFU directed mode is AFU specific, which is not
	 * ideal since this code is generic and with one exception has no
	 * knowledge of the AFU. This is in contrast to the procedure for
	 * disabling a dedicated process AFU, which is documented to just
	 * require a reset. The architecture does indicate that both an AFU
	 * reset and an AFU disable should result in the AFU being disabled and
	 * we do both followed by a PSL purge for safety.
	 *
	 * Notably we used to have some issues with the disable sequence on PSL
	 * cards, which is why we ended up using this heavy weight procedure in
	 * the first place, however a bug was discovered that had rendered the
	 * disable operation ineffective, so it is conceivable that was the
	 * sole explanation for those difficulties. Careful regression testing
	 * is recommended if anyone attempts to remove or reorder these
	 * functions.
	 *
	 * The XSL on the Mellanox CX4 behaves a little differently from the
	 * PSL based cards and will time out an AFU reset if the AFU is still
	 * enabled. That card is special in that we do have a means to identify
	 * it from this code, so in that case we skip the reset and just use a
	 * disable/purge to avoid the timeout and corresponding noise in the
	 * kernel log.
	 */
	if (afu->adapter->native->sl_ops->needs_reset_before_disable)
		cxl_ops->afu_reset(afu);
	cxl_afu_disable(afu);
	cxl_psl_purge(afu);

	return 0;
}
static int activate_dedicated_process(struct cxl_afu *afu)
{
	dev_info(&afu->dev, "Activating dedicated process mode\n");

	cxl_p1n_write(afu, CXL_PSL_SCNTL_An, CXL_PSL_SCNTL_An_PM_Process);

	cxl_p1n_write(afu, CXL_PSL_CtxTime_An, 0); /* disable */
	cxl_p1n_write(afu, CXL_PSL_SPAP_An, 0);    /* disable */
	cxl_p1n_write(afu, CXL_PSL_AMOR_An, 0xFFFFFFFFFFFFFFFFULL);
	cxl_p1n_write(afu, CXL_PSL_LPID_An, mfspr(SPRN_LPID));
	cxl_p1n_write(afu, CXL_HAURP_An, 0);       /* disable */
	cxl_p1n_write(afu, CXL_PSL_SDR_An, mfspr(SPRN_SDR1));

	cxl_p2n_write(afu, CXL_CSRP_An, 0);  /* disable */
	cxl_p2n_write(afu, CXL_AURP0_An, 0); /* disable */
	cxl_p2n_write(afu, CXL_AURP1_An, 0); /* disable */

	afu->current_mode = CXL_MODE_DEDICATED;
	afu->num_procs = 1;

	return cxl_chardev_d_afu_add(afu);
}
static void update_ivtes_dedicated(struct cxl_context *ctx)
{
	struct cxl_afu *afu = ctx->afu;

	cxl_p1n_write(afu, CXL_PSL_IVTE_Offset_An,
		      (((u64)ctx->irqs.offset[0] & 0xffff) << 48) |
		      (((u64)ctx->irqs.offset[1] & 0xffff) << 32) |
		      (((u64)ctx->irqs.offset[2] & 0xffff) << 16) |
		      ((u64)ctx->irqs.offset[3] & 0xffff));
	cxl_p1n_write(afu, CXL_PSL_IVTE_Limit_An, (u64)
		      (((u64)ctx->irqs.range[0] & 0xffff) << 48) |
		      (((u64)ctx->irqs.range[1] & 0xffff) << 32) |
		      (((u64)ctx->irqs.range[2] & 0xffff) << 16) |
		      ((u64)ctx->irqs.range[3] & 0xffff));
}
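/*
 * Each of the two registers above packs four 16-bit values, most
 * significant first: e.g. offsets {1, 2, 3, 4} encode as
 * 0x0001000200030004.
 */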
static int attach_dedicated(struct cxl_context *ctx, u64 wed, u64 amr)
{
	struct cxl_afu *afu = ctx->afu;
	u64 pid;
	int rc;

	pid = (u64)current->pid << 32;
	if (ctx->kernel)
		pid = 0;
	cxl_p2n_write(afu, CXL_PSL_PID_TID_An, pid);

	cxl_p1n_write(afu, CXL_PSL_SR_An, calculate_sr(ctx));

	if ((rc = cxl_write_sstp(afu, ctx->sstp0, ctx->sstp1)))
		return rc;

	cxl_prefault(ctx, wed);

	update_ivtes_dedicated(ctx);

	cxl_p2n_write(afu, CXL_PSL_AMR_An, amr);

	/* master only context for dedicated */
	cxl_assign_psn_space(ctx);

	if ((rc = cxl_ops->afu_reset(afu)))
		return rc;

	cxl_p2n_write(afu, CXL_PSL_WED_An, wed);

	return afu_enable(afu);
}
static int deactivate_dedicated_process(struct cxl_afu *afu)
{
	dev_info(&afu->dev, "Deactivating dedicated process mode\n");

	afu->current_mode = 0;
	afu->num_procs = 0;

	cxl_chardev_afu_remove(afu);

	return 0;
}
static int native_afu_deactivate_mode(struct cxl_afu *afu, int mode)
{
	if (mode == CXL_MODE_DIRECTED)
		return deactivate_afu_directed(afu);
	if (mode == CXL_MODE_DEDICATED)
		return deactivate_dedicated_process(afu);
	return 0;
}
static int native_afu_activate_mode(struct cxl_afu *afu, int mode)
{
	if (!mode)
		return 0;
	if (!(mode & afu->modes_supported))
		return -EINVAL;

	if (!cxl_ops->link_ok(afu->adapter, afu)) {
		WARN(1, "Device link is down, refusing to activate!\n");
		return -EIO;
	}

	if (mode == CXL_MODE_DIRECTED)
		return activate_afu_directed(afu);
	if (mode == CXL_MODE_DEDICATED)
		return activate_dedicated_process(afu);

	return -EINVAL;
}
static int native_attach_process(struct cxl_context *ctx, bool kernel,
				 u64 wed, u64 amr)
{
	if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu)) {
		WARN(1, "Device link is down, refusing to attach process!\n");
		return -EIO;
	}

	ctx->kernel = kernel;
	if (ctx->afu->current_mode == CXL_MODE_DIRECTED)
		return attach_afu_directed(ctx, wed, amr);

	if (ctx->afu->current_mode == CXL_MODE_DEDICATED)
		return attach_dedicated(ctx, wed, amr);

	return -EINVAL;
}
static inline int detach_process_native_dedicated(struct cxl_context *ctx)
{
	/*
	 * The CAIA section 2.1.1 indicates that we need to do an AFU reset to
	 * stop the AFU in dedicated mode (we therefore do not make that
	 * optional like we do in the afu directed path). It does not indicate
	 * that we need to do an explicit disable (which should occur
	 * implicitly as part of the reset) or purge, but we do these as well
	 * to be on the safe side.
	 *
	 * Notably we used to have some issues with the disable sequence
	 * (before the sequence was spelled out in the architecture) which is
	 * why we were so heavy weight in the first place, however a bug was
	 * discovered that had rendered the disable operation ineffective, so
	 * it is conceivable that was the sole explanation for those
	 * difficulties. Point is, we should be careful and do some regression
	 * testing if we ever attempt to remove any part of this procedure.
	 */
	cxl_ops->afu_reset(ctx->afu);
	cxl_afu_disable(ctx->afu);
	cxl_psl_purge(ctx->afu);
	return 0;
}
static void native_update_ivtes(struct cxl_context *ctx)
{
	if (ctx->afu->current_mode == CXL_MODE_DIRECTED)
		return update_ivtes_directed(ctx);
	if (ctx->afu->current_mode == CXL_MODE_DEDICATED)
		return update_ivtes_dedicated(ctx);
	WARN(1, "native_update_ivtes: Bad mode\n");
}
static inline int detach_process_native_afu_directed(struct cxl_context *ctx)
{
	if (!ctx->pe_inserted)
		return 0;
	if (terminate_process_element(ctx))
		return -1;
	if (remove_process_element(ctx))
		return -1;

	return 0;
}
static int native_detach_process(struct cxl_context *ctx)
{
	trace_cxl_detach(ctx);

	if (ctx->afu->current_mode == CXL_MODE_DEDICATED)
		return detach_process_native_dedicated(ctx);

	return detach_process_native_afu_directed(ctx);
}
static int native_get_irq_info(struct cxl_afu *afu, struct cxl_irq_info *info)
{
	u64 pidtid;

	/* If the adapter has gone away, we can't get any meaningful
	 * information.
	 */
	if (!cxl_ops->link_ok(afu->adapter, afu))
		return -EIO;

	info->dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
	info->dar = cxl_p2n_read(afu, CXL_PSL_DAR_An);
	info->dsr = cxl_p2n_read(afu, CXL_PSL_DSR_An);
	pidtid = cxl_p2n_read(afu, CXL_PSL_PID_TID_An);
	info->pid = pidtid >> 32;
	info->tid = pidtid & 0xffffffff;
	info->afu_err = cxl_p2n_read(afu, CXL_AFU_ERR_An);
	info->errstat = cxl_p2n_read(afu, CXL_PSL_ErrStat_An);
	info->proc_handle = 0;

	return 0;
}
void cxl_native_psl_irq_dump_regs(struct cxl_context *ctx)
{
	u64 fir1, fir2, fir_slice, serr, afu_debug;

	fir1 = cxl_p1_read(ctx->afu->adapter, CXL_PSL_FIR1);
	fir2 = cxl_p1_read(ctx->afu->adapter, CXL_PSL_FIR2);
	fir_slice = cxl_p1n_read(ctx->afu, CXL_PSL_FIR_SLICE_An);
	afu_debug = cxl_p1n_read(ctx->afu, CXL_AFU_DEBUG_An);

	dev_crit(&ctx->afu->dev, "PSL_FIR1: 0x%016llx\n", fir1);
	dev_crit(&ctx->afu->dev, "PSL_FIR2: 0x%016llx\n", fir2);
	if (ctx->afu->adapter->native->sl_ops->register_serr_irq) {
		serr = cxl_p1n_read(ctx->afu, CXL_PSL_SERR_An);
		cxl_afu_decode_psl_serr(ctx->afu, serr);
	}
	dev_crit(&ctx->afu->dev, "PSL_FIR_SLICE_An: 0x%016llx\n", fir_slice);
	dev_crit(&ctx->afu->dev, "CXL_PSL_AFU_DEBUG_An: 0x%016llx\n", afu_debug);
}
static irqreturn_t native_handle_psl_slice_error(struct cxl_context *ctx,
						 u64 dsisr, u64 errstat)
{
	dev_crit(&ctx->afu->dev, "PSL ERROR STATUS: 0x%016llx\n", errstat);

	if (ctx->afu->adapter->native->sl_ops->psl_irq_dump_registers)
		ctx->afu->adapter->native->sl_ops->psl_irq_dump_registers(ctx);

	if (ctx->afu->adapter->native->sl_ops->debugfs_stop_trace) {
		dev_crit(&ctx->afu->dev, "STOPPING CXL TRACE\n");
		ctx->afu->adapter->native->sl_ops->debugfs_stop_trace(ctx->afu->adapter);
	}

	return cxl_ops->ack_irq(ctx, 0, errstat);
}
static irqreturn_t fail_psl_irq(struct cxl_afu *afu, struct cxl_irq_info *irq_info)
{
	if (irq_info->dsisr & CXL_PSL_DSISR_TRANS)
		cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_AE);
	else
		cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_A);

	return IRQ_HANDLED;
}
static irqreturn_t native_irq_multiplexed(int irq, void *data)
{
	struct cxl_afu *afu = data;
	struct cxl_context *ctx;
	struct cxl_irq_info irq_info;
	u64 phreg = cxl_p2n_read(afu, CXL_PSL_PEHandle_An);
	int ph, ret;

	/* check if eeh kicked in while the interrupt was in flight */
	if (unlikely(phreg == ~0ULL)) {
		dev_warn(&afu->dev,
			 "Ignoring slice interrupt(%d) due to fenced card",
			 irq);
		return IRQ_HANDLED;
	}
	/* Mask the pe-handle from register value */
	ph = phreg & 0xffff;
	if ((ret = native_get_irq_info(afu, &irq_info))) {
		WARN(1, "Unable to get CXL IRQ Info: %i\n", ret);
		return fail_psl_irq(afu, &irq_info);
	}

	rcu_read_lock();
	ctx = idr_find(&afu->contexts_idr, ph);
	if (ctx) {
		ret = cxl_irq(irq, ctx, &irq_info);
		rcu_read_unlock();
		return ret;
	}
	rcu_read_unlock();

	WARN(1, "Unable to demultiplex CXL PSL IRQ for PE %i DSISR %016llx DAR"
		" %016llx\n(Possible AFU HW issue - was a term/remove acked"
		" with outstanding transactions?)\n", ph, irq_info.dsisr,
		irq_info.dar);
	return fail_psl_irq(afu, &irq_info);
}
static void native_irq_wait(struct cxl_context *ctx)
{
	u64 dsisr;
	int timeout = 1000;
	int ph;

	/*
	 * Wait until no further interrupts are presented by the PSL
	 * for this context.
	 */
	while (timeout--) {
		ph = cxl_p2n_read(ctx->afu, CXL_PSL_PEHandle_An) & 0xffff;
		if (ph != ctx->pe)
			return;
		dsisr = cxl_p2n_read(ctx->afu, CXL_PSL_DSISR_An);
		if ((dsisr & CXL_PSL_DSISR_PENDING) == 0)
			return;
		/*
		 * We are waiting for the workqueue to process our
		 * irq, so need to let that run here.
		 */
		msleep(1);
	}

	dev_warn(&ctx->afu->dev, "WARNING: waiting on DSI for PE %i"
		 " DSISR %016llx!\n", ph, dsisr);
	return;
}
static irqreturn_t native_slice_irq_err(int irq, void *data)
{
	struct cxl_afu *afu = data;
	u64 fir_slice, errstat, serr, afu_debug, afu_error, dsisr;

	/*
	 * slice err interrupt is only used with full PSL (no XSL)
	 */
	serr = cxl_p1n_read(afu, CXL_PSL_SERR_An);
	fir_slice = cxl_p1n_read(afu, CXL_PSL_FIR_SLICE_An);
	errstat = cxl_p2n_read(afu, CXL_PSL_ErrStat_An);
	afu_debug = cxl_p1n_read(afu, CXL_AFU_DEBUG_An);
	afu_error = cxl_p2n_read(afu, CXL_AFU_ERR_An);
	dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
	cxl_afu_decode_psl_serr(afu, serr);
	dev_crit(&afu->dev, "PSL_FIR_SLICE_An: 0x%016llx\n", fir_slice);
	dev_crit(&afu->dev, "CXL_PSL_ErrStat_An: 0x%016llx\n", errstat);
	dev_crit(&afu->dev, "CXL_PSL_AFU_DEBUG_An: 0x%016llx\n", afu_debug);
	dev_crit(&afu->dev, "AFU_ERR_An: 0x%.16llx\n", afu_error);
	dev_crit(&afu->dev, "PSL_DSISR_An: 0x%.16llx\n", dsisr);

	cxl_p1n_write(afu, CXL_PSL_SERR_An, serr);

	return IRQ_HANDLED;
}
void cxl_native_err_irq_dump_regs(struct cxl *adapter)
{
	u64 fir1, fir2;

	fir1 = cxl_p1_read(adapter, CXL_PSL_FIR1);
	fir2 = cxl_p1_read(adapter, CXL_PSL_FIR2);

	dev_crit(&adapter->dev, "PSL_FIR1: 0x%016llx\nPSL_FIR2: 0x%016llx\n", fir1, fir2);
}
static irqreturn_t native_irq_err(int irq, void *data)
{
	struct cxl *adapter = data;
	u64 err_ivte;

	WARN(1, "CXL ERROR interrupt %i\n", irq);

	err_ivte = cxl_p1_read(adapter, CXL_PSL_ErrIVTE);
	dev_crit(&adapter->dev, "PSL_ErrIVTE: 0x%016llx\n", err_ivte);

	if (adapter->native->sl_ops->debugfs_stop_trace) {
		dev_crit(&adapter->dev, "STOPPING CXL TRACE\n");
		adapter->native->sl_ops->debugfs_stop_trace(adapter);
	}

	if (adapter->native->sl_ops->err_irq_dump_registers)
		adapter->native->sl_ops->err_irq_dump_registers(adapter);

	return IRQ_HANDLED;
}
int cxl_native_register_psl_err_irq(struct cxl *adapter)
{
	int rc;

	adapter->irq_name = kasprintf(GFP_KERNEL, "cxl-%s-err",
				      dev_name(&adapter->dev));
	if (!adapter->irq_name)
		return -ENOMEM;

	if ((rc = cxl_register_one_irq(adapter, native_irq_err, adapter,
				       &adapter->native->err_hwirq,
				       &adapter->native->err_virq,
				       adapter->irq_name))) {
		kfree(adapter->irq_name);
		adapter->irq_name = NULL;
		return rc;
	}

	cxl_p1_write(adapter, CXL_PSL_ErrIVTE, adapter->native->err_hwirq & 0xffff);

	return 0;
}
void cxl_native_release_psl_err_irq(struct cxl *adapter)
{
	if (adapter->native->err_virq != irq_find_mapping(NULL, adapter->native->err_hwirq))
		return;

	cxl_p1_write(adapter, CXL_PSL_ErrIVTE, 0x0000000000000000);
	cxl_unmap_irq(adapter->native->err_virq, adapter);
	cxl_ops->release_one_irq(adapter, adapter->native->err_hwirq);
	kfree(adapter->irq_name);
}
int cxl_native_register_serr_irq(struct cxl_afu *afu)
{
	u64 serr;
	int rc;

	afu->err_irq_name = kasprintf(GFP_KERNEL, "cxl-%s-err",
				      dev_name(&afu->dev));
	if (!afu->err_irq_name)
		return -ENOMEM;

	if ((rc = cxl_register_one_irq(afu->adapter, native_slice_irq_err, afu,
				       &afu->serr_hwirq,
				       &afu->serr_virq, afu->err_irq_name))) {
		kfree(afu->err_irq_name);
		afu->err_irq_name = NULL;
		return rc;
	}

	serr = cxl_p1n_read(afu, CXL_PSL_SERR_An);
	serr = (serr & 0x00ffffffffff0000ULL) | (afu->serr_hwirq & 0xffff);
	cxl_p1n_write(afu, CXL_PSL_SERR_An, serr);

	return 0;
}
void cxl_native_release_serr_irq(struct cxl_afu *afu)
{
	if (afu->serr_virq != irq_find_mapping(NULL, afu->serr_hwirq))
		return;

	cxl_p1n_write(afu, CXL_PSL_SERR_An, 0x0000000000000000);
	cxl_unmap_irq(afu->serr_virq, afu);
	cxl_ops->release_one_irq(afu->adapter, afu->serr_hwirq);
	kfree(afu->err_irq_name);
}
int cxl_native_register_psl_irq(struct cxl_afu *afu)
{
	int rc;

	afu->psl_irq_name = kasprintf(GFP_KERNEL, "cxl-%s",
				      dev_name(&afu->dev));
	if (!afu->psl_irq_name)
		return -ENOMEM;

	if ((rc = cxl_register_one_irq(afu->adapter, native_irq_multiplexed,
				       afu, &afu->native->psl_hwirq, &afu->native->psl_virq,
				       afu->psl_irq_name))) {
		kfree(afu->psl_irq_name);
		afu->psl_irq_name = NULL;
	}
	return rc;
}
void cxl_native_release_psl_irq(struct cxl_afu *afu)
{
	if (afu->native->psl_virq != irq_find_mapping(NULL, afu->native->psl_hwirq))
		return;

	cxl_unmap_irq(afu->native->psl_virq, afu);
	cxl_ops->release_one_irq(afu->adapter, afu->native->psl_hwirq);
	kfree(afu->psl_irq_name);
}
static void recover_psl_err(struct cxl_afu *afu, u64 errstat)
{
	u64 dsisr;

	pr_devel("RECOVERING FROM PSL ERROR... (0x%016llx)\n", errstat);

	/* Clear PSL_DSISR[PE] */
	dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
	cxl_p2n_write(afu, CXL_PSL_DSISR_An, dsisr & ~CXL_PSL_DSISR_An_PE);

	/* Write 1s to clear error status bits */
	cxl_p2n_write(afu, CXL_PSL_ErrStat_An, errstat);
}
static int native_ack_irq(struct cxl_context *ctx, u64 tfc, u64 psl_reset_mask)
{
	trace_cxl_psl_irq_ack(ctx, tfc);
	if (tfc)
		cxl_p2n_write(ctx->afu, CXL_PSL_TFC_An, tfc);
	if (psl_reset_mask)
		recover_psl_err(ctx->afu, psl_reset_mask);

	return 0;
}
int cxl_check_error(struct cxl_afu *afu)
{
	return (cxl_p1n_read(afu, CXL_PSL_SCNTL_An) == ~0ULL);
}
static bool native_support_attributes(const char *attr_name,
				      enum cxl_attrs type)
{
	return true;
}
static int native_afu_cr_read64(struct cxl_afu *afu, int cr, u64 off, u64 *out)
{
	if (unlikely(!cxl_ops->link_ok(afu->adapter, afu)))
		return -EIO;
	if (unlikely(off >= afu->crs_len))
		return -ERANGE;
	*out = in_le64(afu->native->afu_desc_mmio + afu->crs_offset +
		       (cr * afu->crs_len) + off);
	return 0;
}
static int native_afu_cr_read32(struct cxl_afu *afu, int cr, u64 off, u32 *out)
{
	if (unlikely(!cxl_ops->link_ok(afu->adapter, afu)))
		return -EIO;
	if (unlikely(off >= afu->crs_len))
		return -ERANGE;
	*out = in_le32(afu->native->afu_desc_mmio + afu->crs_offset +
		       (cr * afu->crs_len) + off);
	return 0;
}
static int native_afu_cr_read16(struct cxl_afu *afu, int cr, u64 off, u16 *out)
{
	u64 aligned_off = off & ~0x3L;
	u32 val;
	int rc;

	rc = native_afu_cr_read32(afu, cr, aligned_off, &val);
	if (!rc)
		*out = (val >> ((off & 0x3) * 8)) & 0xffff;
	return rc;
}
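/*
 * For example, a 16-bit read at off = 6 becomes a 32-bit read at
 * aligned_off = 4, followed by a shift of (6 & 0x3) * 8 = 16 bits to pick
 * out the half-word at offsets 6-7.
 */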
static int native_afu_cr_read8(struct cxl_afu *afu, int cr, u64 off, u8 *out)
{
	u64 aligned_off = off & ~0x3L;
	u32 val;
	int rc;

	rc = native_afu_cr_read32(afu, cr, aligned_off, &val);
	if (!rc)
		*out = (val >> ((off & 0x3) * 8)) & 0xff;
	return rc;
}
static int native_afu_cr_write32(struct cxl_afu *afu, int cr, u64 off, u32 in)
{
	if (unlikely(!cxl_ops->link_ok(afu->adapter, afu)))
		return -EIO;
	if (unlikely(off >= afu->crs_len))
		return -ERANGE;
	out_le32(afu->native->afu_desc_mmio + afu->crs_offset +
		 (cr * afu->crs_len) + off, in);
	return 0;
}
static int native_afu_cr_write16(struct cxl_afu *afu, int cr, u64 off, u16 in)
{
	u64 aligned_off = off & ~0x3L;
	u32 val32, mask, shift;
	int rc;

	rc = native_afu_cr_read32(afu, cr, aligned_off, &val32);
	if (rc)
		return rc;
	shift = (off & 0x3) * 8;
	WARN_ON(shift == 24);
	mask = 0xffff << shift;
	val32 = (val32 & ~mask) | (in << shift);

	rc = native_afu_cr_write32(afu, cr, aligned_off, val32);
	return rc;
}
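/*
 * The WARN_ON(shift == 24) above catches a 16-bit write at off & 0x3 == 3,
 * which would straddle the 32-bit word used for the read-modify-write.
 */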
static int native_afu_cr_write8(struct cxl_afu *afu, int cr, u64 off, u8 in)
{
	u64 aligned_off = off & ~0x3L;
	u32 val32, mask, shift;
	int rc;

	rc = native_afu_cr_read32(afu, cr, aligned_off, &val32);
	if (rc)
		return rc;
	shift = (off & 0x3) * 8;
	mask = 0xff << shift;
	val32 = (val32 & ~mask) | (in << shift);

	rc = native_afu_cr_write32(afu, cr, aligned_off, val32);
	return rc;
}
const struct cxl_backend_ops cxl_native_ops = {
	.module = THIS_MODULE,
	.adapter_reset = cxl_pci_reset,
	.alloc_one_irq = cxl_pci_alloc_one_irq,
	.release_one_irq = cxl_pci_release_one_irq,
	.alloc_irq_ranges = cxl_pci_alloc_irq_ranges,
	.release_irq_ranges = cxl_pci_release_irq_ranges,
	.setup_irq = cxl_pci_setup_irq,
	.handle_psl_slice_error = native_handle_psl_slice_error,
	.psl_interrupt = NULL,
	.ack_irq = native_ack_irq,
	.irq_wait = native_irq_wait,
	.attach_process = native_attach_process,
	.detach_process = native_detach_process,
	.update_ivtes = native_update_ivtes,
	.support_attributes = native_support_attributes,
	.link_ok = cxl_adapter_link_ok,
	.release_afu = cxl_pci_release_afu,
	.afu_read_err_buffer = cxl_pci_afu_read_err_buffer,
	.afu_check_and_enable = native_afu_check_and_enable,
	.afu_activate_mode = native_afu_activate_mode,
	.afu_deactivate_mode = native_afu_deactivate_mode,
	.afu_reset = native_afu_reset,
	.afu_cr_read8 = native_afu_cr_read8,
	.afu_cr_read16 = native_afu_cr_read16,
	.afu_cr_read32 = native_afu_cr_read32,
	.afu_cr_read64 = native_afu_cr_read64,
	.afu_cr_write8 = native_afu_cr_write8,
	.afu_cr_write16 = native_afu_cr_write16,
	.afu_cr_write32 = native_afu_cr_write32,
	.read_adapter_vpd = cxl_pci_read_adapter_vpd,
};