/*
 * Copyright 2014 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/pci_regs.h>
#include <linux/pci_ids.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/pci.h>
#include <linux/of.h>
#include <linux/delay.h>
#include <asm/opal.h>
#include <asm/msi_bitmap.h>
#include <asm/pci-bridge.h> /* for struct pci_controller */
#include <asm/pnv-pci.h>
#include <asm/io.h>

#include "cxl.h"
#include <misc/cxl.h>
#define CXL_PCI_VSEC_ID	0x1280
#define CXL_VSEC_MIN_SIZE 0x80
/* The VSEC length field lives in the upper 12 bits of the word at +0x6 */
#define CXL_READ_VSEC_LENGTH(dev, vsec, dest)			\
	{							\
		pci_read_config_word(dev, vsec + 0x6, dest);	\
		*dest >>= 4;					\
	}
#define CXL_READ_VSEC_NAFUS(dev, vsec, dest) \
	pci_read_config_byte(dev, vsec + 0x8, dest)

#define CXL_READ_VSEC_STATUS(dev, vsec, dest) \
	pci_read_config_byte(dev, vsec + 0x9, dest)
#define CXL_STATUS_SECOND_PORT  0x80
#define CXL_STATUS_MSI_X_FULL   0x40
#define CXL_STATUS_MSI_X_SINGLE 0x20
#define CXL_STATUS_FLASH_RW     0x08
#define CXL_STATUS_FLASH_RO     0x04
#define CXL_STATUS_LOADABLE_AFU 0x02
#define CXL_STATUS_LOADABLE_PSL 0x01
/* If we see these features we won't try to use the card */
#define CXL_UNSUPPORTED_FEATURES \
	(CXL_STATUS_MSI_X_FULL | CXL_STATUS_MSI_X_SINGLE)
#define CXL_READ_VSEC_MODE_CONTROL(dev, vsec, dest) \
	pci_read_config_byte(dev, vsec + 0xa, dest)
#define CXL_WRITE_VSEC_MODE_CONTROL(dev, vsec, val) \
	pci_write_config_byte(dev, vsec + 0xa, val)
#define CXL_VSEC_PROTOCOL_MASK   0xe0
#define CXL_VSEC_PROTOCOL_1024TB 0x80
#define CXL_VSEC_PROTOCOL_512TB  0x40
#define CXL_VSEC_PROTOCOL_256TB  0x20 /* Power 8 uses this */
#define CXL_VSEC_PROTOCOL_ENABLE 0x01
#define CXL_READ_VSEC_PSL_REVISION(dev, vsec, dest) \
	pci_read_config_word(dev, vsec + 0xc, dest)

#define CXL_READ_VSEC_CAIA_MINOR(dev, vsec, dest) \
	pci_read_config_byte(dev, vsec + 0xe, dest)
#define CXL_READ_VSEC_CAIA_MAJOR(dev, vsec, dest) \
	pci_read_config_byte(dev, vsec + 0xf, dest)
#define CXL_READ_VSEC_BASE_IMAGE(dev, vsec, dest) \
	pci_read_config_word(dev, vsec + 0x10, dest)

#define CXL_READ_VSEC_IMAGE_STATE(dev, vsec, dest) \
	pci_read_config_byte(dev, vsec + 0x13, dest)
#define CXL_WRITE_VSEC_IMAGE_STATE(dev, vsec, val) \
	pci_write_config_byte(dev, vsec + 0x13, val)
#define CXL_VSEC_USER_IMAGE_LOADED 0x80 /* RO */
#define CXL_VSEC_PERST_LOADS_IMAGE 0x20 /* RW */
#define CXL_VSEC_PERST_SELECT_USER 0x10 /* RW */

#define CXL_READ_VSEC_AFU_DESC_OFF(dev, vsec, dest) \
	pci_read_config_dword(dev, vsec + 0x20, dest)
#define CXL_READ_VSEC_AFU_DESC_SIZE(dev, vsec, dest) \
	pci_read_config_dword(dev, vsec + 0x24, dest)
#define CXL_READ_VSEC_PS_OFF(dev, vsec, dest) \
	pci_read_config_dword(dev, vsec + 0x28, dest)
#define CXL_READ_VSEC_PS_SIZE(dev, vsec, dest) \
	pci_read_config_dword(dev, vsec + 0x2c, dest)
/* This works a little differently from the p1/p2 register accesses, to make
 * it easier to pull out individual fields */
#define AFUD_READ(afu, off)    in_be64(afu->afu_desc_mmio + off)
#define AFUD_READ_LE(afu, off) in_le64(afu->afu_desc_mmio + off)
#define EXTRACT_PPC_BIT(val, bit)     (!!(val & PPC_BIT(bit)))
#define EXTRACT_PPC_BITS(val, bs, be) ((val & PPC_BITMASK(bs, be)) >> PPC_BITLSHIFT(be))
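/*
 * Note: these use big-endian PPC bit numbering, so PPC_BIT(0) is the MSB
 * (1ull << 63). For example, EXTRACT_PPC_BITS(val, 16, 31) pulls out the
 * second-highest 16-bit field: (val & PPC_BITMASK(16, 31)) >> (63 - 31).
 */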
#define AFUD_READ_INFO(afu)    AFUD_READ(afu, 0x0)
#define   AFUD_NUM_INTS_PER_PROC(val)   EXTRACT_PPC_BITS(val, 0, 15)
#define   AFUD_NUM_PROCS(val)           EXTRACT_PPC_BITS(val, 16, 31)
#define   AFUD_NUM_CRS(val)             EXTRACT_PPC_BITS(val, 32, 47)
#define   AFUD_MULTIMODE(val)           EXTRACT_PPC_BIT(val, 48)
#define   AFUD_PUSH_BLOCK_TRANSFER(val) EXTRACT_PPC_BIT(val, 55)
#define   AFUD_DEDICATED_PROCESS(val)   EXTRACT_PPC_BIT(val, 59)
#define   AFUD_AFU_DIRECTED(val)        EXTRACT_PPC_BIT(val, 61)
#define   AFUD_TIME_SLICED(val)         EXTRACT_PPC_BIT(val, 63)
#define AFUD_READ_CR(afu)      AFUD_READ(afu, 0x20)
#define   AFUD_CR_LEN(val)              EXTRACT_PPC_BITS(val, 8, 63)
#define AFUD_READ_CR_OFF(afu)  AFUD_READ(afu, 0x28)
#define AFUD_READ_PPPSA(afu)   AFUD_READ(afu, 0x30)
#define   AFUD_PPPSA_PP(val)            EXTRACT_PPC_BIT(val, 6)
#define   AFUD_PPPSA_PSA(val)           EXTRACT_PPC_BIT(val, 7)
#define   AFUD_PPPSA_LEN(val)           EXTRACT_PPC_BITS(val, 8, 63)
#define AFUD_READ_PPPSA_OFF(afu) AFUD_READ(afu, 0x38)
#define AFUD_READ_EB(afu)      AFUD_READ(afu, 0x40)
#define   AFUD_EB_LEN(val)              EXTRACT_PPC_BITS(val, 8, 63)
#define AFUD_READ_EB_OFF(afu)  AFUD_READ(afu, 0x48)
u16 cxl_afu_cr_read16(struct cxl_afu *afu, int cr, u64 off)
{
	u64 aligned_off = off & ~0x3L;
	u32 val;

	val = cxl_afu_cr_read32(afu, cr, aligned_off);
	return (val >> ((off & 0x2) * 8)) & 0xffff;
}

u8 cxl_afu_cr_read8(struct cxl_afu *afu, int cr, u64 off)
{
	u64 aligned_off = off & ~0x3L;
	u32 val;

	val = cxl_afu_cr_read32(afu, cr, aligned_off);
	return (val >> ((off & 0x3) * 8)) & 0xff;
}
static const struct pci_device_id cxl_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x0477), },
	{ PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x044b), },
	{ PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x04cf), },
	{ PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x0601), },
	{ PCI_DEVICE_CLASS(0x120000, ~0), },
	{ }
};
MODULE_DEVICE_TABLE(pci, cxl_pci_tbl);
/*
 * Mostly using these wrappers to avoid confusion:
 * priv 1 is BAR2, while priv 2 is BAR0
 */
static inline resource_size_t p1_base(struct pci_dev *dev)
{
	return pci_resource_start(dev, 2);
}

static inline resource_size_t p1_size(struct pci_dev *dev)
{
	return pci_resource_len(dev, 2);
}

static inline resource_size_t p2_base(struct pci_dev *dev)
{
	return pci_resource_start(dev, 0);
}

static inline resource_size_t p2_size(struct pci_dev *dev)
{
	return pci_resource_len(dev, 0);
}
static int find_cxl_vsec(struct pci_dev *dev)
{
	int vsec = 0;
	u16 val;

	while ((vsec = pci_find_next_ext_capability(dev, vsec, PCI_EXT_CAP_ID_VNDR))) {
		pci_read_config_word(dev, vsec + 0x4, &val);
		if (val == CXL_PCI_VSEC_ID)
			return vsec;
	}
	return 0;
}
static void dump_cxl_config_space(struct pci_dev *dev)
{
	int vsec;
	u32 val;

	dev_info(&dev->dev, "dump_cxl_config_space\n");

	pci_read_config_dword(dev, PCI_BASE_ADDRESS_0, &val);
	dev_info(&dev->dev, "BAR0: %#.8x\n", val);
	pci_read_config_dword(dev, PCI_BASE_ADDRESS_1, &val);
	dev_info(&dev->dev, "BAR1: %#.8x\n", val);
	pci_read_config_dword(dev, PCI_BASE_ADDRESS_2, &val);
	dev_info(&dev->dev, "BAR2: %#.8x\n", val);
	pci_read_config_dword(dev, PCI_BASE_ADDRESS_3, &val);
	dev_info(&dev->dev, "BAR3: %#.8x\n", val);
	pci_read_config_dword(dev, PCI_BASE_ADDRESS_4, &val);
	dev_info(&dev->dev, "BAR4: %#.8x\n", val);
	pci_read_config_dword(dev, PCI_BASE_ADDRESS_5, &val);
	dev_info(&dev->dev, "BAR5: %#.8x\n", val);

	dev_info(&dev->dev, "p1 regs: %#llx, len: %#llx\n",
		p1_base(dev), p1_size(dev));
	dev_info(&dev->dev, "p2 regs: %#llx, len: %#llx\n",
		p2_base(dev), p2_size(dev));
	dev_info(&dev->dev, "BAR 4/5: %#llx, len: %#llx\n",
		pci_resource_start(dev, 4), pci_resource_len(dev, 4));

	if (!(vsec = find_cxl_vsec(dev)))
		return;

#define show_reg(name, what) \
	dev_info(&dev->dev, "cxl vsec: %30s: %#x\n", name, what)

	pci_read_config_dword(dev, vsec + 0x0, &val);
	show_reg("Cap ID", (val >> 0) & 0xffff);
	show_reg("Cap Ver", (val >> 16) & 0xf);
	show_reg("Next Cap Ptr", (val >> 20) & 0xfff);
	pci_read_config_dword(dev, vsec + 0x4, &val);
	show_reg("VSEC ID", (val >> 0) & 0xffff);
	show_reg("VSEC Rev", (val >> 16) & 0xf);
	show_reg("VSEC Length", (val >> 20) & 0xfff);
	pci_read_config_dword(dev, vsec + 0x8, &val);
	show_reg("Num AFUs", (val >> 0) & 0xff);
	show_reg("Status", (val >> 8) & 0xff);
	show_reg("Mode Control", (val >> 16) & 0xff);
	show_reg("Reserved", (val >> 24) & 0xff);
	pci_read_config_dword(dev, vsec + 0xc, &val);
	show_reg("PSL Rev", (val >> 0) & 0xffff);
	show_reg("CAIA Ver", (val >> 16) & 0xffff);
	pci_read_config_dword(dev, vsec + 0x10, &val);
	show_reg("Base Image Rev", (val >> 0) & 0xffff);
	show_reg("Reserved", (val >> 16) & 0x0fff);
	show_reg("Image Control", (val >> 28) & 0x3);
	show_reg("Reserved", (val >> 30) & 0x1);
	show_reg("Image Loaded", (val >> 31) & 0x1);

	pci_read_config_dword(dev, vsec + 0x14, &val);
	show_reg("Reserved", val);
	pci_read_config_dword(dev, vsec + 0x18, &val);
	show_reg("Reserved", val);
	pci_read_config_dword(dev, vsec + 0x1c, &val);
	show_reg("Reserved", val);

	pci_read_config_dword(dev, vsec + 0x20, &val);
	show_reg("AFU Descriptor Offset", val);
	pci_read_config_dword(dev, vsec + 0x24, &val);
	show_reg("AFU Descriptor Size", val);
	pci_read_config_dword(dev, vsec + 0x28, &val);
	show_reg("Problem State Offset", val);
	pci_read_config_dword(dev, vsec + 0x2c, &val);
	show_reg("Problem State Size", val);

	pci_read_config_dword(dev, vsec + 0x30, &val);
	show_reg("Reserved", val);
	pci_read_config_dword(dev, vsec + 0x34, &val);
	show_reg("Reserved", val);
	pci_read_config_dword(dev, vsec + 0x38, &val);
	show_reg("Reserved", val);
	pci_read_config_dword(dev, vsec + 0x3c, &val);
	show_reg("Reserved", val);

	pci_read_config_dword(dev, vsec + 0x40, &val);
	show_reg("PSL Programming Port", val);
	pci_read_config_dword(dev, vsec + 0x44, &val);
	show_reg("PSL Programming Control", val);

	pci_read_config_dword(dev, vsec + 0x48, &val);
	show_reg("Reserved", val);
	pci_read_config_dword(dev, vsec + 0x4c, &val);
	show_reg("Reserved", val);

	pci_read_config_dword(dev, vsec + 0x50, &val);
	show_reg("Flash Address Register", val);
	pci_read_config_dword(dev, vsec + 0x54, &val);
	show_reg("Flash Size Register", val);
	pci_read_config_dword(dev, vsec + 0x58, &val);
	show_reg("Flash Status/Control Register", val);
	pci_read_config_dword(dev, vsec + 0x5c, &val);
	show_reg("Flash Data Port", val);

#undef show_reg
}
static void dump_afu_descriptor(struct cxl_afu *afu)
{
	u64 val, afu_cr_num, afu_cr_off, afu_cr_len;
	int i;

#define show_reg(name, what) \
	dev_info(&afu->dev, "afu desc: %30s: %#llx\n", name, what)

	val = AFUD_READ_INFO(afu);
	show_reg("num_ints_per_process", AFUD_NUM_INTS_PER_PROC(val));
	show_reg("num_of_processes", AFUD_NUM_PROCS(val));
	show_reg("num_of_afu_CRs", AFUD_NUM_CRS(val));
	show_reg("req_prog_mode", val & 0xffffULL);
	afu_cr_num = AFUD_NUM_CRS(val);

	val = AFUD_READ(afu, 0x8);
	show_reg("Reserved", val);
	val = AFUD_READ(afu, 0x10);
	show_reg("Reserved", val);
	val = AFUD_READ(afu, 0x18);
	show_reg("Reserved", val);

	val = AFUD_READ_CR(afu);
	show_reg("Reserved", (val >> (63-7)) & 0xff);
	show_reg("AFU_CR_len", AFUD_CR_LEN(val));
	afu_cr_len = AFUD_CR_LEN(val) * 256;

	val = AFUD_READ_CR_OFF(afu);
	afu_cr_off = val;
	show_reg("AFU_CR_offset", val);

	val = AFUD_READ_PPPSA(afu);
	show_reg("PerProcessPSA_control", (val >> (63-7)) & 0xff);
	show_reg("PerProcessPSA Length", AFUD_PPPSA_LEN(val));

	val = AFUD_READ_PPPSA_OFF(afu);
	show_reg("PerProcessPSA_offset", val);

	val = AFUD_READ_EB(afu);
	show_reg("Reserved", (val >> (63-7)) & 0xff);
	show_reg("AFU_EB_len", AFUD_EB_LEN(val));

	val = AFUD_READ_EB_OFF(afu);
	show_reg("AFU_EB_offset", val);

	for (i = 0; i < afu_cr_num; i++) {
		val = AFUD_READ_LE(afu, afu_cr_off + i * afu_cr_len);
		show_reg("CR Vendor", val & 0xffff);
		show_reg("CR Device", (val >> 16) & 0xffff);
	}
#undef show_reg
}
static int init_implementation_adapter_regs(struct cxl *adapter, struct pci_dev *dev)
{
	struct device_node *np;
	const __be32 *prop;
	u64 psl_dsnctl;
	u64 chipid;

	if (!(np = pnv_pci_get_phb_node(dev)))
		return -ENODEV;

	while (np && !(prop = of_get_property(np, "ibm,chip-id", NULL)))
		np = of_get_next_parent(np);
	if (!np)
		return -ENODEV;
	chipid = be32_to_cpup(prop);
	of_node_put(np);

	/* Tell PSL where to route data to */
	psl_dsnctl = 0x02E8900002000000ULL | (chipid << (63-5));
	cxl_p1_write(adapter, CXL_PSL_DSNDCTL, psl_dsnctl);
	cxl_p1_write(adapter, CXL_PSL_RESLCKTO, 0x20000000200ULL);
	/* snoop write mask */
	cxl_p1_write(adapter, CXL_PSL_SNWRALLOC, 0x00000000FFFFFFFFULL);
	cxl_p1_write(adapter, CXL_PSL_FIR_CNTL, 0x0800000000000000ULL);
	/* for debugging with trace arrays */
	cxl_p1_write(adapter, CXL_PSL_TRACE, 0x0000FF7C00000000ULL);

	return 0;
}
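/*
 * Example of the value composed above: for chip-id 1, the (chipid << (63-5))
 * term ORs in 1ull << 58, i.e. the chip id lands in the top six bit
 * positions (PPC bits 0:5) of the CXL_PSL_DSNDCTL routing value.
 */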
#define TBSYNC_CNT(n) (((u64)n & 0x7) << (63-6))
#define _2048_250MHZ_CYCLES 1
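/*
 * TBSYNC_CNT(n) places a 3-bit count in PPC bits 4:6 of PSL_TB_CTLSTAT,
 * e.g. TBSYNC_CNT(2 * _2048_250MHZ_CYCLES) = TBSYNC_CNT(2) = 2ull << 57.
 * Going by the macro names, this presumably requests a timebase sync
 * every 2 * 2048 cycles of the 250MHz clock.
 */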
static int cxl_setup_psl_timebase(struct cxl *adapter, struct pci_dev *dev)
{
	u64 psl_tb;
	int delta;
	unsigned int retry = 0;
	struct device_node *np;

	if (!(np = pnv_pci_get_phb_node(dev)))
		return -ENODEV;

	/* Do not fail when CAPP timebase sync is not supported by OPAL */
	of_node_get(np);
	if (! of_get_property(np, "ibm,capp-timebase-sync", NULL)) {
		of_node_put(np);
		pr_err("PSL: Timebase sync: OPAL support missing\n");
		return 0;
	}
	of_node_put(np);

	/*
	 * Setup PSL Timebase Control and Status register
	 * with the recommended Timebase Sync Count value
	 */
	cxl_p1_write(adapter, CXL_PSL_TB_CTLSTAT,
		     TBSYNC_CNT(2 * _2048_250MHZ_CYCLES));

	/* Enable PSL Timebase */
	cxl_p1_write(adapter, CXL_PSL_Control, 0x0000000000000000);
	cxl_p1_write(adapter, CXL_PSL_Control, CXL_PSL_Control_tb);

	/* Wait until CORE TB and PSL TB difference <= 16usecs */
	do {
		msleep(1);
		if (retry++ > 5) {
			pr_err("PSL: Timebase sync: giving up!\n");
			return -EIO;
		}
		psl_tb = cxl_p1_read(adapter, CXL_PSL_Timebase);
		delta = mftb() - psl_tb;
		if (delta < 0)
			delta = -delta;
	} while (tb_to_ns(delta) > 16000);

	return 0;
}
static int init_implementation_afu_regs(struct cxl_afu *afu)
{
	/* read/write masks for this slice */
	cxl_p1n_write(afu, CXL_PSL_APCALLOC_A, 0xFFFFFFFEFEFEFEFEULL);
	/* APC read/write masks for this slice */
	cxl_p1n_write(afu, CXL_PSL_COALLOC_A, 0xFF000000FEFEFEFEULL);
	/* for debugging with trace arrays */
	cxl_p1n_write(afu, CXL_PSL_SLICE_TRACE, 0x0000FFFF00000000ULL);
	cxl_p1n_write(afu, CXL_PSL_RXCTL_A, CXL_PSL_RXCTL_AFUHP_4S);

	return 0;
}
int cxl_setup_irq(struct cxl *adapter, unsigned int hwirq,
		  unsigned int virq)
{
	struct pci_dev *dev = to_pci_dev(adapter->dev.parent);

	return pnv_cxl_ioda_msi_setup(dev, hwirq, virq);
}
int cxl_update_image_control(struct cxl *adapter)
{
	struct pci_dev *dev = to_pci_dev(adapter->dev.parent);
	int rc;
	int vsec;
	u8 image_state;

	if (!(vsec = find_cxl_vsec(dev))) {
		dev_err(&dev->dev, "ABORTING: CXL VSEC not found!\n");
		return -ENODEV;
	}

	if ((rc = CXL_READ_VSEC_IMAGE_STATE(dev, vsec, &image_state))) {
		dev_err(&dev->dev, "failed to read image state: %i\n", rc);
		return rc;
	}

	if (adapter->perst_loads_image)
		image_state |= CXL_VSEC_PERST_LOADS_IMAGE;
	else
		image_state &= ~CXL_VSEC_PERST_LOADS_IMAGE;

	if (adapter->perst_select_user)
		image_state |= CXL_VSEC_PERST_SELECT_USER;
	else
		image_state &= ~CXL_VSEC_PERST_SELECT_USER;

	if ((rc = CXL_WRITE_VSEC_IMAGE_STATE(dev, vsec, image_state))) {
		dev_err(&dev->dev, "failed to update image control: %i\n", rc);
		return rc;
	}

	return 0;
}
int cxl_alloc_one_irq(struct cxl *adapter)
{
	struct pci_dev *dev = to_pci_dev(adapter->dev.parent);

	return pnv_cxl_alloc_hwirqs(dev, 1);
}
void cxl_release_one_irq(struct cxl *adapter, int hwirq)
{
	struct pci_dev *dev = to_pci_dev(adapter->dev.parent);

	pnv_cxl_release_hwirqs(dev, hwirq, 1);
}
int cxl_alloc_irq_ranges(struct cxl_irq_ranges *irqs, struct cxl *adapter,
			 unsigned int num)
{
	struct pci_dev *dev = to_pci_dev(adapter->dev.parent);

	return pnv_cxl_alloc_hwirq_ranges(irqs, dev, num);
}
void cxl_release_irq_ranges(struct cxl_irq_ranges *irqs, struct cxl *adapter)
{
	struct pci_dev *dev = to_pci_dev(adapter->dev.parent);

	pnv_cxl_release_hwirq_ranges(irqs, dev);
}
static int setup_cxl_bars(struct pci_dev *dev)
{
	/* Safety check in case we get backported to < 3.17 without M64 */
	if ((p1_base(dev) < 0x100000000ULL) ||
	    (p2_base(dev) < 0x100000000ULL)) {
		dev_err(&dev->dev, "ABORTING: M32 BAR assignment incompatible with CXL\n");
		return -ENODEV;
	}

	/*
	 * BAR 4/5 has a special meaning for CXL and must be programmed with a
	 * special value corresponding to the CXL protocol address range.
	 * For POWER 8 that means bits 48:49 must be set to 10
	 */
	pci_write_config_dword(dev, PCI_BASE_ADDRESS_4, 0x00000000);
	pci_write_config_dword(dev, PCI_BASE_ADDRESS_5, 0x00020000);

	return 0;
}
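/*
 * Worked example: BAR4 is the low dword and BAR5 the high dword of a single
 * 64-bit BAR, so the two writes above yield the 64-bit value
 * 0x0002000000000000 (bit 49 set, bit 48 clear), i.e. the "10" pattern the
 * comment above requires in bits 48:49.
 */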
/* pciex node: ibm,opal-m64-window = <0x3d058 0x0 0x3d058 0x0 0x8 0x0>; */
static int switch_card_to_cxl(struct pci_dev *dev)
{
	int vsec;
	u8 val;
	int rc;

	dev_info(&dev->dev, "switch card to CXL\n");

	if (!(vsec = find_cxl_vsec(dev))) {
		dev_err(&dev->dev, "ABORTING: CXL VSEC not found!\n");
		return -ENODEV;
	}

	if ((rc = CXL_READ_VSEC_MODE_CONTROL(dev, vsec, &val))) {
		dev_err(&dev->dev, "failed to read current mode control: %i", rc);
		return rc;
	}
	val &= ~CXL_VSEC_PROTOCOL_MASK;
	val |= CXL_VSEC_PROTOCOL_256TB | CXL_VSEC_PROTOCOL_ENABLE;
	if ((rc = CXL_WRITE_VSEC_MODE_CONTROL(dev, vsec, val))) {
		dev_err(&dev->dev, "failed to enable CXL protocol: %i", rc);
		return rc;
	}

	/*
	 * The CAIA spec (v0.12 11.6 Bi-modal Device Support) states
	 * we must wait 100ms after this mode switch before touching
	 * PCIe config space.
	 */
	msleep(100);

	return 0;
}
static int cxl_map_slice_regs(struct cxl_afu *afu, struct cxl *adapter,
			      struct pci_dev *dev)
{
	u64 p1n_base, p2n_base, afu_desc;
	const u64 p1n_size = 0x100;
	const u64 p2n_size = 0x1000;

	p1n_base = p1_base(dev) + 0x10000 + (afu->slice * p1n_size);
	p2n_base = p2_base(dev) + (afu->slice * p2n_size);
	afu->psn_phys = p2_base(dev) + (adapter->ps_off + (afu->slice * adapter->ps_size));
	afu_desc = p2_base(dev) + adapter->afu_desc_off + (afu->slice * adapter->afu_desc_size);

	if (!(afu->p1n_mmio = ioremap(p1n_base, p1n_size)))
		goto err;
	if (!(afu->p2n_mmio = ioremap(p2n_base, p2n_size)))
		goto err1;
	if (afu_desc) {
		if (!(afu->afu_desc_mmio = ioremap(afu_desc, adapter->afu_desc_size)))
			goto err2;
	}

	return 0;
err2:
	iounmap(afu->p2n_mmio);
err1:
	iounmap(afu->p1n_mmio);
err:
	dev_err(&afu->dev, "Error mapping AFU MMIO regions\n");
	return -ENOMEM;
}
static void cxl_unmap_slice_regs(struct cxl_afu *afu)
{
	if (afu->p2n_mmio) {
		iounmap(afu->p2n_mmio);
		afu->p2n_mmio = NULL;
	}
	if (afu->p1n_mmio) {
		iounmap(afu->p1n_mmio);
		afu->p1n_mmio = NULL;
	}
	if (afu->afu_desc_mmio) {
		iounmap(afu->afu_desc_mmio);
		afu->afu_desc_mmio = NULL;
	}
}
static void cxl_release_afu(struct device *dev)
{
	struct cxl_afu *afu = to_cxl_afu(dev);

	pr_devel("cxl_release_afu\n");

	idr_destroy(&afu->contexts_idr);
	cxl_release_spa(afu);

	kfree(afu);
}
static struct cxl_afu *cxl_alloc_afu(struct cxl *adapter, int slice)
{
	struct cxl_afu *afu;

	if (!(afu = kzalloc(sizeof(struct cxl_afu), GFP_KERNEL)))
		return NULL;

	afu->adapter = adapter;
	afu->dev.parent = &adapter->dev;
	afu->dev.release = cxl_release_afu;
	afu->slice = slice;
	idr_init(&afu->contexts_idr);
	mutex_init(&afu->contexts_lock);
	spin_lock_init(&afu->afu_cntl_lock);
	mutex_init(&afu->spa_mutex);

	afu->prefault_mode = CXL_PREFAULT_NONE;
	afu->irqs_max = afu->adapter->user_irqs;

	return afu;
}
/* Expects AFU struct to have recently been zeroed out */
static int cxl_read_afu_descriptor(struct cxl_afu *afu)
{
	u64 val;

	val = AFUD_READ_INFO(afu);
	afu->pp_irqs = AFUD_NUM_INTS_PER_PROC(val);
	afu->max_procs_virtualised = AFUD_NUM_PROCS(val);
	afu->crs_num = AFUD_NUM_CRS(val);

	if (AFUD_AFU_DIRECTED(val))
		afu->modes_supported |= CXL_MODE_DIRECTED;
	if (AFUD_DEDICATED_PROCESS(val))
		afu->modes_supported |= CXL_MODE_DEDICATED;
	if (AFUD_TIME_SLICED(val))
		afu->modes_supported |= CXL_MODE_TIME_SLICED;

	val = AFUD_READ_PPPSA(afu);
	afu->pp_size = AFUD_PPPSA_LEN(val) * 4096;
	afu->psa = AFUD_PPPSA_PSA(val);
	if ((afu->pp_psa = AFUD_PPPSA_PP(val)))
		afu->pp_offset = AFUD_READ_PPPSA_OFF(afu);

	val = AFUD_READ_CR(afu);
	afu->crs_len = AFUD_CR_LEN(val) * 256;
	afu->crs_offset = AFUD_READ_CR_OFF(afu);

	/* eb_len is in multiple of 4K */
	afu->eb_len = AFUD_EB_LEN(AFUD_READ_EB(afu)) * 4096;
	afu->eb_offset = AFUD_READ_EB_OFF(afu);

	/* eb_off is 4K aligned so lower 12 bits are always zero */
	if (EXTRACT_PPC_BITS(afu->eb_offset, 0, 11) != 0) {
		dev_warn(&afu->dev,
			 "Invalid AFU error buffer offset %Lx\n",
			 afu->eb_offset);
		dev_info(&afu->dev,
			 "Ignoring AFU error buffer in the descriptor\n");
		/* indicate that no afu buffer exists */
		afu->eb_len = 0;
	}

	return 0;
}
static int cxl_afu_descriptor_looks_ok(struct cxl_afu *afu)
{
	int i;

	if (afu->psa && afu->adapter->ps_size <
			(afu->pp_offset + afu->pp_size*afu->max_procs_virtualised)) {
		dev_err(&afu->dev, "per-process PSA can't fit inside the PSA!\n");
		return -ENODEV;
	}

	if (afu->pp_psa && (afu->pp_size < PAGE_SIZE))
		dev_warn(&afu->dev, "AFU uses < PAGE_SIZE per-process PSA!");

	for (i = 0; i < afu->crs_num; i++) {
		if ((cxl_afu_cr_read32(afu, i, 0) == 0)) {
			dev_err(&afu->dev, "ABORTING: AFU configuration record %i is invalid\n", i);
			return -EINVAL;
		}
	}

	return 0;
}
static int sanitise_afu_regs(struct cxl_afu *afu)
{
	u64 reg;

	/*
	 * Clear out any regs that contain either an IVTE or address or may be
	 * waiting on an acknowledgement to try to be a bit safer as we bring
	 * it online
	 */
	reg = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
	if ((reg & CXL_AFU_Cntl_An_ES_MASK) != CXL_AFU_Cntl_An_ES_Disabled) {
		dev_warn(&afu->dev, "WARNING: AFU was not disabled: %#016llx\n", reg);
		if (__cxl_afu_reset(afu))
			return -EIO;
		if (cxl_afu_disable(afu))
			return -EIO;
		if (cxl_psl_purge(afu))
			return -EIO;
	}

	cxl_p1n_write(afu, CXL_PSL_SPAP_An, 0x0000000000000000);
	cxl_p1n_write(afu, CXL_PSL_IVTE_Limit_An, 0x0000000000000000);
	cxl_p1n_write(afu, CXL_PSL_IVTE_Offset_An, 0x0000000000000000);
	cxl_p1n_write(afu, CXL_PSL_AMBAR_An, 0x0000000000000000);
	cxl_p1n_write(afu, CXL_PSL_SPOffset_An, 0x0000000000000000);
	cxl_p1n_write(afu, CXL_HAURP_An, 0x0000000000000000);
	cxl_p2n_write(afu, CXL_CSRP_An, 0x0000000000000000);
	cxl_p2n_write(afu, CXL_AURP1_An, 0x0000000000000000);
	cxl_p2n_write(afu, CXL_AURP0_An, 0x0000000000000000);
	cxl_p2n_write(afu, CXL_SSTP1_An, 0x0000000000000000);
	cxl_p2n_write(afu, CXL_SSTP0_An, 0x0000000000000000);

	reg = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
	if (reg) {
		dev_warn(&afu->dev, "AFU had pending DSISR: %#016llx\n", reg);
		if (reg & CXL_PSL_DSISR_TRANS)
			cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_AE);
		else
			cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_A);
	}
	reg = cxl_p1n_read(afu, CXL_PSL_SERR_An);
	if (reg) {
		if (reg & ~0xffff)
			dev_warn(&afu->dev, "AFU had pending SERR: %#016llx\n", reg);
		cxl_p1n_write(afu, CXL_PSL_SERR_An, reg & ~0xffff);
	}
	reg = cxl_p2n_read(afu, CXL_PSL_ErrStat_An);
	if (reg) {
		dev_warn(&afu->dev, "AFU had pending error status: %#016llx\n", reg);
		cxl_p2n_write(afu, CXL_PSL_ErrStat_An, reg);
	}

	return 0;
}
#define ERR_BUFF_MAX_COPY_SIZE PAGE_SIZE

/*
 * Called from sysfs and reads the afu error info buffer. The h/w only
 * supports 4/8 byte aligned access. So if the requested offset/count aren't
 * 8-byte aligned, the function uses a bounce buffer, which is at most
 * PAGE_SIZE.
 */
ssize_t cxl_afu_read_err_buffer(struct cxl_afu *afu, char *buf,
				loff_t off, size_t count)
{
	loff_t aligned_start, aligned_end;
	size_t aligned_length;
	void *tbuf;
	const void __iomem *ebuf = afu->afu_desc_mmio + afu->eb_offset;

	if (count == 0 || off < 0 || (size_t)off >= afu->eb_len)
		return 0;

	/* calculate aligned read window */
	count = min((size_t)(afu->eb_len - off), count);
	aligned_start = round_down(off, 8);
	aligned_end = round_up(off + count, 8);
	aligned_length = aligned_end - aligned_start;

	/* max we can copy in one read is PAGE_SIZE */
	if (aligned_length > ERR_BUFF_MAX_COPY_SIZE) {
		aligned_length = ERR_BUFF_MAX_COPY_SIZE;
		count = ERR_BUFF_MAX_COPY_SIZE - (off & 0x7);
	}

	/* use bounce buffer for copy */
	tbuf = (void *)__get_free_page(GFP_TEMPORARY);
	if (!tbuf)
		return -ENOMEM;

	/* perform aligned read from the mmio region */
	memcpy_fromio(tbuf, ebuf + aligned_start, aligned_length);
	memcpy(buf, tbuf + (off & 0x7), count);

	free_page((unsigned long)tbuf);

	return count;
}
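/*
 * Worked example: off = 3, count = 10 gives aligned_start = 0,
 * aligned_end = 16 and aligned_length = 16, so 16 bytes are read from the
 * MMIO buffer into tbuf and the caller's 10 bytes are copied from tbuf + 3.
 */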
static int cxl_configure_afu(struct cxl_afu *afu, struct cxl *adapter,
			     struct pci_dev *dev)
{
	int rc;

	if ((rc = cxl_map_slice_regs(afu, adapter, dev)))
		return rc;

	if ((rc = sanitise_afu_regs(afu)))
		goto err1;

	/* We need to reset the AFU before we can read the AFU descriptor */
	if ((rc = __cxl_afu_reset(afu)))
		goto err1;

	if (cxl_verbose)
		dump_afu_descriptor(afu);

	if ((rc = cxl_read_afu_descriptor(afu)))
		goto err1;

	if ((rc = cxl_afu_descriptor_looks_ok(afu)))
		goto err1;

	if ((rc = init_implementation_afu_regs(afu)))
		goto err1;

	if ((rc = cxl_register_serr_irq(afu)))
		goto err1;

	if ((rc = cxl_register_psl_irq(afu)))
		goto err2;

	return 0;

err2:
	cxl_release_serr_irq(afu);
err1:
	cxl_unmap_slice_regs(afu);
	return rc;
}
static void cxl_deconfigure_afu(struct cxl_afu *afu)
{
	cxl_release_psl_irq(afu);
	cxl_release_serr_irq(afu);
	cxl_unmap_slice_regs(afu);
}
static int cxl_init_afu(struct cxl *adapter, int slice, struct pci_dev *dev)
{
	struct cxl_afu *afu;
	int rc;

	afu = cxl_alloc_afu(adapter, slice);
	if (!afu)
		return -ENOMEM;

	rc = dev_set_name(&afu->dev, "afu%i.%i", adapter->adapter_num, slice);
	if (rc)
		goto err_free;

	rc = cxl_configure_afu(afu, adapter, dev);
	if (rc)
		goto err_free;

	/* Don't care if this fails */
	cxl_debugfs_afu_add(afu);

	/*
	 * After we call this function we must not free the afu directly, even
	 * if it returns an error!
	 */
	if ((rc = cxl_register_afu(afu)))
		goto err_put1;

	if ((rc = cxl_sysfs_afu_add(afu)))
		goto err_put1;

	adapter->afu[afu->slice] = afu;

	if ((rc = cxl_pci_vphb_add(afu)))
		dev_info(&afu->dev, "Can't register vPHB\n");

	return 0;

err_put1:
	cxl_deconfigure_afu(afu);
	cxl_debugfs_afu_remove(afu);
	device_unregister(&afu->dev);
	return rc;

err_free:
	kfree(afu);
	return rc;
}
static void cxl_remove_afu(struct cxl_afu *afu)
{
	pr_devel("cxl_remove_afu\n");

	if (!afu)
		return;

	cxl_sysfs_afu_remove(afu);
	cxl_debugfs_afu_remove(afu);

	spin_lock(&afu->adapter->afu_list_lock);
	afu->adapter->afu[afu->slice] = NULL;
	spin_unlock(&afu->adapter->afu_list_lock);

	cxl_context_detach_all(afu);
	cxl_afu_deactivate_mode(afu);

	cxl_deconfigure_afu(afu);
	device_unregister(&afu->dev);
}
int cxl_reset(struct cxl *adapter)
{
	struct pci_dev *dev = to_pci_dev(adapter->dev.parent);
	int rc;

	if (adapter->perst_same_image) {
		dev_warn(&dev->dev,
			 "cxl: refusing to reset/reflash when perst_reloads_same_image is set.\n");
		return -EINVAL;
	}

	dev_info(&dev->dev, "CXL reset\n");

	/* pcie_warm_reset requests a fundamental pci reset which includes a
	 * PERST assert/deassert. PERST triggers a loading of the image
	 * if "user" or "factory" is selected in sysfs */
	if ((rc = pci_set_pcie_reset_state(dev, pcie_warm_reset))) {
		dev_err(&dev->dev, "cxl: pcie_warm_reset failed\n");
		return rc;
	}

	return rc;
}
static int cxl_map_adapter_regs(struct cxl *adapter, struct pci_dev *dev)
{
	if (pci_request_region(dev, 2, "priv 2 regs"))
		goto err1;
	if (pci_request_region(dev, 0, "priv 1 regs"))
		goto err2;

	pr_devel("cxl_map_adapter_regs: p1: %#016llx %#llx, p2: %#016llx %#llx",
			p1_base(dev), p1_size(dev), p2_base(dev), p2_size(dev));

	if (!(adapter->p1_mmio = ioremap(p1_base(dev), p1_size(dev))))
		goto err3;

	if (!(adapter->p2_mmio = ioremap(p2_base(dev), p2_size(dev))))
		goto err4;

	return 0;

err4:
	iounmap(adapter->p1_mmio);
	adapter->p1_mmio = NULL;
err3:
	pci_release_region(dev, 0);
err2:
	pci_release_region(dev, 2);
err1:
	return -ENOMEM;
}
static void cxl_unmap_adapter_regs(struct cxl *adapter)
{
	if (adapter->p1_mmio) {
		iounmap(adapter->p1_mmio);
		adapter->p1_mmio = NULL;
		pci_release_region(to_pci_dev(adapter->dev.parent), 2);
	}
	if (adapter->p2_mmio) {
		iounmap(adapter->p2_mmio);
		adapter->p2_mmio = NULL;
		pci_release_region(to_pci_dev(adapter->dev.parent), 0);
	}
}
static int cxl_read_vsec(struct cxl *adapter, struct pci_dev *dev)
{
	u16 vseclen;
	int vsec;
	u32 afu_desc_off, afu_desc_size;
	u32 ps_off, ps_size;
	u8 image_state;

	if (!(vsec = find_cxl_vsec(dev))) {
		dev_err(&dev->dev, "ABORTING: CXL VSEC not found!\n");
		return -ENODEV;
	}

	CXL_READ_VSEC_LENGTH(dev, vsec, &vseclen);
	if (vseclen < CXL_VSEC_MIN_SIZE) {
		dev_err(&dev->dev, "ABORTING: CXL VSEC too short\n");
		return -EINVAL;
	}

	CXL_READ_VSEC_STATUS(dev, vsec, &adapter->vsec_status);
	CXL_READ_VSEC_PSL_REVISION(dev, vsec, &adapter->psl_rev);
	CXL_READ_VSEC_CAIA_MAJOR(dev, vsec, &adapter->caia_major);
	CXL_READ_VSEC_CAIA_MINOR(dev, vsec, &adapter->caia_minor);
	CXL_READ_VSEC_BASE_IMAGE(dev, vsec, &adapter->base_image);
	CXL_READ_VSEC_IMAGE_STATE(dev, vsec, &image_state);
	adapter->user_image_loaded = !!(image_state & CXL_VSEC_USER_IMAGE_LOADED);
	adapter->perst_select_user = !!(image_state & CXL_VSEC_USER_IMAGE_LOADED);

	CXL_READ_VSEC_NAFUS(dev, vsec, &adapter->slices);
	CXL_READ_VSEC_AFU_DESC_OFF(dev, vsec, &afu_desc_off);
	CXL_READ_VSEC_AFU_DESC_SIZE(dev, vsec, &afu_desc_size);
	CXL_READ_VSEC_PS_OFF(dev, vsec, &ps_off);
	CXL_READ_VSEC_PS_SIZE(dev, vsec, &ps_size);

	/* Convert everything to bytes, because there is NO WAY I'd look at the
	 * code a month later and forget what units these are in ;-) */
	adapter->ps_off = ps_off * 64 * 1024;
	adapter->ps_size = ps_size * 64 * 1024;
	adapter->afu_desc_off = afu_desc_off * 64 * 1024;
	adapter->afu_desc_size = afu_desc_size * 64 * 1024;

	/* Total IRQs - 1 PSL ERROR - #AFU*(1 slice error + 1 DSI) */
	adapter->user_irqs = pnv_cxl_get_irq_count(dev) - 1 - 2*adapter->slices;

	return 0;
}
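/*
 * Example of the unit conversions above: the VSEC reports offsets and sizes
 * in 64kB units, so a raw ps_size of 0x10 becomes 0x10 * 64 * 1024 = 1MB.
 * Likewise for the IRQ budget: a card exposing 16 hardware IRQs with 2
 * slices leaves 16 - 1 - 2*2 = 11 IRQs for userspace contexts.
 */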
/*
 * Workaround a PCIe Host Bridge defect on some cards, that can cause
 * malformed Transaction Layer Packet (TLP) errors to be erroneously
 * reported. Mask this error in the Uncorrectable Error Mask Register.
 *
 * The upper nibble of the PSL revision is used to distinguish between
 * different cards. The affected ones have it set to 0.
 */
static void cxl_fixup_malformed_tlp(struct cxl *adapter, struct pci_dev *dev)
{
	int aer;
	u32 data;

	if (adapter->psl_rev & 0xf000)
		return;
	if (!(aer = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR)))
		return;
	pci_read_config_dword(dev, aer + PCI_ERR_UNCOR_MASK, &data);
	if (data & PCI_ERR_UNC_MALF_TLP)
		if (data & PCI_ERR_UNC_INTN)
			return;
	data |= PCI_ERR_UNC_MALF_TLP;
	data |= PCI_ERR_UNC_INTN;
	pci_write_config_dword(dev, aer + PCI_ERR_UNCOR_MASK, data);
}
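/*
 * Example: a PSL revision of 0x2104 has a non-zero upper nibble, so the
 * workaround above returns early; a revision such as 0x0104 identifies an
 * affected card and both mask bits get set.
 */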
static int cxl_vsec_looks_ok(struct cxl *adapter, struct pci_dev *dev)
{
	if (adapter->vsec_status & CXL_STATUS_SECOND_PORT)
		return -EBUSY;

	if (adapter->vsec_status & CXL_UNSUPPORTED_FEATURES) {
		dev_err(&dev->dev, "ABORTING: CXL requires unsupported features\n");
		return -EINVAL;
	}

	if (!adapter->slices) {
		/* Once we support dynamic reprogramming we can use the card if
		 * it supports loadable AFUs */
		dev_err(&dev->dev, "ABORTING: Device has no AFUs\n");
		return -EINVAL;
	}

	if (!adapter->afu_desc_off || !adapter->afu_desc_size) {
		dev_err(&dev->dev, "ABORTING: VSEC shows no AFU descriptors\n");
		return -EINVAL;
	}

	if (adapter->ps_size > p2_size(dev) - adapter->ps_off) {
		dev_err(&dev->dev, "ABORTING: Problem state size larger than "
				   "available in BAR2: 0x%llx > 0x%llx\n",
			 adapter->ps_size, p2_size(dev) - adapter->ps_off);
		return -EINVAL;
	}

	return 0;
}
static void cxl_release_adapter(struct device *dev)
{
	struct cxl *adapter = to_cxl_adapter(dev);

	pr_devel("cxl_release_adapter\n");

	cxl_remove_adapter_nr(adapter);

	kfree(adapter);
}
static struct cxl *cxl_alloc_adapter(void)
{
	struct cxl *adapter;

	if (!(adapter = kzalloc(sizeof(struct cxl), GFP_KERNEL)))
		return NULL;

	spin_lock_init(&adapter->afu_list_lock);

	if (cxl_alloc_adapter_nr(adapter))
		goto err1;

	if (dev_set_name(&adapter->dev, "card%i", adapter->adapter_num))
		goto err2;

	return adapter;

err2:
	cxl_remove_adapter_nr(adapter);
err1:
	kfree(adapter);
	return NULL;
}
#define CXL_PSL_ErrIVTE_tberror (0x1ull << (63-31))

static int sanitise_adapter_regs(struct cxl *adapter)
{
	/* Clear PSL tberror bit by writing 1 to it */
	cxl_p1_write(adapter, CXL_PSL_ErrIVTE, CXL_PSL_ErrIVTE_tberror);
	return cxl_tlb_slb_invalidate(adapter);
}
/* This should contain *only* operations that can safely be done in
 * both creation and recovery.
 */
static int cxl_configure_adapter(struct cxl *adapter, struct pci_dev *dev)
{
	int rc;

	adapter->dev.parent = &dev->dev;
	adapter->dev.release = cxl_release_adapter;
	pci_set_drvdata(dev, adapter);

	rc = pci_enable_device(dev);
	if (rc) {
		dev_err(&dev->dev, "pci_enable_device failed: %i\n", rc);
		return rc;
	}

	if ((rc = cxl_read_vsec(adapter, dev)))
		return rc;

	if ((rc = cxl_vsec_looks_ok(adapter, dev)))
		return rc;

	cxl_fixup_malformed_tlp(adapter, dev);

	if ((rc = setup_cxl_bars(dev)))
		return rc;

	if ((rc = switch_card_to_cxl(dev)))
		return rc;

	if ((rc = cxl_update_image_control(adapter)))
		return rc;

	if ((rc = cxl_map_adapter_regs(adapter, dev)))
		return rc;

	if ((rc = sanitise_adapter_regs(adapter)))
		goto err;

	if ((rc = init_implementation_adapter_regs(adapter, dev)))
		goto err;

	if ((rc = pnv_phb_to_cxl_mode(dev, OPAL_PHB_CAPI_MODE_CAPI)))
		goto err;

	/* If recovery happened, the last step is to turn on snooping.
	 * In the non-recovery case this has no effect */
	if ((rc = pnv_phb_to_cxl_mode(dev, OPAL_PHB_CAPI_MODE_SNOOP_ON)))
		goto err;

	if ((rc = cxl_setup_psl_timebase(adapter, dev)))
		goto err;

	if ((rc = cxl_register_psl_err_irq(adapter)))
		goto err;

	return 0;

err:
	cxl_unmap_adapter_regs(adapter);
	return rc;
}
static void cxl_deconfigure_adapter(struct cxl *adapter)
{
	struct pci_dev *pdev = to_pci_dev(adapter->dev.parent);

	cxl_release_psl_err_irq(adapter);
	cxl_unmap_adapter_regs(adapter);

	pci_disable_device(pdev);
}
static struct cxl *cxl_init_adapter(struct pci_dev *dev)
{
	struct cxl *adapter;
	int rc;

	adapter = cxl_alloc_adapter();
	if (!adapter)
		return ERR_PTR(-ENOMEM);

	/* Set defaults for parameters which need to persist over
	 * configure/reconfigure
	 */
	adapter->perst_loads_image = true;
	adapter->perst_same_image = false;

	rc = cxl_configure_adapter(adapter, dev);
	if (rc) {
		pci_disable_device(dev);
		cxl_release_adapter(&adapter->dev);
		return ERR_PTR(rc);
	}

	/* Don't care if this one fails: */
	cxl_debugfs_adapter_add(adapter);

	/*
	 * After we call this function we must not free the adapter directly,
	 * even if it returns an error!
	 */
	if ((rc = cxl_register_adapter(adapter)))
		goto err_put1;

	if ((rc = cxl_sysfs_adapter_add(adapter)))
		goto err_put1;

	return adapter;

err_put1:
	/* This should mirror cxl_remove_adapter, except without the
	 * sysfs parts
	 */
	cxl_debugfs_adapter_remove(adapter);
	cxl_deconfigure_adapter(adapter);
	device_unregister(&adapter->dev);
	return ERR_PTR(rc);
}
static void cxl_remove_adapter(struct cxl *adapter)
{
	pr_devel("cxl_remove_adapter\n");

	cxl_sysfs_adapter_remove(adapter);
	cxl_debugfs_adapter_remove(adapter);

	cxl_deconfigure_adapter(adapter);

	device_unregister(&adapter->dev);
}
static int cxl_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
	struct cxl *adapter;
	int slice;
	int rc;

	if (cxl_verbose)
		dump_cxl_config_space(dev);

	adapter = cxl_init_adapter(dev);
	if (IS_ERR(adapter)) {
		dev_err(&dev->dev, "cxl_init_adapter failed: %li\n", PTR_ERR(adapter));
		return PTR_ERR(adapter);
	}

	for (slice = 0; slice < adapter->slices; slice++) {
		if ((rc = cxl_init_afu(adapter, slice, dev))) {
			dev_err(&dev->dev, "AFU %i failed to initialise: %i\n", slice, rc);
			continue;
		}

		rc = cxl_afu_select_best_mode(adapter->afu[slice]);
		if (rc)
			dev_err(&dev->dev, "AFU %i failed to start: %i\n", slice, rc);
	}

	return 0;
}
static void cxl_remove(struct pci_dev *dev)
{
	struct cxl *adapter = pci_get_drvdata(dev);
	struct cxl_afu *afu;
	int i;

	/*
	 * Lock to prevent someone grabbing a ref through the adapter list as
	 * we are removing it
	 */
	for (i = 0; i < adapter->slices; i++) {
		afu = adapter->afu[i];
		cxl_pci_vphb_remove(afu);
		cxl_remove_afu(afu);
	}
	cxl_remove_adapter(adapter);
}
static pci_ers_result_t cxl_vphb_error_detected(struct cxl_afu *afu,
						pci_channel_state_t state)
{
	struct pci_dev *afu_dev;
	pci_ers_result_t result = PCI_ERS_RESULT_NEED_RESET;
	pci_ers_result_t afu_result = PCI_ERS_RESULT_NEED_RESET;

	/* There should only be one entry, but go through the list
	 * anyway
	 */
	list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) {
		if (!afu_dev->driver)
			continue;

		afu_dev->error_state = state;

		if (afu_dev->driver->err_handler)
			afu_result = afu_dev->driver->err_handler->error_detected(afu_dev,
										  state);
		/* Disconnect trumps all, NONE trumps NEED_RESET */
		if (afu_result == PCI_ERS_RESULT_DISCONNECT)
			result = PCI_ERS_RESULT_DISCONNECT;
		else if ((afu_result == PCI_ERS_RESULT_NONE) &&
			 (result == PCI_ERS_RESULT_NEED_RESET))
			result = PCI_ERS_RESULT_NONE;
	}
	return result;
}
static pci_ers_result_t cxl_pci_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	struct cxl *adapter = pci_get_drvdata(pdev);
	struct cxl_afu *afu;
	pci_ers_result_t result = PCI_ERS_RESULT_NEED_RESET;
	int i;

	/* At this point, we could still have an interrupt pending.
	 * Let's try to get them out of the way before they do
	 * anything we don't like.
	 */
	schedule();

	/* If we're permanently dead, give up. */
	if (state == pci_channel_io_perm_failure) {
		/* Tell the AFU drivers; but we don't care what they
		 * say, we're going away.
		 */
		for (i = 0; i < adapter->slices; i++) {
			afu = adapter->afu[i];
			cxl_vphb_error_detected(afu, state);
		}
		return PCI_ERS_RESULT_DISCONNECT;
	}

	/* Are we reflashing?
	 *
	 * If we reflash, we could come back as something entirely
	 * different, including a non-CAPI card. As such, by default
	 * we don't participate in the process. We'll be unbound and
	 * the slot re-probed. (TODO: check EEH doesn't blindly rebind
	 * us!)
	 *
	 * However, this isn't the entire story: for reliability
	 * reasons, we usually want to reflash the FPGA on PERST in
	 * order to get back to a more reliable known-good state.
	 *
	 * This causes us a bit of a problem: if we reflash we can't
	 * trust that we'll come back the same - we could have a new
	 * image and been PERSTed in order to load that
	 * image. However, most of the time we actually *will* come
	 * back the same - for example a regular EEH event.
	 *
	 * Therefore, we allow the user to assert that the image is
	 * indeed the same and that we should continue on into EEH
	 * anyway.
	 */
	if (adapter->perst_loads_image && !adapter->perst_same_image) {
		/* TODO take the PHB out of CXL mode */
		dev_info(&pdev->dev, "reflashing, so opting out of EEH!\n");
		return PCI_ERS_RESULT_NONE;
	}

	/*
	 * At this point, we want to try to recover. We'll always
	 * need a complete slot reset: we don't trust any other reset.
	 *
	 * Now, we go through each AFU:
	 *  - We send the driver, if bound, an error_detected callback.
	 *    We expect it to clean up, but it can also tell us to give
	 *    up and permanently detach the card. To simplify things, if
	 *    any bound AFU driver doesn't support EEH, we give up on EEH.
	 *
	 *  - We detach all contexts associated with the AFU. This
	 *    does not free them, but puts them into a CLOSED state
	 *    which causes the associated files to return useful
	 *    errors to userland. It also unmaps, but does not free,
	 *    any IRQs.
	 *
	 *  - We clean up our side: releasing and unmapping resources we hold
	 *    so we can wire them up again when the hardware comes back up.
	 *
	 * Driver authors should note:
	 *
	 *  - Any contexts you create in your kernel driver (except
	 *    those associated with anonymous file descriptors) are
	 *    your responsibility to free and recreate. Likewise with
	 *    any attached resources.
	 *
	 *  - We will take responsibility for re-initialising the
	 *    device context (the one set up for you in
	 *    cxl_pci_enable_device_hook and accessed through
	 *    cxl_get_context). If you've attached IRQs or other
	 *    resources to it, they remain yours to free.
	 *
	 * You can call the same functions to release resources as you
	 * normally would: we make sure that these functions continue
	 * to work when the hardware is down.
	 *
	 * Two examples:
	 *
	 * 1) If you normally free all your resources at the end of
	 *    each request, or if you use anonymous FDs, your
	 *    error_detected callback can simply set a flag to tell
	 *    your driver not to start any new calls. You can then
	 *    clear the flag in the resume callback.
	 *
	 * 2) If you normally allocate your resources on startup:
	 *    * Set a flag in error_detected as above.
	 *    * Let CXL detach your contexts.
	 *    * In slot_reset, free the old resources and allocate new ones.
	 *    * In resume, clear the flag to allow things to start.
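	 *
	 * A minimal sketch of pattern (1), assuming a hypothetical AFU driver
	 * that checks a driver-private "dead" flag before starting any new
	 * request (all names here are illustrative, not a real in-tree
	 * driver):
	 *
	 *	static pci_ers_result_t my_error_detected(struct pci_dev *dev,
	 *						  pci_channel_state_t state)
	 *	{
	 *		to_my_priv(dev)->dead = true;
	 *		return PCI_ERS_RESULT_NEED_RESET;
	 *	}
	 *
	 *	static void my_resume(struct pci_dev *dev)
	 *	{
	 *		to_my_priv(dev)->dead = false;
	 *	}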
	 */
	for (i = 0; i < adapter->slices; i++) {
		afu = adapter->afu[i];

		result = cxl_vphb_error_detected(afu, state);

		/* Only continue if everyone agrees on NEED_RESET */
		if (result != PCI_ERS_RESULT_NEED_RESET)
			return result;

		cxl_context_detach_all(afu);
		cxl_afu_deactivate_mode(afu);
		cxl_deconfigure_afu(afu);
	}
	cxl_deconfigure_adapter(adapter);

	return result;
}
static pci_ers_result_t cxl_pci_slot_reset(struct pci_dev *pdev)
{
	struct cxl *adapter = pci_get_drvdata(pdev);
	struct cxl_afu *afu;
	struct cxl_context *ctx;
	struct pci_dev *afu_dev;
	pci_ers_result_t afu_result = PCI_ERS_RESULT_RECOVERED;
	pci_ers_result_t result = PCI_ERS_RESULT_RECOVERED;
	int i;

	if (cxl_configure_adapter(adapter, pdev))
		goto err;

	for (i = 0; i < adapter->slices; i++) {
		afu = adapter->afu[i];

		if (cxl_configure_afu(afu, adapter, pdev))
			goto err;

		if (cxl_afu_select_best_mode(afu))
			goto err;

		cxl_pci_vphb_reconfigure(afu);

		list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) {
			/* Reset the device context.
			 * TODO: make this less disruptive
			 */
			ctx = cxl_get_context(afu_dev);

			if (ctx && cxl_release_context(ctx))
				goto err;

			ctx = cxl_dev_context_init(afu_dev);
			if (!ctx)
				goto err;

			afu_dev->dev.archdata.cxl_ctx = ctx;

			if (cxl_afu_check_and_enable(afu))
				goto err;

			afu_dev->error_state = pci_channel_io_normal;

			/* If there's a driver attached, allow it to
			 * chime in on recovery. Drivers should check
			 * if everything has come back OK, but
			 * shouldn't start new work until we call
			 * their resume function.
			 */
			if (!afu_dev->driver)
				continue;

			if (afu_dev->driver->err_handler &&
			    afu_dev->driver->err_handler->slot_reset)
				afu_result = afu_dev->driver->err_handler->slot_reset(afu_dev);

			if (afu_result == PCI_ERS_RESULT_DISCONNECT)
				result = PCI_ERS_RESULT_DISCONNECT;
		}
	}
	return result;

err:
	/* All the bits that happen in both error_detected and cxl_remove
	 * should be idempotent, so we don't need to worry about leaving a mix
	 * of unconfigured and reconfigured resources.
	 */
	dev_err(&pdev->dev, "EEH recovery failed. Asking to be disconnected.\n");
	return PCI_ERS_RESULT_DISCONNECT;
}
static void cxl_pci_resume(struct pci_dev *pdev)
{
	struct cxl *adapter = pci_get_drvdata(pdev);
	struct cxl_afu *afu;
	struct pci_dev *afu_dev;
	int i;

	/* Everything is back now. Drivers should restart work now.
	 * This is not the place to be checking if everything came back up
	 * properly, because there's no return value: do that in slot_reset.
	 */
	for (i = 0; i < adapter->slices; i++) {
		afu = adapter->afu[i];

		list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) {
			if (afu_dev->driver && afu_dev->driver->err_handler &&
			    afu_dev->driver->err_handler->resume)
				afu_dev->driver->err_handler->resume(afu_dev);
		}
	}
}
static const struct pci_error_handlers cxl_err_handler = {
	.error_detected = cxl_pci_error_detected,
	.slot_reset = cxl_pci_slot_reset,
	.resume = cxl_pci_resume,
};
struct pci_driver cxl_pci_driver = {
	.name = "cxl-pci",
	.id_table = cxl_pci_tbl,
	.probe = cxl_probe,
	.remove = cxl_remove,
	.shutdown = cxl_remove,
	.err_handler = &cxl_err_handler,
};