hw/cxl/cxl-component-utils.c
/*
 * CXL Utility library for components
 *
 * Copyright(C) 2020 Intel Corporation.
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See the
 * COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qapi/error.h"
#include "hw/pci/pci.h"
#include "hw/cxl/cxl.h"

/* CXL r3.0 Section 8.2.4.19.1 CXL HDM Decoder Capability Register */
int cxl_decoder_count_enc(int count)
{
    switch (count) {
    case 1: return 0x0;
    case 2: return 0x1;
    case 4: return 0x2;
    case 6: return 0x3;
    case 8: return 0x4;
    case 10: return 0x5;
    /* Switches and Host Bridges may have more than 10 decoders */
    case 12: return 0x6;
    case 14: return 0x7;
    case 16: return 0x8;
    case 20: return 0x9;
    case 24: return 0xa;
    case 28: return 0xb;
    case 32: return 0xc;
    }
    return 0;
}

int cxl_decoder_count_dec(int enc_cnt)
{
    switch (enc_cnt) {
    case 0x0: return 1;
    case 0x1: return 2;
    case 0x2: return 4;
    case 0x3: return 6;
    case 0x4: return 8;
    case 0x5: return 10;
    /* Switches and Host Bridges may have more than 10 decoders */
    case 0x6: return 12;
    case 0x7: return 14;
    case 0x8: return 16;
    case 0x9: return 20;
    case 0xa: return 24;
    case 0xb: return 28;
    case 0xc: return 32;
    }
    return 0;
}

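/*
 * Decode an interleave granularity field: encoding 0 is 256 bytes and each
 * increment doubles the granularity (the inverse of
 * cxl_interleave_granularity_enc() below).
 */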
hwaddr cxl_decode_ig(int ig)
{
    return 1ULL << (ig + 8);
}

static uint64_t cxl_cache_mem_read_reg(void *opaque, hwaddr offset,
                                       unsigned size)
{
    CXLComponentState *cxl_cstate = opaque;
    ComponentRegisters *cregs = &cxl_cstate->crb;

    switch (size) {
    case 4:
        if (cregs->special_ops && cregs->special_ops->read) {
            return cregs->special_ops->read(cxl_cstate, offset, 4);
        } else {
            QEMU_BUILD_BUG_ON(sizeof(*cregs->cache_mem_registers) != 4);
            return cregs->cache_mem_registers[offset / 4];
        }
    case 8:
        qemu_log_mask(LOG_UNIMP,
                      "CXL 8 byte cache mem registers not implemented\n");
        return 0;
    default:
        /*
         * In line with specification limitations on access sizes, this
         * routine is not called with other sizes.
         */
        g_assert_not_reached();
    }
}

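/*
 * Simplified HDM decoder handling: commits take effect immediately, so a
 * write that sets COMMIT is reflected straight back as COMMITTED (and a
 * write that clears it as not COMMITTED), with ERR always cleared.
 */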
static void dumb_hdm_handler(CXLComponentState *cxl_cstate, hwaddr offset,
                             uint32_t value)
{
    ComponentRegisters *cregs = &cxl_cstate->crb;
    uint32_t *cache_mem = cregs->cache_mem_registers;
    bool should_commit = false;
    bool should_uncommit = false;

    switch (offset) {
    case A_CXL_HDM_DECODER0_CTRL:
    case A_CXL_HDM_DECODER1_CTRL:
    case A_CXL_HDM_DECODER2_CTRL:
    case A_CXL_HDM_DECODER3_CTRL:
        should_commit = FIELD_EX32(value, CXL_HDM_DECODER0_CTRL, COMMIT);
        should_uncommit = !should_commit;
        break;
    default:
        break;
    }

    if (should_commit) {
        value = FIELD_DP32(value, CXL_HDM_DECODER0_CTRL, ERR, 0);
        value = FIELD_DP32(value, CXL_HDM_DECODER0_CTRL, COMMITTED, 1);
    } else if (should_uncommit) {
        value = FIELD_DP32(value, CXL_HDM_DECODER0_CTRL, ERR, 0);
        value = FIELD_DP32(value, CXL_HDM_DECODER0_CTRL, COMMITTED, 0);
    }
    stl_le_p((uint8_t *)cache_mem + offset, value);
}

static void cxl_cache_mem_write_reg(void *opaque, hwaddr offset, uint64_t value,
                                    unsigned size)
{
    CXLComponentState *cxl_cstate = opaque;
    ComponentRegisters *cregs = &cxl_cstate->crb;
    uint32_t mask;

    switch (size) {
    case 4: {
        QEMU_BUILD_BUG_ON(sizeof(*cregs->cache_mem_regs_write_mask) != 4);
        QEMU_BUILD_BUG_ON(sizeof(*cregs->cache_mem_registers) != 4);
        mask = cregs->cache_mem_regs_write_mask[offset / 4];
        value &= mask;
        /* RO bits should remain constant. Done by reading existing value */
        value |= ~mask & cregs->cache_mem_registers[offset / 4];
        if (cregs->special_ops && cregs->special_ops->write) {
            cregs->special_ops->write(cxl_cstate, offset, value, size);
            return;
        }

        if (offset >= A_CXL_HDM_DECODER_CAPABILITY &&
            offset <= A_CXL_HDM_DECODER3_TARGET_LIST_HI) {
            dumb_hdm_handler(cxl_cstate, offset, value);
        } else {
            cregs->cache_mem_registers[offset / 4] = value;
        }
        return;
    }
    case 8:
        qemu_log_mask(LOG_UNIMP,
                      "CXL 8 byte cache mem registers not implemented\n");
        return;
    default:
        /*
         * In line with specification limitations on access sizes, this
         * routine is not called with other sizes.
         */
        g_assert_not_reached();
    }
}

/*
 * 8.2.3
 * The access restrictions specified in Section 8.2.2 also apply to CXL 2.0
 * Component Registers.
 *
 * 8.2.2
 * • A 32 bit register shall be accessed as a 4 Byte quantity. Partial
 *   reads are not permitted.
 * • A 64 bit register shall be accessed as an 8 Byte quantity. Partial
 *   reads are not permitted.
 *
 * As the spec is defined today, only 4 byte registers exist.
 */
static const MemoryRegionOps cache_mem_ops = {
    .read = cxl_cache_mem_read_reg,
    .write = cxl_cache_mem_write_reg,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 8,
        .unaligned = false,
    },
    .impl = {
        .min_access_size = 4,
        .max_access_size = 8,
    },
};

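/*
 * The component register block is assembled from two subregions: the IO
 * registers at offset 0, immediately followed by the cache_mem registers.
 */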
void cxl_component_register_block_init(Object *obj,
                                       CXLComponentState *cxl_cstate,
                                       const char *type)
{
    ComponentRegisters *cregs = &cxl_cstate->crb;

    memory_region_init(&cregs->component_registers, obj, type,
                       CXL2_COMPONENT_BLOCK_SIZE);

    /* The IO registers control the link, which QEMU doesn't care about */
    memory_region_init_io(&cregs->io, obj, NULL, cregs, ".io",
                          CXL2_COMPONENT_IO_REGION_SIZE);
    memory_region_init_io(&cregs->cache_mem, obj, &cache_mem_ops, cregs,
                          ".cache_mem", CXL2_COMPONENT_CM_REGION_SIZE);

    memory_region_add_subregion(&cregs->component_registers, 0, &cregs->io);
    memory_region_add_subregion(&cregs->component_registers,
                                CXL2_COMPONENT_IO_REGION_SIZE,
                                &cregs->cache_mem);
}

static void ras_init_common(uint32_t *reg_state, uint32_t *write_msk)
{
    /*
     * Error status is RW1C but given bits are not yet set, it can
     * be handled as RO.
     */
    stl_le_p(reg_state + R_CXL_RAS_UNC_ERR_STATUS, 0);
    stl_le_p(write_msk + R_CXL_RAS_UNC_ERR_STATUS, 0x1cfff);
    /* Bits 12-13 and 17-31 reserved in CXL 2.0 */
    stl_le_p(reg_state + R_CXL_RAS_UNC_ERR_MASK, 0x1cfff);
    stl_le_p(write_msk + R_CXL_RAS_UNC_ERR_MASK, 0x1cfff);
    stl_le_p(reg_state + R_CXL_RAS_UNC_ERR_SEVERITY, 0x1cfff);
    stl_le_p(write_msk + R_CXL_RAS_UNC_ERR_SEVERITY, 0x1cfff);
    stl_le_p(reg_state + R_CXL_RAS_COR_ERR_STATUS, 0);
    stl_le_p(write_msk + R_CXL_RAS_COR_ERR_STATUS, 0x7f);
    stl_le_p(reg_state + R_CXL_RAS_COR_ERR_MASK, 0x7f);
    stl_le_p(write_msk + R_CXL_RAS_COR_ERR_MASK, 0x7f);
    /* CXL switches and devices must set */
    stl_le_p(reg_state + R_CXL_RAS_ERR_CAP_CTRL, 0x200);
}

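/*
 * The registers for successive HDM decoders share one layout, so a single
 * stride (hdm_inc, measured in 32-bit registers from decoder 0 to decoder 1)
 * is used to reach every decoder's register set.
 */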
static void hdm_init_common(uint32_t *reg_state, uint32_t *write_msk,
                            enum reg_type type)
{
    int decoder_count = CXL_HDM_DECODER_COUNT;
    int hdm_inc = R_CXL_HDM_DECODER1_BASE_LO - R_CXL_HDM_DECODER0_BASE_LO;
    int i;

    ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_CAPABILITY, DECODER_COUNT,
                     cxl_decoder_count_enc(decoder_count));
    ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_CAPABILITY, TARGET_COUNT, 1);
    ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_CAPABILITY, INTERLEAVE_256B, 1);
    ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_CAPABILITY, INTERLEAVE_4K, 1);
    ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_CAPABILITY, POISON_ON_ERR_CAP, 0);
    ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_GLOBAL_CONTROL,
                     HDM_DECODER_ENABLE, 0);
    write_msk[R_CXL_HDM_DECODER_GLOBAL_CONTROL] = 0x3;
    for (i = 0; i < decoder_count; i++) {
        write_msk[R_CXL_HDM_DECODER0_BASE_LO + i * hdm_inc] = 0xf0000000;
        write_msk[R_CXL_HDM_DECODER0_BASE_HI + i * hdm_inc] = 0xffffffff;
        write_msk[R_CXL_HDM_DECODER0_SIZE_LO + i * hdm_inc] = 0xf0000000;
        write_msk[R_CXL_HDM_DECODER0_SIZE_HI + i * hdm_inc] = 0xffffffff;
        write_msk[R_CXL_HDM_DECODER0_CTRL + i * hdm_inc] = 0x13ff;
        if (type == CXL2_DEVICE ||
            type == CXL2_TYPE3_DEVICE ||
            type == CXL2_LOGICAL_DEVICE) {
            write_msk[R_CXL_HDM_DECODER0_TARGET_LIST_LO + i * hdm_inc] =
                0xf0000000;
        } else {
            write_msk[R_CXL_HDM_DECODER0_TARGET_LIST_LO + i * hdm_inc] =
                0xffffffff;
        }
        write_msk[R_CXL_HDM_DECODER0_TARGET_LIST_HI + i * hdm_inc] = 0xffffffff;
    }
}

void cxl_component_register_init_common(uint32_t *reg_state, uint32_t *write_msk,
                                        enum reg_type type)
{
    int caps = 0;

    /*
     * In CXL 2.0 the capabilities required for each CXL component are such that,
     * with the ordering chosen here, a single number can be used to define
     * which capabilities should be provided.
     */
    switch (type) {
    case CXL2_DOWNSTREAM_PORT:
    case CXL2_DEVICE:
        /* RAS, Link */
        caps = 2;
        break;
    case CXL2_UPSTREAM_PORT:
    case CXL2_TYPE3_DEVICE:
    case CXL2_LOGICAL_DEVICE:
        /* + HDM */
        caps = 3;
        break;
    case CXL2_ROOT_PORT:
        /* + Extended Security, + Snoop */
        caps = 5;
        break;
    default:
        abort();
    }

    memset(reg_state, 0, CXL2_COMPONENT_CM_REGION_SIZE);

    /* CXL Capability Header Register */
    ARRAY_FIELD_DP32(reg_state, CXL_CAPABILITY_HEADER, ID, 1);
    ARRAY_FIELD_DP32(reg_state, CXL_CAPABILITY_HEADER, VERSION, 1);
    ARRAY_FIELD_DP32(reg_state, CXL_CAPABILITY_HEADER, CACHE_MEM_VERSION, 1);
    ARRAY_FIELD_DP32(reg_state, CXL_CAPABILITY_HEADER, ARRAY_SIZE, caps);

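/*
 * Each capability array entry records the capability ID, its version, and a
 * pointer (offset) to the register block it describes.
 */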
#define init_cap_reg(reg, id, version)                                        \
    QEMU_BUILD_BUG_ON(CXL_##reg##_REGISTERS_OFFSET == 0);                     \
    do {                                                                      \
        int which = R_CXL_##reg##_CAPABILITY_HEADER;                          \
        reg_state[which] = FIELD_DP32(reg_state[which],                       \
                                      CXL_##reg##_CAPABILITY_HEADER, ID, id); \
        reg_state[which] =                                                    \
            FIELD_DP32(reg_state[which], CXL_##reg##_CAPABILITY_HEADER,       \
                       VERSION, version);                                     \
        reg_state[which] =                                                    \
            FIELD_DP32(reg_state[which], CXL_##reg##_CAPABILITY_HEADER, PTR,  \
                       CXL_##reg##_REGISTERS_OFFSET);                         \
    } while (0)

    init_cap_reg(RAS, 2, 2);
    ras_init_common(reg_state, write_msk);

    init_cap_reg(LINK, 4, 2);

    if (caps < 3) {
        return;
    }

    init_cap_reg(HDM, 5, 1);
    hdm_init_common(reg_state, write_msk, type);

    if (caps < 5) {
        return;
    }

    init_cap_reg(EXTSEC, 6, 1);
    init_cap_reg(SNOOP, 8, 1);

#undef init_cap_reg
}

/*
 * Helper to create a DVSEC header for a CXL entity. The caller is responsible
 * for tracking the valid offset.
 *
 * This function will build the DVSEC header on behalf of the caller and then
 * copy in the remaining data for the vendor specific bits.
 * It will also set up appropriate write masks.
 */
void cxl_component_create_dvsec(CXLComponentState *cxl,
                                enum reg_type cxl_dev_type, uint16_t length,
                                uint16_t type, uint8_t rev, uint8_t *body)
{
    PCIDevice *pdev = cxl->pdev;
    uint16_t offset = cxl->dvsec_offset;
    uint8_t *wmask = pdev->wmask;

    assert(offset >= PCI_CFG_SPACE_SIZE &&
           ((offset + length) < PCI_CFG_SPACE_EXP_SIZE));
    assert((length & 0xf000) == 0);
    assert((rev & ~0xf) == 0);

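    /*
     * DVSEC Header 1 packs the DVSEC length, revision and vendor ID into one
     * dword: length in bits [31:20], revision in bits [19:16] and the vendor
     * ID in bits [15:0]; the 16-bit DVSEC ID is then written separately at
     * PCIE_DVSEC_ID_OFFSET.
     */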
    /* Create the DVSEC in the MCFG space */
    pcie_add_capability(pdev, PCI_EXT_CAP_ID_DVSEC, 1, offset, length);
    pci_set_long(pdev->config + offset + PCIE_DVSEC_HEADER1_OFFSET,
                 (length << 20) | (rev << 16) | CXL_VENDOR_ID);
    pci_set_word(pdev->config + offset + PCIE_DVSEC_ID_OFFSET, type);
    memcpy(pdev->config + offset + sizeof(DVSECHeader),
           body + sizeof(DVSECHeader),
           length - sizeof(DVSECHeader));

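    /*
     * Each byte of pdev->wmask flags which bits of the corresponding config
     * space byte the guest may modify; bits left clear stay read-only.
     */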
    /* Configure write masks */
    switch (type) {
    case PCIE_CXL_DEVICE_DVSEC:
        /* Cntrl RW Lock - so needs explicit blocking when lock is set */
        wmask[offset + offsetof(CXLDVSECDevice, ctrl)] = 0xFD;
        wmask[offset + offsetof(CXLDVSECDevice, ctrl) + 1] = 0x4F;
        /* Status is RW1CS */
        wmask[offset + offsetof(CXLDVSECDevice, ctrl2)] = 0x0F;
        /* Lock is RW Once */
        wmask[offset + offsetof(CXLDVSECDevice, lock)] = 0x01;
        /* range1/2_base_high/low is RW Lock */
        wmask[offset + offsetof(CXLDVSECDevice, range1_base_hi)] = 0xFF;
        wmask[offset + offsetof(CXLDVSECDevice, range1_base_hi) + 1] = 0xFF;
        wmask[offset + offsetof(CXLDVSECDevice, range1_base_hi) + 2] = 0xFF;
        wmask[offset + offsetof(CXLDVSECDevice, range1_base_hi) + 3] = 0xFF;
        wmask[offset + offsetof(CXLDVSECDevice, range1_base_lo) + 3] = 0xF0;
        wmask[offset + offsetof(CXLDVSECDevice, range2_base_hi)] = 0xFF;
        wmask[offset + offsetof(CXLDVSECDevice, range2_base_hi) + 1] = 0xFF;
        wmask[offset + offsetof(CXLDVSECDevice, range2_base_hi) + 2] = 0xFF;
        wmask[offset + offsetof(CXLDVSECDevice, range2_base_hi) + 3] = 0xFF;
        wmask[offset + offsetof(CXLDVSECDevice, range2_base_lo) + 3] = 0xF0;
        break;
    case NON_CXL_FUNCTION_MAP_DVSEC:
        break; /* Not yet implemented */
    case EXTENSIONS_PORT_DVSEC:
        wmask[offset + offsetof(CXLDVSECPortExtensions, control)] = 0x0F;
        wmask[offset + offsetof(CXLDVSECPortExtensions, control) + 1] = 0x40;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_bus_base)] = 0xFF;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_bus_limit)] = 0xFF;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_memory_base)] = 0xF0;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_memory_base) + 1] = 0xFF;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_memory_limit)] = 0xF0;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_memory_limit) + 1] = 0xFF;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_prefetch_base)] = 0xF0;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_prefetch_base) + 1] = 0xFF;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_prefetch_limit)] = 0xF0;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_prefetch_limit) + 1] = 0xFF;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_prefetch_base_high)] = 0xFF;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_prefetch_base_high) + 1] = 0xFF;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_prefetch_base_high) + 2] = 0xFF;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_prefetch_base_high) + 3] = 0xFF;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_prefetch_limit_high)] = 0xFF;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_prefetch_limit_high) + 1] = 0xFF;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_prefetch_limit_high) + 2] = 0xFF;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_prefetch_limit_high) + 3] = 0xFF;
        break;
    case GPF_PORT_DVSEC:
        wmask[offset + offsetof(CXLDVSECPortGPF, phase1_ctrl)] = 0x0F;
        wmask[offset + offsetof(CXLDVSECPortGPF, phase1_ctrl) + 1] = 0x0F;
        wmask[offset + offsetof(CXLDVSECPortGPF, phase2_ctrl)] = 0x0F;
        wmask[offset + offsetof(CXLDVSECPortGPF, phase2_ctrl) + 1] = 0x0F;
        break;
    case GPF_DEVICE_DVSEC:
        wmask[offset + offsetof(CXLDVSECDeviceGPF, phase2_duration)] = 0x0F;
        wmask[offset + offsetof(CXLDVSECDeviceGPF, phase2_duration) + 1] = 0x0F;
        wmask[offset + offsetof(CXLDVSECDeviceGPF, phase2_power)] = 0xFF;
        wmask[offset + offsetof(CXLDVSECDeviceGPF, phase2_power) + 1] = 0xFF;
        wmask[offset + offsetof(CXLDVSECDeviceGPF, phase2_power) + 2] = 0xFF;
        wmask[offset + offsetof(CXLDVSECDeviceGPF, phase2_power) + 3] = 0xFF;
        break;
    case PCIE_FLEXBUS_PORT_DVSEC:
        switch (cxl_dev_type) {
        case CXL2_ROOT_PORT:
            /* No MLD */
            wmask[offset + offsetof(CXLDVSECPortFlexBus, ctrl)] = 0xbd;
            break;
        case CXL2_DOWNSTREAM_PORT:
            wmask[offset + offsetof(CXLDVSECPortFlexBus, ctrl)] = 0xfd;
            break;
        default: /* Registers are RO for other component types */
            break;
        }
        /* There are rw1cs bits in the status register but never set currently */
        break;
    }

    /* Update state for future DVSEC additions */
    range_init_nofail(&cxl->dvsecs[type], cxl->dvsec_offset, length);
    cxl->dvsec_offset += length;
}

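/*
 * Both power-of-2 interleave (1, 2, 4, 8 or 16 ways) and 3-, 6- and 12-way
 * interleave have defined encodings; any other value is rejected with an
 * error.
 */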
/* CXL r3.0 Section 8.2.4.19.7 CXL HDM Decoder n Control Register */
uint8_t cxl_interleave_ways_enc(int iw, Error **errp)
{
    switch (iw) {
    case 1: return 0x0;
    case 2: return 0x1;
    case 4: return 0x2;
    case 8: return 0x3;
    case 16: return 0x4;
    case 3: return 0x8;
    case 6: return 0x9;
    case 12: return 0xa;
    default:
        error_setg(errp, "Interleave ways: %d not supported", iw);
        return 0;
    }
}

int cxl_interleave_ways_dec(uint8_t iw_enc, Error **errp)
{
    switch (iw_enc) {
    case 0x0: return 1;
    case 0x1: return 2;
    case 0x2: return 4;
    case 0x3: return 8;
    case 0x4: return 16;
    case 0x8: return 3;
    case 0x9: return 6;
    case 0xa: return 12;
    default:
        error_setg(errp, "Encoded interleave ways: %d not supported", iw_enc);
        return 0;
    }
}

uint8_t cxl_interleave_granularity_enc(uint64_t gran, Error **errp)
{
    switch (gran) {
    case 256: return 0;
    case 512: return 1;
    case 1024: return 2;
    case 2048: return 3;
    case 4096: return 4;
    case 8192: return 5;
    case 16384: return 6;
    default:
        error_setg(errp, "Interleave granularity: %" PRIu64 " invalid", gran);
        return 0;
    }
}