/*
 * CXL Utility library for mailbox interface
 *
 * Copyright(C) 2020 Intel Corporation.
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See the
 * COPYING file in the top-level directory.
 */
10 #include "qemu/osdep.h"
11 #include "hw/cxl/cxl.h"
12 #include "hw/cxl/cxl_events.h"
13 #include "hw/pci/pci.h"
14 #include "hw/pci-bridge/cxl_upstream_port.h"
15 #include "qemu/cutils.h"
17 #include "qemu/units.h"
18 #include "qemu/uuid.h"
20 #define CXL_CAPACITY_MULTIPLIER (256 * MiB)
/*
 * How to add a new command, example. The command set FOO, with cmd BAR.
 *  1. Add the command set and cmd to the enum.
 *     FOO = 0x7f,
 *          #define BAR 0
 *  2. Implement the handler
 *    static CXLRetCode cmd_foo_bar(struct cxl_cmd *cmd,
 *                                  CXLDeviceState *cxl_dstate, uint16_t *len)
 *  3. Add the command to the cxl_cmd_set[][]
 *    [FOO][BAR] = { "FOO_BAR", cmd_foo_bar, x, y },
 *  4. Implement your handler
 *     define_mailbox_handler(FOO_BAR) { ... return CXL_MBOX_SUCCESS; }
 *
 *  Writing the handler:
 *    The handler will provide the &struct cxl_cmd, the &CXLDeviceState, and the
 *    in/out length of the payload. The handler is responsible for consuming the
 *    payload from cmd->payload and operating upon it as necessary. It must then
 *    fill the output data into cmd->payload (overwriting what was there),
 *    setting the length, and returning a valid return code.
 *
 *  XXX: The handler need not worry about endianness. The payload is read out of
 *  a register interface that already deals with it.
 */
/*
 * CXL mailbox command sets (high byte of the opcode) with their per-set
 * command opcodes (low byte) interleaved as #defines.
 * NOTE(review): the enum wrapper and some enumerators were lost in the
 * extraction; reconstructed from the visible members and the usages in the
 * cxl_cmd_set[][] tables below — confirm values against the original file.
 */
enum {
    INFOSTAT    = 0x00,
        #define IS_IDENTIFY   0x1
    EVENTS      = 0x01,
        #define GET_RECORDS   0x0
        #define CLEAR_RECORDS    0x1
        #define GET_INTERRUPT_POLICY 0x2
        #define SET_INTERRUPT_POLICY 0x3
    FIRMWARE_UPDATE = 0x02,
        #define GET_INFO      0x0
    TIMESTAMP   = 0x03,
        #define GET           0x0
        #define SET           0x1
    LOGS        = 0x04,
        #define GET_SUPPORTED 0x0
        #define GET_LOG       0x1
    IDENTIFY    = 0x40,
        #define MEMORY_DEVICE 0x0
    CCLS        = 0x41,
        #define GET_PARTITION_INFO 0x0
        #define GET_LSA       0x2
        #define SET_LSA       0x3
    MEDIA_AND_POISON = 0x43,
        #define GET_POISON_LIST        0x0
        #define INJECT_POISON          0x1
        #define CLEAR_POISON           0x2
    PHYSICAL_SWITCH = 0x51,
        #define IDENTIFY_SWITCH_DEVICE      0x0
};
78 static CXLRetCode
cmd_events_get_records(const struct cxl_cmd
*cmd
,
79 uint8_t *payload_in
, size_t len_in
,
80 uint8_t *payload_out
, size_t *len_out
,
83 CXLDeviceState
*cxlds
= &CXL_TYPE3(cci
->d
)->cxl_dstate
;
84 CXLGetEventPayload
*pl
;
88 if (cmd
->in
< sizeof(log_type
)) {
89 return CXL_MBOX_INVALID_INPUT
;
92 log_type
= payload_in
[0];
94 pl
= (CXLGetEventPayload
*)payload_out
;
95 memset(pl
, 0, sizeof(*pl
));
97 max_recs
= (cxlds
->payload_size
- CXL_EVENT_PAYLOAD_HDR_SIZE
) /
98 CXL_EVENT_RECORD_SIZE
;
99 if (max_recs
> 0xFFFF) {
103 return cxl_event_get_records(cxlds
, pl
, log_type
, max_recs
, len_out
);
106 static CXLRetCode
cmd_events_clear_records(const struct cxl_cmd
*cmd
,
109 uint8_t *payload_out
,
113 CXLDeviceState
*cxlds
= &CXL_TYPE3(cci
->d
)->cxl_dstate
;
114 CXLClearEventPayload
*pl
;
116 pl
= (CXLClearEventPayload
*)payload_in
;
118 return cxl_event_clear_records(cxlds
, pl
);
121 static CXLRetCode
cmd_events_get_interrupt_policy(const struct cxl_cmd
*cmd
,
124 uint8_t *payload_out
,
128 CXLDeviceState
*cxlds
= &CXL_TYPE3(cci
->d
)->cxl_dstate
;
129 CXLEventInterruptPolicy
*policy
;
132 policy
= (CXLEventInterruptPolicy
*)payload_out
;
133 memset(policy
, 0, sizeof(*policy
));
135 log
= &cxlds
->event_logs
[CXL_EVENT_TYPE_INFO
];
136 if (log
->irq_enabled
) {
137 policy
->info_settings
= CXL_EVENT_INT_SETTING(log
->irq_vec
);
140 log
= &cxlds
->event_logs
[CXL_EVENT_TYPE_WARN
];
141 if (log
->irq_enabled
) {
142 policy
->warn_settings
= CXL_EVENT_INT_SETTING(log
->irq_vec
);
145 log
= &cxlds
->event_logs
[CXL_EVENT_TYPE_FAIL
];
146 if (log
->irq_enabled
) {
147 policy
->failure_settings
= CXL_EVENT_INT_SETTING(log
->irq_vec
);
150 log
= &cxlds
->event_logs
[CXL_EVENT_TYPE_FATAL
];
151 if (log
->irq_enabled
) {
152 policy
->fatal_settings
= CXL_EVENT_INT_SETTING(log
->irq_vec
);
155 log
= &cxlds
->event_logs
[CXL_EVENT_TYPE_DYNAMIC_CAP
];
156 if (log
->irq_enabled
) {
157 /* Dynamic Capacity borrows the same vector as info */
158 policy
->dyn_cap_settings
= CXL_INT_MSI_MSIX
;
161 *len_out
= sizeof(*policy
);
162 return CXL_MBOX_SUCCESS
;
165 static CXLRetCode
cmd_events_set_interrupt_policy(const struct cxl_cmd
*cmd
,
168 uint8_t *payload_out
,
172 CXLDeviceState
*cxlds
= &CXL_TYPE3(cci
->d
)->cxl_dstate
;
173 CXLEventInterruptPolicy
*policy
;
176 if (len_in
< CXL_EVENT_INT_SETTING_MIN_LEN
) {
177 return CXL_MBOX_INVALID_PAYLOAD_LENGTH
;
180 policy
= (CXLEventInterruptPolicy
*)payload_in
;
182 log
= &cxlds
->event_logs
[CXL_EVENT_TYPE_INFO
];
183 log
->irq_enabled
= (policy
->info_settings
& CXL_EVENT_INT_MODE_MASK
) ==
186 log
= &cxlds
->event_logs
[CXL_EVENT_TYPE_WARN
];
187 log
->irq_enabled
= (policy
->warn_settings
& CXL_EVENT_INT_MODE_MASK
) ==
190 log
= &cxlds
->event_logs
[CXL_EVENT_TYPE_FAIL
];
191 log
->irq_enabled
= (policy
->failure_settings
& CXL_EVENT_INT_MODE_MASK
) ==
194 log
= &cxlds
->event_logs
[CXL_EVENT_TYPE_FATAL
];
195 log
->irq_enabled
= (policy
->fatal_settings
& CXL_EVENT_INT_MODE_MASK
) ==
198 /* DCD is optional */
199 if (len_in
< sizeof(*policy
)) {
200 return CXL_MBOX_SUCCESS
;
203 log
= &cxlds
->event_logs
[CXL_EVENT_TYPE_DYNAMIC_CAP
];
204 log
->irq_enabled
= (policy
->dyn_cap_settings
& CXL_EVENT_INT_MODE_MASK
) ==
208 return CXL_MBOX_SUCCESS
;
211 /* CXL r3.0 section 8.2.9.1.1: Identify (Opcode 0001h) */
212 static CXLRetCode
cmd_infostat_identify(const struct cxl_cmd
*cmd
,
215 uint8_t *payload_out
,
219 PCIDeviceClass
*class = PCI_DEVICE_GET_CLASS(cci
->d
);
223 uint16_t pcie_subsys_vid
;
224 uint16_t pcie_subsys_id
;
226 uint8_t max_message_size
;
227 uint8_t component_type
;
228 } QEMU_PACKED
*is_identify
;
229 QEMU_BUILD_BUG_ON(sizeof(*is_identify
) != 18);
231 is_identify
= (void *)payload_out
;
232 memset(is_identify
, 0, sizeof(*is_identify
));
233 is_identify
->pcie_vid
= class->vendor_id
;
234 is_identify
->pcie_did
= class->device_id
;
235 if (object_dynamic_cast(OBJECT(cci
->d
), TYPE_CXL_USP
)) {
236 is_identify
->sn
= CXL_USP(cci
->d
)->sn
;
237 /* Subsystem info not defined for a USP */
238 is_identify
->pcie_subsys_vid
= 0;
239 is_identify
->pcie_subsys_id
= 0;
240 is_identify
->component_type
= 0x0; /* Switch */
241 } else if (object_dynamic_cast(OBJECT(cci
->d
), TYPE_CXL_TYPE3
)) {
242 PCIDevice
*pci_dev
= PCI_DEVICE(cci
->d
);
244 is_identify
->sn
= CXL_TYPE3(cci
->d
)->sn
;
246 * We can't always use class->subsystem_vendor_id as
247 * it is not set if the defaults are used.
249 is_identify
->pcie_subsys_vid
=
250 pci_get_word(pci_dev
->config
+ PCI_SUBSYSTEM_VENDOR_ID
);
251 is_identify
->pcie_subsys_id
=
252 pci_get_word(pci_dev
->config
+ PCI_SUBSYSTEM_ID
);
253 is_identify
->component_type
= 0x3; /* Type 3 */
256 /* TODO: Allow this to vary across different CCIs */
257 is_identify
->max_message_size
= 9; /* 512 bytes - MCTP_CXL_MAILBOX_BYTES */
258 *len_out
= sizeof(*is_identify
);
259 return CXL_MBOX_SUCCESS
;
262 static void cxl_set_dsp_active_bm(PCIBus
*b
, PCIDevice
*d
,
265 uint8_t *bm
= private;
266 if (object_dynamic_cast(OBJECT(d
), TYPE_CXL_DSP
)) {
267 uint8_t port
= PCIE_PORT(d
)->port
;
268 bm
[port
/ 8] |= 1 << (port
% 8);
272 /* CXL r3 8.2.9.1.1 */
273 static CXLRetCode
cmd_identify_switch_device(const struct cxl_cmd
*cmd
,
276 uint8_t *payload_out
,
280 PCIEPort
*usp
= PCIE_PORT(cci
->d
);
281 PCIBus
*bus
= &PCI_BRIDGE(cci
->d
)->sec_bus
;
282 int num_phys_ports
= pcie_count_ds_ports(bus
);
284 struct cxl_fmapi_ident_switch_dev_resp_pl
{
285 uint8_t ingress_port_id
;
287 uint8_t num_physical_ports
;
289 uint8_t active_port_bitmask
[0x20];
290 uint8_t active_vcs_bitmask
[0x20];
291 uint16_t total_vppbs
;
292 uint16_t bound_vppbs
;
293 uint8_t num_hdm_decoders_per_usp
;
295 QEMU_BUILD_BUG_ON(sizeof(*out
) != 0x49);
297 out
= (struct cxl_fmapi_ident_switch_dev_resp_pl
*)payload_out
;
298 *out
= (struct cxl_fmapi_ident_switch_dev_resp_pl
) {
299 .num_physical_ports
= num_phys_ports
+ 1, /* 1 USP */
300 .num_vcss
= 1, /* Not yet support multiple VCS - potentialy tricky */
301 .active_vcs_bitmask
[0] = 0x1,
302 .total_vppbs
= num_phys_ports
+ 1,
303 .bound_vppbs
= num_phys_ports
+ 1,
304 .num_hdm_decoders_per_usp
= 4,
307 /* Depends on the CCI type */
308 if (object_dynamic_cast(OBJECT(cci
->intf
), TYPE_PCIE_PORT
)) {
309 out
->ingress_port_id
= PCIE_PORT(cci
->intf
)->port
;
312 out
->ingress_port_id
= 0;
315 pci_for_each_device_under_bus(bus
, cxl_set_dsp_active_bm
,
316 out
->active_port_bitmask
);
317 out
->active_port_bitmask
[usp
->port
/ 8] |= (1 << usp
->port
% 8);
319 *len_out
= sizeof(*out
);
321 return CXL_MBOX_SUCCESS
;
324 static CXLRetCode
cmd_firmware_update_get_info(const struct cxl_cmd
*cmd
,
327 uint8_t *payload_out
,
331 CXLDeviceState
*cxl_dstate
= &CXL_TYPE3(cci
->d
)->cxl_dstate
;
333 uint8_t slots_supported
;
341 } QEMU_PACKED
*fw_info
;
342 QEMU_BUILD_BUG_ON(sizeof(*fw_info
) != 0x50);
344 if ((cxl_dstate
->vmem_size
< CXL_CAPACITY_MULTIPLIER
) ||
345 (cxl_dstate
->pmem_size
< CXL_CAPACITY_MULTIPLIER
)) {
346 return CXL_MBOX_INTERNAL_ERROR
;
349 fw_info
= (void *)payload_out
;
350 memset(fw_info
, 0, sizeof(*fw_info
));
352 fw_info
->slots_supported
= 2;
353 fw_info
->slot_info
= BIT(0) | BIT(3);
355 pstrcpy(fw_info
->fw_rev1
, sizeof(fw_info
->fw_rev1
), "BWFW VERSION 0");
357 *len_out
= sizeof(*fw_info
);
358 return CXL_MBOX_SUCCESS
;
362 static CXLRetCode
cmd_timestamp_get(const struct cxl_cmd
*cmd
,
365 uint8_t *payload_out
,
369 CXLDeviceState
*cxl_dstate
= &CXL_TYPE3(cci
->d
)->cxl_dstate
;
370 uint64_t final_time
= cxl_device_get_timestamp(cxl_dstate
);
372 stq_le_p(payload_out
, final_time
);
375 return CXL_MBOX_SUCCESS
;
379 static CXLRetCode
cmd_timestamp_set(const struct cxl_cmd
*cmd
,
382 uint8_t *payload_out
,
386 CXLDeviceState
*cxl_dstate
= &CXL_TYPE3(cci
->d
)->cxl_dstate
;
388 cxl_dstate
->timestamp
.set
= true;
389 cxl_dstate
->timestamp
.last_set
= qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL
);
391 cxl_dstate
->timestamp
.host_set
= le64_to_cpu(*(uint64_t *)payload_in
);
394 return CXL_MBOX_SUCCESS
;
397 /* CXL 3.0 8.2.9.5.2.1 Command Effects Log (CEL) */
398 static const QemuUUID cel_uuid
= {
399 .data
= UUID(0x0da9c0b5, 0xbf41, 0x4b78, 0x8f, 0x79,
400 0x96, 0xb1, 0x62, 0x3b, 0x3f, 0x17)
404 static CXLRetCode
cmd_logs_get_supported(const struct cxl_cmd
*cmd
,
407 uint8_t *payload_out
,
418 } QEMU_PACKED
*supported_logs
= (void *)payload_out
;
419 QEMU_BUILD_BUG_ON(sizeof(*supported_logs
) != 0x1c);
421 supported_logs
->entries
= 1;
422 supported_logs
->log_entries
[0].uuid
= cel_uuid
;
423 supported_logs
->log_entries
[0].size
= 4 * cci
->cel_size
;
425 *len_out
= sizeof(*supported_logs
);
426 return CXL_MBOX_SUCCESS
;
430 static CXLRetCode
cmd_logs_get_log(const struct cxl_cmd
*cmd
,
433 uint8_t *payload_out
,
441 } QEMU_PACKED
QEMU_ALIGNED(16) *get_log
;
443 get_log
= (void *)payload_in
;
447 * The device shall return Invalid Parameter if the Offset or Length
448 * fields attempt to access beyond the size of the log as reported by Get
451 * XXX: Spec is wrong, "Invalid Parameter" isn't a thing.
452 * XXX: Spec doesn't address incorrect UUID incorrectness.
454 * The CEL buffer is large enough to fit all commands in the emulation, so
455 * the only possible failure would be if the mailbox itself isn't big
458 if (get_log
->offset
+ get_log
->length
> cci
->payload_max
) {
459 return CXL_MBOX_INVALID_INPUT
;
462 if (!qemu_uuid_is_equal(&get_log
->uuid
, &cel_uuid
)) {
463 return CXL_MBOX_UNSUPPORTED
;
466 /* Store off everything to local variables so we can wipe out the payload */
467 *len_out
= get_log
->length
;
469 memmove(payload_out
, cci
->cel_log
+ get_log
->offset
, get_log
->length
);
471 return CXL_MBOX_SUCCESS
;
475 static CXLRetCode
cmd_identify_memory_device(const struct cxl_cmd
*cmd
,
478 uint8_t *payload_out
,
483 char fw_revision
[0x10];
484 uint64_t total_capacity
;
485 uint64_t volatile_capacity
;
486 uint64_t persistent_capacity
;
487 uint64_t partition_align
;
488 uint16_t info_event_log_size
;
489 uint16_t warning_event_log_size
;
490 uint16_t failure_event_log_size
;
491 uint16_t fatal_event_log_size
;
493 uint8_t poison_list_max_mer
[3];
494 uint16_t inject_poison_limit
;
496 uint8_t qos_telemetry_caps
;
498 QEMU_BUILD_BUG_ON(sizeof(*id
) != 0x43);
499 CXLType3Dev
*ct3d
= CXL_TYPE3(cci
->d
);
500 CXLType3Class
*cvc
= CXL_TYPE3_GET_CLASS(ct3d
);
501 CXLDeviceState
*cxl_dstate
= &ct3d
->cxl_dstate
;
503 if ((!QEMU_IS_ALIGNED(cxl_dstate
->vmem_size
, CXL_CAPACITY_MULTIPLIER
)) ||
504 (!QEMU_IS_ALIGNED(cxl_dstate
->pmem_size
, CXL_CAPACITY_MULTIPLIER
))) {
505 return CXL_MBOX_INTERNAL_ERROR
;
508 id
= (void *)payload_out
;
509 memset(id
, 0, sizeof(*id
));
511 snprintf(id
->fw_revision
, 0x10, "BWFW VERSION %02d", 0);
513 stq_le_p(&id
->total_capacity
,
514 cxl_dstate
->mem_size
/ CXL_CAPACITY_MULTIPLIER
);
515 stq_le_p(&id
->persistent_capacity
,
516 cxl_dstate
->pmem_size
/ CXL_CAPACITY_MULTIPLIER
);
517 stq_le_p(&id
->volatile_capacity
,
518 cxl_dstate
->vmem_size
/ CXL_CAPACITY_MULTIPLIER
);
519 stl_le_p(&id
->lsa_size
, cvc
->get_lsa_size(ct3d
));
520 /* 256 poison records */
521 st24_le_p(id
->poison_list_max_mer
, 256);
522 /* No limit - so limited by main poison record limit */
523 stw_le_p(&id
->inject_poison_limit
, 0);
525 *len_out
= sizeof(*id
);
526 return CXL_MBOX_SUCCESS
;
529 static CXLRetCode
cmd_ccls_get_partition_info(const struct cxl_cmd
*cmd
,
532 uint8_t *payload_out
,
536 CXLDeviceState
*cxl_dstate
= &CXL_TYPE3(cci
->d
)->cxl_dstate
;
538 uint64_t active_vmem
;
539 uint64_t active_pmem
;
542 } QEMU_PACKED
*part_info
= (void *)payload_out
;
543 QEMU_BUILD_BUG_ON(sizeof(*part_info
) != 0x20);
545 if ((!QEMU_IS_ALIGNED(cxl_dstate
->vmem_size
, CXL_CAPACITY_MULTIPLIER
)) ||
546 (!QEMU_IS_ALIGNED(cxl_dstate
->pmem_size
, CXL_CAPACITY_MULTIPLIER
))) {
547 return CXL_MBOX_INTERNAL_ERROR
;
550 stq_le_p(&part_info
->active_vmem
,
551 cxl_dstate
->vmem_size
/ CXL_CAPACITY_MULTIPLIER
);
553 * When both next_vmem and next_pmem are 0, there is no pending change to
556 stq_le_p(&part_info
->next_vmem
, 0);
557 stq_le_p(&part_info
->active_pmem
,
558 cxl_dstate
->pmem_size
/ CXL_CAPACITY_MULTIPLIER
);
559 stq_le_p(&part_info
->next_pmem
, 0);
561 *len_out
= sizeof(*part_info
);
562 return CXL_MBOX_SUCCESS
;
565 static CXLRetCode
cmd_ccls_get_lsa(const struct cxl_cmd
*cmd
,
568 uint8_t *payload_out
,
575 } QEMU_PACKED
*get_lsa
;
576 CXLType3Dev
*ct3d
= CXL_TYPE3(cci
->d
);
577 CXLType3Class
*cvc
= CXL_TYPE3_GET_CLASS(ct3d
);
578 uint32_t offset
, length
;
580 get_lsa
= (void *)payload_in
;
581 offset
= get_lsa
->offset
;
582 length
= get_lsa
->length
;
584 if (offset
+ length
> cvc
->get_lsa_size(ct3d
)) {
586 return CXL_MBOX_INVALID_INPUT
;
589 *len_out
= cvc
->get_lsa(ct3d
, payload_out
, length
, offset
);
590 return CXL_MBOX_SUCCESS
;
593 static CXLRetCode
cmd_ccls_set_lsa(const struct cxl_cmd
*cmd
,
596 uint8_t *payload_out
,
605 struct set_lsa_pl
*set_lsa_payload
= (void *)payload_in
;
606 CXLType3Dev
*ct3d
= CXL_TYPE3(cci
->d
);
607 CXLType3Class
*cvc
= CXL_TYPE3_GET_CLASS(ct3d
);
608 const size_t hdr_len
= offsetof(struct set_lsa_pl
, data
);
612 return CXL_MBOX_SUCCESS
;
615 if (set_lsa_payload
->offset
+ len_in
> cvc
->get_lsa_size(ct3d
) + hdr_len
) {
616 return CXL_MBOX_INVALID_INPUT
;
620 cvc
->set_lsa(ct3d
, set_lsa_payload
->data
, len_in
, set_lsa_payload
->offset
);
621 return CXL_MBOX_SUCCESS
;
625 * This is very inefficient, but good enough for now!
626 * Also the payload will always fit, so no need to handle the MORE flag and
627 * make this stateful. We may want to allow longer poison lists to aid
628 * testing that kernel functionality.
630 static CXLRetCode
cmd_media_get_poison_list(const struct cxl_cmd
*cmd
,
633 uint8_t *payload_out
,
637 struct get_poison_list_pl
{
642 struct get_poison_list_out_pl
{
645 uint64_t overflow_timestamp
;
652 } QEMU_PACKED records
[];
655 struct get_poison_list_pl
*in
= (void *)payload_in
;
656 struct get_poison_list_out_pl
*out
= (void *)payload_out
;
657 CXLType3Dev
*ct3d
= CXL_TYPE3(cci
->d
);
658 uint16_t record_count
= 0, i
= 0;
659 uint64_t query_start
, query_length
;
660 CXLPoisonList
*poison_list
= &ct3d
->poison_list
;
664 query_start
= ldq_le_p(&in
->pa
);
665 /* 64 byte alignment required */
666 if (query_start
& 0x3f) {
667 return CXL_MBOX_INVALID_INPUT
;
669 query_length
= ldq_le_p(&in
->length
) * CXL_CACHE_LINE_SIZE
;
671 QLIST_FOREACH(ent
, poison_list
, node
) {
672 /* Check for no overlap */
673 if (ent
->start
>= query_start
+ query_length
||
674 ent
->start
+ ent
->length
<= query_start
) {
679 out_pl_len
= sizeof(*out
) + record_count
* sizeof(out
->records
[0]);
680 assert(out_pl_len
<= CXL_MAILBOX_MAX_PAYLOAD_SIZE
);
682 memset(out
, 0, out_pl_len
);
683 QLIST_FOREACH(ent
, poison_list
, node
) {
684 uint64_t start
, stop
;
686 /* Check for no overlap */
687 if (ent
->start
>= query_start
+ query_length
||
688 ent
->start
+ ent
->length
<= query_start
) {
692 /* Deal with overlap */
693 start
= MAX(ROUND_DOWN(ent
->start
, 64ull), query_start
);
694 stop
= MIN(ROUND_DOWN(ent
->start
, 64ull) + ent
->length
,
695 query_start
+ query_length
);
696 stq_le_p(&out
->records
[i
].addr
, start
| (ent
->type
& 0x7));
697 stl_le_p(&out
->records
[i
].length
, (stop
- start
) / CXL_CACHE_LINE_SIZE
);
700 if (ct3d
->poison_list_overflowed
) {
701 out
->flags
= (1 << 1);
702 stq_le_p(&out
->overflow_timestamp
, ct3d
->poison_list_overflow_ts
);
704 stw_le_p(&out
->count
, record_count
);
705 *len_out
= out_pl_len
;
706 return CXL_MBOX_SUCCESS
;
709 static CXLRetCode
cmd_media_inject_poison(const struct cxl_cmd
*cmd
,
712 uint8_t *payload_out
,
716 CXLType3Dev
*ct3d
= CXL_TYPE3(cci
->d
);
717 CXLPoisonList
*poison_list
= &ct3d
->poison_list
;
719 struct inject_poison_pl
{
722 struct inject_poison_pl
*in
= (void *)payload_in
;
723 uint64_t dpa
= ldq_le_p(&in
->dpa
);
726 QLIST_FOREACH(ent
, poison_list
, node
) {
727 if (dpa
>= ent
->start
&&
728 dpa
+ CXL_CACHE_LINE_SIZE
<= ent
->start
+ ent
->length
) {
729 return CXL_MBOX_SUCCESS
;
733 if (ct3d
->poison_list_cnt
== CXL_POISON_LIST_LIMIT
) {
734 return CXL_MBOX_INJECT_POISON_LIMIT
;
736 p
= g_new0(CXLPoison
, 1);
738 p
->length
= CXL_CACHE_LINE_SIZE
;
740 p
->type
= CXL_POISON_TYPE_INJECTED
;
743 * Possible todo: Merge with existing entry if next to it and if same type
745 QLIST_INSERT_HEAD(poison_list
, p
, node
);
746 ct3d
->poison_list_cnt
++;
749 return CXL_MBOX_SUCCESS
;
752 static CXLRetCode
cmd_media_clear_poison(const struct cxl_cmd
*cmd
,
755 uint8_t *payload_out
,
759 CXLType3Dev
*ct3d
= CXL_TYPE3(cci
->d
);
760 CXLDeviceState
*cxl_dstate
= &ct3d
->cxl_dstate
;
761 CXLPoisonList
*poison_list
= &ct3d
->poison_list
;
762 CXLType3Class
*cvc
= CXL_TYPE3_GET_CLASS(ct3d
);
763 struct clear_poison_pl
{
770 struct clear_poison_pl
*in
= (void *)payload_in
;
772 dpa
= ldq_le_p(&in
->dpa
);
773 if (dpa
+ CXL_CACHE_LINE_SIZE
> cxl_dstate
->mem_size
) {
774 return CXL_MBOX_INVALID_PA
;
777 /* Clearing a region with no poison is not an error so always do so */
778 if (cvc
->set_cacheline
) {
779 if (!cvc
->set_cacheline(ct3d
, dpa
, in
->data
)) {
780 return CXL_MBOX_INTERNAL_ERROR
;
784 QLIST_FOREACH(ent
, poison_list
, node
) {
786 * Test for contained in entry. Simpler than general case
787 * as clearing 64 bytes and entries 64 byte aligned
789 if ((dpa
>= ent
->start
) && (dpa
< ent
->start
+ ent
->length
)) {
794 return CXL_MBOX_SUCCESS
;
797 QLIST_REMOVE(ent
, node
);
798 ct3d
->poison_list_cnt
--;
800 if (dpa
> ent
->start
) {
802 /* Cannot overflow as replacing existing entry */
804 frag
= g_new0(CXLPoison
, 1);
806 frag
->start
= ent
->start
;
807 frag
->length
= dpa
- ent
->start
;
808 frag
->type
= ent
->type
;
810 QLIST_INSERT_HEAD(poison_list
, frag
, node
);
811 ct3d
->poison_list_cnt
++;
814 if (dpa
+ CXL_CACHE_LINE_SIZE
< ent
->start
+ ent
->length
) {
817 if (ct3d
->poison_list_cnt
== CXL_POISON_LIST_LIMIT
) {
818 cxl_set_poison_list_overflowed(ct3d
);
820 frag
= g_new0(CXLPoison
, 1);
822 frag
->start
= dpa
+ CXL_CACHE_LINE_SIZE
;
823 frag
->length
= ent
->start
+ ent
->length
- frag
->start
;
824 frag
->type
= ent
->type
;
825 QLIST_INSERT_HEAD(poison_list
, frag
, node
);
826 ct3d
->poison_list_cnt
++;
829 /* Any fragments have been added, free original entry */
833 return CXL_MBOX_SUCCESS
;
836 #define IMMEDIATE_CONFIG_CHANGE (1 << 1)
837 #define IMMEDIATE_DATA_CHANGE (1 << 2)
838 #define IMMEDIATE_POLICY_CHANGE (1 << 3)
839 #define IMMEDIATE_LOG_CHANGE (1 << 4)
841 static const struct cxl_cmd cxl_cmd_set
[256][256] = {
842 [EVENTS
][GET_RECORDS
] = { "EVENTS_GET_RECORDS",
843 cmd_events_get_records
, 1, 0 },
844 [EVENTS
][CLEAR_RECORDS
] = { "EVENTS_CLEAR_RECORDS",
845 cmd_events_clear_records
, ~0, IMMEDIATE_LOG_CHANGE
},
846 [EVENTS
][GET_INTERRUPT_POLICY
] = { "EVENTS_GET_INTERRUPT_POLICY",
847 cmd_events_get_interrupt_policy
, 0, 0 },
848 [EVENTS
][SET_INTERRUPT_POLICY
] = { "EVENTS_SET_INTERRUPT_POLICY",
849 cmd_events_set_interrupt_policy
,
850 ~0, IMMEDIATE_CONFIG_CHANGE
},
851 [FIRMWARE_UPDATE
][GET_INFO
] = { "FIRMWARE_UPDATE_GET_INFO",
852 cmd_firmware_update_get_info
, 0, 0 },
853 [TIMESTAMP
][GET
] = { "TIMESTAMP_GET", cmd_timestamp_get
, 0, 0 },
854 [TIMESTAMP
][SET
] = { "TIMESTAMP_SET", cmd_timestamp_set
,
855 8, IMMEDIATE_POLICY_CHANGE
},
856 [LOGS
][GET_SUPPORTED
] = { "LOGS_GET_SUPPORTED", cmd_logs_get_supported
,
858 [LOGS
][GET_LOG
] = { "LOGS_GET_LOG", cmd_logs_get_log
, 0x18, 0 },
859 [IDENTIFY
][MEMORY_DEVICE
] = { "IDENTIFY_MEMORY_DEVICE",
860 cmd_identify_memory_device
, 0, 0 },
861 [CCLS
][GET_PARTITION_INFO
] = { "CCLS_GET_PARTITION_INFO",
862 cmd_ccls_get_partition_info
, 0, 0 },
863 [CCLS
][GET_LSA
] = { "CCLS_GET_LSA", cmd_ccls_get_lsa
, 8, 0 },
864 [CCLS
][SET_LSA
] = { "CCLS_SET_LSA", cmd_ccls_set_lsa
,
865 ~0, IMMEDIATE_CONFIG_CHANGE
| IMMEDIATE_DATA_CHANGE
},
866 [MEDIA_AND_POISON
][GET_POISON_LIST
] = { "MEDIA_AND_POISON_GET_POISON_LIST",
867 cmd_media_get_poison_list
, 16, 0 },
868 [MEDIA_AND_POISON
][INJECT_POISON
] = { "MEDIA_AND_POISON_INJECT_POISON",
869 cmd_media_inject_poison
, 8, 0 },
870 [MEDIA_AND_POISON
][CLEAR_POISON
] = { "MEDIA_AND_POISON_CLEAR_POISON",
871 cmd_media_clear_poison
, 72, 0 },
874 static const struct cxl_cmd cxl_cmd_set_sw
[256][256] = {
875 [INFOSTAT
][IS_IDENTIFY
] = { "IDENTIFY", cmd_infostat_identify
, 0, 0 },
876 [TIMESTAMP
][GET
] = { "TIMESTAMP_GET", cmd_timestamp_get
, 0, 0 },
877 [TIMESTAMP
][SET
] = { "TIMESTAMP_SET", cmd_timestamp_set
, 0,
878 IMMEDIATE_POLICY_CHANGE
},
879 [LOGS
][GET_SUPPORTED
] = { "LOGS_GET_SUPPORTED", cmd_logs_get_supported
, 0,
881 [LOGS
][GET_LOG
] = { "LOGS_GET_LOG", cmd_logs_get_log
, 0x18, 0 },
882 [PHYSICAL_SWITCH
][IDENTIFY_SWITCH_DEVICE
] = { "IDENTIFY_SWITCH_DEVICE",
883 cmd_identify_switch_device
, 0, 0 },
886 int cxl_process_cci_message(CXLCCI
*cci
, uint8_t set
, uint8_t cmd
,
887 size_t len_in
, uint8_t *pl_in
, size_t *len_out
,
888 uint8_t *pl_out
, bool *bg_started
)
890 const struct cxl_cmd
*cxl_cmd
;
894 cxl_cmd
= &cci
->cxl_cmd_set
[set
][cmd
];
895 h
= cxl_cmd
->handler
;
897 qemu_log_mask(LOG_UNIMP
, "Command %04xh not implemented\n",
899 return CXL_MBOX_UNSUPPORTED
;
902 if (len_in
!= cxl_cmd
->in
&& cxl_cmd
->in
!= ~0) {
903 return CXL_MBOX_INVALID_PAYLOAD_LENGTH
;
906 return (*h
)(cxl_cmd
, pl_in
, len_in
, pl_out
, len_out
, cci
);
909 void cxl_init_cci(CXLCCI
*cci
, size_t payload_max
)
911 cci
->payload_max
= payload_max
;
912 for (int set
= 0; set
< 256; set
++) {
913 for (int cmd
= 0; cmd
< 256; cmd
++) {
914 if (cci
->cxl_cmd_set
[set
][cmd
].handler
) {
915 const struct cxl_cmd
*c
= &cci
->cxl_cmd_set
[set
][cmd
];
916 struct cel_log
*log
=
917 &cci
->cel_log
[cci
->cel_size
];
919 log
->opcode
= (set
<< 8) | cmd
;
920 log
->effect
= c
->effect
;
927 void cxl_initialize_mailbox_swcci(CXLCCI
*cci
, DeviceState
*intf
,
928 DeviceState
*d
, size_t payload_max
)
930 cci
->cxl_cmd_set
= cxl_cmd_set_sw
;
933 cxl_init_cci(cci
, payload_max
);
936 void cxl_initialize_mailbox_t3(CXLCCI
*cci
, DeviceState
*d
, size_t payload_max
)
938 cci
->cxl_cmd_set
= cxl_cmd_set
;
941 /* No separation for PCI MB as protocol handled in PCI device */
943 cxl_init_cci(cci
, payload_max
);