/*
 * CXL Utility library for mailbox interface
 *
 * Copyright(C) 2020 Intel Corporation.
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See the
 * COPYING file in the top-level directory.
 */
10 #include "qemu/osdep.h"
11 #include "hw/pci/msi.h"
12 #include "hw/pci/msix.h"
13 #include "hw/cxl/cxl.h"
14 #include "hw/cxl/cxl_events.h"
15 #include "hw/pci/pci.h"
16 #include "hw/pci-bridge/cxl_upstream_port.h"
17 #include "qemu/cutils.h"
19 #include "qemu/units.h"
20 #include "qemu/uuid.h"
21 #include "sysemu/hostmem.h"
23 #define CXL_CAPACITY_MULTIPLIER (256 * MiB)
/*
 * How to add a new command, example. The command set FOO, with cmd BAR.
 *  1. Add the command set and cmd to the enum.
 *  2. Implement the handler
 *    static CXLRetCode cmd_foo_bar(struct cxl_cmd *cmd,
 *                                  CXLDeviceState *cxl_dstate, uint16_t *len)
 *  3. Add the command to the cxl_cmd_set[][]
 *    [FOO][BAR] = { "FOO_BAR", cmd_foo_bar, x, y },
 *  4. Implement your handler
 *     define_mailbox_handler(FOO_BAR) { ... return CXL_MBOX_SUCCESS; }
 *
 *  Writing the handler:
 *    The handler will provide the &struct cxl_cmd, the &CXLDeviceState, and the
 *    in/out length of the payload. The handler is responsible for consuming the
 *    payload from cmd->payload and operating upon it as necessary. It must then
 *    fill the output data into cmd->payload (overwriting what was there),
 *    setting the length, and returning a valid return code.
 *
 * XXX: The handler need not worry about endianness. The payload is read out of
 * a register interface that already deals with it.
 */
52 #define IS_IDENTIFY 0x1
53 #define BACKGROUND_OPERATION_STATUS 0x2
55 #define GET_RECORDS 0x0
56 #define CLEAR_RECORDS 0x1
57 #define GET_INTERRUPT_POLICY 0x2
58 #define SET_INTERRUPT_POLICY 0x3
59 FIRMWARE_UPDATE
= 0x02,
65 #define GET_SUPPORTED 0x0
68 #define MEMORY_DEVICE 0x0
70 #define GET_PARTITION_INFO 0x0
75 #define SECURE_ERASE 0x1
76 PERSISTENT_MEM
= 0x45,
77 #define GET_SECURITY_STATE 0x0
78 MEDIA_AND_POISON
= 0x43,
79 #define GET_POISON_LIST 0x0
80 #define INJECT_POISON 0x1
81 #define CLEAR_POISON 0x2
82 PHYSICAL_SWITCH
= 0x51,
83 #define IDENTIFY_SWITCH_DEVICE 0x0
84 #define GET_PHYSICAL_PORT_STATE 0x1
86 #define MANAGEMENT_COMMAND 0x0
89 /* CCI Message Format CXL r3.0 Figure 7-19 */
90 typedef struct CXLCCIMessage
{
92 #define CXL_CCI_CAT_REQ 0
93 #define CXL_CCI_CAT_RSP 1
100 uint16_t vendor_specific
;
102 } QEMU_PACKED CXLCCIMessage
;
104 /* This command is only defined to an MLD FM Owned LD or an MHD */
105 static CXLRetCode
cmd_tunnel_management_cmd(const struct cxl_cmd
*cmd
,
108 uint8_t *payload_out
,
112 PCIDevice
*tunnel_target
;
115 uint8_t port_or_ld_id
;
118 CXLCCIMessage ccimessage
;
123 CXLCCIMessage ccimessage
;
125 size_t pl_length
, length_out
;
129 if (cmd
->in
< sizeof(*in
)) {
130 return CXL_MBOX_INVALID_INPUT
;
132 in
= (void *)payload_in
;
133 out
= (void *)payload_out
;
135 /* Enough room for minimum sized message - no payload */
136 if (in
->size
< sizeof(in
->ccimessage
)) {
137 return CXL_MBOX_INVALID_PAYLOAD_LENGTH
;
139 /* Length of input payload should be in->size + a wrapping tunnel header */
140 if (in
->size
!= len_in
- offsetof(typeof(*out
), ccimessage
)) {
141 return CXL_MBOX_INVALID_PAYLOAD_LENGTH
;
143 if (in
->ccimessage
.category
!= CXL_CCI_CAT_REQ
) {
144 return CXL_MBOX_INVALID_INPUT
;
147 if (in
->target_type
!= 0) {
148 qemu_log_mask(LOG_UNIMP
,
149 "Tunneled Command sent to non existent FM-LD");
150 return CXL_MBOX_INVALID_INPUT
;
154 * Target of a tunnel unfortunately depends on type of CCI readint
156 * If in a switch, then it's the port number.
157 * If in an MLD it is the ld number.
158 * If in an MHD target type indicate where we are going.
160 if (object_dynamic_cast(OBJECT(cci
->d
), TYPE_CXL_TYPE3
)) {
161 CXLType3Dev
*ct3d
= CXL_TYPE3(cci
->d
);
162 if (in
->port_or_ld_id
!= 0) {
163 /* Only pretending to have one for now! */
164 return CXL_MBOX_INVALID_INPUT
;
166 target_cci
= &ct3d
->ld0_cci
;
167 } else if (object_dynamic_cast(OBJECT(cci
->d
), TYPE_CXL_USP
)) {
168 CXLUpstreamPort
*usp
= CXL_USP(cci
->d
);
170 tunnel_target
= pcie_find_port_by_pn(&PCI_BRIDGE(usp
)->sec_bus
,
172 if (!tunnel_target
) {
173 return CXL_MBOX_INVALID_INPUT
;
176 pci_bridge_get_sec_bus(PCI_BRIDGE(tunnel_target
))->devices
[0];
177 if (!tunnel_target
) {
178 return CXL_MBOX_INVALID_INPUT
;
180 if (object_dynamic_cast(OBJECT(tunnel_target
), TYPE_CXL_TYPE3
)) {
181 CXLType3Dev
*ct3d
= CXL_TYPE3(tunnel_target
);
182 /* Tunneled VDMs always land on FM Owned LD */
183 target_cci
= &ct3d
->vdm_fm_owned_ld_mctp_cci
;
185 return CXL_MBOX_INVALID_INPUT
;
188 return CXL_MBOX_INVALID_INPUT
;
191 pl_length
= in
->ccimessage
.pl_length
[2] << 16 |
192 in
->ccimessage
.pl_length
[1] << 8 | in
->ccimessage
.pl_length
[0];
193 rc
= cxl_process_cci_message(target_cci
,
194 in
->ccimessage
.command_set
,
195 in
->ccimessage
.command
,
196 pl_length
, in
->ccimessage
.payload
,
197 &length_out
, out
->ccimessage
.payload
,
199 /* Payload should be in place. Rest of CCI header and needs filling */
200 out
->resp_len
= length_out
+ sizeof(CXLCCIMessage
);
201 st24_le_p(out
->ccimessage
.pl_length
, length_out
);
202 out
->ccimessage
.rc
= rc
;
203 out
->ccimessage
.category
= CXL_CCI_CAT_RSP
;
204 out
->ccimessage
.command
= in
->ccimessage
.command
;
205 out
->ccimessage
.command_set
= in
->ccimessage
.command_set
;
206 out
->ccimessage
.tag
= in
->ccimessage
.tag
;
207 *len_out
= length_out
+ sizeof(*out
);
209 return CXL_MBOX_SUCCESS
;
212 static CXLRetCode
cmd_events_get_records(const struct cxl_cmd
*cmd
,
213 uint8_t *payload_in
, size_t len_in
,
214 uint8_t *payload_out
, size_t *len_out
,
217 CXLDeviceState
*cxlds
= &CXL_TYPE3(cci
->d
)->cxl_dstate
;
218 CXLGetEventPayload
*pl
;
222 if (cmd
->in
< sizeof(log_type
)) {
223 return CXL_MBOX_INVALID_INPUT
;
226 log_type
= payload_in
[0];
228 pl
= (CXLGetEventPayload
*)payload_out
;
229 memset(pl
, 0, sizeof(*pl
));
231 max_recs
= (cxlds
->payload_size
- CXL_EVENT_PAYLOAD_HDR_SIZE
) /
232 CXL_EVENT_RECORD_SIZE
;
233 if (max_recs
> 0xFFFF) {
237 return cxl_event_get_records(cxlds
, pl
, log_type
, max_recs
, len_out
);
240 static CXLRetCode
cmd_events_clear_records(const struct cxl_cmd
*cmd
,
243 uint8_t *payload_out
,
247 CXLDeviceState
*cxlds
= &CXL_TYPE3(cci
->d
)->cxl_dstate
;
248 CXLClearEventPayload
*pl
;
250 pl
= (CXLClearEventPayload
*)payload_in
;
252 return cxl_event_clear_records(cxlds
, pl
);
255 static CXLRetCode
cmd_events_get_interrupt_policy(const struct cxl_cmd
*cmd
,
258 uint8_t *payload_out
,
262 CXLDeviceState
*cxlds
= &CXL_TYPE3(cci
->d
)->cxl_dstate
;
263 CXLEventInterruptPolicy
*policy
;
266 policy
= (CXLEventInterruptPolicy
*)payload_out
;
267 memset(policy
, 0, sizeof(*policy
));
269 log
= &cxlds
->event_logs
[CXL_EVENT_TYPE_INFO
];
270 if (log
->irq_enabled
) {
271 policy
->info_settings
= CXL_EVENT_INT_SETTING(log
->irq_vec
);
274 log
= &cxlds
->event_logs
[CXL_EVENT_TYPE_WARN
];
275 if (log
->irq_enabled
) {
276 policy
->warn_settings
= CXL_EVENT_INT_SETTING(log
->irq_vec
);
279 log
= &cxlds
->event_logs
[CXL_EVENT_TYPE_FAIL
];
280 if (log
->irq_enabled
) {
281 policy
->failure_settings
= CXL_EVENT_INT_SETTING(log
->irq_vec
);
284 log
= &cxlds
->event_logs
[CXL_EVENT_TYPE_FATAL
];
285 if (log
->irq_enabled
) {
286 policy
->fatal_settings
= CXL_EVENT_INT_SETTING(log
->irq_vec
);
289 log
= &cxlds
->event_logs
[CXL_EVENT_TYPE_DYNAMIC_CAP
];
290 if (log
->irq_enabled
) {
291 /* Dynamic Capacity borrows the same vector as info */
292 policy
->dyn_cap_settings
= CXL_INT_MSI_MSIX
;
295 *len_out
= sizeof(*policy
);
296 return CXL_MBOX_SUCCESS
;
299 static CXLRetCode
cmd_events_set_interrupt_policy(const struct cxl_cmd
*cmd
,
302 uint8_t *payload_out
,
306 CXLDeviceState
*cxlds
= &CXL_TYPE3(cci
->d
)->cxl_dstate
;
307 CXLEventInterruptPolicy
*policy
;
310 if (len_in
< CXL_EVENT_INT_SETTING_MIN_LEN
) {
311 return CXL_MBOX_INVALID_PAYLOAD_LENGTH
;
314 policy
= (CXLEventInterruptPolicy
*)payload_in
;
316 log
= &cxlds
->event_logs
[CXL_EVENT_TYPE_INFO
];
317 log
->irq_enabled
= (policy
->info_settings
& CXL_EVENT_INT_MODE_MASK
) ==
320 log
= &cxlds
->event_logs
[CXL_EVENT_TYPE_WARN
];
321 log
->irq_enabled
= (policy
->warn_settings
& CXL_EVENT_INT_MODE_MASK
) ==
324 log
= &cxlds
->event_logs
[CXL_EVENT_TYPE_FAIL
];
325 log
->irq_enabled
= (policy
->failure_settings
& CXL_EVENT_INT_MODE_MASK
) ==
328 log
= &cxlds
->event_logs
[CXL_EVENT_TYPE_FATAL
];
329 log
->irq_enabled
= (policy
->fatal_settings
& CXL_EVENT_INT_MODE_MASK
) ==
332 /* DCD is optional */
333 if (len_in
< sizeof(*policy
)) {
334 return CXL_MBOX_SUCCESS
;
337 log
= &cxlds
->event_logs
[CXL_EVENT_TYPE_DYNAMIC_CAP
];
338 log
->irq_enabled
= (policy
->dyn_cap_settings
& CXL_EVENT_INT_MODE_MASK
) ==
342 return CXL_MBOX_SUCCESS
;
345 /* CXL r3.0 section 8.2.9.1.1: Identify (Opcode 0001h) */
346 static CXLRetCode
cmd_infostat_identify(const struct cxl_cmd
*cmd
,
349 uint8_t *payload_out
,
353 PCIDeviceClass
*class = PCI_DEVICE_GET_CLASS(cci
->d
);
357 uint16_t pcie_subsys_vid
;
358 uint16_t pcie_subsys_id
;
360 uint8_t max_message_size
;
361 uint8_t component_type
;
362 } QEMU_PACKED
*is_identify
;
363 QEMU_BUILD_BUG_ON(sizeof(*is_identify
) != 18);
365 is_identify
= (void *)payload_out
;
366 memset(is_identify
, 0, sizeof(*is_identify
));
367 is_identify
->pcie_vid
= class->vendor_id
;
368 is_identify
->pcie_did
= class->device_id
;
369 if (object_dynamic_cast(OBJECT(cci
->d
), TYPE_CXL_USP
)) {
370 is_identify
->sn
= CXL_USP(cci
->d
)->sn
;
371 /* Subsystem info not defined for a USP */
372 is_identify
->pcie_subsys_vid
= 0;
373 is_identify
->pcie_subsys_id
= 0;
374 is_identify
->component_type
= 0x0; /* Switch */
375 } else if (object_dynamic_cast(OBJECT(cci
->d
), TYPE_CXL_TYPE3
)) {
376 PCIDevice
*pci_dev
= PCI_DEVICE(cci
->d
);
378 is_identify
->sn
= CXL_TYPE3(cci
->d
)->sn
;
380 * We can't always use class->subsystem_vendor_id as
381 * it is not set if the defaults are used.
383 is_identify
->pcie_subsys_vid
=
384 pci_get_word(pci_dev
->config
+ PCI_SUBSYSTEM_VENDOR_ID
);
385 is_identify
->pcie_subsys_id
=
386 pci_get_word(pci_dev
->config
+ PCI_SUBSYSTEM_ID
);
387 is_identify
->component_type
= 0x3; /* Type 3 */
390 /* TODO: Allow this to vary across different CCIs */
391 is_identify
->max_message_size
= 9; /* 512 bytes - MCTP_CXL_MAILBOX_BYTES */
392 *len_out
= sizeof(*is_identify
);
393 return CXL_MBOX_SUCCESS
;
396 static void cxl_set_dsp_active_bm(PCIBus
*b
, PCIDevice
*d
,
399 uint8_t *bm
= private;
400 if (object_dynamic_cast(OBJECT(d
), TYPE_CXL_DSP
)) {
401 uint8_t port
= PCIE_PORT(d
)->port
;
402 bm
[port
/ 8] |= 1 << (port
% 8);
406 /* CXL r3 8.2.9.1.1 */
407 static CXLRetCode
cmd_identify_switch_device(const struct cxl_cmd
*cmd
,
410 uint8_t *payload_out
,
414 PCIEPort
*usp
= PCIE_PORT(cci
->d
);
415 PCIBus
*bus
= &PCI_BRIDGE(cci
->d
)->sec_bus
;
416 int num_phys_ports
= pcie_count_ds_ports(bus
);
418 struct cxl_fmapi_ident_switch_dev_resp_pl
{
419 uint8_t ingress_port_id
;
421 uint8_t num_physical_ports
;
423 uint8_t active_port_bitmask
[0x20];
424 uint8_t active_vcs_bitmask
[0x20];
425 uint16_t total_vppbs
;
426 uint16_t bound_vppbs
;
427 uint8_t num_hdm_decoders_per_usp
;
429 QEMU_BUILD_BUG_ON(sizeof(*out
) != 0x49);
431 out
= (struct cxl_fmapi_ident_switch_dev_resp_pl
*)payload_out
;
432 *out
= (struct cxl_fmapi_ident_switch_dev_resp_pl
) {
433 .num_physical_ports
= num_phys_ports
+ 1, /* 1 USP */
434 .num_vcss
= 1, /* Not yet support multiple VCS - potentially tricky */
435 .active_vcs_bitmask
[0] = 0x1,
436 .total_vppbs
= num_phys_ports
+ 1,
437 .bound_vppbs
= num_phys_ports
+ 1,
438 .num_hdm_decoders_per_usp
= 4,
441 /* Depends on the CCI type */
442 if (object_dynamic_cast(OBJECT(cci
->intf
), TYPE_PCIE_PORT
)) {
443 out
->ingress_port_id
= PCIE_PORT(cci
->intf
)->port
;
446 out
->ingress_port_id
= 0;
449 pci_for_each_device_under_bus(bus
, cxl_set_dsp_active_bm
,
450 out
->active_port_bitmask
);
451 out
->active_port_bitmask
[usp
->port
/ 8] |= (1 << usp
->port
% 8);
453 *len_out
= sizeof(*out
);
455 return CXL_MBOX_SUCCESS
;
458 /* CXL r3.0 Section 7.6.7.1.2: Get Physical Port State (Opcode 5101h) */
459 static CXLRetCode
cmd_get_physical_port_state(const struct cxl_cmd
*cmd
,
462 uint8_t *payload_out
,
466 /* CXL r3.0 Table 7-18: Get Physical Port State Request Payload */
467 struct cxl_fmapi_get_phys_port_state_req_pl
{
473 * CXL r3.0 Table 7-20: Get Physical Port State Port Information Block
476 struct cxl_fmapi_port_state_info_block
{
478 uint8_t config_state
;
479 uint8_t connected_device_cxl_version
;
481 uint8_t connected_device_type
;
482 uint8_t port_cxl_version_bitmask
;
483 uint8_t max_link_width
;
484 uint8_t negotiated_link_width
;
485 uint8_t supported_link_speeds_vector
;
486 uint8_t max_link_speed
;
487 uint8_t current_link_speed
;
489 uint8_t first_lane_num
;
491 uint8_t supported_ld_count
;
494 /* CXL r3.0 Table 7-19: Get Physical Port State Response Payload */
495 struct cxl_fmapi_get_phys_port_state_resp_pl
{
498 struct cxl_fmapi_port_state_info_block ports
[];
500 PCIBus
*bus
= &PCI_BRIDGE(cci
->d
)->sec_bus
;
501 PCIEPort
*usp
= PCIE_PORT(cci
->d
);
505 in
= (struct cxl_fmapi_get_phys_port_state_req_pl
*)payload_in
;
506 out
= (struct cxl_fmapi_get_phys_port_state_resp_pl
*)payload_out
;
508 /* Check if what was requested can fit */
509 if (sizeof(*out
) + sizeof(*out
->ports
) * in
->num_ports
> cci
->payload_max
) {
510 return CXL_MBOX_INVALID_INPUT
;
513 /* For success there should be a match for each requested */
514 out
->num_ports
= in
->num_ports
;
516 for (i
= 0; i
< in
->num_ports
; i
++) {
517 struct cxl_fmapi_port_state_info_block
*port
;
518 /* First try to match on downstream port */
520 uint16_t lnkcap
, lnkcap2
, lnksta
;
522 port
= &out
->ports
[i
];
524 port_dev
= pcie_find_port_by_pn(bus
, in
->ports
[i
]);
525 if (port_dev
) { /* DSP */
526 PCIDevice
*ds_dev
= pci_bridge_get_sec_bus(PCI_BRIDGE(port_dev
))
528 port
->config_state
= 3;
530 if (object_dynamic_cast(OBJECT(ds_dev
), TYPE_CXL_TYPE3
)) {
531 port
->connected_device_type
= 5; /* Assume MLD for now */
533 port
->connected_device_type
= 1;
536 port
->connected_device_type
= 0;
538 port
->supported_ld_count
= 3;
539 } else if (usp
->port
== in
->ports
[i
]) { /* USP */
540 port_dev
= PCI_DEVICE(usp
);
541 port
->config_state
= 4;
542 port
->connected_device_type
= 0;
544 return CXL_MBOX_INVALID_INPUT
;
547 port
->port_id
= in
->ports
[i
];
548 /* Information on status of this port in lnksta, lnkcap */
549 if (!port_dev
->exp
.exp_cap
) {
550 return CXL_MBOX_INTERNAL_ERROR
;
552 lnksta
= port_dev
->config_read(port_dev
,
553 port_dev
->exp
.exp_cap
+ PCI_EXP_LNKSTA
,
555 lnkcap
= port_dev
->config_read(port_dev
,
556 port_dev
->exp
.exp_cap
+ PCI_EXP_LNKCAP
,
558 lnkcap2
= port_dev
->config_read(port_dev
,
559 port_dev
->exp
.exp_cap
+ PCI_EXP_LNKCAP2
,
562 port
->max_link_width
= (lnkcap
& PCI_EXP_LNKCAP_MLW
) >> 4;
563 port
->negotiated_link_width
= (lnksta
& PCI_EXP_LNKSTA_NLW
) >> 4;
564 /* No definition for SLS field in linux/pci_regs.h */
565 port
->supported_link_speeds_vector
= (lnkcap2
& 0xFE) >> 1;
566 port
->max_link_speed
= lnkcap
& PCI_EXP_LNKCAP_SLS
;
567 port
->current_link_speed
= lnksta
& PCI_EXP_LNKSTA_CLS
;
568 /* TODO: Track down if we can get the rest of the info */
569 port
->ltssm_state
= 0x7;
570 port
->first_lane_num
= 0;
571 port
->link_state
= 0;
572 port
->port_cxl_version_bitmask
= 0x2;
573 port
->connected_device_cxl_version
= 0x2;
576 pl_size
= sizeof(*out
) + sizeof(*out
->ports
) * in
->num_ports
;
579 return CXL_MBOX_SUCCESS
;
582 /* CXL r3.0 8.2.9.1.2 */
583 static CXLRetCode
cmd_infostat_bg_op_sts(const struct cxl_cmd
*cmd
,
586 uint8_t *payload_out
,
595 uint16_t vendor_ext_status
;
596 } QEMU_PACKED
*bg_op_status
;
597 QEMU_BUILD_BUG_ON(sizeof(*bg_op_status
) != 8);
599 bg_op_status
= (void *)payload_out
;
600 memset(bg_op_status
, 0, sizeof(*bg_op_status
));
601 bg_op_status
->status
= cci
->bg
.complete_pct
<< 1;
602 if (cci
->bg
.runtime
> 0) {
603 bg_op_status
->status
|= 1U << 0;
605 bg_op_status
->opcode
= cci
->bg
.opcode
;
606 bg_op_status
->returncode
= cci
->bg
.ret_code
;
607 *len_out
= sizeof(*bg_op_status
);
609 return CXL_MBOX_SUCCESS
;
613 static CXLRetCode
cmd_firmware_update_get_info(const struct cxl_cmd
*cmd
,
616 uint8_t *payload_out
,
620 CXLDeviceState
*cxl_dstate
= &CXL_TYPE3(cci
->d
)->cxl_dstate
;
622 uint8_t slots_supported
;
630 } QEMU_PACKED
*fw_info
;
631 QEMU_BUILD_BUG_ON(sizeof(*fw_info
) != 0x50);
633 if ((cxl_dstate
->vmem_size
< CXL_CAPACITY_MULTIPLIER
) ||
634 (cxl_dstate
->pmem_size
< CXL_CAPACITY_MULTIPLIER
)) {
635 return CXL_MBOX_INTERNAL_ERROR
;
638 fw_info
= (void *)payload_out
;
639 memset(fw_info
, 0, sizeof(*fw_info
));
641 fw_info
->slots_supported
= 2;
642 fw_info
->slot_info
= BIT(0) | BIT(3);
644 pstrcpy(fw_info
->fw_rev1
, sizeof(fw_info
->fw_rev1
), "BWFW VERSION 0");
646 *len_out
= sizeof(*fw_info
);
647 return CXL_MBOX_SUCCESS
;
651 static CXLRetCode
cmd_timestamp_get(const struct cxl_cmd
*cmd
,
654 uint8_t *payload_out
,
658 CXLDeviceState
*cxl_dstate
= &CXL_TYPE3(cci
->d
)->cxl_dstate
;
659 uint64_t final_time
= cxl_device_get_timestamp(cxl_dstate
);
661 stq_le_p(payload_out
, final_time
);
664 return CXL_MBOX_SUCCESS
;
668 static CXLRetCode
cmd_timestamp_set(const struct cxl_cmd
*cmd
,
671 uint8_t *payload_out
,
675 CXLDeviceState
*cxl_dstate
= &CXL_TYPE3(cci
->d
)->cxl_dstate
;
677 cxl_dstate
->timestamp
.set
= true;
678 cxl_dstate
->timestamp
.last_set
= qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL
);
680 cxl_dstate
->timestamp
.host_set
= le64_to_cpu(*(uint64_t *)payload_in
);
683 return CXL_MBOX_SUCCESS
;
686 /* CXL 3.0 8.2.9.5.2.1 Command Effects Log (CEL) */
687 static const QemuUUID cel_uuid
= {
688 .data
= UUID(0x0da9c0b5, 0xbf41, 0x4b78, 0x8f, 0x79,
689 0x96, 0xb1, 0x62, 0x3b, 0x3f, 0x17)
693 static CXLRetCode
cmd_logs_get_supported(const struct cxl_cmd
*cmd
,
696 uint8_t *payload_out
,
707 } QEMU_PACKED
*supported_logs
= (void *)payload_out
;
708 QEMU_BUILD_BUG_ON(sizeof(*supported_logs
) != 0x1c);
710 supported_logs
->entries
= 1;
711 supported_logs
->log_entries
[0].uuid
= cel_uuid
;
712 supported_logs
->log_entries
[0].size
= 4 * cci
->cel_size
;
714 *len_out
= sizeof(*supported_logs
);
715 return CXL_MBOX_SUCCESS
;
719 static CXLRetCode
cmd_logs_get_log(const struct cxl_cmd
*cmd
,
722 uint8_t *payload_out
,
730 } QEMU_PACKED
QEMU_ALIGNED(16) *get_log
;
732 get_log
= (void *)payload_in
;
736 * The device shall return Invalid Parameter if the Offset or Length
737 * fields attempt to access beyond the size of the log as reported by Get
740 * XXX: Spec is wrong, "Invalid Parameter" isn't a thing.
741 * XXX: Spec doesn't address incorrect UUID incorrectness.
743 * The CEL buffer is large enough to fit all commands in the emulation, so
744 * the only possible failure would be if the mailbox itself isn't big
747 if (get_log
->offset
+ get_log
->length
> cci
->payload_max
) {
748 return CXL_MBOX_INVALID_INPUT
;
751 if (!qemu_uuid_is_equal(&get_log
->uuid
, &cel_uuid
)) {
752 return CXL_MBOX_UNSUPPORTED
;
755 /* Store off everything to local variables so we can wipe out the payload */
756 *len_out
= get_log
->length
;
758 memmove(payload_out
, cci
->cel_log
+ get_log
->offset
, get_log
->length
);
760 return CXL_MBOX_SUCCESS
;
764 static CXLRetCode
cmd_identify_memory_device(const struct cxl_cmd
*cmd
,
767 uint8_t *payload_out
,
772 char fw_revision
[0x10];
773 uint64_t total_capacity
;
774 uint64_t volatile_capacity
;
775 uint64_t persistent_capacity
;
776 uint64_t partition_align
;
777 uint16_t info_event_log_size
;
778 uint16_t warning_event_log_size
;
779 uint16_t failure_event_log_size
;
780 uint16_t fatal_event_log_size
;
782 uint8_t poison_list_max_mer
[3];
783 uint16_t inject_poison_limit
;
785 uint8_t qos_telemetry_caps
;
787 QEMU_BUILD_BUG_ON(sizeof(*id
) != 0x43);
788 CXLType3Dev
*ct3d
= CXL_TYPE3(cci
->d
);
789 CXLType3Class
*cvc
= CXL_TYPE3_GET_CLASS(ct3d
);
790 CXLDeviceState
*cxl_dstate
= &ct3d
->cxl_dstate
;
792 if ((!QEMU_IS_ALIGNED(cxl_dstate
->vmem_size
, CXL_CAPACITY_MULTIPLIER
)) ||
793 (!QEMU_IS_ALIGNED(cxl_dstate
->pmem_size
, CXL_CAPACITY_MULTIPLIER
))) {
794 return CXL_MBOX_INTERNAL_ERROR
;
797 id
= (void *)payload_out
;
798 memset(id
, 0, sizeof(*id
));
800 snprintf(id
->fw_revision
, 0x10, "BWFW VERSION %02d", 0);
802 stq_le_p(&id
->total_capacity
,
803 cxl_dstate
->mem_size
/ CXL_CAPACITY_MULTIPLIER
);
804 stq_le_p(&id
->persistent_capacity
,
805 cxl_dstate
->pmem_size
/ CXL_CAPACITY_MULTIPLIER
);
806 stq_le_p(&id
->volatile_capacity
,
807 cxl_dstate
->vmem_size
/ CXL_CAPACITY_MULTIPLIER
);
808 stl_le_p(&id
->lsa_size
, cvc
->get_lsa_size(ct3d
));
809 /* 256 poison records */
810 st24_le_p(id
->poison_list_max_mer
, 256);
811 /* No limit - so limited by main poison record limit */
812 stw_le_p(&id
->inject_poison_limit
, 0);
814 *len_out
= sizeof(*id
);
815 return CXL_MBOX_SUCCESS
;
818 static CXLRetCode
cmd_ccls_get_partition_info(const struct cxl_cmd
*cmd
,
821 uint8_t *payload_out
,
825 CXLDeviceState
*cxl_dstate
= &CXL_TYPE3(cci
->d
)->cxl_dstate
;
827 uint64_t active_vmem
;
828 uint64_t active_pmem
;
831 } QEMU_PACKED
*part_info
= (void *)payload_out
;
832 QEMU_BUILD_BUG_ON(sizeof(*part_info
) != 0x20);
834 if ((!QEMU_IS_ALIGNED(cxl_dstate
->vmem_size
, CXL_CAPACITY_MULTIPLIER
)) ||
835 (!QEMU_IS_ALIGNED(cxl_dstate
->pmem_size
, CXL_CAPACITY_MULTIPLIER
))) {
836 return CXL_MBOX_INTERNAL_ERROR
;
839 stq_le_p(&part_info
->active_vmem
,
840 cxl_dstate
->vmem_size
/ CXL_CAPACITY_MULTIPLIER
);
842 * When both next_vmem and next_pmem are 0, there is no pending change to
845 stq_le_p(&part_info
->next_vmem
, 0);
846 stq_le_p(&part_info
->active_pmem
,
847 cxl_dstate
->pmem_size
/ CXL_CAPACITY_MULTIPLIER
);
848 stq_le_p(&part_info
->next_pmem
, 0);
850 *len_out
= sizeof(*part_info
);
851 return CXL_MBOX_SUCCESS
;
854 static CXLRetCode
cmd_ccls_get_lsa(const struct cxl_cmd
*cmd
,
857 uint8_t *payload_out
,
864 } QEMU_PACKED
*get_lsa
;
865 CXLType3Dev
*ct3d
= CXL_TYPE3(cci
->d
);
866 CXLType3Class
*cvc
= CXL_TYPE3_GET_CLASS(ct3d
);
867 uint32_t offset
, length
;
869 get_lsa
= (void *)payload_in
;
870 offset
= get_lsa
->offset
;
871 length
= get_lsa
->length
;
873 if (offset
+ length
> cvc
->get_lsa_size(ct3d
)) {
875 return CXL_MBOX_INVALID_INPUT
;
878 *len_out
= cvc
->get_lsa(ct3d
, payload_out
, length
, offset
);
879 return CXL_MBOX_SUCCESS
;
882 static CXLRetCode
cmd_ccls_set_lsa(const struct cxl_cmd
*cmd
,
885 uint8_t *payload_out
,
894 struct set_lsa_pl
*set_lsa_payload
= (void *)payload_in
;
895 CXLType3Dev
*ct3d
= CXL_TYPE3(cci
->d
);
896 CXLType3Class
*cvc
= CXL_TYPE3_GET_CLASS(ct3d
);
897 const size_t hdr_len
= offsetof(struct set_lsa_pl
, data
);
901 return CXL_MBOX_SUCCESS
;
904 if (set_lsa_payload
->offset
+ len_in
> cvc
->get_lsa_size(ct3d
) + hdr_len
) {
905 return CXL_MBOX_INVALID_INPUT
;
909 cvc
->set_lsa(ct3d
, set_lsa_payload
->data
, len_in
, set_lsa_payload
->offset
);
910 return CXL_MBOX_SUCCESS
;
913 /* Perform the actual device zeroing */
914 static void __do_sanitization(CXLType3Dev
*ct3d
)
918 if (ct3d
->hostvmem
) {
919 mr
= host_memory_backend_get_memory(ct3d
->hostvmem
);
921 void *hostmem
= memory_region_get_ram_ptr(mr
);
922 memset(hostmem
, 0, memory_region_size(mr
));
926 if (ct3d
->hostpmem
) {
927 mr
= host_memory_backend_get_memory(ct3d
->hostpmem
);
929 void *hostmem
= memory_region_get_ram_ptr(mr
);
930 memset(hostmem
, 0, memory_region_size(mr
));
934 mr
= host_memory_backend_get_memory(ct3d
->lsa
);
936 void *lsa
= memory_region_get_ram_ptr(mr
);
937 memset(lsa
, 0, memory_region_size(mr
));
943 * CXL 3.0 spec section 8.2.9.8.5.1 - Sanitize.
945 * Once the Sanitize command has started successfully, the device shall be
946 * placed in the media disabled state. If the command fails or is interrupted
947 * by a reset or power failure, it shall remain in the media disabled state
948 * until a successful Sanitize command has been completed. During this state:
950 * 1. Memory writes to the device will have no effect, and all memory reads
951 * will return random values (no user data returned, even for locations that
952 * the failed Sanitize operation didn’t sanitize yet).
954 * 2. Mailbox commands shall still be processed in the disabled state, except
955 * that commands that access Sanitized areas shall fail with the Media Disabled
958 static CXLRetCode
cmd_sanitize_overwrite(const struct cxl_cmd
*cmd
,
961 uint8_t *payload_out
,
965 CXLType3Dev
*ct3d
= CXL_TYPE3(cci
->d
);
966 uint64_t total_mem
; /* in Mb */
969 total_mem
= (ct3d
->cxl_dstate
.vmem_size
+ ct3d
->cxl_dstate
.pmem_size
) >> 20;
970 if (total_mem
<= 512) {
972 } else if (total_mem
<= 1024) {
974 } else if (total_mem
<= 2 * 1024) {
976 } else if (total_mem
<= 4 * 1024) {
978 } else if (total_mem
<= 8 * 1024) {
980 } else if (total_mem
<= 16 * 1024) {
982 } else if (total_mem
<= 32 * 1024) {
984 } else if (total_mem
<= 64 * 1024) {
986 } else if (total_mem
<= 128 * 1024) {
988 } else if (total_mem
<= 256 * 1024) {
990 } else if (total_mem
<= 512 * 1024) {
992 } else if (total_mem
<= 1024 * 1024) {
995 secs
= 240 * 60; /* max 4 hrs */
998 /* EBUSY other bg cmds as of now */
999 cci
->bg
.runtime
= secs
* 1000UL;
1002 cxl_dev_disable_media(&ct3d
->cxl_dstate
);
1005 /* sanitize when done */
1006 return CXL_MBOX_BG_STARTED
;
1008 __do_sanitization(ct3d
);
1009 cxl_dev_enable_media(&ct3d
->cxl_dstate
);
1011 return CXL_MBOX_SUCCESS
;
1015 static CXLRetCode
cmd_get_security_state(const struct cxl_cmd
*cmd
,
1016 uint8_t *payload_in
,
1018 uint8_t *payload_out
,
1022 uint32_t *state
= (uint32_t *)payload_out
;
1026 return CXL_MBOX_SUCCESS
;
1029 * This is very inefficient, but good enough for now!
1030 * Also the payload will always fit, so no need to handle the MORE flag and
1031 * make this stateful. We may want to allow longer poison lists to aid
1032 * testing that kernel functionality.
1034 static CXLRetCode
cmd_media_get_poison_list(const struct cxl_cmd
*cmd
,
1035 uint8_t *payload_in
,
1037 uint8_t *payload_out
,
1041 struct get_poison_list_pl
{
1046 struct get_poison_list_out_pl
{
1049 uint64_t overflow_timestamp
;
1051 uint8_t rsvd2
[0x14];
1056 } QEMU_PACKED records
[];
1059 struct get_poison_list_pl
*in
= (void *)payload_in
;
1060 struct get_poison_list_out_pl
*out
= (void *)payload_out
;
1061 CXLType3Dev
*ct3d
= CXL_TYPE3(cci
->d
);
1062 uint16_t record_count
= 0, i
= 0;
1063 uint64_t query_start
, query_length
;
1064 CXLPoisonList
*poison_list
= &ct3d
->poison_list
;
1066 uint16_t out_pl_len
;
1068 query_start
= ldq_le_p(&in
->pa
);
1069 /* 64 byte alignment required */
1070 if (query_start
& 0x3f) {
1071 return CXL_MBOX_INVALID_INPUT
;
1073 query_length
= ldq_le_p(&in
->length
) * CXL_CACHE_LINE_SIZE
;
1075 QLIST_FOREACH(ent
, poison_list
, node
) {
1076 /* Check for no overlap */
1077 if (ent
->start
>= query_start
+ query_length
||
1078 ent
->start
+ ent
->length
<= query_start
) {
1083 out_pl_len
= sizeof(*out
) + record_count
* sizeof(out
->records
[0]);
1084 assert(out_pl_len
<= CXL_MAILBOX_MAX_PAYLOAD_SIZE
);
1086 memset(out
, 0, out_pl_len
);
1087 QLIST_FOREACH(ent
, poison_list
, node
) {
1088 uint64_t start
, stop
;
1090 /* Check for no overlap */
1091 if (ent
->start
>= query_start
+ query_length
||
1092 ent
->start
+ ent
->length
<= query_start
) {
1096 /* Deal with overlap */
1097 start
= MAX(ROUND_DOWN(ent
->start
, 64ull), query_start
);
1098 stop
= MIN(ROUND_DOWN(ent
->start
, 64ull) + ent
->length
,
1099 query_start
+ query_length
);
1100 stq_le_p(&out
->records
[i
].addr
, start
| (ent
->type
& 0x7));
1101 stl_le_p(&out
->records
[i
].length
, (stop
- start
) / CXL_CACHE_LINE_SIZE
);
1104 if (ct3d
->poison_list_overflowed
) {
1105 out
->flags
= (1 << 1);
1106 stq_le_p(&out
->overflow_timestamp
, ct3d
->poison_list_overflow_ts
);
1108 stw_le_p(&out
->count
, record_count
);
1109 *len_out
= out_pl_len
;
1110 return CXL_MBOX_SUCCESS
;
1113 static CXLRetCode
cmd_media_inject_poison(const struct cxl_cmd
*cmd
,
1114 uint8_t *payload_in
,
1116 uint8_t *payload_out
,
1120 CXLType3Dev
*ct3d
= CXL_TYPE3(cci
->d
);
1121 CXLPoisonList
*poison_list
= &ct3d
->poison_list
;
1123 struct inject_poison_pl
{
1126 struct inject_poison_pl
*in
= (void *)payload_in
;
1127 uint64_t dpa
= ldq_le_p(&in
->dpa
);
1130 QLIST_FOREACH(ent
, poison_list
, node
) {
1131 if (dpa
>= ent
->start
&&
1132 dpa
+ CXL_CACHE_LINE_SIZE
<= ent
->start
+ ent
->length
) {
1133 return CXL_MBOX_SUCCESS
;
1137 if (ct3d
->poison_list_cnt
== CXL_POISON_LIST_LIMIT
) {
1138 return CXL_MBOX_INJECT_POISON_LIMIT
;
1140 p
= g_new0(CXLPoison
, 1);
1142 p
->length
= CXL_CACHE_LINE_SIZE
;
1144 p
->type
= CXL_POISON_TYPE_INJECTED
;
1147 * Possible todo: Merge with existing entry if next to it and if same type
1149 QLIST_INSERT_HEAD(poison_list
, p
, node
);
1150 ct3d
->poison_list_cnt
++;
1153 return CXL_MBOX_SUCCESS
;
1156 static CXLRetCode
cmd_media_clear_poison(const struct cxl_cmd
*cmd
,
1157 uint8_t *payload_in
,
1159 uint8_t *payload_out
,
1163 CXLType3Dev
*ct3d
= CXL_TYPE3(cci
->d
);
1164 CXLDeviceState
*cxl_dstate
= &ct3d
->cxl_dstate
;
1165 CXLPoisonList
*poison_list
= &ct3d
->poison_list
;
1166 CXLType3Class
*cvc
= CXL_TYPE3_GET_CLASS(ct3d
);
1167 struct clear_poison_pl
{
1174 struct clear_poison_pl
*in
= (void *)payload_in
;
1176 dpa
= ldq_le_p(&in
->dpa
);
1177 if (dpa
+ CXL_CACHE_LINE_SIZE
> cxl_dstate
->mem_size
) {
1178 return CXL_MBOX_INVALID_PA
;
1181 /* Clearing a region with no poison is not an error so always do so */
1182 if (cvc
->set_cacheline
) {
1183 if (!cvc
->set_cacheline(ct3d
, dpa
, in
->data
)) {
1184 return CXL_MBOX_INTERNAL_ERROR
;
1188 QLIST_FOREACH(ent
, poison_list
, node
) {
1190 * Test for contained in entry. Simpler than general case
1191 * as clearing 64 bytes and entries 64 byte aligned
1193 if ((dpa
>= ent
->start
) && (dpa
< ent
->start
+ ent
->length
)) {
1198 return CXL_MBOX_SUCCESS
;
1201 QLIST_REMOVE(ent
, node
);
1202 ct3d
->poison_list_cnt
--;
1204 if (dpa
> ent
->start
) {
1206 /* Cannot overflow as replacing existing entry */
1208 frag
= g_new0(CXLPoison
, 1);
1210 frag
->start
= ent
->start
;
1211 frag
->length
= dpa
- ent
->start
;
1212 frag
->type
= ent
->type
;
1214 QLIST_INSERT_HEAD(poison_list
, frag
, node
);
1215 ct3d
->poison_list_cnt
++;
1218 if (dpa
+ CXL_CACHE_LINE_SIZE
< ent
->start
+ ent
->length
) {
1221 if (ct3d
->poison_list_cnt
== CXL_POISON_LIST_LIMIT
) {
1222 cxl_set_poison_list_overflowed(ct3d
);
1224 frag
= g_new0(CXLPoison
, 1);
1226 frag
->start
= dpa
+ CXL_CACHE_LINE_SIZE
;
1227 frag
->length
= ent
->start
+ ent
->length
- frag
->start
;
1228 frag
->type
= ent
->type
;
1229 QLIST_INSERT_HEAD(poison_list
, frag
, node
);
1230 ct3d
->poison_list_cnt
++;
1233 /* Any fragments have been added, free original entry */
1237 return CXL_MBOX_SUCCESS
;
1240 #define IMMEDIATE_CONFIG_CHANGE (1 << 1)
1241 #define IMMEDIATE_DATA_CHANGE (1 << 2)
1242 #define IMMEDIATE_POLICY_CHANGE (1 << 3)
1243 #define IMMEDIATE_LOG_CHANGE (1 << 4)
1244 #define SECURITY_STATE_CHANGE (1 << 5)
1245 #define BACKGROUND_OPERATION (1 << 6)
1247 static const struct cxl_cmd cxl_cmd_set
[256][256] = {
1248 [EVENTS
][GET_RECORDS
] = { "EVENTS_GET_RECORDS",
1249 cmd_events_get_records
, 1, 0 },
1250 [EVENTS
][CLEAR_RECORDS
] = { "EVENTS_CLEAR_RECORDS",
1251 cmd_events_clear_records
, ~0, IMMEDIATE_LOG_CHANGE
},
1252 [EVENTS
][GET_INTERRUPT_POLICY
] = { "EVENTS_GET_INTERRUPT_POLICY",
1253 cmd_events_get_interrupt_policy
, 0, 0 },
1254 [EVENTS
][SET_INTERRUPT_POLICY
] = { "EVENTS_SET_INTERRUPT_POLICY",
1255 cmd_events_set_interrupt_policy
,
1256 ~0, IMMEDIATE_CONFIG_CHANGE
},
1257 [FIRMWARE_UPDATE
][GET_INFO
] = { "FIRMWARE_UPDATE_GET_INFO",
1258 cmd_firmware_update_get_info
, 0, 0 },
1259 [TIMESTAMP
][GET
] = { "TIMESTAMP_GET", cmd_timestamp_get
, 0, 0 },
1260 [TIMESTAMP
][SET
] = { "TIMESTAMP_SET", cmd_timestamp_set
,
1261 8, IMMEDIATE_POLICY_CHANGE
},
1262 [LOGS
][GET_SUPPORTED
] = { "LOGS_GET_SUPPORTED", cmd_logs_get_supported
,
1264 [LOGS
][GET_LOG
] = { "LOGS_GET_LOG", cmd_logs_get_log
, 0x18, 0 },
1265 [IDENTIFY
][MEMORY_DEVICE
] = { "IDENTIFY_MEMORY_DEVICE",
1266 cmd_identify_memory_device
, 0, 0 },
1267 [CCLS
][GET_PARTITION_INFO
] = { "CCLS_GET_PARTITION_INFO",
1268 cmd_ccls_get_partition_info
, 0, 0 },
1269 [CCLS
][GET_LSA
] = { "CCLS_GET_LSA", cmd_ccls_get_lsa
, 8, 0 },
1270 [CCLS
][SET_LSA
] = { "CCLS_SET_LSA", cmd_ccls_set_lsa
,
1271 ~0, IMMEDIATE_CONFIG_CHANGE
| IMMEDIATE_DATA_CHANGE
},
1272 [SANITIZE
][OVERWRITE
] = { "SANITIZE_OVERWRITE", cmd_sanitize_overwrite
, 0,
1273 IMMEDIATE_DATA_CHANGE
| SECURITY_STATE_CHANGE
| BACKGROUND_OPERATION
},
1274 [PERSISTENT_MEM
][GET_SECURITY_STATE
] = { "GET_SECURITY_STATE",
1275 cmd_get_security_state
, 0, 0 },
1276 [MEDIA_AND_POISON
][GET_POISON_LIST
] = { "MEDIA_AND_POISON_GET_POISON_LIST",
1277 cmd_media_get_poison_list
, 16, 0 },
1278 [MEDIA_AND_POISON
][INJECT_POISON
] = { "MEDIA_AND_POISON_INJECT_POISON",
1279 cmd_media_inject_poison
, 8, 0 },
1280 [MEDIA_AND_POISON
][CLEAR_POISON
] = { "MEDIA_AND_POISON_CLEAR_POISON",
1281 cmd_media_clear_poison
, 72, 0 },
1284 static const struct cxl_cmd cxl_cmd_set_sw
[256][256] = {
1285 [INFOSTAT
][IS_IDENTIFY
] = { "IDENTIFY", cmd_infostat_identify
, 0, 0 },
1286 [INFOSTAT
][BACKGROUND_OPERATION_STATUS
] = { "BACKGROUND_OPERATION_STATUS",
1287 cmd_infostat_bg_op_sts
, 0, 0 },
1288 [TIMESTAMP
][GET
] = { "TIMESTAMP_GET", cmd_timestamp_get
, 0, 0 },
1289 [TIMESTAMP
][SET
] = { "TIMESTAMP_SET", cmd_timestamp_set
, 0,
1290 IMMEDIATE_POLICY_CHANGE
},
1291 [LOGS
][GET_SUPPORTED
] = { "LOGS_GET_SUPPORTED", cmd_logs_get_supported
, 0,
1293 [LOGS
][GET_LOG
] = { "LOGS_GET_LOG", cmd_logs_get_log
, 0x18, 0 },
1294 [PHYSICAL_SWITCH
][IDENTIFY_SWITCH_DEVICE
] = { "IDENTIFY_SWITCH_DEVICE",
1295 cmd_identify_switch_device
, 0, 0 },
1296 [PHYSICAL_SWITCH
][GET_PHYSICAL_PORT_STATE
] = { "SWITCH_PHYSICAL_PORT_STATS",
1297 cmd_get_physical_port_state
, ~0, 0 },
1298 [TUNNEL
][MANAGEMENT_COMMAND
] = { "TUNNEL_MANAGEMENT_COMMAND",
1299 cmd_tunnel_management_cmd
, ~0, 0 },
/*
 * While the command is executing in the background, the device should
 * update the percentage complete in the Background Command Status Register
 * at least once per second.
 */

/* Background-command progress update interval, in milliseconds. */
#define CXL_MBOX_BG_UPDATE_FREQ 1000UL
1310 int cxl_process_cci_message(CXLCCI
*cci
, uint8_t set
, uint8_t cmd
,
1311 size_t len_in
, uint8_t *pl_in
, size_t *len_out
,
1312 uint8_t *pl_out
, bool *bg_started
)
1315 const struct cxl_cmd
*cxl_cmd
;
1319 cxl_cmd
= &cci
->cxl_cmd_set
[set
][cmd
];
1320 h
= cxl_cmd
->handler
;
1322 qemu_log_mask(LOG_UNIMP
, "Command %04xh not implemented\n",
1324 return CXL_MBOX_UNSUPPORTED
;
1327 if (len_in
!= cxl_cmd
->in
&& cxl_cmd
->in
!= ~0) {
1328 return CXL_MBOX_INVALID_PAYLOAD_LENGTH
;
1331 /* Only one bg command at a time */
1332 if ((cxl_cmd
->effect
& BACKGROUND_OPERATION
) &&
1333 cci
->bg
.runtime
> 0) {
1334 return CXL_MBOX_BUSY
;
1337 /* forbid any selected commands while overwriting */
1338 if (sanitize_running(cci
)) {
1339 if (h
== cmd_events_get_records
||
1340 h
== cmd_ccls_get_partition_info
||
1341 h
== cmd_ccls_set_lsa
||
1342 h
== cmd_ccls_get_lsa
||
1343 h
== cmd_logs_get_log
||
1344 h
== cmd_media_get_poison_list
||
1345 h
== cmd_media_inject_poison
||
1346 h
== cmd_media_clear_poison
||
1347 h
== cmd_sanitize_overwrite
) {
1348 return CXL_MBOX_MEDIA_DISABLED
;
1352 ret
= (*h
)(cxl_cmd
, pl_in
, len_in
, pl_out
, len_out
, cci
);
1353 if ((cxl_cmd
->effect
& BACKGROUND_OPERATION
) &&
1354 ret
== CXL_MBOX_BG_STARTED
) {
1357 *bg_started
= false;
1360 /* Set bg and the return code */
1364 cci
->bg
.opcode
= (set
<< 8) | cmd
;
1366 cci
->bg
.complete_pct
= 0;
1367 cci
->bg
.ret_code
= 0;
1369 now
= qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL
);
1370 cci
->bg
.starttime
= now
;
1371 timer_mod(cci
->bg
.timer
, now
+ CXL_MBOX_BG_UPDATE_FREQ
);
1377 static void bg_timercb(void *opaque
)
1379 CXLCCI
*cci
= opaque
;
1380 uint64_t now
= qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL
);
1381 uint64_t total_time
= cci
->bg
.starttime
+ cci
->bg
.runtime
;
1383 assert(cci
->bg
.runtime
> 0);
1385 if (now
>= total_time
) { /* we are done */
1386 uint16_t ret
= CXL_MBOX_SUCCESS
;
1388 cci
->bg
.complete_pct
= 100;
1389 cci
->bg
.ret_code
= ret
;
1390 if (ret
== CXL_MBOX_SUCCESS
) {
1391 switch (cci
->bg
.opcode
) {
1392 case 0x4400: /* sanitize */
1394 CXLType3Dev
*ct3d
= CXL_TYPE3(cci
->d
);
1396 __do_sanitization(ct3d
);
1397 cxl_dev_enable_media(&ct3d
->cxl_dstate
);
1400 case 0x4304: /* TODO: scan media */
1403 __builtin_unreachable();
1408 qemu_log("Background command %04xh finished: %s\n",
1410 ret
== CXL_MBOX_SUCCESS
? "success" : "aborted");
1413 cci
->bg
.complete_pct
= 100 * now
/ total_time
;
1414 timer_mod(cci
->bg
.timer
, now
+ CXL_MBOX_BG_UPDATE_FREQ
);
1417 if (cci
->bg
.complete_pct
== 100) {
1418 /* TODO: generalize to switch CCI */
1419 CXLType3Dev
*ct3d
= CXL_TYPE3(cci
->d
);
1420 CXLDeviceState
*cxl_dstate
= &ct3d
->cxl_dstate
;
1421 PCIDevice
*pdev
= PCI_DEVICE(cci
->d
);
1423 cci
->bg
.starttime
= 0;
1424 /* registers are updated, allow new bg-capable cmds */
1425 cci
->bg
.runtime
= 0;
1427 if (msix_enabled(pdev
)) {
1428 msix_notify(pdev
, cxl_dstate
->mbox_msi_n
);
1429 } else if (msi_enabled(pdev
)) {
1430 msi_notify(pdev
, cxl_dstate
->mbox_msi_n
);
1435 void cxl_init_cci(CXLCCI
*cci
, size_t payload_max
)
1437 cci
->payload_max
= payload_max
;
1438 for (int set
= 0; set
< 256; set
++) {
1439 for (int cmd
= 0; cmd
< 256; cmd
++) {
1440 if (cci
->cxl_cmd_set
[set
][cmd
].handler
) {
1441 const struct cxl_cmd
*c
= &cci
->cxl_cmd_set
[set
][cmd
];
1442 struct cel_log
*log
=
1443 &cci
->cel_log
[cci
->cel_size
];
1445 log
->opcode
= (set
<< 8) | cmd
;
1446 log
->effect
= c
->effect
;
1451 cci
->bg
.complete_pct
= 0;
1452 cci
->bg
.starttime
= 0;
1453 cci
->bg
.runtime
= 0;
1454 cci
->bg
.timer
= timer_new_ms(QEMU_CLOCK_VIRTUAL
,
1458 void cxl_initialize_mailbox_swcci(CXLCCI
*cci
, DeviceState
*intf
,
1459 DeviceState
*d
, size_t payload_max
)
1461 cci
->cxl_cmd_set
= cxl_cmd_set_sw
;
1464 cxl_init_cci(cci
, payload_max
);
1467 void cxl_initialize_mailbox_t3(CXLCCI
*cci
, DeviceState
*d
, size_t payload_max
)
1469 cci
->cxl_cmd_set
= cxl_cmd_set
;
1472 /* No separation for PCI MB as protocol handled in PCI device */
1474 cxl_init_cci(cci
, payload_max
);
1477 static const struct cxl_cmd cxl_cmd_set_t3_ld
[256][256] = {
1478 [INFOSTAT
][IS_IDENTIFY
] = { "IDENTIFY", cmd_infostat_identify
, 0, 0 },
1479 [LOGS
][GET_SUPPORTED
] = { "LOGS_GET_SUPPORTED", cmd_logs_get_supported
, 0,
1481 [LOGS
][GET_LOG
] = { "LOGS_GET_LOG", cmd_logs_get_log
, 0x18, 0 },
1484 void cxl_initialize_t3_ld_cci(CXLCCI
*cci
, DeviceState
*d
, DeviceState
*intf
,
1487 cci
->cxl_cmd_set
= cxl_cmd_set_t3_ld
;
1490 cxl_init_cci(cci
, payload_max
);
1493 static const struct cxl_cmd cxl_cmd_set_t3_fm_owned_ld_mctp
[256][256] = {
1494 [INFOSTAT
][IS_IDENTIFY
] = { "IDENTIFY", cmd_infostat_identify
, 0, 0},
1495 [LOGS
][GET_SUPPORTED
] = { "LOGS_GET_SUPPORTED", cmd_logs_get_supported
, 0,
1497 [LOGS
][GET_LOG
] = { "LOGS_GET_LOG", cmd_logs_get_log
, 0x18, 0 },
1498 [TIMESTAMP
][GET
] = { "TIMESTAMP_GET", cmd_timestamp_get
, 0, 0 },
1499 [TUNNEL
][MANAGEMENT_COMMAND
] = { "TUNNEL_MANAGEMENT_COMMAND",
1500 cmd_tunnel_management_cmd
, ~0, 0 },
1503 void cxl_initialize_t3_fm_owned_ld_mctpcci(CXLCCI
*cci
, DeviceState
*d
,
1507 cci
->cxl_cmd_set
= cxl_cmd_set_t3_fm_owned_ld_mctp
;
1510 cxl_init_cci(cci
, payload_max
);