/*
 * CXL Utility library for mailbox interface
 *
 * Copyright(C) 2020 Intel Corporation.
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See the
 * COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "hw/cxl/cxl.h"
#include "hw/cxl/cxl_events.h"
#include "hw/pci/pci.h"
#include "hw/pci-bridge/cxl_upstream_port.h"
#include "qemu/cutils.h"
#include "qemu/log.h"
#include "qemu/units.h"
#include "qemu/uuid.h"
#include "sysemu/hostmem.h"

#define CXL_CAPACITY_MULTIPLIER (256 * MiB)

/*
 * How to add a new command, example. The command set FOO, with cmd BAR.
 *  1. Add the command set and cmd to the enum.
 *     FOO = 0x7f,
 *        #define BAR 0
 *  2. Implement the handler, using the signature shared by all handlers in
 *     this file:
 *     static CXLRetCode cmd_foo_bar(const struct cxl_cmd *cmd,
 *                                   uint8_t *payload_in, size_t len_in,
 *                                   uint8_t *payload_out, size_t *len_out,
 *                                   CXLCCI *cci)
 *  3. Add the command to the appropriate cxl_cmd_set[][] table:
 *     [FOO][BAR] = { "FOO_BAR", cmd_foo_bar, x, y },
 *     where x is the expected input payload length (~0 for variable length)
 *     and y is the set of command effects.
 *
 * Writing the handler:
 * The handler is given the &struct cxl_cmd, the input payload and its
 * length, an output buffer, and the &CXLCCI the command arrived on. It is
 * responsible for consuming the payload from payload_in and operating upon
 * it as necessary. It must then fill the output data into payload_out,
 * set *len_out, and return a valid return code.
 *
 * XXX: The handler need not worry about endianness. The payload is read out of
 * a register interface that already deals with it.
 */
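
/*
 * A minimal sketch of such a handler (illustrative only: the FOO/BAR
 * command and its echoed byte are hypothetical, not part of this file):
 *
 *    static CXLRetCode cmd_foo_bar(const struct cxl_cmd *cmd,
 *                                  uint8_t *payload_in, size_t len_in,
 *                                  uint8_t *payload_out, size_t *len_out,
 *                                  CXLCCI *cci)
 *    {
 *        payload_out[0] = len_in ? payload_in[0] : 0;
 *        *len_out = 1;
 *        return CXL_MBOX_SUCCESS;
 *    }
 */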

enum {
    INFOSTAT    = 0x00,
        #define IS_IDENTIFY   0x1
        #define BACKGROUND_OPERATION_STATUS    0x2
    EVENTS      = 0x01,
        #define GET_RECORDS   0x0
        #define CLEAR_RECORDS   0x1
        #define GET_INTERRUPT_POLICY   0x2
        #define SET_INTERRUPT_POLICY   0x3
    FIRMWARE_UPDATE = 0x02,
        #define GET_INFO      0x0
    TIMESTAMP   = 0x03,
        #define GET           0x0
        #define SET           0x1
    LOGS        = 0x04,
        #define GET_SUPPORTED 0x0
        #define GET_LOG       0x1
    IDENTIFY    = 0x40,
        #define MEMORY_DEVICE 0x0
    CCLS        = 0x41,
        #define GET_PARTITION_INFO     0x0
        #define GET_LSA       0x2
        #define SET_LSA       0x3
    SANITIZE    = 0x44,
        #define OVERWRITE     0x0
        #define SECURE_ERASE  0x1
    PERSISTENT_MEM = 0x45,
        #define GET_SECURITY_STATE     0x0
    MEDIA_AND_POISON = 0x43,
        #define GET_POISON_LIST        0x0
        #define INJECT_POISON          0x1
        #define CLEAR_POISON           0x2
    PHYSICAL_SWITCH = 0x51,
        #define IDENTIFY_SWITCH_DEVICE      0x0
        #define GET_PHYSICAL_PORT_STATE     0x1
    TUNNEL = 0x53,
        #define MANAGEMENT_COMMAND     0x0
};

/* CCI Message Format CXL r3.0 Figure 7-19 */
typedef struct CXLCCIMessage {
    uint8_t category;
#define CXL_CCI_CAT_REQ 0
#define CXL_CCI_CAT_RSP 1
    uint8_t tag;
    uint8_t resv1;
    uint8_t command;
    uint8_t command_set;
    uint8_t pl_length[3];
    uint16_t rc;
    uint16_t vendor_specific;
    uint8_t payload[];
} QEMU_PACKED CXLCCIMessage;
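
/*
 * Note: pl_length is a 24-bit little-endian field, so it cannot be loaded
 * as a plain integer. A sketch of the decode used later in this file:
 *
 *    size_t pl = msg->pl_length[2] << 16 |
 *                msg->pl_length[1] << 8 | msg->pl_length[0];
 *
 * The store direction uses st24_le_p().
 */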

/* This command is only defined to an MLD FM Owned LD or an MHD */
static CXLRetCode cmd_tunnel_management_cmd(const struct cxl_cmd *cmd,
                                            uint8_t *payload_in,
                                            size_t len_in,
                                            uint8_t *payload_out,
                                            size_t *len_out,
                                            CXLCCI *cci)
{
    PCIDevice *tunnel_target;
    CXLCCI *target_cci;
    struct {
        uint8_t port_or_ld_id;
        uint8_t target_type;
        uint16_t size;
        CXLCCIMessage ccimessage;
    } QEMU_PACKED *in;
    struct {
        uint16_t resp_len;
        uint8_t resv[2];
        CXLCCIMessage ccimessage;
    } QEMU_PACKED *out;
    size_t pl_length, length_out;
    bool bg_started;
    int rc;

    if (cmd->in < sizeof(*in)) {
        return CXL_MBOX_INVALID_INPUT;
    }
    in = (void *)payload_in;
    out = (void *)payload_out;

    /* Enough room for minimum sized message - no payload */
    if (in->size < sizeof(in->ccimessage)) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }
    /* Length of input payload should be in->size + a wrapping tunnel header */
    if (in->size != len_in - offsetof(typeof(*out), ccimessage)) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }
    if (in->ccimessage.category != CXL_CCI_CAT_REQ) {
        return CXL_MBOX_INVALID_INPUT;
    }

    if (in->target_type != 0) {
        qemu_log_mask(LOG_UNIMP,
                      "Tunneled Command sent to non existent FM-LD");
        return CXL_MBOX_INVALID_INPUT;
    }

    /*
     * The target of a tunnel unfortunately depends on the type of CCI
     * reading the message.
     * If in a switch, it is the port number.
     * If in an MLD, it is the LD number.
     * If in an MHD, the target type indicates where we are going.
     */
    if (object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_TYPE3)) {
        CXLType3Dev *ct3d = CXL_TYPE3(cci->d);

        if (in->port_or_ld_id != 0) {
            /* Only pretending to have one for now! */
            return CXL_MBOX_INVALID_INPUT;
        }
        target_cci = &ct3d->ld0_cci;
    } else if (object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_USP)) {
        CXLUpstreamPort *usp = CXL_USP(cci->d);

        tunnel_target = pcie_find_port_by_pn(&PCI_BRIDGE(usp)->sec_bus,
                                             in->port_or_ld_id);
        if (!tunnel_target) {
            return CXL_MBOX_INVALID_INPUT;
        }
        tunnel_target =
            pci_bridge_get_sec_bus(PCI_BRIDGE(tunnel_target))->devices[0];
        if (!tunnel_target) {
            return CXL_MBOX_INVALID_INPUT;
        }
        if (object_dynamic_cast(OBJECT(tunnel_target), TYPE_CXL_TYPE3)) {
            CXLType3Dev *ct3d = CXL_TYPE3(tunnel_target);

            /* Tunneled VDMs always land on FM Owned LD */
            target_cci = &ct3d->vdm_fm_owned_ld_mctp_cci;
        } else {
            return CXL_MBOX_INVALID_INPUT;
        }
    } else {
        return CXL_MBOX_INVALID_INPUT;
    }

    pl_length = in->ccimessage.pl_length[2] << 16 |
        in->ccimessage.pl_length[1] << 8 | in->ccimessage.pl_length[0];
    rc = cxl_process_cci_message(target_cci,
                                 in->ccimessage.command_set,
                                 in->ccimessage.command,
                                 pl_length, in->ccimessage.payload,
                                 &length_out, out->ccimessage.payload,
                                 &bg_started);
    /* Payload is in place; the rest of the CCI header still needs filling */
    out->resp_len = length_out + sizeof(CXLCCIMessage);
    st24_le_p(out->ccimessage.pl_length, length_out);
    out->ccimessage.rc = rc;
    out->ccimessage.category = CXL_CCI_CAT_RSP;
    out->ccimessage.command = in->ccimessage.command;
    out->ccimessage.command_set = in->ccimessage.command_set;
    out->ccimessage.tag = in->ccimessage.tag;
    *len_out = length_out + sizeof(*out);

    return CXL_MBOX_SUCCESS;
}
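
/*
 * A worked example of the framing checked above (a sketch; the values are
 * illustrative): tunneling a 2-byte request to LD 0 of an MLD gives
 *
 *    len_in   = 4 (tunnel header) + 12 (sizeof(CXLCCIMessage)) + 2
 *    in->size = 12 + 2 = len_in - offsetof(..., ccimessage)
 *
 * Any mismatch between in->size and the bytes actually received is
 * rejected with CXL_MBOX_INVALID_PAYLOAD_LENGTH.
 */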

static CXLRetCode cmd_events_get_records(const struct cxl_cmd *cmd,
                                         uint8_t *payload_in, size_t len_in,
                                         uint8_t *payload_out, size_t *len_out,
                                         CXLCCI *cci)
{
    CXLDeviceState *cxlds = &CXL_TYPE3(cci->d)->cxl_dstate;
    CXLGetEventPayload *pl;
    uint8_t log_type;
    int max_recs;

    if (cmd->in < sizeof(log_type)) {
        return CXL_MBOX_INVALID_INPUT;
    }

    log_type = payload_in[0];

    pl = (CXLGetEventPayload *)payload_out;
    memset(pl, 0, sizeof(*pl));

    max_recs = (cxlds->payload_size - CXL_EVENT_PAYLOAD_HDR_SIZE) /
                CXL_EVENT_RECORD_SIZE;
    if (max_recs > 0xFFFF) {
        max_recs = 0xFFFF;
    }

    return cxl_event_get_records(cxlds, pl, log_type, max_recs, len_out);
}

static CXLRetCode cmd_events_clear_records(const struct cxl_cmd *cmd,
                                           uint8_t *payload_in,
                                           size_t len_in,
                                           uint8_t *payload_out,
                                           size_t *len_out,
                                           CXLCCI *cci)
{
    CXLDeviceState *cxlds = &CXL_TYPE3(cci->d)->cxl_dstate;
    CXLClearEventPayload *pl;

    pl = (CXLClearEventPayload *)payload_in;
    *len_out = 0;
    return cxl_event_clear_records(cxlds, pl);
}

static CXLRetCode cmd_events_get_interrupt_policy(const struct cxl_cmd *cmd,
                                                  uint8_t *payload_in,
                                                  size_t len_in,
                                                  uint8_t *payload_out,
                                                  size_t *len_out,
                                                  CXLCCI *cci)
{
    CXLDeviceState *cxlds = &CXL_TYPE3(cci->d)->cxl_dstate;
    CXLEventInterruptPolicy *policy;
    CXLEventLog *log;

    policy = (CXLEventInterruptPolicy *)payload_out;
    memset(policy, 0, sizeof(*policy));

    log = &cxlds->event_logs[CXL_EVENT_TYPE_INFO];
    if (log->irq_enabled) {
        policy->info_settings = CXL_EVENT_INT_SETTING(log->irq_vec);
    }

    log = &cxlds->event_logs[CXL_EVENT_TYPE_WARN];
    if (log->irq_enabled) {
        policy->warn_settings = CXL_EVENT_INT_SETTING(log->irq_vec);
    }

    log = &cxlds->event_logs[CXL_EVENT_TYPE_FAIL];
    if (log->irq_enabled) {
        policy->failure_settings = CXL_EVENT_INT_SETTING(log->irq_vec);
    }

    log = &cxlds->event_logs[CXL_EVENT_TYPE_FATAL];
    if (log->irq_enabled) {
        policy->fatal_settings = CXL_EVENT_INT_SETTING(log->irq_vec);
    }

    log = &cxlds->event_logs[CXL_EVENT_TYPE_DYNAMIC_CAP];
    if (log->irq_enabled) {
        /* Dynamic Capacity borrows the same vector as info */
        policy->dyn_cap_settings = CXL_INT_MSI_MSIX;
    }

    *len_out = sizeof(*policy);
    return CXL_MBOX_SUCCESS;
}

static CXLRetCode cmd_events_set_interrupt_policy(const struct cxl_cmd *cmd,
                                                  uint8_t *payload_in,
                                                  size_t len_in,
                                                  uint8_t *payload_out,
                                                  size_t *len_out,
                                                  CXLCCI *cci)
{
    CXLDeviceState *cxlds = &CXL_TYPE3(cci->d)->cxl_dstate;
    CXLEventInterruptPolicy *policy;
    CXLEventLog *log;

    if (len_in < CXL_EVENT_INT_SETTING_MIN_LEN) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }

    policy = (CXLEventInterruptPolicy *)payload_in;

    log = &cxlds->event_logs[CXL_EVENT_TYPE_INFO];
    log->irq_enabled = (policy->info_settings & CXL_EVENT_INT_MODE_MASK) ==
                        CXL_INT_MSI_MSIX;

    log = &cxlds->event_logs[CXL_EVENT_TYPE_WARN];
    log->irq_enabled = (policy->warn_settings & CXL_EVENT_INT_MODE_MASK) ==
                        CXL_INT_MSI_MSIX;

    log = &cxlds->event_logs[CXL_EVENT_TYPE_FAIL];
    log->irq_enabled = (policy->failure_settings & CXL_EVENT_INT_MODE_MASK) ==
                        CXL_INT_MSI_MSIX;

    log = &cxlds->event_logs[CXL_EVENT_TYPE_FATAL];
    log->irq_enabled = (policy->fatal_settings & CXL_EVENT_INT_MODE_MASK) ==
                        CXL_INT_MSI_MSIX;

    /* DCD is optional */
    if (len_in < sizeof(*policy)) {
        return CXL_MBOX_SUCCESS;
    }

    log = &cxlds->event_logs[CXL_EVENT_TYPE_DYNAMIC_CAP];
    log->irq_enabled = (policy->dyn_cap_settings & CXL_EVENT_INT_MODE_MASK) ==
                        CXL_INT_MSI_MSIX;

    *len_out = 0;
    return CXL_MBOX_SUCCESS;
}

/* CXL r3.0 section 8.2.9.1.1: Identify (Opcode 0001h) */
static CXLRetCode cmd_infostat_identify(const struct cxl_cmd *cmd,
                                        uint8_t *payload_in,
                                        size_t len_in,
                                        uint8_t *payload_out,
                                        size_t *len_out,
                                        CXLCCI *cci)
{
    PCIDeviceClass *class = PCI_DEVICE_GET_CLASS(cci->d);
    struct {
        uint16_t pcie_vid;
        uint16_t pcie_did;
        uint16_t pcie_subsys_vid;
        uint16_t pcie_subsys_id;
        uint64_t sn;
        uint8_t max_message_size;
        uint8_t component_type;
    } QEMU_PACKED *is_identify;
    QEMU_BUILD_BUG_ON(sizeof(*is_identify) != 18);

    is_identify = (void *)payload_out;
    memset(is_identify, 0, sizeof(*is_identify));
    is_identify->pcie_vid = class->vendor_id;
    is_identify->pcie_did = class->device_id;
    if (object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_USP)) {
        is_identify->sn = CXL_USP(cci->d)->sn;
        /* Subsystem info not defined for a USP */
        is_identify->pcie_subsys_vid = 0;
        is_identify->pcie_subsys_id = 0;
        is_identify->component_type = 0x0; /* Switch */
    } else if (object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_TYPE3)) {
        PCIDevice *pci_dev = PCI_DEVICE(cci->d);

        is_identify->sn = CXL_TYPE3(cci->d)->sn;
        /*
         * We can't always use class->subsystem_vendor_id as
         * it is not set if the defaults are used.
         */
        is_identify->pcie_subsys_vid =
            pci_get_word(pci_dev->config + PCI_SUBSYSTEM_VENDOR_ID);
        is_identify->pcie_subsys_id =
            pci_get_word(pci_dev->config + PCI_SUBSYSTEM_ID);
        is_identify->component_type = 0x3; /* Type 3 */
    }

    /* TODO: Allow this to vary across different CCIs */
    is_identify->max_message_size = 9; /* 512 bytes - MCTP_CXL_MAILBOX_BYTES */
    *len_out = sizeof(*is_identify);
    return CXL_MBOX_SUCCESS;
}

static void cxl_set_dsp_active_bm(PCIBus *b, PCIDevice *d,
                                  void *private)
{
    uint8_t *bm = private;
    if (object_dynamic_cast(OBJECT(d), TYPE_CXL_DSP)) {
        uint8_t port = PCIE_PORT(d)->port;
        bm[port / 8] |= 1 << (port % 8);
    }
}

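/*
 * Bitmap encoding example for the helper above: a DSP with port number 10
 * sets bit 2 of byte 1 (10 / 8 == 1, 10 % 8 == 2) in the active-port
 * bitmask.
 */
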
/* CXL r3.0 section 7.6.7.1.1: Identify Switch Device (Opcode 5100h) */
static CXLRetCode cmd_identify_switch_device(const struct cxl_cmd *cmd,
                                             uint8_t *payload_in,
                                             size_t len_in,
                                             uint8_t *payload_out,
                                             size_t *len_out,
                                             CXLCCI *cci)
{
    PCIEPort *usp = PCIE_PORT(cci->d);
    PCIBus *bus = &PCI_BRIDGE(cci->d)->sec_bus;
    int num_phys_ports = pcie_count_ds_ports(bus);

    struct cxl_fmapi_ident_switch_dev_resp_pl {
        uint8_t ingress_port_id;
        uint8_t rsvd;
        uint8_t num_physical_ports;
        uint8_t num_vcss;
        uint8_t active_port_bitmask[0x20];
        uint8_t active_vcs_bitmask[0x20];
        uint16_t total_vppbs;
        uint16_t bound_vppbs;
        uint8_t num_hdm_decoders_per_usp;
    } QEMU_PACKED *out;
    QEMU_BUILD_BUG_ON(sizeof(*out) != 0x49);

    out = (struct cxl_fmapi_ident_switch_dev_resp_pl *)payload_out;
    *out = (struct cxl_fmapi_ident_switch_dev_resp_pl) {
        .num_physical_ports = num_phys_ports + 1, /* 1 USP */
        .num_vcss = 1, /* Multiple VCSs not yet supported - tricky */
        .active_vcs_bitmask[0] = 0x1,
        .total_vppbs = num_phys_ports + 1,
        .bound_vppbs = num_phys_ports + 1,
        .num_hdm_decoders_per_usp = 4,
    };

    /* Depends on the CCI type */
    if (object_dynamic_cast(OBJECT(cci->intf), TYPE_PCIE_PORT)) {
        out->ingress_port_id = PCIE_PORT(cci->intf)->port;
    } else {
        /* MCTP? */
        out->ingress_port_id = 0;
    }

    pci_for_each_device_under_bus(bus, cxl_set_dsp_active_bm,
                                  out->active_port_bitmask);
    out->active_port_bitmask[usp->port / 8] |= (1 << usp->port % 8);

    *len_out = sizeof(*out);

    return CXL_MBOX_SUCCESS;
}

/* CXL r3.0 Section 7.6.7.1.2: Get Physical Port State (Opcode 5101h) */
static CXLRetCode cmd_get_physical_port_state(const struct cxl_cmd *cmd,
                                              uint8_t *payload_in,
                                              size_t len_in,
                                              uint8_t *payload_out,
                                              size_t *len_out,
                                              CXLCCI *cci)
{
    /* CXL r3.0 Table 7-18: Get Physical Port State Request Payload */
    struct cxl_fmapi_get_phys_port_state_req_pl {
        uint8_t num_ports;
        uint8_t ports[];
    } QEMU_PACKED *in;

    /*
     * CXL r3.0 Table 7-20: Get Physical Port State Port Information Block
     * Format
     */
    struct cxl_fmapi_port_state_info_block {
        uint8_t port_id;
        uint8_t config_state;
        uint8_t connected_device_cxl_version;
        uint8_t rsv1;
        uint8_t connected_device_type;
        uint8_t port_cxl_version_bitmask;
        uint8_t max_link_width;
        uint8_t negotiated_link_width;
        uint8_t supported_link_speeds_vector;
        uint8_t max_link_speed;
        uint8_t current_link_speed;
        uint8_t ltssm_state;
        uint8_t first_lane_num;
        uint16_t link_state;
        uint8_t supported_ld_count;
    } QEMU_PACKED;

    /* CXL r3.0 Table 7-19: Get Physical Port State Response Payload */
    struct cxl_fmapi_get_phys_port_state_resp_pl {
        uint8_t num_ports;
        uint8_t rsv1[3];
        struct cxl_fmapi_port_state_info_block ports[];
    } QEMU_PACKED *out;
    PCIBus *bus = &PCI_BRIDGE(cci->d)->sec_bus;
    PCIEPort *usp = PCIE_PORT(cci->d);
    size_t pl_size;
    int i;

    in = (struct cxl_fmapi_get_phys_port_state_req_pl *)payload_in;
    out = (struct cxl_fmapi_get_phys_port_state_resp_pl *)payload_out;

    /* Check if what was requested can fit */
    if (sizeof(*out) + sizeof(*out->ports) * in->num_ports > cci->payload_max) {
        return CXL_MBOX_INVALID_INPUT;
    }

    /* For success there should be a match for each requested */
    out->num_ports = in->num_ports;

    for (i = 0; i < in->num_ports; i++) {
        struct cxl_fmapi_port_state_info_block *port;
        /* First try to match on downstream port */
        PCIDevice *port_dev;
        uint16_t lnkcap, lnkcap2, lnksta;

        port = &out->ports[i];

        port_dev = pcie_find_port_by_pn(bus, in->ports[i]);
        if (port_dev) { /* DSP */
            PCIDevice *ds_dev = pci_bridge_get_sec_bus(PCI_BRIDGE(port_dev))
                ->devices[0];

            port->config_state = 3;
            if (ds_dev) {
                if (object_dynamic_cast(OBJECT(ds_dev), TYPE_CXL_TYPE3)) {
                    port->connected_device_type = 5; /* Assume MLD for now */
                } else {
                    port->connected_device_type = 1;
                }
            } else {
                port->connected_device_type = 0;
            }
            port->supported_ld_count = 3;
        } else if (usp->port == in->ports[i]) { /* USP */
            port_dev = PCI_DEVICE(usp);
            port->config_state = 4;
            port->connected_device_type = 0;
        } else {
            return CXL_MBOX_INVALID_INPUT;
        }

        port->port_id = in->ports[i];
        /* Information on status of this port in lnksta, lnkcap */
        if (!port_dev->exp.exp_cap) {
            return CXL_MBOX_INTERNAL_ERROR;
        }
        lnksta = port_dev->config_read(port_dev,
                                       port_dev->exp.exp_cap + PCI_EXP_LNKSTA,
                                       sizeof(lnksta));
        lnkcap = port_dev->config_read(port_dev,
                                       port_dev->exp.exp_cap + PCI_EXP_LNKCAP,
                                       sizeof(lnkcap));
        lnkcap2 = port_dev->config_read(port_dev,
                                        port_dev->exp.exp_cap + PCI_EXP_LNKCAP2,
                                        sizeof(lnkcap2));

        port->max_link_width = (lnkcap & PCI_EXP_LNKCAP_MLW) >> 4;
        port->negotiated_link_width = (lnksta & PCI_EXP_LNKSTA_NLW) >> 4;
        /* No definition for SLS field in linux/pci_regs.h */
        port->supported_link_speeds_vector = (lnkcap2 & 0xFE) >> 1;
        port->max_link_speed = lnkcap & PCI_EXP_LNKCAP_SLS;
        port->current_link_speed = lnksta & PCI_EXP_LNKSTA_CLS;
        /* TODO: Track down if we can get the rest of the info */
        port->ltssm_state = 0x7;
        port->first_lane_num = 0;
        port->link_state = 0;
        port->port_cxl_version_bitmask = 0x2;
        port->connected_device_cxl_version = 0x2;
    }

    pl_size = sizeof(*out) + sizeof(*out->ports) * in->num_ports;
    *len_out = pl_size;

    return CXL_MBOX_SUCCESS;
}
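
/*
 * Response sizing sketch (values derived from the structs above): the
 * fixed response header is 4 bytes and each port information block is 16
 * bytes, so a query for 2 ports produces a 4 + 2 * 16 = 36 byte payload.
 */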

/* CXL r3.0 section 8.2.9.1.2: Background Operation Status (Opcode 0002h) */
static CXLRetCode cmd_infostat_bg_op_sts(const struct cxl_cmd *cmd,
                                         uint8_t *payload_in,
                                         size_t len_in,
                                         uint8_t *payload_out,
                                         size_t *len_out,
                                         CXLCCI *cci)
{
    struct {
        uint8_t status;
        uint8_t rsvd;
        uint16_t opcode;
        uint16_t returncode;
        uint16_t vendor_ext_status;
    } QEMU_PACKED *bg_op_status;
    QEMU_BUILD_BUG_ON(sizeof(*bg_op_status) != 8);

    bg_op_status = (void *)payload_out;
    memset(bg_op_status, 0, sizeof(*bg_op_status));
    bg_op_status->status = cci->bg.complete_pct << 1;
    if (cci->bg.runtime > 0) {
        bg_op_status->status |= 1U << 0;
    }
    bg_op_status->opcode = cci->bg.opcode;
    bg_op_status->returncode = cci->bg.ret_code;
    *len_out = sizeof(*bg_op_status);

    return CXL_MBOX_SUCCESS;
}

/* CXL r3.0 section 8.2.9.2.1: Get FW Info (Opcode 0200h) */
static CXLRetCode cmd_firmware_update_get_info(const struct cxl_cmd *cmd,
                                               uint8_t *payload_in,
                                               size_t len,
                                               uint8_t *payload_out,
                                               size_t *len_out,
                                               CXLCCI *cci)
{
    CXLDeviceState *cxl_dstate = &CXL_TYPE3(cci->d)->cxl_dstate;
    struct {
        uint8_t slots_supported;
        uint8_t slot_info;
        uint8_t caps;
        uint8_t rsvd[0xd];
        char fw_rev1[0x10];
        char fw_rev2[0x10];
        char fw_rev3[0x10];
        char fw_rev4[0x10];
    } QEMU_PACKED *fw_info;
    QEMU_BUILD_BUG_ON(sizeof(*fw_info) != 0x50);

    if ((cxl_dstate->vmem_size < CXL_CAPACITY_MULTIPLIER) ||
        (cxl_dstate->pmem_size < CXL_CAPACITY_MULTIPLIER)) {
        return CXL_MBOX_INTERNAL_ERROR;
    }

    fw_info = (void *)payload_out;
    memset(fw_info, 0, sizeof(*fw_info));

    fw_info->slots_supported = 2;
    fw_info->slot_info = BIT(0) | BIT(3);
    fw_info->caps = 0;
    pstrcpy(fw_info->fw_rev1, sizeof(fw_info->fw_rev1), "BWFW VERSION 0");

    *len_out = sizeof(*fw_info);
    return CXL_MBOX_SUCCESS;
}

/* CXL r3.0 section 8.2.9.3.1: Get Timestamp (Opcode 0300h) */
static CXLRetCode cmd_timestamp_get(const struct cxl_cmd *cmd,
                                    uint8_t *payload_in,
                                    size_t len_in,
                                    uint8_t *payload_out,
                                    size_t *len_out,
                                    CXLCCI *cci)
{
    CXLDeviceState *cxl_dstate = &CXL_TYPE3(cci->d)->cxl_dstate;
    uint64_t final_time = cxl_device_get_timestamp(cxl_dstate);

    stq_le_p(payload_out, final_time);
    *len_out = 8;

    return CXL_MBOX_SUCCESS;
}

/* CXL r3.0 section 8.2.9.3.2: Set Timestamp (Opcode 0301h) */
static CXLRetCode cmd_timestamp_set(const struct cxl_cmd *cmd,
                                    uint8_t *payload_in,
                                    size_t len_in,
                                    uint8_t *payload_out,
                                    size_t *len_out,
                                    CXLCCI *cci)
{
    CXLDeviceState *cxl_dstate = &CXL_TYPE3(cci->d)->cxl_dstate;

    cxl_dstate->timestamp.set = true;
    cxl_dstate->timestamp.last_set = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);

    cxl_dstate->timestamp.host_set = le64_to_cpu(*(uint64_t *)payload_in);

    *len_out = 0;
    return CXL_MBOX_SUCCESS;
}

/* CXL r3.0 section 8.2.9.5.2.1: Command Effects Log (CEL) */
static const QemuUUID cel_uuid = {
    .data = UUID(0x0da9c0b5, 0xbf41, 0x4b78, 0x8f, 0x79,
                 0x96, 0xb1, 0x62, 0x3b, 0x3f, 0x17)
};

/* CXL r3.0 section 8.2.9.4.1: Get Supported Logs (Opcode 0400h) */
static CXLRetCode cmd_logs_get_supported(const struct cxl_cmd *cmd,
                                         uint8_t *payload_in,
                                         size_t len_in,
                                         uint8_t *payload_out,
                                         size_t *len_out,
                                         CXLCCI *cci)
{
    struct {
        uint16_t entries;
        uint8_t rsvd[6];
        struct {
            QemuUUID uuid;
            uint32_t size;
        } log_entries[1];
    } QEMU_PACKED *supported_logs = (void *)payload_out;
    QEMU_BUILD_BUG_ON(sizeof(*supported_logs) != 0x1c);

    supported_logs->entries = 1;
    supported_logs->log_entries[0].uuid = cel_uuid;
    supported_logs->log_entries[0].size = 4 * cci->cel_size;

    *len_out = sizeof(*supported_logs);
    return CXL_MBOX_SUCCESS;
}
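
/*
 * Each Command Effects Log entry is 4 bytes: a 2-byte opcode plus a 2-byte
 * effects field (see the cel_log population in cxl_init_cci() below), hence
 * the reported log size of 4 * cci->cel_size.
 */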

/* CXL r3.0 section 8.2.9.4.2: Get Log (Opcode 0401h) */
static CXLRetCode cmd_logs_get_log(const struct cxl_cmd *cmd,
                                   uint8_t *payload_in,
                                   size_t len_in,
                                   uint8_t *payload_out,
                                   size_t *len_out,
                                   CXLCCI *cci)
{
    struct {
        QemuUUID uuid;
        uint32_t offset;
        uint32_t length;
    } QEMU_PACKED QEMU_ALIGNED(16) *get_log;

    get_log = (void *)payload_in;

    /*
     * CXL r3.0 section 8.2.9.4.2:
     * The device shall return Invalid Parameter if the Offset or Length
     * fields attempt to access beyond the size of the log as reported by Get
     * Supported Logs.
     *
     * XXX: Spec is wrong, "Invalid Parameter" isn't a thing.
     * XXX: Spec also doesn't say what to do if the UUID is incorrect.
     *
     * The CEL buffer is large enough to fit all commands in the emulation, so
     * the only possible failure would be if the mailbox itself isn't big
     * enough.
     */
    if (get_log->offset + get_log->length > cci->payload_max) {
        return CXL_MBOX_INVALID_INPUT;
    }

    if (!qemu_uuid_is_equal(&get_log->uuid, &cel_uuid)) {
        return CXL_MBOX_UNSUPPORTED;
    }

    /*
     * Store the length before writing any output: payload_in and
     * payload_out may refer to the same mailbox payload buffer.
     */
    *len_out = get_log->length;

    memmove(payload_out, cci->cel_log + get_log->offset, get_log->length);

    return CXL_MBOX_SUCCESS;
}

/* CXL r3.0 section 8.2.9.5.1.1: Identify Memory Device (Opcode 4000h) */
static CXLRetCode cmd_identify_memory_device(const struct cxl_cmd *cmd,
                                             uint8_t *payload_in,
                                             size_t len_in,
                                             uint8_t *payload_out,
                                             size_t *len_out,
                                             CXLCCI *cci)
{
    struct {
        char fw_revision[0x10];
        uint64_t total_capacity;
        uint64_t volatile_capacity;
        uint64_t persistent_capacity;
        uint64_t partition_align;
        uint16_t info_event_log_size;
        uint16_t warning_event_log_size;
        uint16_t failure_event_log_size;
        uint16_t fatal_event_log_size;
        uint32_t lsa_size;
        uint8_t poison_list_max_mer[3];
        uint16_t inject_poison_limit;
        uint8_t poison_caps;
        uint8_t qos_telemetry_caps;
    } QEMU_PACKED *id;
    QEMU_BUILD_BUG_ON(sizeof(*id) != 0x43);
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLType3Class *cvc = CXL_TYPE3_GET_CLASS(ct3d);
    CXLDeviceState *cxl_dstate = &ct3d->cxl_dstate;

    if ((!QEMU_IS_ALIGNED(cxl_dstate->vmem_size, CXL_CAPACITY_MULTIPLIER)) ||
        (!QEMU_IS_ALIGNED(cxl_dstate->pmem_size, CXL_CAPACITY_MULTIPLIER))) {
        return CXL_MBOX_INTERNAL_ERROR;
    }

    id = (void *)payload_out;
    memset(id, 0, sizeof(*id));

    snprintf(id->fw_revision, 0x10, "BWFW VERSION %02d", 0);

    stq_le_p(&id->total_capacity,
             cxl_dstate->mem_size / CXL_CAPACITY_MULTIPLIER);
    stq_le_p(&id->persistent_capacity,
             cxl_dstate->pmem_size / CXL_CAPACITY_MULTIPLIER);
    stq_le_p(&id->volatile_capacity,
             cxl_dstate->vmem_size / CXL_CAPACITY_MULTIPLIER);
    stl_le_p(&id->lsa_size, cvc->get_lsa_size(ct3d));
    /* 256 poison records */
    st24_le_p(id->poison_list_max_mer, 256);
    /* No limit - so limited by main poison record limit */
    stw_le_p(&id->inject_poison_limit, 0);

    *len_out = sizeof(*id);
    return CXL_MBOX_SUCCESS;
}
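
/*
 * The capacity fields above are encoded in units of
 * CXL_CAPACITY_MULTIPLIER (256 MiB): for example, a device with 1 GiB of
 * volatile memory reports volatile_capacity = 4.
 */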

static CXLRetCode cmd_ccls_get_partition_info(const struct cxl_cmd *cmd,
                                              uint8_t *payload_in,
                                              size_t len_in,
                                              uint8_t *payload_out,
                                              size_t *len_out,
                                              CXLCCI *cci)
{
    CXLDeviceState *cxl_dstate = &CXL_TYPE3(cci->d)->cxl_dstate;
    struct {
        uint64_t active_vmem;
        uint64_t active_pmem;
        uint64_t next_vmem;
        uint64_t next_pmem;
    } QEMU_PACKED *part_info = (void *)payload_out;
    QEMU_BUILD_BUG_ON(sizeof(*part_info) != 0x20);

    if ((!QEMU_IS_ALIGNED(cxl_dstate->vmem_size, CXL_CAPACITY_MULTIPLIER)) ||
        (!QEMU_IS_ALIGNED(cxl_dstate->pmem_size, CXL_CAPACITY_MULTIPLIER))) {
        return CXL_MBOX_INTERNAL_ERROR;
    }

    stq_le_p(&part_info->active_vmem,
             cxl_dstate->vmem_size / CXL_CAPACITY_MULTIPLIER);
    /*
     * When both next_vmem and next_pmem are 0, there is no pending change to
     * partitioning.
     */
    stq_le_p(&part_info->next_vmem, 0);
    stq_le_p(&part_info->active_pmem,
             cxl_dstate->pmem_size / CXL_CAPACITY_MULTIPLIER);
    stq_le_p(&part_info->next_pmem, 0);

    *len_out = sizeof(*part_info);
    return CXL_MBOX_SUCCESS;
}

static CXLRetCode cmd_ccls_get_lsa(const struct cxl_cmd *cmd,
                                   uint8_t *payload_in,
                                   size_t len_in,
                                   uint8_t *payload_out,
                                   size_t *len_out,
                                   CXLCCI *cci)
{
    struct {
        uint32_t offset;
        uint32_t length;
    } QEMU_PACKED *get_lsa;
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLType3Class *cvc = CXL_TYPE3_GET_CLASS(ct3d);
    uint32_t offset, length;

    get_lsa = (void *)payload_in;
    offset = get_lsa->offset;
    length = get_lsa->length;

    if (offset + length > cvc->get_lsa_size(ct3d)) {
        *len_out = 0;
        return CXL_MBOX_INVALID_INPUT;
    }

    *len_out = cvc->get_lsa(ct3d, payload_out, length, offset);
    return CXL_MBOX_SUCCESS;
}

static CXLRetCode cmd_ccls_set_lsa(const struct cxl_cmd *cmd,
                                   uint8_t *payload_in,
                                   size_t len_in,
                                   uint8_t *payload_out,
                                   size_t *len_out,
                                   CXLCCI *cci)
{
    struct set_lsa_pl {
        uint32_t offset;
        uint32_t rsvd;
        uint8_t data[];
    } QEMU_PACKED;
    struct set_lsa_pl *set_lsa_payload = (void *)payload_in;
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLType3Class *cvc = CXL_TYPE3_GET_CLASS(ct3d);
    const size_t hdr_len = offsetof(struct set_lsa_pl, data);

    *len_out = 0;
    if (!len_in) {
        return CXL_MBOX_SUCCESS;
    }

    if (set_lsa_payload->offset + len_in > cvc->get_lsa_size(ct3d) + hdr_len) {
        return CXL_MBOX_INVALID_INPUT;
    }
    len_in -= hdr_len;

    cvc->set_lsa(ct3d, set_lsa_payload->data, len_in, set_lsa_payload->offset);
    return CXL_MBOX_SUCCESS;
}

/* Perform the actual device zeroing */
static void __do_sanitization(CXLType3Dev *ct3d)
{
    MemoryRegion *mr;

    if (ct3d->hostvmem) {
        mr = host_memory_backend_get_memory(ct3d->hostvmem);
        if (mr) {
            void *hostmem = memory_region_get_ram_ptr(mr);
            memset(hostmem, 0, memory_region_size(mr));
        }
    }

    if (ct3d->hostpmem) {
        mr = host_memory_backend_get_memory(ct3d->hostpmem);
        if (mr) {
            void *hostmem = memory_region_get_ram_ptr(mr);
            memset(hostmem, 0, memory_region_size(mr));
        }
    }
    if (ct3d->lsa) {
        mr = host_memory_backend_get_memory(ct3d->lsa);
        if (mr) {
            void *lsa = memory_region_get_ram_ptr(mr);
            memset(lsa, 0, memory_region_size(mr));
        }
    }
}

/*
 * CXL r3.0 spec section 8.2.9.8.5.1 - Sanitize.
 *
 * Once the Sanitize command has started successfully, the device shall be
 * placed in the media disabled state. If the command fails or is interrupted
 * by a reset or power failure, it shall remain in the media disabled state
 * until a successful Sanitize command has been completed. During this state:
 *
 * 1. Memory writes to the device will have no effect, and all memory reads
 * will return random values (no user data returned, even for locations that
 * the failed Sanitize operation didn't sanitize yet).
 *
 * 2. Mailbox commands shall still be processed in the disabled state, except
 * that commands that access Sanitized areas shall fail with the Media Disabled
 * error code.
 */
static CXLRetCode cmd_sanitize_overwrite(const struct cxl_cmd *cmd,
                                         uint8_t *payload_in,
                                         size_t len_in,
                                         uint8_t *payload_out,
                                         size_t *len_out,
                                         CXLCCI *cci)
{
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    uint64_t total_mem; /* in MiB */
    int secs;

    total_mem = (ct3d->cxl_dstate.vmem_size + ct3d->cxl_dstate.pmem_size) >> 20;
    if (total_mem <= 512) {
        secs = 4;
    } else if (total_mem <= 1024) {
        secs = 8;
    } else if (total_mem <= 2 * 1024) {
        secs = 15;
    } else if (total_mem <= 4 * 1024) {
        secs = 30;
    } else if (total_mem <= 8 * 1024) {
        secs = 60;
    } else if (total_mem <= 16 * 1024) {
        secs = 2 * 60;
    } else if (total_mem <= 32 * 1024) {
        secs = 4 * 60;
    } else if (total_mem <= 64 * 1024) {
        secs = 8 * 60;
    } else if (total_mem <= 128 * 1024) {
        secs = 15 * 60;
    } else if (total_mem <= 256 * 1024) {
        secs = 30 * 60;
    } else if (total_mem <= 512 * 1024) {
        secs = 60 * 60;
    } else if (total_mem <= 1024 * 1024) {
        secs = 120 * 60;
    } else {
        secs = 240 * 60; /* max 4 hrs */
    }

    /* Other background commands will return BUSY until this completes */
    cci->bg.runtime = secs * 1000UL;
    *len_out = 0;

    cxl_dev_disable_media(&ct3d->cxl_dstate);

    if (secs > 2) {
        /* sanitization is deferred to bg_timercb() when the timer expires */
        return CXL_MBOX_BG_STARTED;
    } else {
        __do_sanitization(ct3d);
        cxl_dev_enable_media(&ct3d->cxl_dstate);

        return CXL_MBOX_SUCCESS;
    }
}
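
/*
 * Example of the scaling above: a device with 2 GiB of volatile plus 2 GiB
 * of persistent capacity has total_mem = 4096, so secs = 30 and the
 * command completes asynchronously via bg_timercb().
 */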

static CXLRetCode cmd_get_security_state(const struct cxl_cmd *cmd,
                                         uint8_t *payload_in,
                                         size_t len_in,
                                         uint8_t *payload_out,
                                         size_t *len_out,
                                         CXLCCI *cci)
{
    uint32_t *state = (uint32_t *)payload_out;

    *state = 0;
    *len_out = 4;
    return CXL_MBOX_SUCCESS;
}

/*
 * This is very inefficient, but good enough for now!
 * Also the payload will always fit, so no need to handle the MORE flag and
 * make this stateful. We may want to allow longer poison lists to aid
 * testing of that kernel functionality.
 */
static CXLRetCode cmd_media_get_poison_list(const struct cxl_cmd *cmd,
                                            uint8_t *payload_in,
                                            size_t len_in,
                                            uint8_t *payload_out,
                                            size_t *len_out,
                                            CXLCCI *cci)
{
    struct get_poison_list_pl {
        uint64_t pa;
        uint64_t length;
    } QEMU_PACKED;

    struct get_poison_list_out_pl {
        uint8_t flags;
        uint8_t rsvd1;
        uint64_t overflow_timestamp;
        uint16_t count;
        uint8_t rsvd2[0x14];
        struct {
            uint64_t addr;
            uint32_t length;
            uint32_t resv;
        } QEMU_PACKED records[];
    } QEMU_PACKED;

    struct get_poison_list_pl *in = (void *)payload_in;
    struct get_poison_list_out_pl *out = (void *)payload_out;
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    uint16_t record_count = 0, i = 0;
    uint64_t query_start, query_length;
    CXLPoisonList *poison_list = &ct3d->poison_list;
    CXLPoison *ent;
    uint16_t out_pl_len;

    query_start = ldq_le_p(&in->pa);
    /* 64 byte alignment required */
    if (query_start & 0x3f) {
        return CXL_MBOX_INVALID_INPUT;
    }
    query_length = ldq_le_p(&in->length) * CXL_CACHE_LINE_SIZE;

    QLIST_FOREACH(ent, poison_list, node) {
        /* Check for no overlap */
        if (ent->start >= query_start + query_length ||
            ent->start + ent->length <= query_start) {
            continue;
        }
        record_count++;
    }
    out_pl_len = sizeof(*out) + record_count * sizeof(out->records[0]);
    assert(out_pl_len <= CXL_MAILBOX_MAX_PAYLOAD_SIZE);

    memset(out, 0, out_pl_len);
    QLIST_FOREACH(ent, poison_list, node) {
        uint64_t start, stop;

        /* Check for no overlap */
        if (ent->start >= query_start + query_length ||
            ent->start + ent->length <= query_start) {
            continue;
        }

        /* Deal with overlap */
        start = MAX(ROUND_DOWN(ent->start, 64ull), query_start);
        stop = MIN(ROUND_DOWN(ent->start, 64ull) + ent->length,
                   query_start + query_length);
        stq_le_p(&out->records[i].addr, start | (ent->type & 0x7));
        stl_le_p(&out->records[i].length, (stop - start) / CXL_CACHE_LINE_SIZE);
        i++;
    }
    if (ct3d->poison_list_overflowed) {
        out->flags = (1 << 1);
        stq_le_p(&out->overflow_timestamp, ct3d->poison_list_overflow_ts);
    }
    stw_le_p(&out->count, record_count);
    *len_out = out_pl_len;
    return CXL_MBOX_SUCCESS;
}

static CXLRetCode cmd_media_inject_poison(const struct cxl_cmd *cmd,
                                          uint8_t *payload_in,
                                          size_t len_in,
                                          uint8_t *payload_out,
                                          size_t *len_out,
                                          CXLCCI *cci)
{
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLPoisonList *poison_list = &ct3d->poison_list;
    CXLPoison *ent;
    struct inject_poison_pl {
        uint64_t dpa;
    };
    struct inject_poison_pl *in = (void *)payload_in;
    uint64_t dpa = ldq_le_p(&in->dpa);
    CXLPoison *p;

    QLIST_FOREACH(ent, poison_list, node) {
        if (dpa >= ent->start &&
            dpa + CXL_CACHE_LINE_SIZE <= ent->start + ent->length) {
            return CXL_MBOX_SUCCESS;
        }
    }

    if (ct3d->poison_list_cnt == CXL_POISON_LIST_LIMIT) {
        return CXL_MBOX_INJECT_POISON_LIMIT;
    }
    p = g_new0(CXLPoison, 1);

    p->length = CXL_CACHE_LINE_SIZE;
    p->start = dpa;
    p->type = CXL_POISON_TYPE_INJECTED;

    /*
     * Possible todo: Merge with existing entry if next to it and if same type
     */
    QLIST_INSERT_HEAD(poison_list, p, node);
    ct3d->poison_list_cnt++;
    *len_out = 0;

    return CXL_MBOX_SUCCESS;
}

static CXLRetCode cmd_media_clear_poison(const struct cxl_cmd *cmd,
                                         uint8_t *payload_in,
                                         size_t len_in,
                                         uint8_t *payload_out,
                                         size_t *len_out,
                                         CXLCCI *cci)
{
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLDeviceState *cxl_dstate = &ct3d->cxl_dstate;
    CXLPoisonList *poison_list = &ct3d->poison_list;
    CXLType3Class *cvc = CXL_TYPE3_GET_CLASS(ct3d);
    struct clear_poison_pl {
        uint64_t dpa;
        uint8_t data[64];
    };
    CXLPoison *ent;
    uint64_t dpa;

    struct clear_poison_pl *in = (void *)payload_in;

    dpa = ldq_le_p(&in->dpa);
    if (dpa + CXL_CACHE_LINE_SIZE > cxl_dstate->mem_size) {
        return CXL_MBOX_INVALID_PA;
    }

    /* Clearing a region with no poison is not an error so always do so */
    if (cvc->set_cacheline) {
        if (!cvc->set_cacheline(ct3d, dpa, in->data)) {
            return CXL_MBOX_INTERNAL_ERROR;
        }
    }

    QLIST_FOREACH(ent, poison_list, node) {
        /*
         * Test for containment within an entry. This is simpler than the
         * general case as we are clearing exactly 64 bytes and entries are
         * 64-byte aligned.
         */
        if ((dpa >= ent->start) && (dpa < ent->start + ent->length)) {
            break;
        }
    }
    if (!ent) {
        return CXL_MBOX_SUCCESS;
    }

    QLIST_REMOVE(ent, node);
    ct3d->poison_list_cnt--;

    if (dpa > ent->start) {
        CXLPoison *frag;
        /* Cannot overflow as replacing existing entry */

        frag = g_new0(CXLPoison, 1);

        frag->start = ent->start;
        frag->length = dpa - ent->start;
        frag->type = ent->type;

        QLIST_INSERT_HEAD(poison_list, frag, node);
        ct3d->poison_list_cnt++;
    }

    if (dpa + CXL_CACHE_LINE_SIZE < ent->start + ent->length) {
        CXLPoison *frag;

        if (ct3d->poison_list_cnt == CXL_POISON_LIST_LIMIT) {
            cxl_set_poison_list_overflowed(ct3d);
        } else {
            frag = g_new0(CXLPoison, 1);

            frag->start = dpa + CXL_CACHE_LINE_SIZE;
            frag->length = ent->start + ent->length - frag->start;
            frag->type = ent->type;
            QLIST_INSERT_HEAD(poison_list, frag, node);
            ct3d->poison_list_cnt++;
        }
    }
    /* Once any fragments have been added, free the original entry */
    g_free(ent);
    *len_out = 0;

    return CXL_MBOX_SUCCESS;
}
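
/*
 * Fragmentation example for the logic above: clearing the second cache
 * line of a 256-byte poison entry starting at DPA 0x1000 removes the
 * original entry and inserts two fragments, [0x1000, 0x1040) and
 * [0x1080, 0x1100).
 */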

#define IMMEDIATE_CONFIG_CHANGE (1 << 1)
#define IMMEDIATE_DATA_CHANGE (1 << 2)
#define IMMEDIATE_POLICY_CHANGE (1 << 3)
#define IMMEDIATE_LOG_CHANGE (1 << 4)
#define SECURITY_STATE_CHANGE (1 << 5)
#define BACKGROUND_OPERATION (1 << 6)

static const struct cxl_cmd cxl_cmd_set[256][256] = {
    [EVENTS][GET_RECORDS] = { "EVENTS_GET_RECORDS",
        cmd_events_get_records, 1, 0 },
    [EVENTS][CLEAR_RECORDS] = { "EVENTS_CLEAR_RECORDS",
        cmd_events_clear_records, ~0, IMMEDIATE_LOG_CHANGE },
    [EVENTS][GET_INTERRUPT_POLICY] = { "EVENTS_GET_INTERRUPT_POLICY",
        cmd_events_get_interrupt_policy, 0, 0 },
    [EVENTS][SET_INTERRUPT_POLICY] = { "EVENTS_SET_INTERRUPT_POLICY",
        cmd_events_set_interrupt_policy,
        ~0, IMMEDIATE_CONFIG_CHANGE },
    [FIRMWARE_UPDATE][GET_INFO] = { "FIRMWARE_UPDATE_GET_INFO",
        cmd_firmware_update_get_info, 0, 0 },
    [TIMESTAMP][GET] = { "TIMESTAMP_GET", cmd_timestamp_get, 0, 0 },
    [TIMESTAMP][SET] = { "TIMESTAMP_SET", cmd_timestamp_set,
        8, IMMEDIATE_POLICY_CHANGE },
    [LOGS][GET_SUPPORTED] = { "LOGS_GET_SUPPORTED", cmd_logs_get_supported,
        0, 0 },
    [LOGS][GET_LOG] = { "LOGS_GET_LOG", cmd_logs_get_log, 0x18, 0 },
    [IDENTIFY][MEMORY_DEVICE] = { "IDENTIFY_MEMORY_DEVICE",
        cmd_identify_memory_device, 0, 0 },
    [CCLS][GET_PARTITION_INFO] = { "CCLS_GET_PARTITION_INFO",
        cmd_ccls_get_partition_info, 0, 0 },
    [CCLS][GET_LSA] = { "CCLS_GET_LSA", cmd_ccls_get_lsa, 8, 0 },
    [CCLS][SET_LSA] = { "CCLS_SET_LSA", cmd_ccls_set_lsa,
        ~0, IMMEDIATE_CONFIG_CHANGE | IMMEDIATE_DATA_CHANGE },
    [SANITIZE][OVERWRITE] = { "SANITIZE_OVERWRITE", cmd_sanitize_overwrite, 0,
        IMMEDIATE_DATA_CHANGE | SECURITY_STATE_CHANGE | BACKGROUND_OPERATION },
    [PERSISTENT_MEM][GET_SECURITY_STATE] = { "GET_SECURITY_STATE",
        cmd_get_security_state, 0, 0 },
    [MEDIA_AND_POISON][GET_POISON_LIST] = { "MEDIA_AND_POISON_GET_POISON_LIST",
        cmd_media_get_poison_list, 16, 0 },
    [MEDIA_AND_POISON][INJECT_POISON] = { "MEDIA_AND_POISON_INJECT_POISON",
        cmd_media_inject_poison, 8, 0 },
    [MEDIA_AND_POISON][CLEAR_POISON] = { "MEDIA_AND_POISON_CLEAR_POISON",
        cmd_media_clear_poison, 72, 0 },
};

static const struct cxl_cmd cxl_cmd_set_sw[256][256] = {
    [INFOSTAT][IS_IDENTIFY] = { "IDENTIFY", cmd_infostat_identify, 0, 0 },
    [INFOSTAT][BACKGROUND_OPERATION_STATUS] = { "BACKGROUND_OPERATION_STATUS",
        cmd_infostat_bg_op_sts, 0, 0 },
    [TIMESTAMP][GET] = { "TIMESTAMP_GET", cmd_timestamp_get, 0, 0 },
    [TIMESTAMP][SET] = { "TIMESTAMP_SET", cmd_timestamp_set, 0,
        IMMEDIATE_POLICY_CHANGE },
    [LOGS][GET_SUPPORTED] = { "LOGS_GET_SUPPORTED", cmd_logs_get_supported, 0,
        0 },
    [LOGS][GET_LOG] = { "LOGS_GET_LOG", cmd_logs_get_log, 0x18, 0 },
    [PHYSICAL_SWITCH][IDENTIFY_SWITCH_DEVICE] = { "IDENTIFY_SWITCH_DEVICE",
        cmd_identify_switch_device, 0, 0 },
    [PHYSICAL_SWITCH][GET_PHYSICAL_PORT_STATE] = { "SWITCH_PHYSICAL_PORT_STATS",
        cmd_get_physical_port_state, ~0, 0 },
    [TUNNEL][MANAGEMENT_COMMAND] = { "TUNNEL_MANAGEMENT_COMMAND",
        cmd_tunnel_management_cmd, ~0, 0 },
};

/*
 * While the command is executing in the background, the device should
 * update the percentage complete in the Background Command Status Register
 * at least once per second.
 */

#define CXL_MBOX_BG_UPDATE_FREQ 1000UL

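/*
 * Progress example (a sketch): a background sanitize with runtime = 4000 ms
 * and the 1000 ms update period above will see bg_timercb() fire roughly
 * four times, advancing complete_pct by about 25% per tick before the
 * completion interrupt is raised.
 */
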
int cxl_process_cci_message(CXLCCI *cci, uint8_t set, uint8_t cmd,
                            size_t len_in, uint8_t *pl_in, size_t *len_out,
                            uint8_t *pl_out, bool *bg_started)
{
    int ret;
    const struct cxl_cmd *cxl_cmd;
    opcode_handler h;

    *len_out = 0;
    cxl_cmd = &cci->cxl_cmd_set[set][cmd];
    h = cxl_cmd->handler;
    if (!h) {
        qemu_log_mask(LOG_UNIMP, "Command %04xh not implemented\n",
                      set << 8 | cmd);
        return CXL_MBOX_UNSUPPORTED;
    }

    if (len_in != cxl_cmd->in && cxl_cmd->in != ~0) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }

    /* Only one bg command at a time */
    if ((cxl_cmd->effect & BACKGROUND_OPERATION) &&
        cci->bg.runtime > 0) {
        return CXL_MBOX_BUSY;
    }

    /* forbid any selected commands while overwriting */
    if (sanitize_running(cci)) {
        if (h == cmd_events_get_records ||
            h == cmd_ccls_get_partition_info ||
            h == cmd_ccls_set_lsa ||
            h == cmd_ccls_get_lsa ||
            h == cmd_logs_get_log ||
            h == cmd_media_get_poison_list ||
            h == cmd_media_inject_poison ||
            h == cmd_media_clear_poison ||
            h == cmd_sanitize_overwrite) {
            return CXL_MBOX_MEDIA_DISABLED;
        }
    }

    ret = (*h)(cxl_cmd, pl_in, len_in, pl_out, len_out, cci);
    if ((cxl_cmd->effect & BACKGROUND_OPERATION) &&
        ret == CXL_MBOX_BG_STARTED) {
        *bg_started = true;
    } else {
        *bg_started = false;
    }

    /* Set bg and the return code */
    if (*bg_started) {
        uint64_t now;

        cci->bg.opcode = (set << 8) | cmd;

        cci->bg.complete_pct = 0;
        cci->bg.ret_code = 0;

        now = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL);
        cci->bg.starttime = now;
        timer_mod(cci->bg.timer, now + CXL_MBOX_BG_UPDATE_FREQ);
    }

    return ret;
}

static void bg_timercb(void *opaque)
{
    CXLCCI *cci = opaque;
    uint64_t now = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL);
    uint64_t total_time = cci->bg.starttime + cci->bg.runtime;

    assert(cci->bg.runtime > 0);

    if (now >= total_time) { /* we are done */
        uint16_t ret = CXL_MBOX_SUCCESS;

        cci->bg.complete_pct = 100;
        cci->bg.ret_code = ret;
        if (ret == CXL_MBOX_SUCCESS) {
            switch (cci->bg.opcode) {
            case 0x4400: /* sanitize */
            {
                CXLType3Dev *ct3d = CXL_TYPE3(cci->d);

                __do_sanitization(ct3d);
                cxl_dev_enable_media(&ct3d->cxl_dstate);
            }
            break;
            case 0x4304: /* TODO: scan media */
                break;
            default:
                __builtin_unreachable();
                break;
            }
        }

        qemu_log("Background command %04xh finished: %s\n",
                 cci->bg.opcode,
                 ret == CXL_MBOX_SUCCESS ? "success" : "aborted");
    } else {
        /* estimate only: elapsed time measured against the expected runtime */
        cci->bg.complete_pct =
            100 * (now - cci->bg.starttime) / cci->bg.runtime;
        timer_mod(cci->bg.timer, now + CXL_MBOX_BG_UPDATE_FREQ);
    }

    if (cci->bg.complete_pct == 100) {
        /* TODO: generalize to switch CCI */
        CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
        CXLDeviceState *cxl_dstate = &ct3d->cxl_dstate;
        PCIDevice *pdev = PCI_DEVICE(cci->d);

        cci->bg.starttime = 0;
        /* registers are updated, allow new bg-capable cmds */
        cci->bg.runtime = 0;

        if (msix_enabled(pdev)) {
            msix_notify(pdev, cxl_dstate->mbox_msi_n);
        } else if (msi_enabled(pdev)) {
            msi_notify(pdev, cxl_dstate->mbox_msi_n);
        }
    }
}

void cxl_init_cci(CXLCCI *cci, size_t payload_max)
{
    cci->payload_max = payload_max;
    for (int set = 0; set < 256; set++) {
        for (int cmd = 0; cmd < 256; cmd++) {
            if (cci->cxl_cmd_set[set][cmd].handler) {
                const struct cxl_cmd *c = &cci->cxl_cmd_set[set][cmd];
                struct cel_log *log =
                    &cci->cel_log[cci->cel_size];

                log->opcode = (set << 8) | cmd;
                log->effect = c->effect;
                cci->cel_size++;
            }
        }
    }
    cci->bg.complete_pct = 0;
    cci->bg.starttime = 0;
    cci->bg.runtime = 0;
    cci->bg.timer = timer_new_ms(QEMU_CLOCK_VIRTUAL,
                                 bg_timercb, cci);
}

void cxl_initialize_mailbox_swcci(CXLCCI *cci, DeviceState *intf,
                                  DeviceState *d, size_t payload_max)
{
    cci->cxl_cmd_set = cxl_cmd_set_sw;
    cci->d = d;
    cci->intf = intf;
    cxl_init_cci(cci, payload_max);
}

void cxl_initialize_mailbox_t3(CXLCCI *cci, DeviceState *d, size_t payload_max)
{
    cci->cxl_cmd_set = cxl_cmd_set;
    cci->d = d;

    /* No separation for PCI MB as protocol handled in PCI device */
    cci->intf = d;
    cxl_init_cci(cci, payload_max);
}
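
/*
 * Usage sketch (assumed caller, along the lines of the Type 3 device
 * realize path):
 *
 *    cxl_initialize_mailbox_t3(&ct3d->cci, DEVICE(ct3d),
 *                              CXL_MAILBOX_MAX_PAYLOAD_SIZE);
 *
 * selects the command table, populates the CEL from it, and creates the
 * background command timer before the first mailbox doorbell ring.
 */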

static const struct cxl_cmd cxl_cmd_set_t3_ld[256][256] = {
    [INFOSTAT][IS_IDENTIFY] = { "IDENTIFY", cmd_infostat_identify, 0, 0 },
    [LOGS][GET_SUPPORTED] = { "LOGS_GET_SUPPORTED", cmd_logs_get_supported, 0,
                              0 },
    [LOGS][GET_LOG] = { "LOGS_GET_LOG", cmd_logs_get_log, 0x18, 0 },
};

void cxl_initialize_t3_ld_cci(CXLCCI *cci, DeviceState *d, DeviceState *intf,
                              size_t payload_max)
{
    cci->cxl_cmd_set = cxl_cmd_set_t3_ld;
    cci->d = d;
    cci->intf = intf;
    cxl_init_cci(cci, payload_max);
}

static const struct cxl_cmd cxl_cmd_set_t3_fm_owned_ld_mctp[256][256] = {
    [INFOSTAT][IS_IDENTIFY] = { "IDENTIFY", cmd_infostat_identify, 0, 0 },
    [LOGS][GET_SUPPORTED] = { "LOGS_GET_SUPPORTED", cmd_logs_get_supported, 0,
                              0 },
    [LOGS][GET_LOG] = { "LOGS_GET_LOG", cmd_logs_get_log, 0x18, 0 },
    [TIMESTAMP][GET] = { "TIMESTAMP_GET", cmd_timestamp_get, 0, 0 },
    [TUNNEL][MANAGEMENT_COMMAND] = { "TUNNEL_MANAGEMENT_COMMAND",
        cmd_tunnel_management_cmd, ~0, 0 },
};

void cxl_initialize_t3_fm_owned_ld_mctpcci(CXLCCI *cci, DeviceState *d,
                                           DeviceState *intf,
                                           size_t payload_max)
{
    cci->cxl_cmd_set = cxl_cmd_set_t3_fm_owned_ld_mctp;
    cci->d = d;
    cci->intf = intf;
    cxl_init_cci(cci, payload_max);
}