]> git.proxmox.com Git - mirror_qemu.git/blob - hw/cxl/cxl-mailbox-utils.c
hw/cxl/mbox: Add Physical Switch Identify command.
[mirror_qemu.git] / hw / cxl / cxl-mailbox-utils.c
1 /*
2 * CXL Utility library for mailbox interface
3 *
4 * Copyright(C) 2020 Intel Corporation.
5 *
6 * This work is licensed under the terms of the GNU GPL, version 2. See the
7 * COPYING file in the top-level directory.
8 */
9
10 #include "qemu/osdep.h"
11 #include "hw/cxl/cxl.h"
12 #include "hw/cxl/cxl_events.h"
13 #include "hw/pci/pci.h"
14 #include "hw/pci-bridge/cxl_upstream_port.h"
15 #include "qemu/cutils.h"
16 #include "qemu/log.h"
17 #include "qemu/units.h"
18 #include "qemu/uuid.h"
19
20 #define CXL_CAPACITY_MULTIPLIER (256 * MiB)
21
22 /*
23 * How to add a new command, example. The command set FOO, with cmd BAR.
24 * 1. Add the command set and cmd to the enum.
25 * FOO = 0x7f,
26 * #define BAR 0
27 * 2. Implement the handler
28 * static CXLRetCode cmd_foo_bar(struct cxl_cmd *cmd,
29 * CXLDeviceState *cxl_dstate, uint16_t *len)
30 * 3. Add the command to the cxl_cmd_set[][]
31 * [FOO][BAR] = { "FOO_BAR", cmd_foo_bar, x, y },
32 * 4. Implement your handler
33 * define_mailbox_handler(FOO_BAR) { ... return CXL_MBOX_SUCCESS; }
34 *
35 *
36 * Writing the handler:
37 * The handler will provide the &struct cxl_cmd, the &CXLDeviceState, and the
38 * in/out length of the payload. The handler is responsible for consuming the
39 * payload from cmd->payload and operating upon it as necessary. It must then
40 * fill the output data into cmd->payload (overwriting what was there),
41 * setting the length, and returning a valid return code.
42 *
43 * XXX: The handler need not worry about endianness. The payload is read out of
44 * a register interface that already deals with it.
45 */
46
/*
 * Command set / command opcode space.  A full mailbox opcode is formed as
 * (command_set << 8) | command: the enum entries are the command sets and
 * the #defines below each entry are the commands within that set.
 */
enum {
    INFOSTAT    = 0x00,
        #define IS_IDENTIFY   0x1
    EVENTS      = 0x01,
        #define GET_RECORDS   0x0
        #define CLEAR_RECORDS 0x1
        #define GET_INTERRUPT_POLICY 0x2
        #define SET_INTERRUPT_POLICY 0x3
    FIRMWARE_UPDATE = 0x02,
        #define GET_INFO 0x0
    TIMESTAMP   = 0x03,
        #define GET           0x0
        #define SET           0x1
    LOGS        = 0x04,
        #define GET_SUPPORTED 0x0
        #define GET_LOG       0x1
    IDENTIFY    = 0x40,
        #define MEMORY_DEVICE 0x0
    CCLS        = 0x41,
        #define GET_PARTITION_INFO 0x0
        #define GET_LSA       0x2
        #define SET_LSA       0x3
    MEDIA_AND_POISON = 0x43,
        #define GET_POISON_LIST        0x0
        #define INJECT_POISON          0x1
        #define CLEAR_POISON           0x2
    PHYSICAL_SWITCH = 0x51,
        #define IDENTIFY_SWITCH_DEVICE      0x0
};
76
77
/*
 * CXL Get Event Records (Opcode 0100h)
 *
 * payload_in[0] selects the event log; as many pending records as fit in
 * the mailbox payload are returned via cxl_event_get_records().
 */
static CXLRetCode cmd_events_get_records(const struct cxl_cmd *cmd,
                                         uint8_t *payload_in, size_t len_in,
                                         uint8_t *payload_out, size_t *len_out,
                                         CXLCCI *cci)
{
    CXLDeviceState *cxlds = &CXL_TYPE3(cci->d)->cxl_dstate;
    CXLGetEventPayload *pl;
    uint8_t log_type;
    int max_recs;

    if (cmd->in < sizeof(log_type)) {
        return CXL_MBOX_INVALID_INPUT;
    }

    log_type = payload_in[0];

    pl = (CXLGetEventPayload *)payload_out;
    memset(pl, 0, sizeof(*pl));

    /*
     * Cap the record count at what fits in the payload after the header,
     * and at 0xFFFF since the response count field is 16 bits wide.
     */
    max_recs = (cxlds->payload_size - CXL_EVENT_PAYLOAD_HDR_SIZE) /
                CXL_EVENT_RECORD_SIZE;
    if (max_recs > 0xFFFF) {
        max_recs = 0xFFFF;
    }

    return cxl_event_get_records(cxlds, pl, log_type, max_recs, len_out);
}
105
/*
 * CXL Clear Event Records (Opcode 0101h)
 *
 * Passes the request straight to cxl_event_clear_records(); there is no
 * output payload.
 */
static CXLRetCode cmd_events_clear_records(const struct cxl_cmd *cmd,
                                           uint8_t *payload_in,
                                           size_t len_in,
                                           uint8_t *payload_out,
                                           size_t *len_out,
                                           CXLCCI *cci)
{
    CXLDeviceState *cxlds = &CXL_TYPE3(cci->d)->cxl_dstate;
    CXLClearEventPayload *pl;

    pl = (CXLClearEventPayload *)payload_in;
    *len_out = 0;
    return cxl_event_clear_records(cxlds, pl);
}
120
/*
 * CXL Get Event Interrupt Policy (Opcode 0102h)
 *
 * Reports per event log how interrupts are delivered.  In this emulation a
 * log either has MSI/MSI-X enabled on a vector or no interrupt at all, so
 * each settings field is either CXL_EVENT_INT_SETTING(vector) or zero.
 */
static CXLRetCode cmd_events_get_interrupt_policy(const struct cxl_cmd *cmd,
                                                  uint8_t *payload_in,
                                                  size_t len_in,
                                                  uint8_t *payload_out,
                                                  size_t *len_out,
                                                  CXLCCI *cci)
{
    CXLDeviceState *cxlds = &CXL_TYPE3(cci->d)->cxl_dstate;
    CXLEventInterruptPolicy *policy;
    CXLEventLog *log;

    policy = (CXLEventInterruptPolicy *)payload_out;
    memset(policy, 0, sizeof(*policy));

    log = &cxlds->event_logs[CXL_EVENT_TYPE_INFO];
    if (log->irq_enabled) {
        policy->info_settings = CXL_EVENT_INT_SETTING(log->irq_vec);
    }

    log = &cxlds->event_logs[CXL_EVENT_TYPE_WARN];
    if (log->irq_enabled) {
        policy->warn_settings = CXL_EVENT_INT_SETTING(log->irq_vec);
    }

    log = &cxlds->event_logs[CXL_EVENT_TYPE_FAIL];
    if (log->irq_enabled) {
        policy->failure_settings = CXL_EVENT_INT_SETTING(log->irq_vec);
    }

    log = &cxlds->event_logs[CXL_EVENT_TYPE_FATAL];
    if (log->irq_enabled) {
        policy->fatal_settings = CXL_EVENT_INT_SETTING(log->irq_vec);
    }

    log = &cxlds->event_logs[CXL_EVENT_TYPE_DYNAMIC_CAP];
    if (log->irq_enabled) {
        /* Dynamic Capacity borrows the same vector as info */
        policy->dyn_cap_settings = CXL_INT_MSI_MSIX;
    }

    *len_out = sizeof(*policy);
    return CXL_MBOX_SUCCESS;
}
164
/*
 * CXL Set Event Interrupt Policy (Opcode 0103h)
 *
 * Enables or disables per-log interrupt delivery.  Only the MSI/MSI-X mode
 * is honoured by the emulation; any other requested mode leaves the log's
 * IRQ disabled.  The Dynamic Capacity field is optional and only parsed
 * when the input payload is long enough to carry it.
 */
static CXLRetCode cmd_events_set_interrupt_policy(const struct cxl_cmd *cmd,
                                                  uint8_t *payload_in,
                                                  size_t len_in,
                                                  uint8_t *payload_out,
                                                  size_t *len_out,
                                                  CXLCCI *cci)
{
    CXLDeviceState *cxlds = &CXL_TYPE3(cci->d)->cxl_dstate;
    CXLEventInterruptPolicy *policy;
    CXLEventLog *log;

    if (len_in < CXL_EVENT_INT_SETTING_MIN_LEN) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }

    policy = (CXLEventInterruptPolicy *)payload_in;

    log = &cxlds->event_logs[CXL_EVENT_TYPE_INFO];
    log->irq_enabled = (policy->info_settings & CXL_EVENT_INT_MODE_MASK) ==
                        CXL_INT_MSI_MSIX;

    log = &cxlds->event_logs[CXL_EVENT_TYPE_WARN];
    log->irq_enabled = (policy->warn_settings & CXL_EVENT_INT_MODE_MASK) ==
                        CXL_INT_MSI_MSIX;

    log = &cxlds->event_logs[CXL_EVENT_TYPE_FAIL];
    log->irq_enabled = (policy->failure_settings & CXL_EVENT_INT_MODE_MASK) ==
                        CXL_INT_MSI_MSIX;

    log = &cxlds->event_logs[CXL_EVENT_TYPE_FATAL];
    log->irq_enabled = (policy->fatal_settings & CXL_EVENT_INT_MODE_MASK) ==
                        CXL_INT_MSI_MSIX;

    /* DCD is optional */
    if (len_in < sizeof(*policy)) {
        return CXL_MBOX_SUCCESS;
    }

    log = &cxlds->event_logs[CXL_EVENT_TYPE_DYNAMIC_CAP];
    log->irq_enabled = (policy->dyn_cap_settings & CXL_EVENT_INT_MODE_MASK) ==
                        CXL_INT_MSI_MSIX;

    *len_out = 0;
    return CXL_MBOX_SUCCESS;
}
210
/* CXL r3.0 section 8.2.9.1.1: Identify (Opcode 0001h) */
static CXLRetCode cmd_infostat_identify(const struct cxl_cmd *cmd,
                                        uint8_t *payload_in,
                                        size_t len_in,
                                        uint8_t *payload_out,
                                        size_t *len_out,
                                        CXLCCI *cci)
{
    PCIDeviceClass *class = PCI_DEVICE_GET_CLASS(cci->d);
    /* Response payload layout - fixed 18 bytes */
    struct {
        uint16_t pcie_vid;
        uint16_t pcie_did;
        uint16_t pcie_subsys_vid;
        uint16_t pcie_subsys_id;
        uint64_t sn;
        uint8_t max_message_size;
        uint8_t component_type;
    } QEMU_PACKED *is_identify;
    QEMU_BUILD_BUG_ON(sizeof(*is_identify) != 18);

    is_identify = (void *)payload_out;
    memset(is_identify, 0, sizeof(*is_identify));
    is_identify->pcie_vid = class->vendor_id;
    is_identify->pcie_did = class->device_id;
    /* Fields beyond VID/DID depend on what kind of device owns this CCI */
    if (object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_USP)) {
        is_identify->sn = CXL_USP(cci->d)->sn;
        /* Subsystem info not defined for a USP */
        is_identify->pcie_subsys_vid = 0;
        is_identify->pcie_subsys_id = 0;
        is_identify->component_type = 0x0; /* Switch */
    } else if (object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_TYPE3)) {
        PCIDevice *pci_dev = PCI_DEVICE(cci->d);

        is_identify->sn = CXL_TYPE3(cci->d)->sn;
        /*
         * We can't always use class->subsystem_vendor_id as
         * it is not set if the defaults are used.
         */
        is_identify->pcie_subsys_vid =
            pci_get_word(pci_dev->config + PCI_SUBSYSTEM_VENDOR_ID);
        is_identify->pcie_subsys_id =
            pci_get_word(pci_dev->config + PCI_SUBSYSTEM_ID);
        is_identify->component_type = 0x3; /* Type 3 */
    }

    /* TODO: Allow this to vary across different CCIs */
    is_identify->max_message_size = 9; /* 512 bytes - MCTP_CXL_MAILBOX_BYTES */
    *len_out = sizeof(*is_identify);
    return CXL_MBOX_SUCCESS;
}
261
262 static void cxl_set_dsp_active_bm(PCIBus *b, PCIDevice *d,
263 void *private)
264 {
265 uint8_t *bm = private;
266 if (object_dynamic_cast(OBJECT(d), TYPE_CXL_DSP)) {
267 uint8_t port = PCIE_PORT(d)->port;
268 bm[port / 8] |= 1 << (port % 8);
269 }
270 }
271
/* CXL r3.0 FM-API: Identify Switch Device (Opcode 5100h) */
static CXLRetCode cmd_identify_switch_device(const struct cxl_cmd *cmd,
                                             uint8_t *payload_in,
                                             size_t len_in,
                                             uint8_t *payload_out,
                                             size_t *len_out,
                                             CXLCCI *cci)
{
    PCIEPort *usp = PCIE_PORT(cci->d);
    PCIBus *bus = &PCI_BRIDGE(cci->d)->sec_bus;
    int num_phys_ports = pcie_count_ds_ports(bus);

    /* Response payload - fixed 0x49 bytes */
    struct cxl_fmapi_ident_switch_dev_resp_pl {
        uint8_t ingress_port_id;
        uint8_t rsvd;
        uint8_t num_physical_ports;
        uint8_t num_vcss;
        uint8_t active_port_bitmask[0x20];
        uint8_t active_vcs_bitmask[0x20];
        uint16_t total_vppbs;
        uint16_t bound_vppbs;
        uint8_t num_hdm_decoders_per_usp;
    } QEMU_PACKED *out;
    QEMU_BUILD_BUG_ON(sizeof(*out) != 0x49);

    out = (struct cxl_fmapi_ident_switch_dev_resp_pl *)payload_out;
    *out = (struct cxl_fmapi_ident_switch_dev_resp_pl) {
        .num_physical_ports = num_phys_ports + 1, /* 1 USP */
        .num_vcss = 1, /* Not yet support multiple VCS - potentially tricky */
        .active_vcs_bitmask[0] = 0x1,
        .total_vppbs = num_phys_ports + 1,
        .bound_vppbs = num_phys_ports + 1,
        .num_hdm_decoders_per_usp = 4,
    };

    /* Depends on the CCI type */
    if (object_dynamic_cast(OBJECT(cci->intf), TYPE_PCIE_PORT)) {
        out->ingress_port_id = PCIE_PORT(cci->intf)->port;
    } else {
        /* MCTP? */
        out->ingress_port_id = 0;
    }

    /* One bit per downstream port, plus a bit for the USP itself */
    pci_for_each_device_under_bus(bus, cxl_set_dsp_active_bm,
                                  out->active_port_bitmask);
    out->active_port_bitmask[usp->port / 8] |= (1 << usp->port % 8);

    *len_out = sizeof(*out);

    return CXL_MBOX_SUCCESS;
}
323 /* 8.2.9.2.1 */
324 static CXLRetCode cmd_firmware_update_get_info(const struct cxl_cmd *cmd,
325 uint8_t *payload_in,
326 size_t len,
327 uint8_t *payload_out,
328 size_t *len_out,
329 CXLCCI *cci)
330 {
331 CXLDeviceState *cxl_dstate = &CXL_TYPE3(cci->d)->cxl_dstate;
332 struct {
333 uint8_t slots_supported;
334 uint8_t slot_info;
335 uint8_t caps;
336 uint8_t rsvd[0xd];
337 char fw_rev1[0x10];
338 char fw_rev2[0x10];
339 char fw_rev3[0x10];
340 char fw_rev4[0x10];
341 } QEMU_PACKED *fw_info;
342 QEMU_BUILD_BUG_ON(sizeof(*fw_info) != 0x50);
343
344 if ((cxl_dstate->vmem_size < CXL_CAPACITY_MULTIPLIER) ||
345 (cxl_dstate->pmem_size < CXL_CAPACITY_MULTIPLIER)) {
346 return CXL_MBOX_INTERNAL_ERROR;
347 }
348
349 fw_info = (void *)payload_out;
350 memset(fw_info, 0, sizeof(*fw_info));
351
352 fw_info->slots_supported = 2;
353 fw_info->slot_info = BIT(0) | BIT(3);
354 fw_info->caps = 0;
355 pstrcpy(fw_info->fw_rev1, sizeof(fw_info->fw_rev1), "BWFW VERSION 0");
356
357 *len_out = sizeof(*fw_info);
358 return CXL_MBOX_SUCCESS;
359 }
360
361 /* 8.2.9.3.1 */
362 static CXLRetCode cmd_timestamp_get(const struct cxl_cmd *cmd,
363 uint8_t *payload_in,
364 size_t len_in,
365 uint8_t *payload_out,
366 size_t *len_out,
367 CXLCCI *cci)
368 {
369 CXLDeviceState *cxl_dstate = &CXL_TYPE3(cci->d)->cxl_dstate;
370 uint64_t final_time = cxl_device_get_timestamp(cxl_dstate);
371
372 stq_le_p(payload_out, final_time);
373 *len_out = 8;
374
375 return CXL_MBOX_SUCCESS;
376 }
377
378 /* 8.2.9.3.2 */
379 static CXLRetCode cmd_timestamp_set(const struct cxl_cmd *cmd,
380 uint8_t *payload_in,
381 size_t len_in,
382 uint8_t *payload_out,
383 size_t *len_out,
384 CXLCCI *cci)
385 {
386 CXLDeviceState *cxl_dstate = &CXL_TYPE3(cci->d)->cxl_dstate;
387
388 cxl_dstate->timestamp.set = true;
389 cxl_dstate->timestamp.last_set = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
390
391 cxl_dstate->timestamp.host_set = le64_to_cpu(*(uint64_t *)payload_in);
392
393 *len_out = 0;
394 return CXL_MBOX_SUCCESS;
395 }
396
/* CXL 3.0 8.2.9.5.2.1 Command Effects Log (CEL) */
/* Fixed UUID identifying the CEL for Get Supported Logs / Get Log */
static const QemuUUID cel_uuid = {
    .data = UUID(0x0da9c0b5, 0xbf41, 0x4b78, 0x8f, 0x79,
                 0x96, 0xb1, 0x62, 0x3b, 0x3f, 0x17)
};
402
/* 8.2.9.4.1: Get Supported Logs - only the CEL is supported here */
static CXLRetCode cmd_logs_get_supported(const struct cxl_cmd *cmd,
                                         uint8_t *payload_in,
                                         size_t len_in,
                                         uint8_t *payload_out,
                                         size_t *len_out,
                                         CXLCCI *cci)
{
    struct {
        uint16_t entries;
        uint8_t rsvd[6];
        struct {
            QemuUUID uuid;
            uint32_t size;
        } log_entries[1];
    } QEMU_PACKED *supported_logs = (void *)payload_out;
    QEMU_BUILD_BUG_ON(sizeof(*supported_logs) != 0x1c);

    supported_logs->entries = 1;
    supported_logs->log_entries[0].uuid = cel_uuid;
    /* 4 bytes per CEL entry: 2 byte opcode + 2 byte effects */
    supported_logs->log_entries[0].size = 4 * cci->cel_size;

    *len_out = sizeof(*supported_logs);
    return CXL_MBOX_SUCCESS;
}
428
429 /* 8.2.9.4.2 */
430 static CXLRetCode cmd_logs_get_log(const struct cxl_cmd *cmd,
431 uint8_t *payload_in,
432 size_t len_in,
433 uint8_t *payload_out,
434 size_t *len_out,
435 CXLCCI *cci)
436 {
437 struct {
438 QemuUUID uuid;
439 uint32_t offset;
440 uint32_t length;
441 } QEMU_PACKED QEMU_ALIGNED(16) *get_log;
442
443 get_log = (void *)payload_in;
444
445 /*
446 * 8.2.9.4.2
447 * The device shall return Invalid Parameter if the Offset or Length
448 * fields attempt to access beyond the size of the log as reported by Get
449 * Supported Logs.
450 *
451 * XXX: Spec is wrong, "Invalid Parameter" isn't a thing.
452 * XXX: Spec doesn't address incorrect UUID incorrectness.
453 *
454 * The CEL buffer is large enough to fit all commands in the emulation, so
455 * the only possible failure would be if the mailbox itself isn't big
456 * enough.
457 */
458 if (get_log->offset + get_log->length > cci->payload_max) {
459 return CXL_MBOX_INVALID_INPUT;
460 }
461
462 if (!qemu_uuid_is_equal(&get_log->uuid, &cel_uuid)) {
463 return CXL_MBOX_UNSUPPORTED;
464 }
465
466 /* Store off everything to local variables so we can wipe out the payload */
467 *len_out = get_log->length;
468
469 memmove(payload_out, cci->cel_log + get_log->offset, get_log->length);
470
471 return CXL_MBOX_SUCCESS;
472 }
473
/* 8.2.9.5.1.1: Identify Memory Device (Opcode 4000h) */
static CXLRetCode cmd_identify_memory_device(const struct cxl_cmd *cmd,
                                             uint8_t *payload_in,
                                             size_t len_in,
                                             uint8_t *payload_out,
                                             size_t *len_out,
                                             CXLCCI *cci)
{
    /* Response payload - fixed 0x43 bytes */
    struct {
        char fw_revision[0x10];
        uint64_t total_capacity;
        uint64_t volatile_capacity;
        uint64_t persistent_capacity;
        uint64_t partition_align;
        uint16_t info_event_log_size;
        uint16_t warning_event_log_size;
        uint16_t failure_event_log_size;
        uint16_t fatal_event_log_size;
        uint32_t lsa_size;
        uint8_t poison_list_max_mer[3];
        uint16_t inject_poison_limit;
        uint8_t poison_caps;
        uint8_t qos_telemetry_caps;
    } QEMU_PACKED *id;
    QEMU_BUILD_BUG_ON(sizeof(*id) != 0x43);
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLType3Class *cvc = CXL_TYPE3_GET_CLASS(ct3d);
    CXLDeviceState *cxl_dstate = &ct3d->cxl_dstate;

    /* Both partitions must be multiples of the capacity granularity */
    if ((!QEMU_IS_ALIGNED(cxl_dstate->vmem_size, CXL_CAPACITY_MULTIPLIER)) ||
        (!QEMU_IS_ALIGNED(cxl_dstate->pmem_size, CXL_CAPACITY_MULTIPLIER))) {
        return CXL_MBOX_INTERNAL_ERROR;
    }

    id = (void *)payload_out;
    memset(id, 0, sizeof(*id));

    snprintf(id->fw_revision, 0x10, "BWFW VERSION %02d", 0);

    /* Capacities are reported in units of CXL_CAPACITY_MULTIPLIER */
    stq_le_p(&id->total_capacity,
             cxl_dstate->mem_size / CXL_CAPACITY_MULTIPLIER);
    stq_le_p(&id->persistent_capacity,
             cxl_dstate->pmem_size / CXL_CAPACITY_MULTIPLIER);
    stq_le_p(&id->volatile_capacity,
             cxl_dstate->vmem_size / CXL_CAPACITY_MULTIPLIER);
    stl_le_p(&id->lsa_size, cvc->get_lsa_size(ct3d));
    /* 256 poison records */
    st24_le_p(id->poison_list_max_mer, 256);
    /* No limit - so limited by main poison record limit */
    stw_le_p(&id->inject_poison_limit, 0);

    *len_out = sizeof(*id);
    return CXL_MBOX_SUCCESS;
}
528
/*
 * CXL Get Partition Info (Opcode 4100h)
 *
 * Reports the active volatile/persistent partition sizes in units of
 * CXL_CAPACITY_MULTIPLIER.  Repartitioning is not supported, so the
 * "next" fields are always zero.
 */
static CXLRetCode cmd_ccls_get_partition_info(const struct cxl_cmd *cmd,
                                              uint8_t *payload_in,
                                              size_t len_in,
                                              uint8_t *payload_out,
                                              size_t *len_out,
                                              CXLCCI *cci)
{
    CXLDeviceState *cxl_dstate = &CXL_TYPE3(cci->d)->cxl_dstate;
    struct {
        uint64_t active_vmem;
        uint64_t active_pmem;
        uint64_t next_vmem;
        uint64_t next_pmem;
    } QEMU_PACKED *part_info = (void *)payload_out;
    QEMU_BUILD_BUG_ON(sizeof(*part_info) != 0x20);

    if ((!QEMU_IS_ALIGNED(cxl_dstate->vmem_size, CXL_CAPACITY_MULTIPLIER)) ||
        (!QEMU_IS_ALIGNED(cxl_dstate->pmem_size, CXL_CAPACITY_MULTIPLIER))) {
        return CXL_MBOX_INTERNAL_ERROR;
    }

    stq_le_p(&part_info->active_vmem,
             cxl_dstate->vmem_size / CXL_CAPACITY_MULTIPLIER);
    /*
     * When both next_vmem and next_pmem are 0, there is no pending change to
     * partitioning.
     */
    stq_le_p(&part_info->next_vmem, 0);
    stq_le_p(&part_info->active_pmem,
             cxl_dstate->pmem_size / CXL_CAPACITY_MULTIPLIER);
    stq_le_p(&part_info->next_pmem, 0);

    *len_out = sizeof(*part_info);
    return CXL_MBOX_SUCCESS;
}
564
565 static CXLRetCode cmd_ccls_get_lsa(const struct cxl_cmd *cmd,
566 uint8_t *payload_in,
567 size_t len_in,
568 uint8_t *payload_out,
569 size_t *len_out,
570 CXLCCI *cci)
571 {
572 struct {
573 uint32_t offset;
574 uint32_t length;
575 } QEMU_PACKED *get_lsa;
576 CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
577 CXLType3Class *cvc = CXL_TYPE3_GET_CLASS(ct3d);
578 uint32_t offset, length;
579
580 get_lsa = (void *)payload_in;
581 offset = get_lsa->offset;
582 length = get_lsa->length;
583
584 if (offset + length > cvc->get_lsa_size(ct3d)) {
585 *len_out = 0;
586 return CXL_MBOX_INVALID_INPUT;
587 }
588
589 *len_out = cvc->get_lsa(ct3d, payload_out, length, offset);
590 return CXL_MBOX_SUCCESS;
591 }
592
593 static CXLRetCode cmd_ccls_set_lsa(const struct cxl_cmd *cmd,
594 uint8_t *payload_in,
595 size_t len_in,
596 uint8_t *payload_out,
597 size_t *len_out,
598 CXLCCI *cci)
599 {
600 struct set_lsa_pl {
601 uint32_t offset;
602 uint32_t rsvd;
603 uint8_t data[];
604 } QEMU_PACKED;
605 struct set_lsa_pl *set_lsa_payload = (void *)payload_in;
606 CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
607 CXLType3Class *cvc = CXL_TYPE3_GET_CLASS(ct3d);
608 const size_t hdr_len = offsetof(struct set_lsa_pl, data);
609
610 *len_out = 0;
611 if (!len_in) {
612 return CXL_MBOX_SUCCESS;
613 }
614
615 if (set_lsa_payload->offset + len_in > cvc->get_lsa_size(ct3d) + hdr_len) {
616 return CXL_MBOX_INVALID_INPUT;
617 }
618 len_in -= hdr_len;
619
620 cvc->set_lsa(ct3d, set_lsa_payload->data, len_in, set_lsa_payload->offset);
621 return CXL_MBOX_SUCCESS;
622 }
623
/*
 * This is very inefficient, but good enough for now!
 * Also the payload will always fit, so no need to handle the MORE flag and
 * make this stateful. We may want to allow longer poison lists to aid
 * testing that kernel functionality.
 */
static CXLRetCode cmd_media_get_poison_list(const struct cxl_cmd *cmd,
                                            uint8_t *payload_in,
                                            size_t len_in,
                                            uint8_t *payload_out,
                                            size_t *len_out,
                                            CXLCCI *cci)
{
    /* Input: DPA of the query window and its length in 64-byte lines */
    struct get_poison_list_pl {
        uint64_t pa;
        uint64_t length;
    } QEMU_PACKED;

    struct get_poison_list_out_pl {
        uint8_t flags;
        uint8_t rsvd1;
        uint64_t overflow_timestamp;
        uint16_t count;
        uint8_t rsvd2[0x14];
        struct {
            uint64_t addr;
            uint32_t length;
            uint32_t resv;
        } QEMU_PACKED records[];
    } QEMU_PACKED;

    struct get_poison_list_pl *in = (void *)payload_in;
    struct get_poison_list_out_pl *out = (void *)payload_out;
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    uint16_t record_count = 0, i = 0;
    uint64_t query_start, query_length;
    CXLPoisonList *poison_list = &ct3d->poison_list;
    CXLPoison *ent;
    uint16_t out_pl_len;

    query_start = ldq_le_p(&in->pa);
    /* 64 byte alignment required */
    if (query_start & 0x3f) {
        return CXL_MBOX_INVALID_INPUT;
    }
    query_length = ldq_le_p(&in->length) * CXL_CACHE_LINE_SIZE;

    /* First pass: count overlapping entries to size the output payload */
    QLIST_FOREACH(ent, poison_list, node) {
        /* Check for no overlap */
        if (ent->start >= query_start + query_length ||
            ent->start + ent->length <= query_start) {
            continue;
        }
        record_count++;
    }
    out_pl_len = sizeof(*out) + record_count * sizeof(out->records[0]);
    assert(out_pl_len <= CXL_MAILBOX_MAX_PAYLOAD_SIZE);

    /* Second pass: emit a record per overlap, clipped to the query window */
    memset(out, 0, out_pl_len);
    QLIST_FOREACH(ent, poison_list, node) {
        uint64_t start, stop;

        /* Check for no overlap */
        if (ent->start >= query_start + query_length ||
            ent->start + ent->length <= query_start) {
            continue;
        }

        /* Deal with overlap */
        start = MAX(ROUND_DOWN(ent->start, 64ull), query_start);
        stop = MIN(ROUND_DOWN(ent->start, 64ull) + ent->length,
                   query_start + query_length);
        /* The low 3 bits of the address field carry the poison type */
        stq_le_p(&out->records[i].addr, start | (ent->type & 0x7));
        stl_le_p(&out->records[i].length, (stop - start) / CXL_CACHE_LINE_SIZE);
        i++;
    }
    if (ct3d->poison_list_overflowed) {
        out->flags = (1 << 1);
        stq_le_p(&out->overflow_timestamp, ct3d->poison_list_overflow_ts);
    }
    stw_le_p(&out->count, record_count);
    *len_out = out_pl_len;
    return CXL_MBOX_SUCCESS;
}
708
/*
 * CXL Inject Poison (Opcode 4301h)
 *
 * Records one 64-byte cache line at the given DPA as poisoned.  Injecting
 * into a line already covered by an existing entry succeeds without
 * creating a duplicate.
 */
static CXLRetCode cmd_media_inject_poison(const struct cxl_cmd *cmd,
                                          uint8_t *payload_in,
                                          size_t len_in,
                                          uint8_t *payload_out,
                                          size_t *len_out,
                                          CXLCCI *cci)
{
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLPoisonList *poison_list = &ct3d->poison_list;
    CXLPoison *ent;
    struct inject_poison_pl {
        uint64_t dpa;
    };
    struct inject_poison_pl *in = (void *)payload_in;
    uint64_t dpa = ldq_le_p(&in->dpa);
    CXLPoison *p;

    /* Already fully covered by an existing entry?  Nothing to do. */
    QLIST_FOREACH(ent, poison_list, node) {
        if (dpa >= ent->start &&
            dpa + CXL_CACHE_LINE_SIZE <= ent->start + ent->length) {
            return CXL_MBOX_SUCCESS;
        }
    }

    if (ct3d->poison_list_cnt == CXL_POISON_LIST_LIMIT) {
        return CXL_MBOX_INJECT_POISON_LIMIT;
    }
    p = g_new0(CXLPoison, 1);

    p->length = CXL_CACHE_LINE_SIZE;
    p->start = dpa;
    p->type = CXL_POISON_TYPE_INJECTED;

    /*
     * Possible todo: Merge with existing entry if next to it and if same type
     */
    QLIST_INSERT_HEAD(poison_list, p, node);
    ct3d->poison_list_cnt++;
    *len_out = 0;

    return CXL_MBOX_SUCCESS;
}
751
/*
 * CXL Clear Poison (Opcode 4302h)
 *
 * Clears poison for one 64-byte cache line and writes the supplied
 * replacement data.  If the cleared line sits inside a larger poison
 * entry, the entry is split into the fragment(s) that remain poisoned.
 */
static CXLRetCode cmd_media_clear_poison(const struct cxl_cmd *cmd,
                                         uint8_t *payload_in,
                                         size_t len_in,
                                         uint8_t *payload_out,
                                         size_t *len_out,
                                         CXLCCI *cci)
{
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLDeviceState *cxl_dstate = &ct3d->cxl_dstate;
    CXLPoisonList *poison_list = &ct3d->poison_list;
    CXLType3Class *cvc = CXL_TYPE3_GET_CLASS(ct3d);
    struct clear_poison_pl {
        uint64_t dpa;
        uint8_t data[64];
    };
    CXLPoison *ent;
    uint64_t dpa;

    struct clear_poison_pl *in = (void *)payload_in;

    dpa = ldq_le_p(&in->dpa);
    if (dpa + CXL_CACHE_LINE_SIZE > cxl_dstate->mem_size) {
        return CXL_MBOX_INVALID_PA;
    }

    /* Clearing a region with no poison is not an error so always do so */
    if (cvc->set_cacheline) {
        if (!cvc->set_cacheline(ct3d, dpa, in->data)) {
            return CXL_MBOX_INTERNAL_ERROR;
        }
    }

    QLIST_FOREACH(ent, poison_list, node) {
        /*
         * Test for contained in entry. Simpler than general case
         * as clearing 64 bytes and entries 64 byte aligned
         */
        if ((dpa >= ent->start) && (dpa < ent->start + ent->length)) {
            break;
        }
    }
    /* ent is NULL here when the loop ran to completion without a match */
    if (!ent) {
        return CXL_MBOX_SUCCESS;
    }

    QLIST_REMOVE(ent, node);
    ct3d->poison_list_cnt--;

    /* Poison remains before the cleared line: re-add it as a fragment */
    if (dpa > ent->start) {
        CXLPoison *frag;
        /* Cannot overflow as replacing existing entry */

        frag = g_new0(CXLPoison, 1);

        frag->start = ent->start;
        frag->length = dpa - ent->start;
        frag->type = ent->type;

        QLIST_INSERT_HEAD(poison_list, frag, node);
        ct3d->poison_list_cnt++;
    }

    /* Poison remains after the cleared line: re-add that as well */
    if (dpa + CXL_CACHE_LINE_SIZE < ent->start + ent->length) {
        CXLPoison *frag;

        if (ct3d->poison_list_cnt == CXL_POISON_LIST_LIMIT) {
            cxl_set_poison_list_overflowed(ct3d);
        } else {
            frag = g_new0(CXLPoison, 1);

            frag->start = dpa + CXL_CACHE_LINE_SIZE;
            frag->length = ent->start + ent->length - frag->start;
            frag->type = ent->type;
            QLIST_INSERT_HEAD(poison_list, frag, node);
            ct3d->poison_list_cnt++;
        }
    }
    /* Any fragments have been added, free original entry */
    g_free(ent);
    *len_out = 0;

    return CXL_MBOX_SUCCESS;
}
835
836 #define IMMEDIATE_CONFIG_CHANGE (1 << 1)
837 #define IMMEDIATE_DATA_CHANGE (1 << 2)
838 #define IMMEDIATE_POLICY_CHANGE (1 << 3)
839 #define IMMEDIATE_LOG_CHANGE (1 << 4)
840
/*
 * Type 3 memory device command table, indexed [command_set][command].
 * Entry format: { name, handler, expected input payload length, effects }.
 * An input length of ~0 marks a command accepting variable-length input.
 */
static const struct cxl_cmd cxl_cmd_set[256][256] = {
    [EVENTS][GET_RECORDS] = { "EVENTS_GET_RECORDS",
        cmd_events_get_records, 1, 0 },
    [EVENTS][CLEAR_RECORDS] = { "EVENTS_CLEAR_RECORDS",
        cmd_events_clear_records, ~0, IMMEDIATE_LOG_CHANGE },
    [EVENTS][GET_INTERRUPT_POLICY] = { "EVENTS_GET_INTERRUPT_POLICY",
        cmd_events_get_interrupt_policy, 0, 0 },
    [EVENTS][SET_INTERRUPT_POLICY] = { "EVENTS_SET_INTERRUPT_POLICY",
        cmd_events_set_interrupt_policy,
        ~0, IMMEDIATE_CONFIG_CHANGE },
    [FIRMWARE_UPDATE][GET_INFO] = { "FIRMWARE_UPDATE_GET_INFO",
        cmd_firmware_update_get_info, 0, 0 },
    [TIMESTAMP][GET] = { "TIMESTAMP_GET", cmd_timestamp_get, 0, 0 },
    [TIMESTAMP][SET] = { "TIMESTAMP_SET", cmd_timestamp_set,
                         8, IMMEDIATE_POLICY_CHANGE },
    [LOGS][GET_SUPPORTED] = { "LOGS_GET_SUPPORTED", cmd_logs_get_supported,
                              0, 0 },
    [LOGS][GET_LOG] = { "LOGS_GET_LOG", cmd_logs_get_log, 0x18, 0 },
    [IDENTIFY][MEMORY_DEVICE] = { "IDENTIFY_MEMORY_DEVICE",
        cmd_identify_memory_device, 0, 0 },
    [CCLS][GET_PARTITION_INFO] = { "CCLS_GET_PARTITION_INFO",
        cmd_ccls_get_partition_info, 0, 0 },
    [CCLS][GET_LSA] = { "CCLS_GET_LSA", cmd_ccls_get_lsa, 8, 0 },
    [CCLS][SET_LSA] = { "CCLS_SET_LSA", cmd_ccls_set_lsa,
        ~0, IMMEDIATE_CONFIG_CHANGE | IMMEDIATE_DATA_CHANGE },
    [MEDIA_AND_POISON][GET_POISON_LIST] = { "MEDIA_AND_POISON_GET_POISON_LIST",
        cmd_media_get_poison_list, 16, 0 },
    [MEDIA_AND_POISON][INJECT_POISON] = { "MEDIA_AND_POISON_INJECT_POISON",
        cmd_media_inject_poison, 8, 0 },
    [MEDIA_AND_POISON][CLEAR_POISON] = { "MEDIA_AND_POISON_CLEAR_POISON",
        cmd_media_clear_poison, 72, 0 },
};
873
/*
 * Command table for switch CCIs.  Same entry format as cxl_cmd_set:
 * { name, handler, expected input payload length, effects }.
 */
static const struct cxl_cmd cxl_cmd_set_sw[256][256] = {
    [INFOSTAT][IS_IDENTIFY] = { "IDENTIFY", cmd_infostat_identify, 0, 0 },
    [TIMESTAMP][GET] = { "TIMESTAMP_GET", cmd_timestamp_get, 0, 0 },
    [TIMESTAMP][SET] = { "TIMESTAMP_SET", cmd_timestamp_set, 0,
                         IMMEDIATE_POLICY_CHANGE },
    [LOGS][GET_SUPPORTED] = { "LOGS_GET_SUPPORTED", cmd_logs_get_supported, 0,
                              0 },
    [LOGS][GET_LOG] = { "LOGS_GET_LOG", cmd_logs_get_log, 0x18, 0 },
    [PHYSICAL_SWITCH][IDENTIFY_SWITCH_DEVICE] = { "IDENTIFY_SWITCH_DEVICE",
        cmd_identify_switch_device, 0, 0 },
};
885
/*
 * Dispatch one mailbox command on @cci: look up the handler for
 * (set, cmd), validate the input payload length against the table, then
 * invoke the handler.  Returns a CXLRetCode; *len_out is zeroed first so
 * an early error never reports stale output length.
 */
int cxl_process_cci_message(CXLCCI *cci, uint8_t set, uint8_t cmd,
                            size_t len_in, uint8_t *pl_in, size_t *len_out,
                            uint8_t *pl_out, bool *bg_started)
{
    const struct cxl_cmd *cxl_cmd;
    opcode_handler h;

    *len_out = 0;
    cxl_cmd = &cci->cxl_cmd_set[set][cmd];
    h = cxl_cmd->handler;
    if (!h) {
        qemu_log_mask(LOG_UNIMP, "Command %04xh not implemented\n",
                      set << 8 | cmd);
        return CXL_MBOX_UNSUPPORTED;
    }

    /*
     * ~0 in the table marks variable-length input.
     * NOTE(review): "cxl_cmd->in != ~0" relies on the type of the "in"
     * field promoting so it compares equal to the int ~0 sentinel; confirm
     * against the struct cxl_cmd declaration (an unsigned narrow type
     * would make this comparison always true).
     */
    if (len_in != cxl_cmd->in && cxl_cmd->in != ~0) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }

    return (*h)(cxl_cmd, pl_in, len_in, pl_out, len_out, cci);
}
908
909 void cxl_init_cci(CXLCCI *cci, size_t payload_max)
910 {
911 cci->payload_max = payload_max;
912 for (int set = 0; set < 256; set++) {
913 for (int cmd = 0; cmd < 256; cmd++) {
914 if (cci->cxl_cmd_set[set][cmd].handler) {
915 const struct cxl_cmd *c = &cci->cxl_cmd_set[set][cmd];
916 struct cel_log *log =
917 &cci->cel_log[cci->cel_size];
918
919 log->opcode = (set << 8) | cmd;
920 log->effect = c->effect;
921 cci->cel_size++;
922 }
923 }
924 }
925 }
926
/*
 * Bind the switch command set to @cci and build its CEL.
 * @intf is the device presenting the CCI (the transport end); @d is the
 * device the commands operate on.
 */
void cxl_initialize_mailbox_swcci(CXLCCI *cci, DeviceState *intf,
                                  DeviceState *d, size_t payload_max)
{
    cci->cxl_cmd_set = cxl_cmd_set_sw;
    cci->d = d;
    cci->intf = intf;
    cxl_init_cci(cci, payload_max);
}
935
/* Bind the Type 3 memory device command set to @cci and build its CEL. */
void cxl_initialize_mailbox_t3(CXLCCI *cci, DeviceState *d, size_t payload_max)
{
    cci->cxl_cmd_set = cxl_cmd_set;
    cci->d = d;

    /* No separation for PCI MB as protocol handled in PCI device */
    cci->intf = d;
    cxl_init_cci(cci, payload_max);
}