hw/cxl/cxl-mailbox-utils.c
/*
 * CXL Utility library for mailbox interface
 *
 * Copyright(C) 2020 Intel Corporation.
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See the
 * COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "hw/cxl/cxl.h"
#include "hw/pci/pci.h"
#include "qemu/cutils.h"
#include "qemu/log.h"
#include "qemu/units.h"
#include "qemu/uuid.h"

#define CXL_CAPACITY_MULTIPLIER   (256 * MiB)

/*
 * How to add a new command, example. The command set FOO, with cmd BAR.
 *  1. Add the command set and cmd to the enum.
 *     FOO    = 0x7f,
 *     #define BAR 0
 *  2. Implement the handler
 *    static CXLRetCode cmd_foo_bar(struct cxl_cmd *cmd,
 *                                  CXLDeviceState *cxl_dstate, uint16_t *len)
 *  3. Add the command to the cxl_cmd_set[][]
 *    [FOO][BAR] = { "FOO_BAR", cmd_foo_bar, x, y },
 *  4. Fill in the handler body and return a valid CXLRetCode
 *    { ... return CXL_MBOX_SUCCESS; }
 *
 *
 * Writing the handler:
 *    The handler will provide the &struct cxl_cmd, the &CXLDeviceState, and
 *    the in/out length of the payload. The handler is responsible for
 *    consuming the payload from cmd->payload and operating upon it as
 *    necessary. It must then fill the output data into cmd->payload
 *    (overwriting what was there), setting the length, and returning a valid
 *    return code.
 *
 * XXX: The handler need not worry about endianness. The payload is read out
 * of a register interface that already deals with it.
 */
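
/*
 * Illustrative sketch only: a hypothetical FOO command set with a BAR
 * command, following the recipe above. FOO, BAR and cmd_foo_bar are invented
 * for this example and are not part of the real command table below.
 *
 *     FOO = 0x7f,
 *     #define BAR 0
 *
 *     static CXLRetCode cmd_foo_bar(struct cxl_cmd *cmd,
 *                                   CXLDeviceState *cxl_dstate, uint16_t *len)
 *     {
 *         uint32_t *out = (uint32_t *)cmd->payload;
 *
 *         stl_le_p(out, 0xabcd);    // fill the (overwritten) payload
 *         *len = sizeof(*out);      // report the output length
 *         return CXL_MBOX_SUCCESS;  // valid return code
 *     }
 *
 *     [FOO][BAR] = { "FOO_BAR", cmd_foo_bar, 0, 0 },  // fixed 0-byte input
 */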

enum {
    EVENTS      = 0x01,
        #define GET_RECORDS            0x0
        #define CLEAR_RECORDS          0x1
        #define GET_INTERRUPT_POLICY   0x2
        #define SET_INTERRUPT_POLICY   0x3
    FIRMWARE_UPDATE = 0x02,
        #define GET_INFO               0x0
    TIMESTAMP   = 0x03,
        #define GET                    0x0
        #define SET                    0x1
    LOGS        = 0x04,
        #define GET_SUPPORTED          0x0
        #define GET_LOG                0x1
    IDENTIFY    = 0x40,
        #define MEMORY_DEVICE          0x0
    CCLS        = 0x41,
        #define GET_PARTITION_INFO     0x0
        #define GET_LSA                0x2
        #define SET_LSA                0x3
};
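
/*
 * A full 16-bit opcode is the command set in the upper byte and the command
 * in the lower byte, which is how the CEL entries and the "not implemented"
 * log message below encode it. A minimal sketch using the LOGS/GET_LOG
 * values above:
 *
 *     uint16_t opcode = (LOGS << 8) | GET_LOG;   // 0x0401
 *     uint8_t set = opcode >> 8;                 // 0x04 -> LOGS
 *     uint8_t cmd = opcode & 0xff;               // 0x01 -> GET_LOG
 */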

/* 8.2.8.4.5.1 Command Return Codes */
typedef enum {
    CXL_MBOX_SUCCESS = 0x0,
    CXL_MBOX_BG_STARTED = 0x1,
    CXL_MBOX_INVALID_INPUT = 0x2,
    CXL_MBOX_UNSUPPORTED = 0x3,
    CXL_MBOX_INTERNAL_ERROR = 0x4,
    CXL_MBOX_RETRY_REQUIRED = 0x5,
    CXL_MBOX_BUSY = 0x6,
    CXL_MBOX_MEDIA_DISABLED = 0x7,
    CXL_MBOX_FW_XFER_IN_PROGRESS = 0x8,
    CXL_MBOX_FW_XFER_OUT_OF_ORDER = 0x9,
    CXL_MBOX_FW_AUTH_FAILED = 0xa,
    CXL_MBOX_FW_INVALID_SLOT = 0xb,
    CXL_MBOX_FW_ROLLEDBACK = 0xc,
    CXL_MBOX_FW_REST_REQD = 0xd,
    CXL_MBOX_INVALID_HANDLE = 0xe,
    CXL_MBOX_INVALID_PA = 0xf,
    CXL_MBOX_INJECT_POISON_LIMIT = 0x10,
    CXL_MBOX_PERMANENT_MEDIA_FAILURE = 0x11,
    CXL_MBOX_ABORTED = 0x12,
    CXL_MBOX_INVALID_SECURITY_STATE = 0x13,
    CXL_MBOX_INCORRECT_PASSPHRASE = 0x14,
    CXL_MBOX_UNSUPPORTED_MAILBOX = 0x15,
    CXL_MBOX_INVALID_PAYLOAD_LENGTH = 0x16,
    CXL_MBOX_MAX = 0x17
} CXLRetCode;

struct cxl_cmd;
typedef CXLRetCode (*opcode_handler)(struct cxl_cmd *cmd,
                                     CXLDeviceState *cxl_dstate, uint16_t *len);
struct cxl_cmd {
    const char *name;
    opcode_handler handler;
    ssize_t in;
    uint16_t effect; /* Reported in CEL */
    uint8_t *payload;
};
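
/*
 * A cxl_cmd_set[][] entry fills these fields positionally. For example, the
 * CCLS/SET_LSA entry further down reads:
 *
 *     [CCLS][SET_LSA] = { "CCLS_SET_LSA", cmd_ccls_set_lsa,
 *         ~0, IMMEDIATE_CONFIG_CHANGE | IMMEDIATE_DATA_CHANGE },
 *
 * .in == ~0 means any input length is accepted (the dispatcher skips the
 * length check); any other value requires the host-supplied length to match
 * exactly, or the command fails with Invalid Payload Length.
 */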

#define DEFINE_MAILBOX_HANDLER_ZEROED(name, size)                         \
    uint16_t __zero##name = size;                                         \
    static CXLRetCode cmd_##name(struct cxl_cmd *cmd,                     \
                                 CXLDeviceState *cxl_dstate, uint16_t *len) \
    {                                                                     \
        *len = __zero##name;                                              \
        memset(cmd->payload, 0, *len);                                    \
        return CXL_MBOX_SUCCESS;                                          \
    }
#define DEFINE_MAILBOX_HANDLER_NOP(name)                                  \
    static CXLRetCode cmd_##name(struct cxl_cmd *cmd,                     \
                                 CXLDeviceState *cxl_dstate, uint16_t *len) \
    {                                                                     \
        return CXL_MBOX_SUCCESS;                                          \
    }
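
/*
 * For reference, DEFINE_MAILBOX_HANDLER_ZEROED(events_get_records, 0x20)
 * below expands to roughly:
 *
 *     uint16_t __zeroevents_get_records = 0x20;
 *     static CXLRetCode cmd_events_get_records(struct cxl_cmd *cmd,
 *                                              CXLDeviceState *cxl_dstate,
 *                                              uint16_t *len)
 *     {
 *         *len = __zeroevents_get_records;
 *         memset(cmd->payload, 0, *len);
 *         return CXL_MBOX_SUCCESS;
 *     }
 *
 * i.e. a handler that returns a fixed-size, zero-filled payload.
 */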

DEFINE_MAILBOX_HANDLER_ZEROED(events_get_records, 0x20);
DEFINE_MAILBOX_HANDLER_NOP(events_clear_records);
DEFINE_MAILBOX_HANDLER_ZEROED(events_get_interrupt_policy, 4);
DEFINE_MAILBOX_HANDLER_NOP(events_set_interrupt_policy);

/* 8.2.9.2.1 */
static CXLRetCode cmd_firmware_update_get_info(struct cxl_cmd *cmd,
                                               CXLDeviceState *cxl_dstate,
                                               uint16_t *len)
{
    struct {
        uint8_t slots_supported;
        uint8_t slot_info;
        uint8_t caps;
        uint8_t rsvd[0xd];
        char fw_rev1[0x10];
        char fw_rev2[0x10];
        char fw_rev3[0x10];
        char fw_rev4[0x10];
    } QEMU_PACKED *fw_info;
    QEMU_BUILD_BUG_ON(sizeof(*fw_info) != 0x50);

    if ((cxl_dstate->vmem_size < CXL_CAPACITY_MULTIPLIER) ||
        (cxl_dstate->pmem_size < CXL_CAPACITY_MULTIPLIER)) {
        return CXL_MBOX_INTERNAL_ERROR;
    }

    fw_info = (void *)cmd->payload;
    memset(fw_info, 0, sizeof(*fw_info));

    fw_info->slots_supported = 2;
    fw_info->slot_info = BIT(0) | BIT(3);
    fw_info->caps = 0;
    pstrcpy(fw_info->fw_rev1, sizeof(fw_info->fw_rev1), "BWFW VERSION 0");

    *len = sizeof(*fw_info);
    return CXL_MBOX_SUCCESS;
}

/* 8.2.9.3.1 */
static CXLRetCode cmd_timestamp_get(struct cxl_cmd *cmd,
                                    CXLDeviceState *cxl_dstate,
                                    uint16_t *len)
{
    uint64_t final_time = cxl_device_get_timestamp(cxl_dstate);

    stq_le_p(cmd->payload, final_time);
    *len = 8;

    return CXL_MBOX_SUCCESS;
}

/* 8.2.9.3.2 */
static CXLRetCode cmd_timestamp_set(struct cxl_cmd *cmd,
                                    CXLDeviceState *cxl_dstate,
                                    uint16_t *len)
{
    cxl_dstate->timestamp.set = true;
    cxl_dstate->timestamp.last_set = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);

    cxl_dstate->timestamp.host_set = le64_to_cpu(*(uint64_t *)cmd->payload);

    *len = 0;
    return CXL_MBOX_SUCCESS;
}
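
/*
 * For context: cxl_device_get_timestamp(), implemented elsewhere in the CXL
 * device code, is expected to derive the reported time from the fields set
 * above, roughly along these lines (a sketch, not the authoritative
 * implementation):
 *
 *     uint64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
 *     uint64_t delta = now - cxl_dstate->timestamp.last_set;
 *     return cxl_dstate->timestamp.set ?
 *            cxl_dstate->timestamp.host_set + delta : 0;
 *
 * i.e. the host-supplied value plus the virtual time elapsed since it was
 * set.
 */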

/* CXL 3.0 8.2.9.5.2.1 Command Effects Log (CEL) */
static const QemuUUID cel_uuid = {
    .data = UUID(0x0da9c0b5, 0xbf41, 0x4b78, 0x8f, 0x79,
                 0x96, 0xb1, 0x62, 0x3b, 0x3f, 0x17)
};

/* 8.2.9.4.1 */
static CXLRetCode cmd_logs_get_supported(struct cxl_cmd *cmd,
                                         CXLDeviceState *cxl_dstate,
                                         uint16_t *len)
{
    struct {
        uint16_t entries;
        uint8_t rsvd[6];
        struct {
            QemuUUID uuid;
            uint32_t size;
        } log_entries[1];
    } QEMU_PACKED *supported_logs = (void *)cmd->payload;
    QEMU_BUILD_BUG_ON(sizeof(*supported_logs) != 0x1c);

    supported_logs->entries = 1;
    supported_logs->log_entries[0].uuid = cel_uuid;
    supported_logs->log_entries[0].size = 4 * cxl_dstate->cel_size;

    *len = sizeof(*supported_logs);
    return CXL_MBOX_SUCCESS;
}

/* 8.2.9.4.2 */
static CXLRetCode cmd_logs_get_log(struct cxl_cmd *cmd,
                                   CXLDeviceState *cxl_dstate,
                                   uint16_t *len)
{
    struct {
        QemuUUID uuid;
        uint32_t offset;
        uint32_t length;
    } QEMU_PACKED QEMU_ALIGNED(16) *get_log = (void *)cmd->payload;

    /*
     * 8.2.9.4.2
     * The device shall return Invalid Parameter if the Offset or Length
     * fields attempt to access beyond the size of the log as reported by Get
     * Supported Logs.
     *
     * XXX: Spec is wrong: there is no "Invalid Parameter" return code, so
     * Invalid Input is returned instead.
     * XXX: Spec doesn't address what to do with an incorrect UUID.
     *
     * The CEL buffer is large enough to fit all commands in the emulation, so
     * the only possible failure would be if the mailbox itself isn't big
     * enough.
     */
    if (get_log->length > cxl_dstate->payload_size ||
        get_log->offset > cxl_dstate->payload_size - get_log->length) {
        return CXL_MBOX_INVALID_INPUT;
    }

    if (!qemu_uuid_is_equal(&get_log->uuid, &cel_uuid)) {
        return CXL_MBOX_UNSUPPORTED;
    }

    /* Latch the length before the payload is overwritten with the log data */
    *len = get_log->length;

    memmove(cmd->payload, cxl_dstate->cel_log + get_log->offset,
            get_log->length);

    return CXL_MBOX_SUCCESS;
}
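
/*
 * Typical host usage (a sketch, not code from this file): the host first
 * issues Get Supported Logs to learn the CEL UUID and size, then reads the
 * log with Get Log, possibly in chunks bounded by the mailbox payload size:
 *
 *     struct { QemuUUID uuid; uint32_t offset; uint32_t length; } req = {
 *         .uuid = cel_uuid,
 *         .offset = 0,
 *         .length = MIN(cel_size_bytes, payload_size),
 *     };
 *     // submit opcode 0x0401 (LOGS/GET_LOG) with req as the input payload
 *
 * Each CEL entry is a 2-byte opcode plus 2 bytes of command effects, which
 * matches the 4 * cel_size size reported by Get Supported Logs and the
 * entries built in cxl_initialize_mailbox() below.
 */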

/* 8.2.9.5.1.1 */
static CXLRetCode cmd_identify_memory_device(struct cxl_cmd *cmd,
                                             CXLDeviceState *cxl_dstate,
                                             uint16_t *len)
{
    struct {
        char fw_revision[0x10];
        uint64_t total_capacity;
        uint64_t volatile_capacity;
        uint64_t persistent_capacity;
        uint64_t partition_align;
        uint16_t info_event_log_size;
        uint16_t warning_event_log_size;
        uint16_t failure_event_log_size;
        uint16_t fatal_event_log_size;
        uint32_t lsa_size;
        uint8_t poison_list_max_mer[3];
        uint16_t inject_poison_limit;
        uint8_t poison_caps;
        uint8_t qos_telemetry_caps;
    } QEMU_PACKED *id;
    QEMU_BUILD_BUG_ON(sizeof(*id) != 0x43);

    CXLType3Dev *ct3d = container_of(cxl_dstate, CXLType3Dev, cxl_dstate);
    CXLType3Class *cvc = CXL_TYPE3_GET_CLASS(ct3d);

    if ((!QEMU_IS_ALIGNED(cxl_dstate->vmem_size, CXL_CAPACITY_MULTIPLIER)) ||
        (!QEMU_IS_ALIGNED(cxl_dstate->pmem_size, CXL_CAPACITY_MULTIPLIER))) {
        return CXL_MBOX_INTERNAL_ERROR;
    }

    id = (void *)cmd->payload;
    memset(id, 0, sizeof(*id));

    snprintf(id->fw_revision, 0x10, "BWFW VERSION %02d", 0);

    stq_le_p(&id->total_capacity, cxl_dstate->mem_size / CXL_CAPACITY_MULTIPLIER);
    stq_le_p(&id->persistent_capacity, cxl_dstate->pmem_size / CXL_CAPACITY_MULTIPLIER);
    stq_le_p(&id->volatile_capacity, cxl_dstate->vmem_size / CXL_CAPACITY_MULTIPLIER);
    stl_le_p(&id->lsa_size, cvc->get_lsa_size(ct3d));

    *len = sizeof(*id);
    return CXL_MBOX_SUCCESS;
}
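
/*
 * Worked example (hypothetical device, not a value from this file): a device
 * backed by 512 MiB of volatile and 256 MiB of persistent memory reports
 * total_capacity = 3, volatile_capacity = 2 and persistent_capacity = 1,
 * because the Identify capacity fields are expressed in multiples of
 * CXL_CAPACITY_MULTIPLIER (256 MiB) rather than in bytes.
 */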

static CXLRetCode cmd_ccls_get_partition_info(struct cxl_cmd *cmd,
                                              CXLDeviceState *cxl_dstate,
                                              uint16_t *len)
{
    struct {
        uint64_t active_vmem;
        uint64_t active_pmem;
        uint64_t next_vmem;
        uint64_t next_pmem;
    } QEMU_PACKED *part_info = (void *)cmd->payload;
    QEMU_BUILD_BUG_ON(sizeof(*part_info) != 0x20);

    if ((!QEMU_IS_ALIGNED(cxl_dstate->vmem_size, CXL_CAPACITY_MULTIPLIER)) ||
        (!QEMU_IS_ALIGNED(cxl_dstate->pmem_size, CXL_CAPACITY_MULTIPLIER))) {
        return CXL_MBOX_INTERNAL_ERROR;
    }

    stq_le_p(&part_info->active_vmem, cxl_dstate->vmem_size / CXL_CAPACITY_MULTIPLIER);
    /*
     * When both next_vmem and next_pmem are 0, there is no pending change to
     * partitioning.
     */
    stq_le_p(&part_info->next_vmem, 0);
    stq_le_p(&part_info->active_pmem, cxl_dstate->pmem_size / CXL_CAPACITY_MULTIPLIER);
    stq_le_p(&part_info->next_pmem, 0);

    *len = sizeof(*part_info);
    return CXL_MBOX_SUCCESS;
}

static CXLRetCode cmd_ccls_get_lsa(struct cxl_cmd *cmd,
                                   CXLDeviceState *cxl_dstate,
                                   uint16_t *len)
{
    struct {
        uint32_t offset;
        uint32_t length;
    } QEMU_PACKED *get_lsa;
    CXLType3Dev *ct3d = container_of(cxl_dstate, CXLType3Dev, cxl_dstate);
    CXLType3Class *cvc = CXL_TYPE3_GET_CLASS(ct3d);
    uint32_t offset, length;

    get_lsa = (void *)cmd->payload;
    offset = get_lsa->offset;
    length = get_lsa->length;

    /* Overflow-safe bounds check against the LSA size */
    if (length > cvc->get_lsa_size(ct3d) ||
        offset > cvc->get_lsa_size(ct3d) - length) {
        *len = 0;
        return CXL_MBOX_INVALID_INPUT;
    }

    *len = cvc->get_lsa(ct3d, get_lsa, length, offset);
    return CXL_MBOX_SUCCESS;
}

static CXLRetCode cmd_ccls_set_lsa(struct cxl_cmd *cmd,
                                   CXLDeviceState *cxl_dstate,
                                   uint16_t *len)
{
    struct set_lsa_pl {
        uint32_t offset;
        uint32_t rsvd;
        uint8_t data[];
    } QEMU_PACKED;
    struct set_lsa_pl *set_lsa_payload = (void *)cmd->payload;
    CXLType3Dev *ct3d = container_of(cxl_dstate, CXLType3Dev, cxl_dstate);
    CXLType3Class *cvc = CXL_TYPE3_GET_CLASS(ct3d);
    const size_t hdr_len = offsetof(struct set_lsa_pl, data);
    uint16_t plen = *len;

    *len = 0;
    if (!plen) {
        return CXL_MBOX_SUCCESS;
    }

    if (set_lsa_payload->offset + plen > cvc->get_lsa_size(ct3d) + hdr_len) {
        return CXL_MBOX_INVALID_INPUT;
    }
    plen -= hdr_len;

    cvc->set_lsa(ct3d, set_lsa_payload->data, plen, set_lsa_payload->offset);
    return CXL_MBOX_SUCCESS;
}
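
/*
 * For reference, the Set LSA input payload seen by this handler is an 8-byte
 * header followed by the data to write (a sketch of the layout implied by
 * struct set_lsa_pl above):
 *
 *     bytes 0..3   offset into the LSA at which to start writing
 *     bytes 4..7   reserved
 *     bytes 8..    data, (input length - 8) bytes
 *
 * which is why plen is reduced by hdr_len before calling cvc->set_lsa().
 */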

#define IMMEDIATE_CONFIG_CHANGE (1 << 1)
#define IMMEDIATE_DATA_CHANGE (1 << 2)
#define IMMEDIATE_POLICY_CHANGE (1 << 3)
#define IMMEDIATE_LOG_CHANGE (1 << 4)

static struct cxl_cmd cxl_cmd_set[256][256] = {
    [EVENTS][GET_RECORDS] = { "EVENTS_GET_RECORDS",
        cmd_events_get_records, 1, 0 },
    [EVENTS][CLEAR_RECORDS] = { "EVENTS_CLEAR_RECORDS",
        cmd_events_clear_records, ~0, IMMEDIATE_LOG_CHANGE },
    [EVENTS][GET_INTERRUPT_POLICY] = { "EVENTS_GET_INTERRUPT_POLICY",
        cmd_events_get_interrupt_policy, 0, 0 },
    [EVENTS][SET_INTERRUPT_POLICY] = { "EVENTS_SET_INTERRUPT_POLICY",
        cmd_events_set_interrupt_policy, 4, IMMEDIATE_CONFIG_CHANGE },
    [FIRMWARE_UPDATE][GET_INFO] = { "FIRMWARE_UPDATE_GET_INFO",
        cmd_firmware_update_get_info, 0, 0 },
    [TIMESTAMP][GET] = { "TIMESTAMP_GET", cmd_timestamp_get, 0, 0 },
    [TIMESTAMP][SET] = { "TIMESTAMP_SET", cmd_timestamp_set, 8, IMMEDIATE_POLICY_CHANGE },
    [LOGS][GET_SUPPORTED] = { "LOGS_GET_SUPPORTED", cmd_logs_get_supported, 0, 0 },
    [LOGS][GET_LOG] = { "LOGS_GET_LOG", cmd_logs_get_log, 0x18, 0 },
    [IDENTIFY][MEMORY_DEVICE] = { "IDENTIFY_MEMORY_DEVICE",
        cmd_identify_memory_device, 0, 0 },
    [CCLS][GET_PARTITION_INFO] = { "CCLS_GET_PARTITION_INFO",
        cmd_ccls_get_partition_info, 0, 0 },
    [CCLS][GET_LSA] = { "CCLS_GET_LSA", cmd_ccls_get_lsa, 8, 0 },
    [CCLS][SET_LSA] = { "CCLS_SET_LSA", cmd_ccls_set_lsa,
        ~0, IMMEDIATE_CONFIG_CHANGE | IMMEDIATE_DATA_CHANGE },
};

void cxl_process_mailbox(CXLDeviceState *cxl_dstate)
{
    uint16_t ret = CXL_MBOX_SUCCESS;
    struct cxl_cmd *cxl_cmd;
    uint64_t status_reg;
    opcode_handler h;
    uint64_t command_reg = cxl_dstate->mbox_reg_state64[R_CXL_DEV_MAILBOX_CMD];

    uint8_t set = FIELD_EX64(command_reg, CXL_DEV_MAILBOX_CMD, COMMAND_SET);
    uint8_t cmd = FIELD_EX64(command_reg, CXL_DEV_MAILBOX_CMD, COMMAND);
    uint16_t len = FIELD_EX64(command_reg, CXL_DEV_MAILBOX_CMD, LENGTH);
    cxl_cmd = &cxl_cmd_set[set][cmd];
    h = cxl_cmd->handler;
    if (h) {
        if (len == cxl_cmd->in || cxl_cmd->in == ~0) {
            cxl_cmd->payload = cxl_dstate->mbox_reg_state +
                A_CXL_DEV_CMD_PAYLOAD;
            ret = (*h)(cxl_cmd, cxl_dstate, &len);
            assert(len <= cxl_dstate->payload_size);
        } else {
            ret = CXL_MBOX_INVALID_PAYLOAD_LENGTH;
        }
    } else {
        qemu_log_mask(LOG_UNIMP, "Command %04xh not implemented\n",
                      set << 8 | cmd);
        ret = CXL_MBOX_UNSUPPORTED;
    }

    /* Set the return code */
    status_reg = FIELD_DP64(0, CXL_DEV_MAILBOX_STS, ERRNO, ret);

    /* Set the return length */
    command_reg = FIELD_DP64(command_reg, CXL_DEV_MAILBOX_CMD, COMMAND_SET, 0);
    command_reg = FIELD_DP64(command_reg, CXL_DEV_MAILBOX_CMD, COMMAND, 0);
    command_reg = FIELD_DP64(command_reg, CXL_DEV_MAILBOX_CMD, LENGTH, len);

    cxl_dstate->mbox_reg_state64[R_CXL_DEV_MAILBOX_CMD] = command_reg;
    cxl_dstate->mbox_reg_state64[R_CXL_DEV_MAILBOX_STS] = status_reg;

    /* Tell the host we're done */
    ARRAY_FIELD_DP32(cxl_dstate->mbox_reg_state32, CXL_DEV_MAILBOX_CTRL,
                     DOORBELL, 0);
}
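
/*
 * Host-side view of the exchange this function completes (a sketch of the
 * driver sequence, not code from this file): the host writes the input
 * payload at A_CXL_DEV_CMD_PAYLOAD, programs COMMAND_SET, COMMAND and LENGTH
 * in the mailbox command register, sets the doorbell in the mailbox control
 * register, and then polls along the lines of
 *
 *     while (mailbox_ctrl_read() & DOORBELL_BIT) {
 *         // wait; this function clears DOORBELL when the command is done
 *     }
 *
 * before reading the return code from the status register and the output
 * length and payload back out. mailbox_ctrl_read() and DOORBELL_BIT are
 * placeholders for whatever register access the host environment provides.
 */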

void cxl_initialize_mailbox(CXLDeviceState *cxl_dstate)
{
    for (int set = 0; set < 256; set++) {
        for (int cmd = 0; cmd < 256; cmd++) {
            if (cxl_cmd_set[set][cmd].handler) {
                struct cxl_cmd *c = &cxl_cmd_set[set][cmd];
                struct cel_log *log =
                    &cxl_dstate->cel_log[cxl_dstate->cel_size];

                log->opcode = (set << 8) | cmd;
                log->effect = c->effect;
                cxl_dstate->cel_size++;
            }
        }
    }
}
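
/*
 * Worked example of the result: with the cxl_cmd_set table above, the CEL
 * ends up with 13 entries, one per populated handler. For instance the
 * TIMESTAMP/SET command is recorded as opcode (0x03 << 8) | 0x1 = 0x0301
 * with effect IMMEDIATE_POLICY_CHANGE, which is what Get Log then returns
 * for the CEL UUID.
 */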