/*
 * CXL Utility library for devices
 *
 * Copyright(C) 2020 Intel Corporation.
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See the
 * COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "hw/cxl/cxl.h"

/*
 * Device registers have no restrictions per the spec, and so fall back to the
 * default memory mapped register rules in 8.2:
 *   Software shall use CXL.io Memory Read and Write to access memory mapped
 *   registers defined in this section. Unless otherwise specified, software
 *   shall restrict the access width based on the following:
 *   • A 32 bit register shall be accessed as a 1 Byte, 2 Bytes or 4 Bytes
 *     quantity.
 *   • A 64 bit register shall be accessed as a 1 Byte, 2 Bytes, 4 Bytes or 8
 *     Bytes quantity.
 *   • The address shall be a multiple of the access width, e.g. when
 *     accessing a register as a 4 Byte quantity, the address shall be a
 *     multiple of 4.
 *   • The accesses shall map to contiguous bytes. If these rules are not
 *     followed, the behavior is undefined.
 */

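/*
 * caps_reg_state32 and caps_reg_state64 are assumed to alias the same
 * backing store for the capability headers; caps_ops below declares
 * impl.min_access_size = 4, so narrower guest reads have already been
 * widened by the memory core before this handler runs.
 */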
static uint64_t caps_reg_read(void *opaque, hwaddr offset, unsigned size)
{
    CXLDeviceState *cxl_dstate = opaque;

    if (size == 4) {
        return cxl_dstate->caps_reg_state32[offset / sizeof(*cxl_dstate->caps_reg_state32)];
    } else {
        return cxl_dstate->caps_reg_state64[offset / sizeof(*cxl_dstate->caps_reg_state64)];
    }
}

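/*
 * Device status register reads dispatch on access size; the
 * dev_reg_state{16,32,64} views are assumed to alias the byte-wide
 * dev_reg_state buffer, so offset / size picks the naturally aligned
 * element for the access width (unaligned accesses are rejected by
 * dev_ops below).
 */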
static uint64_t dev_reg_read(void *opaque, hwaddr offset, unsigned size)
{
    CXLDeviceState *cxl_dstate = opaque;

    switch (size) {
    case 1:
        return cxl_dstate->dev_reg_state[offset];
    case 2:
        return cxl_dstate->dev_reg_state16[offset / size];
    case 4:
        return cxl_dstate->dev_reg_state32[offset / size];
    case 8:
        return cxl_dstate->dev_reg_state64[offset / size];
    default:
        g_assert_not_reached();
    }
}

static uint64_t mailbox_reg_read(void *opaque, hwaddr offset, unsigned size)
{
    CXLDeviceState *cxl_dstate = opaque;

    switch (size) {
    case 1:
        return cxl_dstate->mbox_reg_state[offset];
    case 2:
        return cxl_dstate->mbox_reg_state16[offset / size];
    case 4:
        return cxl_dstate->mbox_reg_state32[offset / size];
    case 8:
        return cxl_dstate->mbox_reg_state64[offset / size];
    default:
        g_assert_not_reached();
    }
}

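/*
 * 32-bit mailbox register writes: only the control and capability
 * register offsets reach the backing store; writes to any other offset
 * are dropped with an "unimplemented" log entry.
 */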
static void mailbox_mem_writel(uint32_t *reg_state, hwaddr offset,
                               uint64_t value)
{
    switch (offset) {
    case A_CXL_DEV_MAILBOX_CTRL:
        /* fallthrough */
    case A_CXL_DEV_MAILBOX_CAP:
        /* RO register */
        break;
    default:
        qemu_log_mask(LOG_UNIMP,
                      "%s Unexpected 32-bit access to 0x%" PRIx64 " (WI)\n",
                      __func__, offset);
        return;
    }

    reg_state[offset / sizeof(*reg_state)] = value;
}

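/*
 * 64-bit mailbox register writes: only the command register is stored;
 * the background-command and mailbox status registers are owned by the
 * device state machine, so guest writes to them are dropped.
 */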
static void mailbox_mem_writeq(uint64_t *reg_state, hwaddr offset,
                               uint64_t value)
{
    switch (offset) {
    case A_CXL_DEV_MAILBOX_CMD:
        break;
    case A_CXL_DEV_BG_CMD_STS:
        /* BG not supported */
        /* fallthrough */
    case A_CXL_DEV_MAILBOX_STS:
        /* Read only register, will get updated by the state machine */
        return;
    default:
        qemu_log_mask(LOG_UNIMP,
                      "%s Unexpected 64-bit access to 0x%" PRIx64 " (WI)\n",
                      __func__, offset);
        return;
    }

    reg_state[offset / sizeof(*reg_state)] = value;
}

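/*
 * Writes at or beyond the command payload offset are copied verbatim
 * into the mailbox backing store; control-register writes go through
 * the filtering helpers above. A write that sets the doorbell bit kicks
 * the mailbox state machine synchronously, before the MMIO access
 * completes.
 */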
static void mailbox_reg_write(void *opaque, hwaddr offset, uint64_t value,
                              unsigned size)
{
    CXLDeviceState *cxl_dstate = opaque;

    if (offset >= A_CXL_DEV_CMD_PAYLOAD) {
        memcpy(cxl_dstate->mbox_reg_state + offset, &value, size);
        return;
    }

    switch (size) {
    case 4:
        mailbox_mem_writel(cxl_dstate->mbox_reg_state32, offset, value);
        break;
    case 8:
        mailbox_mem_writeq(cxl_dstate->mbox_reg_state64, offset, value);
        break;
    default:
        g_assert_not_reached();
    }

    if (ARRAY_FIELD_EX32(cxl_dstate->mbox_reg_state32, CXL_DEV_MAILBOX_CTRL,
                         DOORBELL)) {
        cxl_process_mailbox(cxl_dstate);
    }
}

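/*
 * The memory device status register is synthesized on every read rather
 * than kept in backing store: media is always reported ready and the
 * mailbox interface available.
 */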
static uint64_t mdev_reg_read(void *opaque, hwaddr offset, unsigned size)
{
    uint64_t retval = 0;

    retval = FIELD_DP64(retval, CXL_MEM_DEV_STS, MEDIA_STATUS, 1);
    retval = FIELD_DP64(retval, CXL_MEM_DEV_STS, MBOX_READY, 1);

    return retval;
}

static void ro_reg_write(void *opaque, hwaddr offset, uint64_t value,
                         unsigned size)
{
    /* Many register sets are read only */
}

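/*
 * For the MemoryRegionOps below, .valid constrains what the guest may
 * issue while .impl declares what the handlers implement; the memory
 * core splits or widens accesses as needed to bridge the two.
 */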
static const MemoryRegionOps mdev_ops = {
    .read = mdev_reg_read,
    .write = ro_reg_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = false,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

static const MemoryRegionOps mailbox_ops = {
    .read = mailbox_reg_read,
    .write = mailbox_reg_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = false,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};

static const MemoryRegionOps dev_ops = {
    .read = dev_reg_read,
    .write = ro_reg_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = false,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};

static const MemoryRegionOps caps_ops = {
    .read = caps_reg_read,
    .write = ro_reg_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = false,
    },
    .impl = {
        .min_access_size = 4,
        .max_access_size = 8,
    },
};

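/*
 * Lay out the device register block: a container region rounded up to a
 * power of two (it is exposed as a PCI BAR), holding the capability
 * array at offset 0 followed by the device status, mailbox, and memory
 * device register regions at their fixed offsets.
 */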
void cxl_device_register_block_init(Object *obj, CXLDeviceState *cxl_dstate)
{
    /* This will be a BAR, so needs to be rounded up to pow2 for PCI spec */
    memory_region_init(&cxl_dstate->device_registers, obj, "device-registers",
                       pow2ceil(CXL_MMIO_SIZE));

    memory_region_init_io(&cxl_dstate->caps, obj, &caps_ops, cxl_dstate,
                          "cap-array", CXL_CAPS_SIZE);
    memory_region_init_io(&cxl_dstate->device, obj, &dev_ops, cxl_dstate,
                          "device-status", CXL_DEVICE_STATUS_REGISTERS_LENGTH);
    memory_region_init_io(&cxl_dstate->mailbox, obj, &mailbox_ops, cxl_dstate,
                          "mailbox", CXL_MAILBOX_REGISTERS_LENGTH);
    memory_region_init_io(&cxl_dstate->memory_device, obj, &mdev_ops,
                          cxl_dstate, "memory device caps",
                          CXL_MEMORY_DEVICE_REGISTERS_LENGTH);

    memory_region_add_subregion(&cxl_dstate->device_registers, 0,
                                &cxl_dstate->caps);
    memory_region_add_subregion(&cxl_dstate->device_registers,
                                CXL_DEVICE_STATUS_REGISTERS_OFFSET,
                                &cxl_dstate->device);
    memory_region_add_subregion(&cxl_dstate->device_registers,
                                CXL_MAILBOX_REGISTERS_OFFSET,
                                &cxl_dstate->mailbox);
    memory_region_add_subregion(&cxl_dstate->device_registers,
                                CXL_MEMORY_DEVICE_REGISTERS_OFFSET,
                                &cxl_dstate->memory_device);
}

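/*
 * Record whether a given event log has records available and mirror the
 * aggregate bitmap into the Event Status register in the device status
 * register block.
 */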
void cxl_event_set_status(CXLDeviceState *cxl_dstate, CXLEventLogType log_type,
                          bool available)
{
    if (available) {
        cxl_dstate->event_status |= (1 << log_type);
    } else {
        cxl_dstate->event_status &= ~(1 << log_type);
    }

    ARRAY_FIELD_DP64(cxl_dstate->dev_reg_state64, CXL_DEV_EVENT_STATUS,
                     EVENT_STATUS, cxl_dstate->event_status);
}

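/* Initial state: every event log starts out reported empty. */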
static void device_reg_init_common(CXLDeviceState *cxl_dstate)
{
    CXLEventLogType log;

    for (log = 0; log < CXL_EVENT_TYPE_MAX; log++) {
        cxl_event_set_status(cxl_dstate, log, false);
    }
}

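/*
 * The PAYLOAD_SIZE capability field is log2-encoded; the shift constant
 * is assumed to be 11 here, matching the 2048 byte payload noted below.
 */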
static void mailbox_reg_init_common(CXLDeviceState *cxl_dstate)
{
    /* 2048 payload size, with no interrupt or background support */
    ARRAY_FIELD_DP32(cxl_dstate->mbox_reg_state32, CXL_DEV_MAILBOX_CAP,
                     PAYLOAD_SIZE, CXL_MAILBOX_PAYLOAD_SHIFT);
    cxl_dstate->payload_size = CXL_MAILBOX_MAX_PAYLOAD_SIZE;
}

static void memdev_reg_init_common(CXLDeviceState *cxl_dstate) { }

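/*
 * Fill in the capability array header and the three capability headers
 * (device status, mailbox, memory device), then bring each register set
 * and the mailbox command state to their defaults.
 */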
void cxl_device_register_init_common(CXLDeviceState *cxl_dstate)
{
    uint64_t *cap_hdrs = cxl_dstate->caps_reg_state64;
    const int cap_count = 3;

    /* CXL Device Capabilities Array Register */
    ARRAY_FIELD_DP64(cap_hdrs, CXL_DEV_CAP_ARRAY, CAP_ID, 0);
    ARRAY_FIELD_DP64(cap_hdrs, CXL_DEV_CAP_ARRAY, CAP_VERSION, 1);
    ARRAY_FIELD_DP64(cap_hdrs, CXL_DEV_CAP_ARRAY, CAP_COUNT, cap_count);

    cxl_device_cap_init(cxl_dstate, DEVICE_STATUS, 1, 2);
    device_reg_init_common(cxl_dstate);

    cxl_device_cap_init(cxl_dstate, MAILBOX, 2, 1);
    mailbox_reg_init_common(cxl_dstate);

    cxl_device_cap_init(cxl_dstate, MEMORY_DEVICE, 0x4000, 1);
    memdev_reg_init_common(cxl_dstate);

    cxl_initialize_mailbox(cxl_dstate);
}

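/*
 * Report the device timestamp: the host-set value plus the virtual
 * clock nanoseconds elapsed since it was set, or 0 if never set.
 */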
uint64_t cxl_device_get_timestamp(CXLDeviceState *cxl_dstate)
{
    uint64_t time, delta;
    uint64_t final_time = 0;

    if (cxl_dstate->timestamp.set) {
        /* Find the delta from the last time the host set the time. */
        time = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
        delta = time - cxl_dstate->timestamp.last_set;
        final_time = cxl_dstate->timestamp.host_set + delta;
    }

    return final_time;
}