/*
 * PCIe Data Object Exchange
 *
 * Copyright (C) 2021 Avery Design Systems, Inc.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "qemu/range.h"
#include "hw/pci/pci.h"
#include "hw/pci/pcie.h"
#include "hw/pci/pcie_doe.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"

#define DWORD_BYTE 4

typedef struct DoeDiscoveryReq {
    DOEHeader header;
    uint8_t index;
    uint8_t reserved[3];
} QEMU_PACKED DoeDiscoveryReq;

typedef struct DoeDiscoveryRsp {
    DOEHeader header;
    uint16_t vendor_id;
    uint8_t data_obj_type;
    uint8_t next_index;
} QEMU_PACKED DoeDiscoveryRsp;

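/*
 * Handle the mandatory DOE Discovery protocol: report the vendor ID and
 * data object type of the protocol selected by the request's index field,
 * together with the index of the next supported protocol (0 when the last
 * protocol has been reported).
 */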
static bool pcie_doe_discovery(DOECap *doe_cap)
{
    DoeDiscoveryReq *req = pcie_doe_get_write_mbox_ptr(doe_cap);
    DoeDiscoveryRsp rsp;
    uint8_t index = req->index;
    DOEProtocol *prot;

    /* Discard request if length does not match DoeDiscoveryReq */
    if (pcie_doe_get_obj_len(req) <
        DIV_ROUND_UP(sizeof(DoeDiscoveryReq), DWORD_BYTE)) {
        return false;
    }

    rsp.header = (DOEHeader) {
        .vendor_id = PCI_VENDOR_ID_PCI_SIG,
        .data_obj_type = PCI_SIG_DOE_DISCOVERY,
        .length = DIV_ROUND_UP(sizeof(DoeDiscoveryRsp), DWORD_BYTE),
    };

    /* Point to the requested protocol, index 0 must be Discovery */
    if (index == 0) {
        rsp.vendor_id = PCI_VENDOR_ID_PCI_SIG;
        rsp.data_obj_type = PCI_SIG_DOE_DISCOVERY;
    } else {
        if (index < doe_cap->protocol_num) {
            prot = &doe_cap->protocols[index - 1];
            rsp.vendor_id = prot->vendor_id;
            rsp.data_obj_type = prot->data_obj_type;
        } else {
            rsp.vendor_id = 0xFFFF;
            rsp.data_obj_type = 0xFF;
        }
    }

    if (index + 1 == doe_cap->protocol_num) {
        rsp.next_index = 0;
    } else {
        rsp.next_index = index + 1;
    }

    pcie_doe_set_rsp(doe_cap, &rsp);

    return true;
}

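/* Reset both mailboxes: clear the buffers and the read/write positions */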
static void pcie_doe_reset_mbox(DOECap *st)
{
    st->read_mbox_idx = 0;
    st->read_mbox_len = 0;
    st->write_mbox_len = 0;

    memset(st->read_mbox, 0, PCI_DOE_DW_SIZE_MAX * DWORD_BYTE);
    memset(st->write_mbox, 0, PCI_DOE_DW_SIZE_MAX * DWORD_BYTE);
}

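/*
 * Add a DOE extended capability at config space @offset and initialise its
 * mailboxes. @protocols is an array of supported protocols, terminated by
 * an entry with a zero vendor_id; the Discovery protocol is handled
 * internally and need not be listed. If @intr is set and the device has
 * MSI or MSI-X, DOE interrupt support is advertised using message number
 * @vec.
 */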
void pcie_doe_init(PCIDevice *dev, DOECap *doe_cap, uint16_t offset,
                   DOEProtocol *protocols, bool intr, uint16_t vec)
{
    pcie_add_capability(dev, PCI_EXT_CAP_ID_DOE, 0x1, offset,
                        PCI_DOE_SIZEOF);

    doe_cap->pdev = dev;
    doe_cap->offset = offset;

    if (intr && (msi_present(dev) || msix_present(dev))) {
        doe_cap->cap.intr = intr;
        doe_cap->cap.vec = vec;
    }

    doe_cap->write_mbox = g_malloc0(PCI_DOE_DW_SIZE_MAX * DWORD_BYTE);
    doe_cap->read_mbox = g_malloc0(PCI_DOE_DW_SIZE_MAX * DWORD_BYTE);

    pcie_doe_reset_mbox(doe_cap);

    doe_cap->protocols = protocols;
    for (; protocols->vendor_id; protocols++) {
        doe_cap->protocol_num++;
    }
    assert(doe_cap->protocol_num < PCI_DOE_PROTOCOL_NUM_MAX);

    /* Increment to allow for the discovery protocol */
    doe_cap->protocol_num++;
}

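/*
 * Illustrative wiring from a device model (a sketch only; the names below
 * are hypothetical and not part of this file):
 *
 *     static DOEProtocol my_doe_protocols[] = {
 *         { .vendor_id = MY_VENDOR_ID, .data_obj_type = MY_DATA_OBJ_TYPE,
 *           .handle_request = my_handle_request },
 *         { }
 *     };
 *
 *     pcie_doe_init(pci_dev, &s->doe, doe_cap_offset,
 *                   my_doe_protocols, true, 0);
 *
 * The protocol table ends with the all-zero entry, matching the
 * vendor_id-terminated scan above.
 */
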
void pcie_doe_fini(DOECap *doe_cap)
{
    g_free(doe_cap->read_mbox);
    g_free(doe_cap->write_mbox);
    g_free(doe_cap);
}

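/* Build the first data object header DWORD (vendor ID and object type) */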
uint32_t pcie_doe_build_protocol(DOEProtocol *p)
{
    return DATA_OBJ_BUILD_HEADER1(p->vendor_id, p->data_obj_type);
}

void *pcie_doe_get_write_mbox_ptr(DOECap *doe_cap)
{
    return doe_cap->write_mbox;
}

/*
 * Copy the response into the read mailbox buffer.
 * This may be called from a protocol's own handle_request() callback when a
 * DOE response is required (see the illustrative sketch below).
 */
void pcie_doe_set_rsp(DOECap *doe_cap, void *rsp)
{
    uint32_t len = pcie_doe_get_obj_len(rsp);

    memcpy(doe_cap->read_mbox + doe_cap->read_mbox_len, rsp, len * DWORD_BYTE);
    doe_cap->read_mbox_len += len;
}

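/*
 * Illustrative handle_request() callback (a sketch only; the protocol,
 * types and names below are hypothetical): read the request from the write
 * mailbox, build a response and queue it with pcie_doe_set_rsp().
 *
 *     static bool my_handle_request(DOECap *doe_cap)
 *     {
 *         MyReq *req = pcie_doe_get_write_mbox_ptr(doe_cap);
 *         MyRsp rsp = {
 *             .header = (DOEHeader) {
 *                 .vendor_id = MY_VENDOR_ID,
 *                 .data_obj_type = MY_DATA_OBJ_TYPE,
 *                 .length = DIV_ROUND_UP(sizeof(rsp), DWORD_BYTE),
 *             },
 *         };
 *
 *         (fill the rest of rsp based on req)
 *
 *         pcie_doe_set_rsp(doe_cap, &rsp);
 *         return true;
 *     }
 */

/*
 * Return the length in DW of the data object at @obj, or 0 if @obj is NULL.
 */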
uint32_t pcie_doe_get_obj_len(void *obj)
{
    uint32_t len;

    if (!obj) {
        return 0;
    }

    /* Only lower 18 bits are valid */
    len = DATA_OBJ_LEN_MASK(((DOEHeader *)obj)->length);

    /* PCIe r6.0 Table 6-29: a value of 00000h indicates 2^18 DW */
    return (len) ? len : PCI_DOE_DW_SIZE_MAX;
}

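/*
 * Raise the DOE interrupt (MSI-X if enabled, otherwise MSI) when interrupt
 * support is advertised and enabled, unless the interrupt status bit is
 * already set.
 */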
static void pcie_doe_irq_assert(DOECap *doe_cap)
{
    PCIDevice *dev = doe_cap->pdev;

    if (doe_cap->cap.intr && doe_cap->ctrl.intr) {
        if (doe_cap->status.intr) {
            return;
        }
        doe_cap->status.intr = 1;

        if (msix_enabled(dev)) {
            msix_notify(dev, doe_cap->cap.vec);
        } else if (msi_enabled(dev)) {
            msi_notify(dev, doe_cap->cap.vec);
        }
    }
}

static void pcie_doe_set_ready(DOECap *doe_cap, bool rdy)
{
    doe_cap->status.ready = rdy;

    if (rdy) {
        pcie_doe_irq_assert(doe_cap);
    }
}

static void pcie_doe_set_error(DOECap *doe_cap, bool err)
{
    doe_cap->status.error = err;

    if (err) {
        pcie_doe_irq_assert(doe_cap);
    }
}

/*
 * Check the incoming request in write_mbox for a supported protocol and
 * dispatch it to the matching handle_request() callback.
 */
static void pcie_doe_prepare_rsp(DOECap *doe_cap)
{
    bool success = false;
    int p;
    bool (*handle_request)(DOECap *) = NULL;

    if (doe_cap->status.error) {
        return;
    }

    if (doe_cap->write_mbox[0] ==
        DATA_OBJ_BUILD_HEADER1(PCI_VENDOR_ID_PCI_SIG, PCI_SIG_DOE_DISCOVERY)) {
        handle_request = pcie_doe_discovery;
    } else {
        for (p = 0; p < doe_cap->protocol_num - 1; p++) {
            if (doe_cap->write_mbox[0] ==
                pcie_doe_build_protocol(&doe_cap->protocols[p])) {
                handle_request = doe_cap->protocols[p].handle_request;
                break;
            }
        }
    }

    /*
     * PCIe r6 DOE 6.30.1:
     * If the number of DW transferred does not match the
     * indicated Length for a data object, then the
     * data object must be silently discarded.
     */
    if (handle_request && (doe_cap->write_mbox_len ==
        pcie_doe_get_obj_len(pcie_doe_get_write_mbox_ptr(doe_cap)))) {
        success = handle_request(doe_cap);
    }

    if (success) {
        pcie_doe_set_ready(doe_cap, 1);
    } else {
        pcie_doe_reset_mbox(doe_cap);
    }
}

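/*
 * Guest-visible mailbox flow implemented by the config accessors below:
 * software writes the request object one DW at a time to the Write Data
 * Mailbox, sets the Go bit in DOE Control and then polls Data Object Ready
 * in DOE Status. The response is read one DW at a time from the Read Data
 * Mailbox, with each write to the Read Data Mailbox register advancing to
 * the next response DW. Setting the Abort bit clears both mailboxes as well
 * as the error and ready status bits.
 */
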
/*
 * Read from DOE config space.
 * Return false if the address is not within the DOE capability range.
 */
bool pcie_doe_read_config(DOECap *doe_cap, uint32_t addr, int size,
                          uint32_t *buf)
{
    uint32_t shift;
    uint16_t doe_offset = doe_cap->offset;

    if (!range_covers_byte(doe_offset + PCI_EXP_DOE_CAP,
                           PCI_DOE_SIZEOF - 4, addr)) {
        return false;
    }

    addr -= doe_offset;
    *buf = 0;

    if (range_covers_byte(PCI_EXP_DOE_CAP, DWORD_BYTE, addr)) {
        *buf = FIELD_DP32(*buf, PCI_DOE_CAP_REG, INTR_SUPP,
                          doe_cap->cap.intr);
        *buf = FIELD_DP32(*buf, PCI_DOE_CAP_REG, DOE_INTR_MSG_NUM,
                          doe_cap->cap.vec);
    } else if (range_covers_byte(PCI_EXP_DOE_CTRL, DWORD_BYTE, addr)) {
        /* Must return ABORT=0 and GO=0 */
        *buf = FIELD_DP32(*buf, PCI_DOE_CAP_CONTROL, DOE_INTR_EN,
                          doe_cap->ctrl.intr);
    } else if (range_covers_byte(PCI_EXP_DOE_STATUS, DWORD_BYTE, addr)) {
        *buf = FIELD_DP32(*buf, PCI_DOE_CAP_STATUS, DOE_BUSY,
                          doe_cap->status.busy);
        *buf = FIELD_DP32(*buf, PCI_DOE_CAP_STATUS, DOE_INTR_STATUS,
                          doe_cap->status.intr);
        *buf = FIELD_DP32(*buf, PCI_DOE_CAP_STATUS, DOE_ERROR,
                          doe_cap->status.error);
        *buf = FIELD_DP32(*buf, PCI_DOE_CAP_STATUS, DATA_OBJ_RDY,
                          doe_cap->status.ready);
    /* Mailbox should be DW accessed */
    } else if (addr == PCI_EXP_DOE_RD_DATA_MBOX && size == DWORD_BYTE) {
        if (doe_cap->status.ready && !doe_cap->status.error) {
            *buf = doe_cap->read_mbox[doe_cap->read_mbox_idx];
        }
    }

    /* Process Alignment */
    shift = addr % DWORD_BYTE;
    *buf = extract32(*buf, shift * 8, size * 8);

    return true;
}

/*
 * Write to DOE config space.
 * Return if the address is not within the DOE capability range or if an
 * abort is received.
 */
void pcie_doe_write_config(DOECap *doe_cap,
                           uint32_t addr, uint32_t val, int size)
{
    uint16_t doe_offset = doe_cap->offset;
    uint32_t shift;

    if (!range_covers_byte(doe_offset + PCI_EXP_DOE_CAP,
                           PCI_DOE_SIZEOF - 4, addr)) {
        return;
    }

    /* Process Alignment */
    shift = addr % DWORD_BYTE;
    addr -= (doe_offset + shift);
    val = deposit32(val, shift * 8, size * 8, val);

    switch (addr) {
    case PCI_EXP_DOE_CTRL:
        if (FIELD_EX32(val, PCI_DOE_CAP_CONTROL, DOE_ABORT)) {
            pcie_doe_set_ready(doe_cap, 0);
            pcie_doe_set_error(doe_cap, 0);
            pcie_doe_reset_mbox(doe_cap);
            return;
        }

        if (FIELD_EX32(val, PCI_DOE_CAP_CONTROL, DOE_GO)) {
            pcie_doe_prepare_rsp(doe_cap);
        }

        if (FIELD_EX32(val, PCI_DOE_CAP_CONTROL, DOE_INTR_EN)) {
            doe_cap->ctrl.intr = 1;
            /* Clear interrupt bit located within the first byte */
        } else if (shift == 0) {
            doe_cap->ctrl.intr = 0;
        }
        break;
    case PCI_EXP_DOE_STATUS:
        if (FIELD_EX32(val, PCI_DOE_CAP_STATUS, DOE_INTR_STATUS)) {
            doe_cap->status.intr = 0;
        }
        break;
    case PCI_EXP_DOE_RD_DATA_MBOX:
        /* Mailbox should be DW accessed */
        if (size != DWORD_BYTE) {
            return;
        }
        doe_cap->read_mbox_idx++;
        if (doe_cap->read_mbox_idx == doe_cap->read_mbox_len) {
            pcie_doe_reset_mbox(doe_cap);
            pcie_doe_set_ready(doe_cap, 0);
        } else if (doe_cap->read_mbox_idx > doe_cap->read_mbox_len) {
            /* Underflow */
            pcie_doe_set_error(doe_cap, 1);
        }
        break;
    case PCI_EXP_DOE_WR_DATA_MBOX:
        /* Mailbox should be DW accessed */
        if (size != DWORD_BYTE) {
            return;
        }
        doe_cap->write_mbox[doe_cap->write_mbox_len] = val;
        doe_cap->write_mbox_len++;
        break;
    case PCI_EXP_DOE_CAP:
        /* fallthrough */
    default:
        break;
    }
}