/** @file

  VirtIo GPU initialization, and commands (primitives) for the GPU device.

  Copyright (C) 2016, Red Hat, Inc.
  Copyright (c) 2017, AMD Inc, All rights reserved.<BR>

  SPDX-License-Identifier: BSD-2-Clause-Patent

**/

#include <Library/VirtioLib.h>

#include "VirtioGpu.h"

/**
  Configure the VirtIo GPU device that underlies VgpuDev.

  @param[in,out] VgpuDev  The VGPU_DEV object to set up VirtIo messaging for.
                          On input, the caller is responsible for having
                          initialized VgpuDev->VirtIo. On output, VgpuDev->Ring
                          has been initialized, and synchronous VirtIo GPU
                          commands (primitives) can be submitted to the device.

  @retval EFI_SUCCESS      VirtIo GPU configuration successful.

  @retval EFI_UNSUPPORTED  The host-side configuration of the VirtIo GPU is not
                           supported by this driver.

  @return                  Error codes from underlying functions.
**/
EFI_STATUS
VirtioGpuInit (
  IN OUT VGPU_DEV  *VgpuDev
  )
{
  UINT8       NextDevStat;
  EFI_STATUS  Status;
  UINT64      Features;
  UINT16      QueueSize;
  UINT64      RingBaseShift;

  //
  // Execute virtio-v1.0-cs04, 3.1.1 Driver Requirements: Device
  // Initialization.
  //
  // 1. Reset the device.
  //
  NextDevStat = 0;
  Status      = VgpuDev->VirtIo->SetDeviceStatus (VgpuDev->VirtIo, NextDevStat);
  if (EFI_ERROR (Status)) {
    goto Failed;
  }

  //
  // 2. Set the ACKNOWLEDGE status bit [...]
  //
  NextDevStat |= VSTAT_ACK;
  Status       = VgpuDev->VirtIo->SetDeviceStatus (VgpuDev->VirtIo, NextDevStat);
  if (EFI_ERROR (Status)) {
    goto Failed;
  }

  //
  // 3. Set the DRIVER status bit [...]
  //
  NextDevStat |= VSTAT_DRIVER;
  Status       = VgpuDev->VirtIo->SetDeviceStatus (VgpuDev->VirtIo, NextDevStat);
  if (EFI_ERROR (Status)) {
    goto Failed;
  }

  //
  // 4. Read device feature bits...
  //
  Status = VgpuDev->VirtIo->GetDeviceFeatures (VgpuDev->VirtIo, &Features);
  if (EFI_ERROR (Status)) {
    goto Failed;
  }

  if ((Features & VIRTIO_F_VERSION_1) == 0) {
    Status = EFI_UNSUPPORTED;
    goto Failed;
  }

  //
  // We only want the most basic 2D features.
  //
  Features &= VIRTIO_F_VERSION_1 | VIRTIO_F_IOMMU_PLATFORM;

  //
  // ... and write the subset of feature bits understood by the [...] driver to
  // the device. [...]
  // 5. Set the FEATURES_OK status bit.
  // 6. Re-read device status to ensure the FEATURES_OK bit is still set [...]
  //
  Status = Virtio10WriteFeatures (VgpuDev->VirtIo, Features, &NextDevStat);
  if (EFI_ERROR (Status)) {
    goto Failed;
  }

  //
  // 7. Perform device-specific setup, including discovery of virtqueues for
  //    the device [...]
  //
  Status = VgpuDev->VirtIo->SetQueueSel (
                              VgpuDev->VirtIo,
                              VIRTIO_GPU_CONTROL_QUEUE
                              );
  if (EFI_ERROR (Status)) {
    goto Failed;
  }

  Status = VgpuDev->VirtIo->GetQueueNumMax (VgpuDev->VirtIo, &QueueSize);
  if (EFI_ERROR (Status)) {
    goto Failed;
  }

  //
  // We implement each VirtIo GPU command that we use with two descriptors:
  // request, response.
  //
  if (QueueSize < 2) {
    Status = EFI_UNSUPPORTED;
    goto Failed;
  }

  //
  // [...] population of virtqueues [...]
  //
  Status = VirtioRingInit (VgpuDev->VirtIo, QueueSize, &VgpuDev->Ring);
  if (EFI_ERROR (Status)) {
    goto Failed;
  }

  //
  // If anything fails from here on, we have to release the ring.
  //
  Status = VirtioRingMap (
             VgpuDev->VirtIo,
             &VgpuDev->Ring,
             &RingBaseShift,
             &VgpuDev->RingMap
             );
  if (EFI_ERROR (Status)) {
    goto ReleaseQueue;
  }

  //
  // If anything fails from here on, we have to unmap the ring.
  //
  Status = VgpuDev->VirtIo->SetQueueAddress (
                              VgpuDev->VirtIo,
                              &VgpuDev->Ring,
                              RingBaseShift
                              );
  if (EFI_ERROR (Status)) {
    goto UnmapQueue;
  }

  //
  // 8. Set the DRIVER_OK status bit.
  //
  NextDevStat |= VSTAT_DRIVER_OK;
  Status       = VgpuDev->VirtIo->SetDeviceStatus (VgpuDev->VirtIo, NextDevStat);
  if (EFI_ERROR (Status)) {
    goto UnmapQueue;
  }

  return EFI_SUCCESS;

UnmapQueue:
  VgpuDev->VirtIo->UnmapSharedBuffer (VgpuDev->VirtIo, VgpuDev->RingMap);

ReleaseQueue:
  VirtioRingUninit (VgpuDev->VirtIo, &VgpuDev->Ring);

Failed:
  //
  // If any of these steps go irrecoverably wrong, the driver SHOULD set the
  // FAILED status bit to indicate that it has given up on the device (it can
  // reset the device later to restart if desired). [...]
  //
  // VirtIo access failure here should not mask the original error.
  //
  NextDevStat |= VSTAT_FAILED;
  VgpuDev->VirtIo->SetDeviceStatus (VgpuDev->VirtIo, NextDevStat);

  return Status;
}

/**
  De-configure the VirtIo GPU device that underlies VgpuDev.

  @param[in,out] VgpuDev  The VGPU_DEV object to tear down VirtIo messaging
                          for. On input, the caller is responsible for having
                          called VirtioGpuInit(). On output, VgpuDev->Ring has
                          been uninitialized; VirtIo GPU commands (primitives)
                          can no longer be submitted to the device.
**/
VOID
VirtioGpuUninit (
  IN OUT VGPU_DEV  *VgpuDev
  )
{
  //
  // Resetting the VirtIo device makes it release its resources and forget its
  // configuration.
  //
  VgpuDev->VirtIo->SetDeviceStatus (VgpuDev->VirtIo, 0);
  VgpuDev->VirtIo->UnmapSharedBuffer (VgpuDev->VirtIo, VgpuDev->RingMap);
  VirtioRingUninit (VgpuDev->VirtIo, &VgpuDev->Ring);
}

/**
  Allocate, zero and map memory, for bus master common buffer operation, to be
  attached as backing store to a host-side VirtIo GPU resource.

  @param[in]  VgpuDev        The VGPU_DEV object that represents the VirtIo GPU
                             device.

  @param[in]  NumberOfPages  The number of whole pages to allocate and map.

  @param[out] HostAddress    The system memory address of the allocated area.

  @param[out] DeviceAddress  The bus master device address of the allocated
                             area. The VirtIo GPU device may be programmed to
                             access the allocated area through DeviceAddress;
                             DeviceAddress is to be passed to the
                             VirtioGpuResourceAttachBacking() function, as the
                             BackingStoreDeviceAddress parameter.

  @param[out] Mapping        A resulting token to pass to
                             VirtioGpuUnmapAndFreeBackingStore().

  @retval EFI_SUCCESS  The requested number of pages has been allocated, zeroed
                       and mapped.

  @return              Status codes propagated from
                       VgpuDev->VirtIo->AllocateSharedPages() and
                       VirtioMapAllBytesInSharedBuffer().
**/
EFI_STATUS
VirtioGpuAllocateZeroAndMapBackingStore (
  IN  VGPU_DEV              *VgpuDev,
  IN  UINTN                 NumberOfPages,
  OUT VOID                  **HostAddress,
  OUT EFI_PHYSICAL_ADDRESS  *DeviceAddress,
  OUT VOID                  **Mapping
  )
{
  EFI_STATUS  Status;
  VOID        *NewHostAddress;

  Status = VgpuDev->VirtIo->AllocateSharedPages (
                              VgpuDev->VirtIo,
                              NumberOfPages,
                              &NewHostAddress
                              );
  if (EFI_ERROR (Status)) {
    return Status;
  }

  //
  // Avoid exposing stale data to the device even temporarily: zero the area
  // before mapping it.
  //
  ZeroMem (NewHostAddress, EFI_PAGES_TO_SIZE (NumberOfPages));

  Status = VirtioMapAllBytesInSharedBuffer (
             VgpuDev->VirtIo,                      // VirtIo
             VirtioOperationBusMasterCommonBuffer, // Operation
             NewHostAddress,                       // HostAddress
             EFI_PAGES_TO_SIZE (NumberOfPages),    // NumberOfBytes
             DeviceAddress,                        // DeviceAddress
             Mapping                               // Mapping
             );
  if (EFI_ERROR (Status)) {
    goto FreeSharedPages;
  }

  *HostAddress = NewHostAddress;
  return EFI_SUCCESS;

FreeSharedPages:
  VgpuDev->VirtIo->FreeSharedPages (
                     VgpuDev->VirtIo,
                     NumberOfPages,
                     NewHostAddress
                     );
  return Status;
}

/**
  Unmap and free memory originally allocated and mapped with
  VirtioGpuAllocateZeroAndMapBackingStore().

  If the memory allocated and mapped with
  VirtioGpuAllocateZeroAndMapBackingStore() was attached to a host-side VirtIo
  GPU resource with VirtioGpuResourceAttachBacking(), then the caller is
  responsible for detaching the backing store from the same resource, with
  VirtioGpuResourceDetachBacking(), before calling this function.

  @param[in] VgpuDev        The VGPU_DEV object that represents the VirtIo GPU
                            device.

  @param[in] NumberOfPages  The NumberOfPages parameter originally passed to
                            VirtioGpuAllocateZeroAndMapBackingStore().

  @param[in] HostAddress    The HostAddress value originally output by
                            VirtioGpuAllocateZeroAndMapBackingStore().

  @param[in] Mapping        The token that was originally output by
                            VirtioGpuAllocateZeroAndMapBackingStore().
**/
VOID
VirtioGpuUnmapAndFreeBackingStore (
  IN VGPU_DEV  *VgpuDev,
  IN UINTN     NumberOfPages,
  IN VOID      *HostAddress,
  IN VOID      *Mapping
  )
{
  VgpuDev->VirtIo->UnmapSharedBuffer (
                     VgpuDev->VirtIo,
                     Mapping
                     );
  VgpuDev->VirtIo->FreeSharedPages (
                     VgpuDev->VirtIo,
                     NumberOfPages,
                     HostAddress
                     );
}

/**
  EFI_EVENT_NOTIFY function for the VGPU_DEV.ExitBoot event. It resets the
  VirtIo device, causing it to release its resources and to forget its
  configuration.

  This function may only be called (that is, VGPU_DEV.ExitBoot may only be
  signaled) after VirtioGpuInit() returns and before VirtioGpuUninit() is
  called.

  @param[in] Event    Event whose notification function is being invoked.

  @param[in] Context  Pointer to the associated VGPU_DEV object.
**/
VOID
EFIAPI
VirtioGpuExitBoot (
  IN EFI_EVENT  Event,
  IN VOID       *Context
  )
{
  VGPU_DEV  *VgpuDev;

  DEBUG ((DEBUG_VERBOSE, "%a: Context=0x%p\n", __FUNCTION__, Context));
  VgpuDev = Context;
  VgpuDev->VirtIo->SetDeviceStatus (VgpuDev->VirtIo, 0);
}
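
//
// Illustrative sketch (not from this file): VirtioGpuExitBoot is a standard
// EFI_EVENT_NOTIFY callback, so the driver's setup code presumably registers
// it with the UEFI boot services along the following lines; the exact call
// site and local names are assumptions here.
//
//   Status = gBS->CreateEvent (
//                   EVT_SIGNAL_EXIT_BOOT_SERVICES,
//                   TPL_CALLBACK,
//                   VirtioGpuExitBoot,
//                   VgpuDev,             // Context handed back to the callback
//                   &VgpuDev->ExitBoot
//                   );
//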

/**
  Internal utility function that sends a request to the VirtIo GPU device
  model, awaits the answer from the host, and returns a status.

  @param[in,out] VgpuDev  The VGPU_DEV object that represents the VirtIo GPU
                          device. The caller is responsible to have
                          successfully invoked VirtioGpuInit() on VgpuDev
                          previously, while VirtioGpuUninit() must not have
                          been called on VgpuDev.

  @param[in] RequestType  The type of the request. The caller is responsible
                          for providing a VirtioGpuCmd* RequestType which, on
                          success, elicits a VirtioGpuRespOkNodata response
                          from the host.

  @param[in] Fence        Whether to enable fencing for this request. Fencing
                          forces the host to complete the command before
                          producing a response. If Fence is TRUE, then
                          VgpuDev->FenceId is consumed, and incremented.

  @param[in,out] Header   Pointer to the caller-allocated request object. The
                          request must start with VIRTIO_GPU_CONTROL_HEADER.
                          This function overwrites all fields of Header before
                          submitting the request to the host:

                          - it sets Type from RequestType,

                          - it sets Flags and FenceId based on Fence,

                          - it zeroes CtxId and Padding.

  @param[in] RequestSize  Size of the entire caller-allocated request object,
                          including the leading VIRTIO_GPU_CONTROL_HEADER.

  @param[in] ResponseType The type of the response (VirtioGpuResp*).

  @param[in,out] Response Pointer to the caller-allocated response object. The
                          response must start with VIRTIO_GPU_CONTROL_HEADER.

  @param[in] ResponseSize Size of the entire caller-allocated response object,
                          including the leading VIRTIO_GPU_CONTROL_HEADER.

  @retval EFI_SUCCESS       Operation successful.

  @retval EFI_DEVICE_ERROR  The host rejected the request. The host error
                            code has been logged on the DEBUG_ERROR level.

  @return                   Codes for unexpected errors in VirtIo
                            messaging, or request/response
                            mapping/unmapping.
**/
STATIC
EFI_STATUS
VirtioGpuSendCommandWithReply (
  IN OUT VGPU_DEV                            *VgpuDev,
  IN     VIRTIO_GPU_CONTROL_TYPE             RequestType,
  IN     BOOLEAN                             Fence,
  IN OUT volatile VIRTIO_GPU_CONTROL_HEADER  *Header,
  IN     UINTN                               RequestSize,
  IN     VIRTIO_GPU_CONTROL_TYPE             ResponseType,
  IN OUT volatile VIRTIO_GPU_CONTROL_HEADER  *Response,
  IN     UINTN                               ResponseSize
  )
{
  DESC_INDICES          Indices;
  EFI_STATUS            Status;
  UINT32                ResponseSizeRet;
  EFI_PHYSICAL_ADDRESS  RequestDeviceAddress;
  VOID                  *RequestMap;
  EFI_PHYSICAL_ADDRESS  ResponseDeviceAddress;
  VOID                  *ResponseMap;

  //
  // Initialize Header.
  //
  Header->Type = RequestType;
  if (Fence) {
    Header->Flags   = VIRTIO_GPU_FLAG_FENCE;
    Header->FenceId = VgpuDev->FenceId++;
  } else {
    Header->Flags   = 0;
    Header->FenceId = 0;
  }

  Header->CtxId   = 0;
  Header->Padding = 0;

  ASSERT (RequestSize >= sizeof *Header);
  ASSERT (RequestSize <= MAX_UINT32);

  //
  // Map request and response to bus master device addresses.
  //
  Status = VirtioMapAllBytesInSharedBuffer (
             VgpuDev->VirtIo,
             VirtioOperationBusMasterRead,
             (VOID *)Header,
             RequestSize,
             &RequestDeviceAddress,
             &RequestMap
             );
  if (EFI_ERROR (Status)) {
    return Status;
  }

  Status = VirtioMapAllBytesInSharedBuffer (
             VgpuDev->VirtIo,
             VirtioOperationBusMasterWrite,
             (VOID *)Response,
             ResponseSize,
             &ResponseDeviceAddress,
             &ResponseMap
             );
  if (EFI_ERROR (Status)) {
    goto UnmapRequest;
  }

  //
  // Compose the descriptor chain.
  //
  VirtioPrepare (&VgpuDev->Ring, &Indices);
  VirtioAppendDesc (
    &VgpuDev->Ring,
    RequestDeviceAddress,
    (UINT32)RequestSize,
    VRING_DESC_F_NEXT,
    &Indices
    );
  VirtioAppendDesc (
    &VgpuDev->Ring,
    ResponseDeviceAddress,
    (UINT32)ResponseSize,
    VRING_DESC_F_WRITE,
    &Indices
    );

  //
  // Send the command.
  //
  Status = VirtioFlush (
             VgpuDev->VirtIo,
             VIRTIO_GPU_CONTROL_QUEUE,
             &VgpuDev->Ring,
             &Indices,
             &ResponseSizeRet
             );
  if (EFI_ERROR (Status)) {
    goto UnmapResponse;
  }

  //
  // Verify response size.
  //
  if (ResponseSize != ResponseSizeRet) {
    DEBUG ((
      DEBUG_ERROR,
      "%a: malformed response to Request=0x%x\n",
      __FUNCTION__,
      (UINT32)RequestType
      ));
    Status = EFI_PROTOCOL_ERROR;
    goto UnmapResponse;
  }

  //
  // Unmap response and request, in reverse order of mapping. On error, the
  // respective mapping is invalidated anyway, only the data may not have been
  // committed to system memory (in case of VirtioOperationBusMasterWrite).
  //
  Status = VgpuDev->VirtIo->UnmapSharedBuffer (VgpuDev->VirtIo, ResponseMap);
  if (EFI_ERROR (Status)) {
    goto UnmapRequest;
  }

  Status = VgpuDev->VirtIo->UnmapSharedBuffer (VgpuDev->VirtIo, RequestMap);
  if (EFI_ERROR (Status)) {
    return Status;
  }

  //
  // Parse the response.
  //
  if (Response->Type == (UINT32)ResponseType) {
    return EFI_SUCCESS;
  }

  DEBUG ((
    DEBUG_ERROR,
    "%a: Request=0x%x Response=0x%x (expected 0x%x)\n",
    __FUNCTION__,
    (UINT32)RequestType,
    Response->Type,
    ResponseType
    ));
  return EFI_DEVICE_ERROR;

UnmapResponse:
  VgpuDev->VirtIo->UnmapSharedBuffer (VgpuDev->VirtIo, ResponseMap);

UnmapRequest:
  VgpuDev->VirtIo->UnmapSharedBuffer (VgpuDev->VirtIo, RequestMap);

  return Status;
}

/**
  Simplified version of VirtioGpuSendCommandWithReply() for commands
  which do not send back any data.
**/
STATIC
EFI_STATUS
VirtioGpuSendCommand (
  IN OUT VGPU_DEV                            *VgpuDev,
  IN     VIRTIO_GPU_CONTROL_TYPE             RequestType,
  IN     BOOLEAN                             Fence,
  IN OUT volatile VIRTIO_GPU_CONTROL_HEADER  *Header,
  IN     UINTN                               RequestSize
  )
{
  volatile VIRTIO_GPU_CONTROL_HEADER  Response;

  return VirtioGpuSendCommandWithReply (
           VgpuDev,
           RequestType,
           Fence,
           Header,
           RequestSize,
           VirtioGpuRespOkNodata,
           &Response,
           sizeof (Response)
           );
}

/**
  The following functions send requests to the VirtIo GPU device model, await
  the answer from the host, and return a status. They share the following
  interface details:

  @param[in,out] VgpuDev  The VGPU_DEV object that represents the VirtIo GPU
                          device. The caller is responsible to have
                          successfully invoked VirtioGpuInit() on VgpuDev
                          previously, while VirtioGpuUninit() must not have
                          been called on VgpuDev.

  @retval EFI_INVALID_PARAMETER  Invalid command-specific parameters were
                                 detected by this driver.

  @retval EFI_SUCCESS            Operation successful.

  @retval EFI_DEVICE_ERROR       The host rejected the request. The host error
                                 code has been logged on the DEBUG_ERROR level.

  @return                        Codes for unexpected errors in VirtIo
                                 messaging.

  For the command-specific parameters, please consult the GPU Device section of
  the VirtIo 1.0 specification (see references in
  "OvmfPkg/Include/IndustryStandard/VirtioGpu.h").
**/
EFI_STATUS
VirtioGpuResourceCreate2d (
  IN OUT VGPU_DEV            *VgpuDev,
  IN     UINT32              ResourceId,
  IN     VIRTIO_GPU_FORMATS  Format,
  IN     UINT32              Width,
  IN     UINT32              Height
  )
{
  volatile VIRTIO_GPU_RESOURCE_CREATE_2D  Request;

  if (ResourceId == 0) {
    return EFI_INVALID_PARAMETER;
  }

  Request.ResourceId = ResourceId;
  Request.Format     = (UINT32)Format;
  Request.Width      = Width;
  Request.Height     = Height;

  return VirtioGpuSendCommand (
           VgpuDev,
           VirtioGpuCmdResourceCreate2d,
           FALSE,                        // Fence
           &Request.Header,
           sizeof Request
           );
}

EFI_STATUS
VirtioGpuResourceUnref (
  IN OUT VGPU_DEV  *VgpuDev,
  IN     UINT32    ResourceId
  )
{
  volatile VIRTIO_GPU_RESOURCE_UNREF  Request;

  if (ResourceId == 0) {
    return EFI_INVALID_PARAMETER;
  }

  Request.ResourceId = ResourceId;
  Request.Padding    = 0;

  return VirtioGpuSendCommand (
           VgpuDev,
           VirtioGpuCmdResourceUnref,
           FALSE,                     // Fence
           &Request.Header,
           sizeof Request
           );
}

EFI_STATUS
VirtioGpuResourceAttachBacking (
  IN OUT VGPU_DEV              *VgpuDev,
  IN     UINT32                ResourceId,
  IN     EFI_PHYSICAL_ADDRESS  BackingStoreDeviceAddress,
  IN     UINTN                 NumberOfPages
  )
{
  volatile VIRTIO_GPU_RESOURCE_ATTACH_BACKING  Request;

  if (ResourceId == 0) {
    return EFI_INVALID_PARAMETER;
  }

  Request.ResourceId    = ResourceId;
  Request.NrEntries     = 1;
  Request.Entry.Addr    = BackingStoreDeviceAddress;
  Request.Entry.Length  = (UINT32)EFI_PAGES_TO_SIZE (NumberOfPages);
  Request.Entry.Padding = 0;

  return VirtioGpuSendCommand (
           VgpuDev,
           VirtioGpuCmdResourceAttachBacking,
           FALSE,                             // Fence
           &Request.Header,
           sizeof Request
           );
}

EFI_STATUS
VirtioGpuResourceDetachBacking (
  IN OUT VGPU_DEV  *VgpuDev,
  IN     UINT32    ResourceId
  )
{
  volatile VIRTIO_GPU_RESOURCE_DETACH_BACKING  Request;

  if (ResourceId == 0) {
    return EFI_INVALID_PARAMETER;
  }

  Request.ResourceId = ResourceId;
  Request.Padding    = 0;

  //
  // In this case, we set Fence to TRUE, because after this function returns,
  // the caller might reasonably want to repurpose the backing pages
  // immediately. Thus we should ensure that the host releases all references
  // to the backing pages before we return.
  //
  return VirtioGpuSendCommand (
           VgpuDev,
           VirtioGpuCmdResourceDetachBacking,
           TRUE,                              // Fence
           &Request.Header,
           sizeof Request
           );
}

EFI_STATUS
VirtioGpuSetScanout (
  IN OUT VGPU_DEV  *VgpuDev,
  IN     UINT32    X,
  IN     UINT32    Y,
  IN     UINT32    Width,
  IN     UINT32    Height,
  IN     UINT32    ScanoutId,
  IN     UINT32    ResourceId
  )
{
  volatile VIRTIO_GPU_SET_SCANOUT  Request;

  //
  // Unlike for most other commands, ResourceId=0 is valid; it
  // is used to disable a scanout.
  //
  Request.Rectangle.X      = X;
  Request.Rectangle.Y      = Y;
  Request.Rectangle.Width  = Width;
  Request.Rectangle.Height = Height;
  Request.ScanoutId        = ScanoutId;
  Request.ResourceId       = ResourceId;

  return VirtioGpuSendCommand (
           VgpuDev,
           VirtioGpuCmdSetScanout,
           FALSE,                  // Fence
           &Request.Header,
           sizeof Request
           );
}

EFI_STATUS
VirtioGpuTransferToHost2d (
  IN OUT VGPU_DEV  *VgpuDev,
  IN     UINT32    X,
  IN     UINT32    Y,
  IN     UINT32    Width,
  IN     UINT32    Height,
  IN     UINT64    Offset,
  IN     UINT32    ResourceId
  )
{
  volatile VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D  Request;

  if (ResourceId == 0) {
    return EFI_INVALID_PARAMETER;
  }

  Request.Rectangle.X      = X;
  Request.Rectangle.Y      = Y;
  Request.Rectangle.Width  = Width;
  Request.Rectangle.Height = Height;
  Request.Offset           = Offset;
  Request.ResourceId       = ResourceId;
  Request.Padding          = 0;

  return VirtioGpuSendCommand (
           VgpuDev,
           VirtioGpuCmdTransferToHost2d,
           FALSE,                        // Fence
           &Request.Header,
           sizeof Request
           );
}

EFI_STATUS
VirtioGpuResourceFlush (
  IN OUT VGPU_DEV  *VgpuDev,
  IN     UINT32    X,
  IN     UINT32    Y,
  IN     UINT32    Width,
  IN     UINT32    Height,
  IN     UINT32    ResourceId
  )
{
  volatile VIRTIO_GPU_RESOURCE_FLUSH  Request;

  if (ResourceId == 0) {
    return EFI_INVALID_PARAMETER;
  }

  Request.Rectangle.X      = X;
  Request.Rectangle.Y      = Y;
  Request.Rectangle.Width  = Width;
  Request.Rectangle.Height = Height;
  Request.ResourceId       = ResourceId;
  Request.Padding          = 0;

  return VirtioGpuSendCommand (
           VgpuDev,
           VirtioGpuCmdResourceFlush,
           FALSE,                     // Fence
           &Request.Header,
           sizeof Request
           );
}

EFI_STATUS
VirtioGpuGetDisplayInfo (
  IN OUT VGPU_DEV                        *VgpuDev,
  volatile VIRTIO_GPU_RESP_DISPLAY_INFO  *Response
  )
{
  volatile VIRTIO_GPU_CONTROL_HEADER  Request;

  return VirtioGpuSendCommandWithReply (
           VgpuDev,
           VirtioGpuCmdGetDisplayInfo,
           FALSE,                      // Fence
           &Request,
           sizeof Request,
           VirtioGpuRespOkDisplayInfo,
           &Response->Header,
           sizeof *Response
           );
}
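
//
// Illustrative usage sketch (not part of the driver): query the host's display
// information before programming a scanout. The response layout is defined by
// the GPU Device section of the VirtIo 1.0 specification; the local names
// below (DisplayInfo, and the surrounding Status/VgpuDev) are hypothetical.
//
//   volatile VIRTIO_GPU_RESP_DISPLAY_INFO  DisplayInfo;
//
//   Status = VirtioGpuGetDisplayInfo (VgpuDev, &DisplayInfo);
//   if (EFI_ERROR (Status)) {
//     return Status;
//   }
//   //
//   // Inspect DisplayInfo (per the spec, one entry per scanout, each carrying
//   // an enabled flag and a preferred rectangle) before calling
//   // VirtioGpuSetScanout(), VirtioGpuTransferToHost2d() and
//   // VirtioGpuResourceFlush().
//   //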