OvmfPkg/VirtioGpuDxe/Commands.c
1 /** @file
2
3 VirtIo GPU initialization, and commands (primitives) for the GPU device.
4
5 Copyright (C) 2016, Red Hat, Inc.
6 Copyright (c) 2017, AMD Inc, All rights reserved.<BR>
7
8 SPDX-License-Identifier: BSD-2-Clause-Patent
9
10 **/
11
12 #include <Library/VirtioLib.h>
13
14 #include "VirtioGpu.h"
15
16 /**
17 Configure the VirtIo GPU device that underlies VgpuDev.
18
19 @param[in,out] VgpuDev The VGPU_DEV object to set up VirtIo messaging for.
20 On input, the caller is responsible for having
21 initialized VgpuDev->VirtIo. On output, VgpuDev->Ring
22 has been initialized, and synchronous VirtIo GPU
23 commands (primitives) can be submitted to the device.
24
25 @retval EFI_SUCCESS VirtIo GPU configuration successful.
26
27 @retval EFI_UNSUPPORTED The host-side configuration of the VirtIo GPU is not
28 supported by this driver.
29
30 @return Error codes from underlying functions.
31 **/
32 EFI_STATUS
33 VirtioGpuInit (
34 IN OUT VGPU_DEV *VgpuDev
35 )
36 {
37 UINT8 NextDevStat;
38 EFI_STATUS Status;
39 UINT64 Features;
40 UINT16 QueueSize;
41 UINT64 RingBaseShift;
42
43 //
44 // Execute virtio-v1.0-cs04, 3.1.1 Driver Requirements: Device
45 // Initialization.
46 //
47 // 1. Reset the device.
48 //
49 NextDevStat = 0;
50 Status = VgpuDev->VirtIo->SetDeviceStatus (VgpuDev->VirtIo, NextDevStat);
51 if (EFI_ERROR (Status)) {
52 goto Failed;
53 }
54
55 //
56 // 2. Set the ACKNOWLEDGE status bit [...]
57 //
58 NextDevStat |= VSTAT_ACK;
59 Status = VgpuDev->VirtIo->SetDeviceStatus (VgpuDev->VirtIo, NextDevStat);
60 if (EFI_ERROR (Status)) {
61 goto Failed;
62 }
63
64 //
65 // 3. Set the DRIVER status bit [...]
66 //
67 NextDevStat |= VSTAT_DRIVER;
68 Status = VgpuDev->VirtIo->SetDeviceStatus (VgpuDev->VirtIo, NextDevStat);
69 if (EFI_ERROR (Status)) {
70 goto Failed;
71 }
72
73 //
74 // 4. Read device feature bits...
75 //
76 Status = VgpuDev->VirtIo->GetDeviceFeatures (VgpuDev->VirtIo, &Features);
77 if (EFI_ERROR (Status)) {
78 goto Failed;
79 }
80
81 if ((Features & VIRTIO_F_VERSION_1) == 0) {
82 Status = EFI_UNSUPPORTED;
83 goto Failed;
84 }
85
86 //
87 // We only want the most basic 2D features.
88 //
89 Features &= VIRTIO_F_VERSION_1 | VIRTIO_F_IOMMU_PLATFORM;
90
91 //
92 // ... and write the subset of feature bits understood by the [...] driver to
93 // the device. [...]
94 // 5. Set the FEATURES_OK status bit.
95 // 6. Re-read device status to ensure the FEATURES_OK bit is still set [...]
96 //
97 Status = Virtio10WriteFeatures (VgpuDev->VirtIo, Features, &NextDevStat);
98 if (EFI_ERROR (Status)) {
99 goto Failed;
100 }
101
102 //
103 // 7. Perform device-specific setup, including discovery of virtqueues for
104 // the device [...]
105 //
106 Status = VgpuDev->VirtIo->SetQueueSel (
107 VgpuDev->VirtIo,
108 VIRTIO_GPU_CONTROL_QUEUE
109 );
110 if (EFI_ERROR (Status)) {
111 goto Failed;
112 }
113
114 Status = VgpuDev->VirtIo->GetQueueNumMax (VgpuDev->VirtIo, &QueueSize);
115 if (EFI_ERROR (Status)) {
116 goto Failed;
117 }
118
119 //
120 // We implement each VirtIo GPU command that we use with two descriptors:
121 // request, response.
122 //
123 if (QueueSize < 2) {
124 Status = EFI_UNSUPPORTED;
125 goto Failed;
126 }
127
128 //
129 // [...] population of virtqueues [...]
130 //
131 Status = VirtioRingInit (VgpuDev->VirtIo, QueueSize, &VgpuDev->Ring);
132 if (EFI_ERROR (Status)) {
133 goto Failed;
134 }
135
136 //
137 // If anything fails from here on, we have to release the ring.
138 //
139 Status = VirtioRingMap (
140 VgpuDev->VirtIo,
141 &VgpuDev->Ring,
142 &RingBaseShift,
143 &VgpuDev->RingMap
144 );
145 if (EFI_ERROR (Status)) {
146 goto ReleaseQueue;
147 }
148
149 //
150 // If anything fails from here on, we have to unmap the ring.
151 //
152 Status = VgpuDev->VirtIo->SetQueueAddress (
153 VgpuDev->VirtIo,
154 &VgpuDev->Ring,
155 RingBaseShift
156 );
157 if (EFI_ERROR (Status)) {
158 goto UnmapQueue;
159 }
160
161 //
162 // 8. Set the DRIVER_OK status bit.
163 //
164 NextDevStat |= VSTAT_DRIVER_OK;
165 Status = VgpuDev->VirtIo->SetDeviceStatus (VgpuDev->VirtIo, NextDevStat);
166 if (EFI_ERROR (Status)) {
167 goto UnmapQueue;
168 }
169
170 return EFI_SUCCESS;
171
172 UnmapQueue:
173 VgpuDev->VirtIo->UnmapSharedBuffer (VgpuDev->VirtIo, VgpuDev->RingMap);
174
175 ReleaseQueue:
176 VirtioRingUninit (VgpuDev->VirtIo, &VgpuDev->Ring);
177
178 Failed:
179 //
180 // If any of these steps go irrecoverably wrong, the driver SHOULD set the
181 // FAILED status bit to indicate that it has given up on the device (it can
182 // reset the device later to restart if desired). [...]
183 //
184 // VirtIo access failure here should not mask the original error.
185 //
186 NextDevStat |= VSTAT_FAILED;
187 VgpuDev->VirtIo->SetDeviceStatus (VgpuDev->VirtIo, NextDevStat);
188
189 return Status;
190 }
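//
// [Editor's sketch] A minimal caller-side illustration of the contract
// documented above: the caller initializes VgpuDev->VirtIo, VirtioGpuInit()
// performs virtio-v1.0-cs04 steps 1-8, and VirtioGpuUninit() undoes a
// successful init. The wrapper function name is hypothetical and not part of
// this driver.
//
#if 0
STATIC
EFI_STATUS
ExampleBringUpVgpu (
  IN OUT VGPU_DEV  *VgpuDev    // VgpuDev->VirtIo set up by the caller
  )
{
  EFI_STATUS  Status;

  Status = VirtioGpuInit (VgpuDev);
  if (EFI_ERROR (Status)) {
    //
    // VirtioGpuInit() has already set the FAILED status bit on the device.
    //
    return Status;
  }

  //
  // ... submit synchronous GPU commands via the primitives below ...
  //

  //
  // Tear down VirtIo messaging once the device is no longer needed, or when a
  // later setup step fails.
  //
  VirtioGpuUninit (VgpuDev);
  return EFI_SUCCESS;
}
#endif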
191
192 /**
193 De-configure the VirtIo GPU device that underlies VgpuDev.
194
195 @param[in,out] VgpuDev The VGPU_DEV object to tear down VirtIo messaging
196 for. On input, the caller is responsible for having
197 called VirtioGpuInit(). On output, VgpuDev->Ring has
198 been uninitialized; VirtIo GPU commands (primitives)
199 can no longer be submitted to the device.
200 **/
201 VOID
202 VirtioGpuUninit (
203 IN OUT VGPU_DEV *VgpuDev
204 )
205 {
206 //
207 // Resetting the VirtIo device makes it release its resources and forget its
208 // configuration.
209 //
210 VgpuDev->VirtIo->SetDeviceStatus (VgpuDev->VirtIo, 0);
211 VgpuDev->VirtIo->UnmapSharedBuffer (VgpuDev->VirtIo, VgpuDev->RingMap);
212 VirtioRingUninit (VgpuDev->VirtIo, &VgpuDev->Ring);
213 }
214
215 /**
216 Allocate, zero and map memory, for bus master common buffer operation, to be
217 attached as backing store to a host-side VirtIo GPU resource.
218
219 @param[in] VgpuDev The VGPU_DEV object that represents the VirtIo GPU
220 device.
221
222 @param[in] NumberOfPages The number of whole pages to allocate and map.
223
224 @param[out] HostAddress The system memory address of the allocated area.
225
226 @param[out] DeviceAddress The bus master device address of the allocated
227 area. The VirtIo GPU device may be programmed to
228 access the allocated area through DeviceAddress;
229 DeviceAddress is to be passed to the
230 VirtioGpuResourceAttachBacking() function, as the
231 BackingStoreDeviceAddress parameter.
232
233 @param[out] Mapping A resulting token to pass to
234 VirtioGpuUnmapAndFreeBackingStore().
235
236 @retval EFI_SUCCESS The requested number of pages has been allocated, zeroed
237 and mapped.
238
239 @return Status codes propagated from
240 VgpuDev->VirtIo->AllocateSharedPages() and
241 VirtioMapAllBytesInSharedBuffer().
242 **/
243 EFI_STATUS
244 VirtioGpuAllocateZeroAndMapBackingStore (
245 IN VGPU_DEV *VgpuDev,
246 IN UINTN NumberOfPages,
247 OUT VOID **HostAddress,
248 OUT EFI_PHYSICAL_ADDRESS *DeviceAddress,
249 OUT VOID **Mapping
250 )
251 {
252 EFI_STATUS Status;
253 VOID *NewHostAddress;
254
255 Status = VgpuDev->VirtIo->AllocateSharedPages (
256 VgpuDev->VirtIo,
257 NumberOfPages,
258 &NewHostAddress
259 );
260 if (EFI_ERROR (Status)) {
261 return Status;
262 }
263
264 //
265 // Avoid exposing stale data to the device even temporarily: zero the area
266 // before mapping it.
267 //
268 ZeroMem (NewHostAddress, EFI_PAGES_TO_SIZE (NumberOfPages));
269
270 Status = VirtioMapAllBytesInSharedBuffer (
271 VgpuDev->VirtIo, // VirtIo
272 VirtioOperationBusMasterCommonBuffer, // Operation
273 NewHostAddress, // HostAddress
274 EFI_PAGES_TO_SIZE (NumberOfPages), // NumberOfBytes
275 DeviceAddress, // DeviceAddress
276 Mapping // Mapping
277 );
278 if (EFI_ERROR (Status)) {
279 goto FreeSharedPages;
280 }
281
282 *HostAddress = NewHostAddress;
283 return EFI_SUCCESS;
284
285 FreeSharedPages:
286 VgpuDev->VirtIo->FreeSharedPages (
287 VgpuDev->VirtIo,
288 NumberOfPages,
289 NewHostAddress
290 );
291 return Status;
292 }
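//
// [Editor's sketch] Sizing a backing store for a Width x Height, 32 bits per
// pixel frame buffer before calling the allocator above. EFI_SIZE_TO_PAGES()
// is the standard MdePkg macro; Width, Height and the VgpuDev pointer are
// assumed to be in scope in the (hypothetical) calling function.
//
#if 0
  UINTN                 NumberOfPages;
  VOID                  *Backing;
  EFI_PHYSICAL_ADDRESS  BackingDeviceAddress;
  VOID                  *BackingMap;
  EFI_STATUS            Status;

  //
  // Four bytes per pixel, rounded up to whole pages.
  //
  NumberOfPages = EFI_SIZE_TO_PAGES ((UINTN)Width * Height * sizeof (UINT32));

  Status = VirtioGpuAllocateZeroAndMapBackingStore (
             VgpuDev,
             NumberOfPages,
             &Backing,
             &BackingDeviceAddress,   // later passed to VirtioGpuResourceAttachBacking()
             &BackingMap              // later passed to VirtioGpuUnmapAndFreeBackingStore()
             );
#endif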
293
294 /**
295 Unmap and free memory originally allocated and mapped with
296 VirtioGpuAllocateZeroAndMapBackingStore().
297
298 If the memory allocated and mapped with
299 VirtioGpuAllocateZeroAndMapBackingStore() was attached to a host-side VirtIo
300 GPU resource with VirtioGpuResourceAttachBacking(), then the caller is
301 responsible for detaching the backing store from the same resource, with
302 VirtioGpuResourceDetachBacking(), before calling this function.
303
304 @param[in] VgpuDev The VGPU_DEV object that represents the VirtIo GPU
305 device.
306
307 @param[in] NumberOfPages The NumberOfPages parameter originally passed to
308 VirtioGpuAllocateZeroAndMapBackingStore().
309
310 @param[in] HostAddress The HostAddress value originally output by
311 VirtioGpuAllocateZeroAndMapBackingStore().
312
313 @param[in] Mapping The token that was originally output by
314 VirtioGpuAllocateZeroAndMapBackingStore().
315 **/
316 VOID
317 VirtioGpuUnmapAndFreeBackingStore (
318 IN VGPU_DEV *VgpuDev,
319 IN UINTN NumberOfPages,
320 IN VOID *HostAddress,
321 IN VOID *Mapping
322 )
323 {
324 VgpuDev->VirtIo->UnmapSharedBuffer (
325 VgpuDev->VirtIo,
326 Mapping
327 );
328 VgpuDev->VirtIo->FreeSharedPages (
329 VgpuDev->VirtIo,
330 NumberOfPages,
331 HostAddress
332 );
333 }
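//
// [Editor's sketch] The intended lifecycle of a backing store, stressing the
// ordering constraint documented above: detach the backing from the host-side
// resource before unmapping and freeing the guest pages. ResourceId is a
// hypothetical, caller-chosen non-zero ID; error handling is elided.
//
#if 0
  //
  // Setup: allocate/zero/map, then hand the bus master device address to the
  // host-side resource.
  //
  Status = VirtioGpuAllocateZeroAndMapBackingStore (
             VgpuDev,
             NumberOfPages,
             &Backing,
             &BackingDeviceAddress,
             &BackingMap
             );
  Status = VirtioGpuResourceAttachBacking (
             VgpuDev,
             ResourceId,
             BackingDeviceAddress,
             NumberOfPages
             );

  //
  // Teardown, in reverse order: the detach command is fenced inside
  // VirtioGpuResourceDetachBacking(), so the host has released all references
  // to the pages by the time they are unmapped and freed.
  //
  Status = VirtioGpuResourceDetachBacking (VgpuDev, ResourceId);
  VirtioGpuUnmapAndFreeBackingStore (VgpuDev, NumberOfPages, Backing, BackingMap);
#endif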
334
335 /**
336 EFI_EVENT_NOTIFY function for the VGPU_DEV.ExitBoot event. It resets the
337 VirtIo device, causing it to release its resources and to forget its
338 configuration.
339
340 This function may only be called (that is, VGPU_DEV.ExitBoot may only be
341 signaled) after VirtioGpuInit() returns and before VirtioGpuUninit() is
342 called.
343
344 @param[in] Event Event whose notification function is being invoked.
345
346 @param[in] Context Pointer to the associated VGPU_DEV object.
347 **/
348 VOID
349 EFIAPI
350 VirtioGpuExitBoot (
351 IN EFI_EVENT Event,
352 IN VOID *Context
353 )
354 {
355 VGPU_DEV *VgpuDev;
356
357 DEBUG ((DEBUG_VERBOSE, "%a: Context=0x%p\n", __FUNCTION__, Context));
358 VgpuDev = Context;
359 VgpuDev->VirtIo->SetDeviceStatus (VgpuDev->VirtIo, 0);
360 }
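//
// [Editor's sketch] How a caller might arm the notification function above.
// This assumes gBS from <Library/UefiBootServicesTableLib.h> and that the
// event handle is stored in VGPU_DEV.ExitBoot; the actual event creation lives
// elsewhere in the driver and may differ in TPL or placement.
//
#if 0
  Status = gBS->CreateEvent (
                  EVT_SIGNAL_EXIT_BOOT_SERVICES, // signaled by ExitBootServices()
                  TPL_CALLBACK,
                  VirtioGpuExitBoot,             // notification function above
                  VgpuDev,                       // becomes the Context argument
                  &VgpuDev->ExitBoot
                  );
#endif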
361
362 /**
363 Internal utility function that sends a request to the VirtIo GPU device
364 model, awaits the answer from the host, and returns a status.
365
366 @param[in,out] VgpuDev The VGPU_DEV object that represents the VirtIo GPU
367 device. The caller is responsible for having
368 successfully invoked VirtioGpuInit() on VgpuDev
369 previously, while VirtioGpuUninit() must not have
370 been called on VgpuDev.
371
372 @param[in] RequestType The type of the request. The caller is responsible
373 for providing a VirtioGpuCmd* RequestType which, on
374 success, elicits a VirtioGpuRespOkNodata response
375 from the host.
376
377 @param[in] Fence Whether to enable fencing for this request. Fencing
378 forces the host to complete the command before
379 producing a response. If Fence is TRUE, then
380 VgpuDev->FenceId is consumed, and incremented.
381
382 @param[in,out] Header Pointer to the caller-allocated request object. The
383 request must start with VIRTIO_GPU_CONTROL_HEADER.
384 This function overwrites all fields of Header before
385 submitting the request to the host:
386
387 - it sets Type from RequestType,
388
389 - it sets Flags and FenceId based on Fence,
390
391 - it zeroes CtxId and Padding.
392
393 @param[in] RequestSize Size of the entire caller-allocated request object,
394 including the leading VIRTIO_GPU_CONTROL_HEADER.
395
396 @param[in] ResponseType The type of the response (VirtioGpuResp*).
397
398 @param[in,out] Response Pointer to the caller-allocated response object. The
399 response must start with VIRTIO_GPU_CONTROL_HEADER.
400
401 @param[in] ResponseSize Size of the entire caller-allocated response object,
402 including the leading VIRTIO_GPU_CONTROL_HEADER.
403
404 @retval EFI_SUCCESS Operation successful.
405
406 @retval EFI_DEVICE_ERROR The host rejected the request. The host error
407 code has been logged on the DEBUG_ERROR level.
408
409 @return Codes for unexpected errors in VirtIo
410 messaging, or request/response
411 mapping/unmapping.
412 **/
413 STATIC
414 EFI_STATUS
415 VirtioGpuSendCommandWithReply (
416 IN OUT VGPU_DEV *VgpuDev,
417 IN VIRTIO_GPU_CONTROL_TYPE RequestType,
418 IN BOOLEAN Fence,
419 IN OUT volatile VIRTIO_GPU_CONTROL_HEADER *Header,
420 IN UINTN RequestSize,
421 IN VIRTIO_GPU_CONTROL_TYPE ResponseType,
422 IN OUT volatile VIRTIO_GPU_CONTROL_HEADER *Response,
423 IN UINTN ResponseSize
424 )
425 {
426 DESC_INDICES Indices;
427 EFI_STATUS Status;
428 UINT32 ResponseSizeRet;
429 EFI_PHYSICAL_ADDRESS RequestDeviceAddress;
430 VOID *RequestMap;
431 EFI_PHYSICAL_ADDRESS ResponseDeviceAddress;
432 VOID *ResponseMap;
433
434 //
435 // Initialize Header.
436 //
437 Header->Type = RequestType;
438 if (Fence) {
439 Header->Flags = VIRTIO_GPU_FLAG_FENCE;
440 Header->FenceId = VgpuDev->FenceId++;
441 } else {
442 Header->Flags = 0;
443 Header->FenceId = 0;
444 }
445
446 Header->CtxId = 0;
447 Header->Padding = 0;
448
449 ASSERT (RequestSize >= sizeof *Header);
450 ASSERT (RequestSize <= MAX_UINT32);
451
452 //
453 // Map request and response to bus master device addresses.
454 //
455 Status = VirtioMapAllBytesInSharedBuffer (
456 VgpuDev->VirtIo,
457 VirtioOperationBusMasterRead,
458 (VOID *)Header,
459 RequestSize,
460 &RequestDeviceAddress,
461 &RequestMap
462 );
463 if (EFI_ERROR (Status)) {
464 return Status;
465 }
466
467 Status = VirtioMapAllBytesInSharedBuffer (
468 VgpuDev->VirtIo,
469 VirtioOperationBusMasterWrite,
470 (VOID *)Response,
471 ResponseSize,
472 &ResponseDeviceAddress,
473 &ResponseMap
474 );
475 if (EFI_ERROR (Status)) {
476 goto UnmapRequest;
477 }
478
479 //
480 // Compose the descriptor chain.
481 //
482 VirtioPrepare (&VgpuDev->Ring, &Indices);
483 VirtioAppendDesc (
484 &VgpuDev->Ring,
485 RequestDeviceAddress,
486 (UINT32)RequestSize,
487 VRING_DESC_F_NEXT,
488 &Indices
489 );
490 VirtioAppendDesc (
491 &VgpuDev->Ring,
492 ResponseDeviceAddress,
493 (UINT32)ResponseSize,
494 VRING_DESC_F_WRITE,
495 &Indices
496 );
497
498 //
499 // Send the command.
500 //
501 Status = VirtioFlush (
502 VgpuDev->VirtIo,
503 VIRTIO_GPU_CONTROL_QUEUE,
504 &VgpuDev->Ring,
505 &Indices,
506 &ResponseSizeRet
507 );
508 if (EFI_ERROR (Status)) {
509 goto UnmapResponse;
510 }
511
512 //
513 // Verify response size.
514 //
515 if (ResponseSize != ResponseSizeRet) {
516 DEBUG ((
517 DEBUG_ERROR,
518 "%a: malformed response to Request=0x%x\n",
519 __FUNCTION__,
520 (UINT32)RequestType
521 ));
522 Status = EFI_PROTOCOL_ERROR;
523 goto UnmapResponse;
524 }
525
526 //
527 // Unmap response and request, in reverse order of mapping. On error, the
528 // respective mapping is invalidated anyway, only the data may not have been
529 // committed to system memory (in case of VirtioOperationBusMasterWrite).
530 //
531 Status = VgpuDev->VirtIo->UnmapSharedBuffer (VgpuDev->VirtIo, ResponseMap);
532 if (EFI_ERROR (Status)) {
533 goto UnmapRequest;
534 }
535
536 Status = VgpuDev->VirtIo->UnmapSharedBuffer (VgpuDev->VirtIo, RequestMap);
537 if (EFI_ERROR (Status)) {
538 return Status;
539 }
540
541 //
542 // Parse the response.
543 //
544 if (Response->Type == (UINT32)ResponseType) {
545 return EFI_SUCCESS;
546 }
547
548 DEBUG ((
549 DEBUG_ERROR,
550 "%a: Request=0x%x Response=0x%x (expected 0x%x)\n",
551 __FUNCTION__,
552 (UINT32)RequestType,
553 Response->Type,
554 ResponseType
555 ));
556 return EFI_DEVICE_ERROR;
557
558 UnmapResponse:
559 VgpuDev->VirtIo->UnmapSharedBuffer (VgpuDev->VirtIo, ResponseMap);
560
561 UnmapRequest:
562 VgpuDev->VirtIo->UnmapSharedBuffer (VgpuDev->VirtIo, RequestMap);
563
564 return Status;
565 }
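//
// [Editor's note] The descriptor chain composed above always has the same
// two-descriptor shape, matching the "request, response" convention that
// VirtioGpuInit() checks for (QueueSize >= 2):
//
//   desc[0]: RequestDeviceAddress,  RequestSize,  VRING_DESC_F_NEXT
//            (device-readable request, chained to desc[1])
//   desc[1]: ResponseDeviceAddress, ResponseSize, VRING_DESC_F_WRITE
//            (device-writable response)
//
// VirtioFlush() publishes the chain on VIRTIO_GPU_CONTROL_QUEUE, waits for the
// device to consume it, and reports in ResponseSizeRet how many bytes the
// device wrote back.
//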
566
567 /**
568 Simplified version of VirtioGpuSendCommandWithReply() for commands
569 which do not send back any data.
570 **/
571 STATIC
572 EFI_STATUS
573 VirtioGpuSendCommand (
574 IN OUT VGPU_DEV *VgpuDev,
575 IN VIRTIO_GPU_CONTROL_TYPE RequestType,
576 IN BOOLEAN Fence,
577 IN OUT volatile VIRTIO_GPU_CONTROL_HEADER *Header,
578 IN UINTN RequestSize
579 )
580 {
581 volatile VIRTIO_GPU_CONTROL_HEADER Response;
582
583 return VirtioGpuSendCommandWithReply (
584 VgpuDev,
585 RequestType,
586 Fence,
587 Header,
588 RequestSize,
589 VirtioGpuRespOkNodata,
590 &Response,
591 sizeof (Response)
592 );
593 }
594
595 /**
596 The following functions send requests to the VirtIo GPU device model, await
597 the answer from the host, and return a status. They share the following
598 interface details:
599
600 @param[in,out] VgpuDev The VGPU_DEV object that represents the VirtIo GPU
601 device. The caller is responsible for having
602 successfully invoked VirtioGpuInit() on VgpuDev
603 previously, while VirtioGpuUninit() must not have
604 been called on VgpuDev.
605
606 @retval EFI_INVALID_PARAMETER Invalid command-specific parameters were
607 detected by this driver.
608
609 @retval EFI_SUCCESS Operation successful.
610
611 @retval EFI_DEVICE_ERROR The host rejected the request. The host error
612 code has been logged on the DEBUG_ERROR level.
613
614 @return Codes for unexpected errors in VirtIo
615 messaging.
616
617 For the command-specific parameters, please consult the GPU Device section of
618 the VirtIo 1.0 specification (see references in
619 "OvmfPkg/Include/IndustryStandard/VirtioGpu.h").
620 **/
621 EFI_STATUS
622 VirtioGpuResourceCreate2d (
623 IN OUT VGPU_DEV *VgpuDev,
624 IN UINT32 ResourceId,
625 IN VIRTIO_GPU_FORMATS Format,
626 IN UINT32 Width,
627 IN UINT32 Height
628 )
629 {
630 volatile VIRTIO_GPU_RESOURCE_CREATE_2D Request;
631
632 if (ResourceId == 0) {
633 return EFI_INVALID_PARAMETER;
634 }
635
636 Request.ResourceId = ResourceId;
637 Request.Format = (UINT32)Format;
638 Request.Width = Width;
639 Request.Height = Height;
640
641 return VirtioGpuSendCommand (
642 VgpuDev,
643 VirtioGpuCmdResourceCreate2d,
644 FALSE, // Fence
645 &Request.Header,
646 sizeof Request
647 );
648 }
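//
// [Editor's sketch] Creating a host-side 2D resource for a 1024x768 scanout.
// The 32-bit BGRX format name is assumed from
// "OvmfPkg/Include/IndustryStandard/VirtioGpu.h"; ResourceId 1 is an arbitrary
// non-zero, caller-chosen ID, and VgpuDev is assumed to be in scope.
//
#if 0
  Status = VirtioGpuResourceCreate2d (
             VgpuDev,
             1,                             // ResourceId (must be non-zero)
             VirtioGpuFormatB8G8R8X8Unorm,  // assumed VIRTIO_GPU_FORMATS member
             1024,                          // Width
             768                            // Height
             );
#endif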
649
650 EFI_STATUS
651 VirtioGpuResourceUnref (
652 IN OUT VGPU_DEV *VgpuDev,
653 IN UINT32 ResourceId
654 )
655 {
656 volatile VIRTIO_GPU_RESOURCE_UNREF Request;
657
658 if (ResourceId == 0) {
659 return EFI_INVALID_PARAMETER;
660 }
661
662 Request.ResourceId = ResourceId;
663 Request.Padding = 0;
664
665 return VirtioGpuSendCommand (
666 VgpuDev,
667 VirtioGpuCmdResourceUnref,
668 FALSE, // Fence
669 &Request.Header,
670 sizeof Request
671 );
672 }
673
674 EFI_STATUS
675 VirtioGpuResourceAttachBacking (
676 IN OUT VGPU_DEV *VgpuDev,
677 IN UINT32 ResourceId,
678 IN EFI_PHYSICAL_ADDRESS BackingStoreDeviceAddress,
679 IN UINTN NumberOfPages
680 )
681 {
682 volatile VIRTIO_GPU_RESOURCE_ATTACH_BACKING Request;
683
684 if (ResourceId == 0) {
685 return EFI_INVALID_PARAMETER;
686 }
687
688 Request.ResourceId = ResourceId;
689 Request.NrEntries = 1;
690 Request.Entry.Addr = BackingStoreDeviceAddress;
691 Request.Entry.Length = (UINT32)EFI_PAGES_TO_SIZE (NumberOfPages);
692 Request.Entry.Padding = 0;
693
694 return VirtioGpuSendCommand (
695 VgpuDev,
696 VirtioGpuCmdResourceAttachBacking,
697 FALSE, // Fence
698 &Request.Header,
699 sizeof Request
700 );
701 }
702
703 EFI_STATUS
704 VirtioGpuResourceDetachBacking (
705 IN OUT VGPU_DEV *VgpuDev,
706 IN UINT32 ResourceId
707 )
708 {
709 volatile VIRTIO_GPU_RESOURCE_DETACH_BACKING Request;
710
711 if (ResourceId == 0) {
712 return EFI_INVALID_PARAMETER;
713 }
714
715 Request.ResourceId = ResourceId;
716 Request.Padding = 0;
717
718 //
719 // In this case, we set Fence to TRUE, because after this function returns,
720 // the caller might reasonably want to repurpose the backing pages
721 // immediately. Thus we should ensure that the host releases all references
722 // to the backing pages before we return.
723 //
724 return VirtioGpuSendCommand (
725 VgpuDev,
726 VirtioGpuCmdResourceDetachBacking,
727 TRUE, // Fence
728 &Request.Header,
729 sizeof Request
730 );
731 }
732
733 EFI_STATUS
734 VirtioGpuSetScanout (
735 IN OUT VGPU_DEV *VgpuDev,
736 IN UINT32 X,
737 IN UINT32 Y,
738 IN UINT32 Width,
739 IN UINT32 Height,
740 IN UINT32 ScanoutId,
741 IN UINT32 ResourceId
742 )
743 {
744 volatile VIRTIO_GPU_SET_SCANOUT Request;
745
746 //
747 // Unlike for most other commands, ResourceId=0 is valid; it
748 // is used to disable a scanout.
749 //
750 Request.Rectangle.X = X;
751 Request.Rectangle.Y = Y;
752 Request.Rectangle.Width = Width;
753 Request.Rectangle.Height = Height;
754 Request.ScanoutId = ScanoutId;
755 Request.ResourceId = ResourceId;
756
757 return VirtioGpuSendCommand (
758 VgpuDev,
759 VirtioGpuCmdSetScanout,
760 FALSE, // Fence
761 &Request.Header,
762 sizeof Request
763 );
764 }
765
766 EFI_STATUS
767 VirtioGpuTransferToHost2d (
768 IN OUT VGPU_DEV *VgpuDev,
769 IN UINT32 X,
770 IN UINT32 Y,
771 IN UINT32 Width,
772 IN UINT32 Height,
773 IN UINT64 Offset,
774 IN UINT32 ResourceId
775 )
776 {
777 volatile VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D Request;
778
779 if (ResourceId == 0) {
780 return EFI_INVALID_PARAMETER;
781 }
782
783 Request.Rectangle.X = X;
784 Request.Rectangle.Y = Y;
785 Request.Rectangle.Width = Width;
786 Request.Rectangle.Height = Height;
787 Request.Offset = Offset;
788 Request.ResourceId = ResourceId;
789 Request.Padding = 0;
790
791 return VirtioGpuSendCommand (
792 VgpuDev,
793 VirtioGpuCmdTransferToHost2d,
794 FALSE, // Fence
795 &Request.Header,
796 sizeof Request
797 );
798 }
799
800 EFI_STATUS
801 VirtioGpuResourceFlush (
802 IN OUT VGPU_DEV *VgpuDev,
803 IN UINT32 X,
804 IN UINT32 Y,
805 IN UINT32 Width,
806 IN UINT32 Height,
807 IN UINT32 ResourceId
808 )
809 {
810 volatile VIRTIO_GPU_RESOURCE_FLUSH Request;
811
812 if (ResourceId == 0) {
813 return EFI_INVALID_PARAMETER;
814 }
815
816 Request.Rectangle.X = X;
817 Request.Rectangle.Y = Y;
818 Request.Rectangle.Width = Width;
819 Request.Rectangle.Height = Height;
820 Request.ResourceId = ResourceId;
821 Request.Padding = 0;
822
823 return VirtioGpuSendCommand (
824 VgpuDev,
825 VirtioGpuCmdResourceFlush,
826 FALSE, // Fence
827 &Request.Header,
828 sizeof Request
829 );
830 }
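//
// [Editor's sketch] How the command wrappers above are typically combined: a
// one-time mode set, followed by a transfer+flush pair for every frame (or
// dirty rectangle) pushed to the display. All IDs and geometry values are
// hypothetical and error handling is elided.
//
#if 0
  //
  // One-time setup: create the resource, give it guest backing pages, and
  // point scanout 0 at it.
  //
  VirtioGpuResourceCreate2d (VgpuDev, ResourceId, Format, Width, Height);
  VirtioGpuResourceAttachBacking (VgpuDev, ResourceId, BackingDeviceAddress, NumberOfPages);
  VirtioGpuSetScanout (VgpuDev, 0, 0, Width, Height, 0 /* ScanoutId */, ResourceId);

  //
  // Per-frame update: copy the dirty rectangle from the backing store into the
  // host resource, then ask the host to refresh the scanout from it.
  //
  VirtioGpuTransferToHost2d (VgpuDev, X, Y, Width, Height, Offset, ResourceId);
  VirtioGpuResourceFlush (VgpuDev, X, Y, Width, Height, ResourceId);
#endif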
831
832 EFI_STATUS
833 VirtioGpuGetDisplayInfo (
834 IN OUT VGPU_DEV *VgpuDev,
835 volatile VIRTIO_GPU_RESP_DISPLAY_INFO *Response
836 )
837 {
838 volatile VIRTIO_GPU_CONTROL_HEADER Request;
839
840 return VirtioGpuSendCommandWithReply (
841 VgpuDev,
842 VirtioGpuCmdGetDisplayInfo,
843 FALSE, // Fence
844 &Request,
845 sizeof Request,
846 VirtioGpuRespOkDisplayInfo,
847 &Response->Header,
848 sizeof *Response
849 );
850 }
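//
// [Editor's sketch] Querying the host's preferred display geometry with the
// function above. The Pmodes[] / Enabled / Rectangle field names are assumed
// to mirror struct virtio_gpu_resp_display_info from the VirtIo 1.0 GPU
// section, as declared in "OvmfPkg/Include/IndustryStandard/VirtioGpu.h".
//
#if 0
  VIRTIO_GPU_RESP_DISPLAY_INFO  DisplayInfo;
  EFI_STATUS                    Status;

  Status = VirtioGpuGetDisplayInfo (VgpuDev, &DisplayInfo);
  if (!EFI_ERROR (Status) && DisplayInfo.Pmodes[0].Enabled != 0) {
    //
    // Scanout #0 is enabled; its preferred size is
    // Pmodes[0].Rectangle.Width x Pmodes[0].Rectangle.Height.
    //
  }
#endif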