]> git.proxmox.com Git - mirror_edk2.git/blob - OvmfPkg/Library/VirtioLib/VirtioLib.c
OvmfPkg: librarize reusable bits from VirtioBlkDxe's SynchronousRequest()
[mirror_edk2.git] / OvmfPkg / Library / VirtioLib / VirtioLib.c
1 /** @file
2
3 Utility functions used by virtio device drivers.
4
5 Copyright (C) 2012, Red Hat, Inc.
6
7 This program and the accompanying materials are licensed and made available
8 under the terms and conditions of the BSD License which accompanies this
9 distribution. The full text of the license may be found at
10 http://opensource.org/licenses/bsd-license.php
11
12 THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS, WITHOUT
13 WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
14
15 **/
16
17 #include <IndustryStandard/Pci22.h>
18 #include <Library/BaseLib.h>
19 #include <Library/BaseMemoryLib.h>
20 #include <Library/DebugLib.h>
21 #include <Library/MemoryAllocationLib.h>
22 #include <Library/UefiBootServicesTableLib.h>
23
24 #include <Library/VirtioLib.h>
25
26
27 /**
28
29 Write a word into Region 0 of the device specified by PciIo.
30
31 Region 0 must be an iomem region. This is an internal function for the
32 driver-specific VIRTIO_CFG_WRITE() macros.
33
34 @param[in] PciIo Target PCI device.
35
36 @param[in] FieldOffset Destination offset.
37
38 @param[in] FieldSize Destination field size, must be in { 1, 2, 4, 8 }.
39
40 @param[in] Value Little endian value to write, converted to UINT64.
41 The least significant FieldSize bytes will be used.
42
43
44 @return Status code returned by PciIo->Io.Write().
45
46 **/
47 EFIAPI
48 EFI_STATUS
49 VirtioWrite (
50 IN EFI_PCI_IO_PROTOCOL *PciIo,
51 IN UINTN FieldOffset,
52 IN UINTN FieldSize,
53 IN UINT64 Value
54 )
55 {
56 UINTN Count;
57 EFI_PCI_IO_PROTOCOL_WIDTH Width;
58
59 Count = 1;
60 switch (FieldSize) {
61 case 1:
62 Width = EfiPciIoWidthUint8;
63 break;
64
65 case 2:
66 Width = EfiPciIoWidthUint16;
67 break;
68
69 case 8:
70 Count = 2;
71 // fall through
72
73 case 4:
74 Width = EfiPciIoWidthUint32;
75 break;
76
77 default:
78 ASSERT (FALSE);
79 }
80
81 return PciIo->Io.Write (
82 PciIo,
83 Width,
84 PCI_BAR_IDX0,
85 FieldOffset,
86 Count,
87 &Value
88 );
89 }
90
91
92 /**
93
94 Read a word from Region 0 of the device specified by PciIo.
95
96 Region 0 must be an iomem region. This is an internal function for the
97 driver-specific VIRTIO_CFG_READ() macros.
98
99 @param[in] PciIo Source PCI device.
100
101 @param[in] FieldOffset Source offset.
102
103 @param[in] FieldSize Source field size, must be in { 1, 2, 4, 8 }.
104
105 @param[in] BufferSize Number of bytes available in the target buffer. Must
106 equal FieldSize.
107
108 @param[out] Buffer Target buffer.
109
110
111 @return Status code returned by PciIo->Io.Read().
112
113 **/
114 EFIAPI
115 EFI_STATUS
116 VirtioRead (
117 IN EFI_PCI_IO_PROTOCOL *PciIo,
118 IN UINTN FieldOffset,
119 IN UINTN FieldSize,
120 IN UINTN BufferSize,
121 OUT VOID *Buffer
122 )
123 {
124 UINTN Count;
125 EFI_PCI_IO_PROTOCOL_WIDTH Width;
126
127 ASSERT (FieldSize == BufferSize);
128
129 Count = 1;
130 switch (FieldSize) {
131 case 1:
132 Width = EfiPciIoWidthUint8;
133 break;
134
135 case 2:
136 Width = EfiPciIoWidthUint16;
137 break;
138
139 case 8:
140 Count = 2;
141 // fall through
142
143 case 4:
144 Width = EfiPciIoWidthUint32;
145 break;
146
147 default:
148 ASSERT (FALSE);
149 }
150
151 return PciIo->Io.Read (
152 PciIo,
153 Width,
154 PCI_BAR_IDX0,
155 FieldOffset,
156 Count,
157 Buffer
158 );
159 }
160
161
/**

  Configure a virtio ring.

  This function sets up internal storage (the guest-host communication area)
  and lays out several "navigation" (ie. no-ownership) pointers to parts of
  that storage.

  Relevant sections from the virtio-0.9.5 spec:
  - 1.1 Virtqueues,
  - 2.3 Virtqueue Configuration.

  @param[in]  QueueSize The number of descriptors to allocate for the
                        virtio ring, as requested by the host.

  @param[out] Ring      The virtio ring to set up.

  @retval EFI_OUT_OF_RESOURCES AllocatePages() failed to allocate contiguous
                               pages for the requested QueueSize. Fields of
                               Ring have indeterminate value.

  @retval EFI_SUCCESS          Allocation and setup successful. Ring->Base
                               (and nothing else) is responsible for
                               deallocation.

**/
EFI_STATUS
EFIAPI
VirtioRingInit (
  IN  UINT16 QueueSize,
  OUT VRING  *Ring
  )
{
  UINTN          RingSize;
  volatile UINT8 *RingPagesPtr;

  //
  // First allocation unit: the descriptor table plus the available ring
  // (flags, index, ring entries, trailing UsedEvent), rounded up to a whole
  // number of pages.
  //
  RingSize = ALIGN_VALUE (
               sizeof *Ring->Desc * QueueSize +
               sizeof *Ring->Avail.Flags +
               sizeof *Ring->Avail.Idx +
               sizeof *Ring->Avail.Ring * QueueSize +
               sizeof *Ring->Avail.UsedEvent,
               EFI_PAGE_SIZE);

  //
  // Second allocation unit: the used ring, page-aligned as required by
  // virtio-0.9.5, 2.3 Virtqueue Configuration.
  //
  RingSize += ALIGN_VALUE (
                sizeof *Ring->Used.Flags +
                sizeof *Ring->Used.Idx +
                sizeof *Ring->Used.UsedElem * QueueSize +
                sizeof *Ring->Used.AvailEvent,
                EFI_PAGE_SIZE);

  Ring->NumPages = EFI_SIZE_TO_PAGES (RingSize);
  Ring->Base = AllocatePages (Ring->NumPages);
  if (Ring->Base == NULL) {
    return EFI_OUT_OF_RESOURCES;
  }
  //
  // The spec requires the ring contents to start out zeroed.
  //
  SetMem (Ring->Base, RingSize, 0x00);
  RingPagesPtr = Ring->Base;

  //
  // Carve the navigation pointers out of the allocation, in layout order.
  // These pointers do not own storage; only Ring->Base does.
  //
  Ring->Desc = (volatile VOID *) RingPagesPtr;
  RingPagesPtr += sizeof *Ring->Desc * QueueSize;

  Ring->Avail.Flags = (volatile VOID *) RingPagesPtr;
  RingPagesPtr += sizeof *Ring->Avail.Flags;

  Ring->Avail.Idx = (volatile VOID *) RingPagesPtr;
  RingPagesPtr += sizeof *Ring->Avail.Idx;

  Ring->Avail.Ring = (volatile VOID *) RingPagesPtr;
  RingPagesPtr += sizeof *Ring->Avail.Ring * QueueSize;

  Ring->Avail.UsedEvent = (volatile VOID *) RingPagesPtr;
  RingPagesPtr += sizeof *Ring->Avail.UsedEvent;

  //
  // Skip to the next page boundary, where the used ring begins (matching the
  // second ALIGN_VALUE term in the size calculation above).
  //
  RingPagesPtr = (volatile UINT8 *) Ring->Base +
                 ALIGN_VALUE (RingPagesPtr - (volatile UINT8 *) Ring->Base,
                   EFI_PAGE_SIZE);

  Ring->Used.Flags = (volatile VOID *) RingPagesPtr;
  RingPagesPtr += sizeof *Ring->Used.Flags;

  Ring->Used.Idx = (volatile VOID *) RingPagesPtr;
  RingPagesPtr += sizeof *Ring->Used.Idx;

  Ring->Used.UsedElem = (volatile VOID *) RingPagesPtr;
  RingPagesPtr += sizeof *Ring->Used.UsedElem * QueueSize;

  Ring->Used.AvailEvent = (volatile VOID *) RingPagesPtr;
  RingPagesPtr += sizeof *Ring->Used.AvailEvent;

  Ring->QueueSize = QueueSize;
  return EFI_SUCCESS;
}
255
256
257 /**
258
259 Tear down the internal resources of a configured virtio ring.
260
261 The caller is responsible to stop the host from using this ring before
262 invoking this function: the VSTAT_DRIVER_OK bit must be clear in
263 VhdrDeviceStatus.
264
265 @param[out] Ring The virtio ring to clean up.
266
267 **/
268 VOID
269 EFIAPI
270 VirtioRingUninit (
271 IN OUT VRING *Ring
272 )
273 {
274 FreePages (Ring->Base, Ring->NumPages);
275 SetMem (Ring, sizeof *Ring, 0x00);
276 }
277
278
279 /**
280
281 Turn off interrupt notifications from the host, and prepare for appending
282 multiple descriptors to the virtio ring.
283
284 The calling driver must be in VSTAT_DRIVER_OK state.
285
286 @param[in out] Ring The virtio ring we intend to append descriptors to.
287
288 @param[out] Indices The DESC_INDICES structure to initialize.
289
290 **/
291 VOID
292 EFIAPI
293 VirtioPrepare (
294 IN OUT VRING *Ring,
295 OUT DESC_INDICES *Indices
296 )
297 {
298 //
299 // Prepare for virtio-0.9.5, 2.4.2 Receiving Used Buffers From the Device.
300 // We're going to poll the answer, the host should not send an interrupt.
301 //
302 *Ring->Avail.Flags = (UINT16) VRING_AVAIL_F_NO_INTERRUPT;
303
304 //
305 // Prepare for virtio-0.9.5, 2.4.1 Supplying Buffers to the Device.
306 //
307 Indices->HeadIdx = *Ring->Avail.Idx;
308 Indices->NextAvailIdx = Indices->HeadIdx;
309 }
310
311
/**

  Append a contiguous buffer for transmission / reception via the virtio ring.

  This function implements the following sections from virtio-0.9.5:
  - 2.4.1.1 Placing Buffers into the Descriptor Table
  - 2.4.1.2 Updating the Available Ring

  Free space is taken as granted, since the individual drivers support only
  synchronous requests and host side status is processed in lock-step with
  request submission. It is the calling driver's responsibility to verify the
  ring size in advance.

  The caller is responsible for initializing *Indices with VirtioPrepare()
  first.

  @param[in out] Ring           The virtio ring to append the buffer to, as a
                                descriptor.

  @param [in] BufferPhysAddr    (Guest pseudo-physical) start address of the
                                transmit / receive buffer.

  @param [in] BufferSize        Number of bytes to transmit or receive.

  @param [in] Flags             A bitmask of VRING_DESC_F_* flags. The caller
                                computes this mask dependent on further buffers
                                to append and transfer direction.
                                VRING_DESC_F_INDIRECT is unsupported. The
                                VRING_DESC.Next field is always set, but the
                                host only interprets it dependent on
                                VRING_DESC_F_NEXT.

  In *Indices:

  @param [in] HeadIdx           The index identifying the head buffer (first
                                buffer appended) belonging to this same
                                request.

  @param [in out] NextAvailIdx  On input, the index identifying the next
                                descriptor available to carry the buffer. On
                                output, incremented by one, modulo 2^16.

**/
VOID
EFIAPI
VirtioAppendDesc (
  IN OUT VRING        *Ring,
  IN     UINTN        BufferPhysAddr,
  IN     UINT32       BufferSize,
  IN     UINT16       Flags,
  IN OUT DESC_INDICES *Indices
  )
{
  volatile VRING_DESC *Desc;

  //
  // Fill the next free descriptor slot; indices wrap around the descriptor
  // table modulo QueueSize.
  //
  Desc        = &Ring->Desc[Indices->NextAvailIdx % Ring->QueueSize];
  Desc->Addr  = BufferPhysAddr;
  Desc->Len   = BufferSize;
  Desc->Flags = Flags;
  //
  // Store the head descriptor index of this request in the available ring
  // slot matching the current position, then advance NextAvailIdx.
  // NOTE(review): every append writes an available ring entry (always the
  // head index), not just the first buffer of a chain; this relies on the
  // lock-step flush scheme described above -- confirm against VirtioFlush()
  // callers before reusing this helper in a different submission model.
  //
  Ring->Avail.Ring[Indices->NextAvailIdx++ % Ring->QueueSize] =
    Indices->HeadIdx % Ring->QueueSize;
  //
  // Unconditionally link to the slot a follow-up buffer would occupy; the
  // host honors it only if VRING_DESC_F_NEXT is set in Flags (see above).
  //
  Desc->Next = Indices->NextAvailIdx % Ring->QueueSize;
}
375
376
/**

  Notify the host about appended descriptors and wait until it processes the
  last one (ie. all of them).

  @param[in] PciIo        The target virtio PCI device to notify.

  @param[in] VirtQueueId  Identifies the queue for the target device.

  @param[in out] Ring     The virtio ring with descriptors to submit.

  @param[in] Indices      The function waits until the host processes
                          descriptors up to Indices->NextAvailIdx.


  @return Error code from VirtioWrite() if it fails.

  @retval EFI_SUCCESS     Otherwise, the host processed all descriptors.

**/
EFI_STATUS
EFIAPI
VirtioFlush (
  IN     EFI_PCI_IO_PROTOCOL *PciIo,
  IN     UINT16              VirtQueueId,
  IN OUT VRING               *Ring,
  IN     DESC_INDICES        *Indices
  )
{
  EFI_STATUS Status;
  UINTN      PollPeriodUsecs;

  //
  // virtio-0.9.5, 2.4.1.3 Updating the Index Field
  //
  // The fence orders the descriptor / available ring stores made by
  // VirtioAppendDesc() before the index store that publishes them.
  //
  MemoryFence();
  *Ring->Avail.Idx = Indices->NextAvailIdx;

  //
  // virtio-0.9.5, 2.4.1.4 Notifying the Device -- gratuitous notifications are
  // OK.
  //
  // Second fence: the index must be visible before the device is kicked via
  // the QueueNotify register.
  //
  MemoryFence();
  Status = VirtioWrite (
             PciIo,
             OFFSET_OF (VIRTIO_HDR, VhdrQueueNotify),
             sizeof (UINT16),
             VirtQueueId
             );
  if (EFI_ERROR (Status)) {
    return Status;
  }

  //
  // virtio-0.9.5, 2.4.2 Receiving Used Buffers From the Device
  // Wait until the host processes and acknowledges our descriptor chain. The
  // condition we use for polling is greatly simplified and relies on the
  // synchronous, lock-step progress.
  //
  // Keep slowing down until we reach a poll period of slightly above 1 ms.
  //
  PollPeriodUsecs = 1;
  //
  // The fences around the volatile Used.Idx reads keep this poll loop from
  // being reordered against the host's used ring updates.
  //
  MemoryFence();
  while (*Ring->Used.Idx != Indices->NextAvailIdx) {
    gBS->Stall (PollPeriodUsecs); // calls AcpiTimerLib::MicroSecondDelay

    //
    // Exponential backoff, capped at a 1024 us (~1 ms) poll period.
    //
    if (PollPeriodUsecs < 1024) {
      PollPeriodUsecs *= 2;
    }
    MemoryFence();
  }

  return EFI_SUCCESS;
}