OvmfPkg/Library/VirtioLib/VirtioLib.c
/** @file

  Utility functions used by virtio device drivers.

  Copyright (C) 2012, Red Hat, Inc.

  This program and the accompanying materials are licensed and made available
  under the terms and conditions of the BSD License which accompanies this
  distribution. The full text of the license may be found at
  http://opensource.org/licenses/bsd-license.php

  THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS, WITHOUT
  WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.

**/

#include <IndustryStandard/Pci22.h>
#include <Library/BaseLib.h>
#include <Library/BaseMemoryLib.h>
#include <Library/DebugLib.h>
#include <Library/MemoryAllocationLib.h>
#include <Library/UefiBootServicesTableLib.h>

#include <Library/VirtioLib.h>

/**

  Write a word into Region 0 of the device specified by PciIo.

  Region 0 must be an iomem region. This is an internal function for the
  driver-specific VIRTIO_CFG_WRITE() macros.

  @param[in] PciIo        Target PCI device.

  @param[in] FieldOffset  Destination offset.

  @param[in] FieldSize    Destination field size, must be in { 1, 2, 4, 8 }.

  @param[in] Value        Little endian value to write, converted to UINT64.
                          The least significant FieldSize bytes will be used.


  @return  Status code returned by PciIo->Io.Write().

**/
EFI_STATUS
EFIAPI
VirtioWrite (
  IN EFI_PCI_IO_PROTOCOL *PciIo,
  IN UINTN               FieldOffset,
  IN UINTN               FieldSize,
  IN UINT64              Value
  )
{
  UINTN                     Count;
  EFI_PCI_IO_PROTOCOL_WIDTH Width;

  Count = 1;
  switch (FieldSize) {
  case 1:
    Width = EfiPciIoWidthUint8;
    break;

  case 2:
    Width = EfiPciIoWidthUint16;
    break;

  case 8:
    Count = 2;
    // fall through

  case 4:
    Width = EfiPciIoWidthUint32;
    break;

  default:
    ASSERT (FALSE);
    return EFI_INVALID_PARAMETER;
  }

  return PciIo->Io.Write (
                     PciIo,
                     Width,
                     PCI_BAR_IDX0,
                     FieldOffset,
                     Count,
                     &Value
                     );
}
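
//
// Illustrative sketch, not compiled (guarded by #if 0): a driver-specific
// VIRTIO_CFG_WRITE() macro of the kind mentioned above might be layered on
// VirtioWrite() roughly as follows. The VIRTIO_BLK_CONFIG layout and the
// device context's PciIo member are hypothetical examples of what a calling
// driver could define; they are not provided by this library.
//
#if 0
#define OFFSET_OF_VBLK(Field)  OFFSET_OF (VIRTIO_BLK_CONFIG, Field)
#define SIZE_OF_VBLK(Field)    (sizeof ((VIRTIO_BLK_CONFIG *) 0)->Field)

#define VIRTIO_CFG_WRITE(Dev, Field, Value)  (VirtioWrite (             \
                                                (Dev)->PciIo,           \
                                                OFFSET_OF_VBLK (Field), \
                                                SIZE_OF_VBLK (Field),   \
                                                (Value)                 \
                                                ))
#endif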


/**

  Read a word from Region 0 of the device specified by PciIo.

  Region 0 must be an iomem region. This is an internal function for the
  driver-specific VIRTIO_CFG_READ() macros.

  @param[in]  PciIo        Source PCI device.

  @param[in]  FieldOffset  Source offset.

  @param[in]  FieldSize    Source field size, must be in { 1, 2, 4, 8 }.

  @param[in]  BufferSize   Number of bytes available in the target buffer. Must
                           equal FieldSize.

  @param[out] Buffer       Target buffer.


  @return  Status code returned by PciIo->Io.Read().

**/
EFI_STATUS
EFIAPI
VirtioRead (
  IN  EFI_PCI_IO_PROTOCOL *PciIo,
  IN  UINTN               FieldOffset,
  IN  UINTN               FieldSize,
  IN  UINTN               BufferSize,
  OUT VOID                *Buffer
  )
{
  UINTN                     Count;
  EFI_PCI_IO_PROTOCOL_WIDTH Width;

  ASSERT (FieldSize == BufferSize);

  Count = 1;
  switch (FieldSize) {
  case 1:
    Width = EfiPciIoWidthUint8;
    break;

  case 2:
    Width = EfiPciIoWidthUint16;
    break;

  case 8:
    Count = 2;
    // fall through

  case 4:
    Width = EfiPciIoWidthUint32;
    break;

  default:
    ASSERT (FALSE);
    return EFI_INVALID_PARAMETER;
  }

  return PciIo->Io.Read (
                     PciIo,
                     Width,
                     PCI_BAR_IDX0,
                     FieldOffset,
                     Count,
                     Buffer
                     );
}
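
//
// Illustrative sketch, not compiled (guarded by #if 0): reading the one-byte
// device status register through VirtioRead(), which is the call shape a
// driver-specific VIRTIO_CFG_READ() macro would ultimately expand to. "PciIo"
// stands for the caller's EFI_PCI_IO_PROTOCOL instance; VhdrDeviceStatus is
// the header field already referenced elsewhere in this file.
//
#if 0
  UINT8      DeviceStatus;
  EFI_STATUS Status;

  Status = VirtioRead (
             PciIo,
             OFFSET_OF (VIRTIO_HDR, VhdrDeviceStatus),
             sizeof DeviceStatus,      // FieldSize
             sizeof DeviceStatus,      // BufferSize, must equal FieldSize
             &DeviceStatus
             );
#endif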


/**

  Configure a virtio ring.

  This function sets up internal storage (the guest-host communication area)
  and lays out several "navigation" (i.e. no-ownership) pointers to parts of
  that storage.

  Relevant sections from the virtio-0.9.5 spec:
  - 1.1 Virtqueues,
  - 2.3 Virtqueue Configuration.

  @param[in]  QueueSize  The number of descriptors to allocate for the
                         virtio ring, as requested by the host.

  @param[out] Ring       The virtio ring to set up.

  @retval EFI_OUT_OF_RESOURCES  AllocatePages() failed to allocate contiguous
                                pages for the requested QueueSize. Fields of
                                Ring have indeterminate value.

  @retval EFI_SUCCESS           Allocation and setup successful. Ring->Base
                                (and nothing else) is responsible for
                                deallocation.

**/
EFI_STATUS
EFIAPI
VirtioRingInit (
  IN  UINT16 QueueSize,
  OUT VRING  *Ring
  )
{
  UINTN          RingSize;
  volatile UINT8 *RingPagesPtr;

  RingSize = ALIGN_VALUE (
               sizeof *Ring->Desc * QueueSize +
               sizeof *Ring->Avail.Flags +
               sizeof *Ring->Avail.Idx +
               sizeof *Ring->Avail.Ring * QueueSize +
               sizeof *Ring->Avail.UsedEvent,
               EFI_PAGE_SIZE);

  RingSize += ALIGN_VALUE (
                sizeof *Ring->Used.Flags +
                sizeof *Ring->Used.Idx +
                sizeof *Ring->Used.UsedElem * QueueSize +
                sizeof *Ring->Used.AvailEvent,
                EFI_PAGE_SIZE);

  Ring->NumPages = EFI_SIZE_TO_PAGES (RingSize);
  Ring->Base = AllocatePages (Ring->NumPages);
  if (Ring->Base == NULL) {
    return EFI_OUT_OF_RESOURCES;
  }
  SetMem (Ring->Base, RingSize, 0x00);
  RingPagesPtr = Ring->Base;

  Ring->Desc = (volatile VOID *) RingPagesPtr;
  RingPagesPtr += sizeof *Ring->Desc * QueueSize;

  Ring->Avail.Flags = (volatile VOID *) RingPagesPtr;
  RingPagesPtr += sizeof *Ring->Avail.Flags;

  Ring->Avail.Idx = (volatile VOID *) RingPagesPtr;
  RingPagesPtr += sizeof *Ring->Avail.Idx;

  Ring->Avail.Ring = (volatile VOID *) RingPagesPtr;
  RingPagesPtr += sizeof *Ring->Avail.Ring * QueueSize;

  Ring->Avail.UsedEvent = (volatile VOID *) RingPagesPtr;
  RingPagesPtr += sizeof *Ring->Avail.UsedEvent;

  RingPagesPtr = (volatile UINT8 *) Ring->Base +
                 ALIGN_VALUE (RingPagesPtr - (volatile UINT8 *) Ring->Base,
                   EFI_PAGE_SIZE);

  Ring->Used.Flags = (volatile VOID *) RingPagesPtr;
  RingPagesPtr += sizeof *Ring->Used.Flags;

  Ring->Used.Idx = (volatile VOID *) RingPagesPtr;
  RingPagesPtr += sizeof *Ring->Used.Idx;

  Ring->Used.UsedElem = (volatile VOID *) RingPagesPtr;
  RingPagesPtr += sizeof *Ring->Used.UsedElem * QueueSize;

  Ring->Used.AvailEvent = (volatile VOID *) RingPagesPtr;
  RingPagesPtr += sizeof *Ring->Used.AvailEvent;

  Ring->QueueSize = QueueSize;
  return EFI_SUCCESS;
}
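
//
// Illustrative sketch, not compiled (guarded by #if 0): the typical driver
// flow around VirtioRingInit(). QueueSize is assumed to have been read from
// the device beforehand, and the VhdrQueueAddress register name is an
// assumption about the caller's virtio header layout; only VirtioRingInit()
// and VirtioRingUninit() are supplied by this library.
//
#if 0
  VRING      Ring;
  EFI_STATUS Status;

  Status = VirtioRingInit (QueueSize, &Ring);
  if (EFI_ERROR (Status)) {
    return Status;
  }

  //
  // Report the ring's base to the host in page frame number units, as
  // required by virtio-0.9.5, 2.3 Virtqueue Configuration.
  //
  Status = VirtioWrite (
             PciIo,
             OFFSET_OF (VIRTIO_HDR, VhdrQueueAddress), // assumed field name
             sizeof (UINT32),
             (UINTN) Ring.Base >> EFI_PAGE_SHIFT
             );
  if (EFI_ERROR (Status)) {
    VirtioRingUninit (&Ring);
    return Status;
  }
#endif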


/**

  Tear down the internal resources of a configured virtio ring.

  The caller is responsible for stopping the host from using this ring before
  invoking this function: the VSTAT_DRIVER_OK bit must be clear in
  VhdrDeviceStatus.

  @param[in,out] Ring  The virtio ring to clean up.

**/
VOID
EFIAPI
VirtioRingUninit (
  IN OUT VRING *Ring
  )
{
  FreePages (Ring->Base, Ring->NumPages);
  SetMem (Ring, sizeof *Ring, 0x00);
}


/**

  Turn off interrupt notifications from the host, and prepare for appending
  multiple descriptors to the virtio ring.

  The calling driver must be in VSTAT_DRIVER_OK state.

  @param[in,out] Ring     The virtio ring we intend to append descriptors to.

  @param[out]    Indices  The DESC_INDICES structure to initialize.

**/
VOID
EFIAPI
VirtioPrepare (
  IN OUT VRING        *Ring,
  OUT    DESC_INDICES *Indices
  )
{
  //
  // Prepare for virtio-0.9.5, 2.4.2 Receiving Used Buffers From the Device.
  // We're going to poll the answer, the host should not send an interrupt.
  //
  *Ring->Avail.Flags = (UINT16) VRING_AVAIL_F_NO_INTERRUPT;

  //
  // Prepare for virtio-0.9.5, 2.4.1 Supplying Buffers to the Device.
  //
  // Since we support only one in-flight descriptor chain, we can always build
  // that chain starting at entry #0 of the descriptor table.
  //
  Indices->HeadDescIdx = 0;
  Indices->NextDescIdx = Indices->HeadDescIdx;
}


/**

  Append a contiguous buffer for transmission / reception via the virtio ring.

  This function implements the following section from virtio-0.9.5:
  - 2.4.1.1 Placing Buffers into the Descriptor Table

  Free space is taken for granted, since the individual drivers support only
  synchronous requests and host side status is processed in lock-step with
  request submission. It is the calling driver's responsibility to verify the
  ring size in advance.

  The caller is responsible for initializing *Indices with VirtioPrepare()
  first.

  @param[in,out] Ring            The virtio ring to append the buffer to, as a
                                 descriptor.

  @param[in]     BufferPhysAddr  (Guest pseudo-physical) start address of the
                                 transmit / receive buffer.

  @param[in]     BufferSize      Number of bytes to transmit or receive.

  @param[in]     Flags           A bitmask of VRING_DESC_F_* flags. The caller
                                 computes this mask dependent on further
                                 buffers to append and transfer direction.
                                 VRING_DESC_F_INDIRECT is unsupported. The
                                 VRING_DESC.Next field is always set, but the
                                 host only interprets it dependent on
                                 VRING_DESC_F_NEXT.

  @param[in,out] Indices         Indices->HeadDescIdx is not accessed.
                                 On input, Indices->NextDescIdx identifies the
                                 next descriptor to carry the buffer. On
                                 output, Indices->NextDescIdx is incremented by
                                 one, modulo 2^16.

**/
VOID
EFIAPI
VirtioAppendDesc (
  IN OUT VRING        *Ring,
  IN     UINTN        BufferPhysAddr,
  IN     UINT32       BufferSize,
  IN     UINT16       Flags,
  IN OUT DESC_INDICES *Indices
  )
{
  volatile VRING_DESC *Desc;

  Desc        = &Ring->Desc[Indices->NextDescIdx++ % Ring->QueueSize];
  Desc->Addr  = BufferPhysAddr;
  Desc->Len   = BufferSize;
  Desc->Flags = Flags;
  Desc->Next  = Indices->NextDescIdx % Ring->QueueSize;
}
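
//
// Illustrative sketch, not compiled (guarded by #if 0): building a
// synchronous two-descriptor chain -- a device-readable request followed by a
// device-writable status byte -- with VirtioPrepare() and VirtioAppendDesc().
// The Request structure and StatusByte variable are hypothetical driver-side
// buffers; the flow itself matches the lock-step usage described above.
//
#if 0
  DESC_INDICES Indices;

  VirtioPrepare (&Ring, &Indices);

  //
  // First descriptor: the request, read by the host; the chain continues.
  //
  VirtioAppendDesc (
    &Ring,
    (UINTN) &Request,
    sizeof Request,
    VRING_DESC_F_NEXT,
    &Indices
    );

  //
  // Second and last descriptor: a single byte the host writes back.
  //
  VirtioAppendDesc (
    &Ring,
    (UINTN) &StatusByte,
    sizeof StatusByte,
    VRING_DESC_F_WRITE,
    &Indices
    );
#endif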


/**

  Notify the host about the descriptor chain just built, and wait until the
  host processes it.

  @param[in]     PciIo        The target virtio PCI device to notify.

  @param[in]     VirtQueueId  Identifies the queue for the target device.

  @param[in,out] Ring         The virtio ring with descriptors to submit.

  @param[in]     Indices      Indices->NextDescIdx is not accessed.
                              Indices->HeadDescIdx identifies the head
                              descriptor of the descriptor chain.


  @return  Error code from VirtioWrite() if it fails.

  @retval EFI_SUCCESS  Otherwise, the host processed all descriptors.

**/
EFI_STATUS
EFIAPI
VirtioFlush (
  IN     EFI_PCI_IO_PROTOCOL *PciIo,
  IN     UINT16              VirtQueueId,
  IN OUT VRING               *Ring,
  IN     DESC_INDICES        *Indices
  )
{
  UINT16     NextAvailIdx;
  EFI_STATUS Status;
  UINTN      PollPeriodUsecs;

  //
  // virtio-0.9.5, 2.4.1.2 Updating the Available Ring
  //
  // It is not exactly clear from the wording of the virtio-0.9.5
  // specification, but each entry in the Available Ring references only the
  // head descriptor of any given descriptor chain.
  //
  NextAvailIdx = *Ring->Avail.Idx;
  Ring->Avail.Ring[NextAvailIdx++ % Ring->QueueSize] =
    Indices->HeadDescIdx % Ring->QueueSize;

  //
  // virtio-0.9.5, 2.4.1.3 Updating the Index Field
  //
  MemoryFence();
  *Ring->Avail.Idx = NextAvailIdx;

  //
  // virtio-0.9.5, 2.4.1.4 Notifying the Device -- gratuitous notifications are
  // OK.
  //
  MemoryFence();
  Status = VirtioWrite (
             PciIo,
             OFFSET_OF (VIRTIO_HDR, VhdrQueueNotify),
             sizeof (UINT16),
             VirtQueueId
             );
  if (EFI_ERROR (Status)) {
    return Status;
  }

  //
  // virtio-0.9.5, 2.4.2 Receiving Used Buffers From the Device
  // Wait until the host processes and acknowledges our descriptor chain. The
  // condition we use for polling is greatly simplified and relies on the
  // synchronous, lock-step progress.
  //
  // Keep slowing down until we reach a poll period of slightly above 1 ms.
  //
  PollPeriodUsecs = 1;
  MemoryFence();
  while (*Ring->Used.Idx != NextAvailIdx) {
    gBS->Stall (PollPeriodUsecs); // calls AcpiTimerLib::MicroSecondDelay

    if (PollPeriodUsecs < 1024) {
      PollPeriodUsecs *= 2;
    }
    MemoryFence();
  }

  MemoryFence();
  return EFI_SUCCESS;
}
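
//
// Illustrative sketch, not compiled (guarded by #if 0): submitting a chain
// such as the one built in the sketch after VirtioAppendDesc(), and polling
// for completion. Queue 0 is used here purely as an example VirtQueueId.
//
#if 0
  Status = VirtioFlush (PciIo, 0, &Ring, &Indices);
  if (EFI_ERROR (Status)) {
    return Status;
  }
  //
  // On success the host has consumed the whole chain; the device-writable
  // buffers appended earlier now hold its response.
  //
#endif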