/******************************************************************************
 * blkif.h
 *
 * Unified block-device I/O interface for Xen guest OSes.
 *
 * SPDX-License-Identifier: MIT
 *
 * Copyright (c) 2003-2004, Keir Fraser
 * Copyright (c) 2012, Spectra Logic Corporation
 */

#ifndef __XEN_PUBLIC_IO_BLKIF_H__
#define __XEN_PUBLIC_IO_BLKIF_H__

#include "ring.h"
#include "../grant_table.h"

/*
 * Front->back notifications: When enqueuing a new request, sending a
 * notification can be made conditional on req_event (i.e., the generic
 * hold-off mechanism provided by the ring macros).  Backends must set
 * req_event appropriately (e.g., using RING_FINAL_CHECK_FOR_REQUESTS()).
 *
 * Back->front notifications: When enqueuing a new response, sending a
 * notification can be made conditional on rsp_event (i.e., the generic
 * hold-off mechanism provided by the ring macros).  Frontends must set
 * rsp_event appropriately (e.g., using RING_FINAL_CHECK_FOR_RESPONSES()).
 */

#ifndef blkif_vdev_t
#define blkif_vdev_t  UINT16
#endif
#define blkif_sector_t  UINT64

/*
 * Feature and Parameter Negotiation
 * =================================
 * The two halves of a Xen block driver utilize nodes within the XenStore to
 * communicate capabilities and to negotiate operating parameters.  This
 * section enumerates these nodes, which reside in the respective front and
 * backend portions of the XenStore, following the XenBus convention.
 *
 * All data in the XenStore is stored as strings.  Nodes specifying numeric
 * values are encoded in decimal.  Integer value ranges listed below are
 * expressed as fixed-size integer types capable of storing the conversion
 * of a properly formatted node string, without loss of information.
 *
 * Any specified default value is in effect if the corresponding XenBus node
 * is not present in the XenStore.
 *
 * XenStore nodes in sections marked "PRIVATE" are solely for use by the
 * driver side whose XenBus tree contains them.
 *
 * XenStore nodes marked "DEPRECATED" in their notes section should only be
 * used to provide interoperability with legacy implementations.
 *
 * See the XenBus state transition diagram below for details on when XenBus
 * nodes must be published and when they can be queried.
 *
 *****************************************************************************
 *                            Backend XenBus Nodes
 *****************************************************************************
 *
 *------------------ Backend Device Identification (PRIVATE) ------------------
 *
 * mode
 *      Values:         "r" (read only), "w" (writable)
 *
 *      The read or write access permissions to the backing store to be
 *      granted to the frontend.
 *
 * params
 *      Values:         string
 *
 *      A free-form string providing sufficient information for the
 *      backend driver to open the backing device.  (e.g. the path to the
 *      file or block device representing the backing store.)
 *
 * type
 *      Values:         "file", "phy", "tap"
 *
 *      The type of the backing device/object.
 *
 *--------------------------------- Features ---------------------------------
 *
 * feature-barrier
 *      Values:         0/1 (boolean)
 *      Default Value:  0
 *
 *      A value of "1" indicates that the backend can process requests
 *      containing the BLKIF_OP_WRITE_BARRIER request opcode.  Requests
 *      of this type may still be returned at any time with the
 *      BLKIF_RSP_EOPNOTSUPP result code.
 *
 * feature-flush-cache
 *      Values:         0/1 (boolean)
 *      Default Value:  0
 *
 *      A value of "1" indicates that the backend can process requests
 *      containing the BLKIF_OP_FLUSH_DISKCACHE request opcode.  Requests
 *      of this type may still be returned at any time with the
 *      BLKIF_RSP_EOPNOTSUPP result code.
 *
 * feature-discard
 *      Values:         0/1 (boolean)
 *      Default Value:  0
 *
 *      A value of "1" indicates that the backend can process requests
 *      containing the BLKIF_OP_DISCARD request opcode.  Requests
 *      of this type may still be returned at any time with the
 *      BLKIF_RSP_EOPNOTSUPP result code.
 *
 * feature-persistent
 *      Values:         0/1 (boolean)
 *      Default Value:  0
 *      Notes:          7
 *
 *      A value of "1" indicates that the backend can keep the grants used
 *      by the frontend driver mapped, so the same set of grants should be
 *      used in all transactions.  The maximum number of grants the backend
 *      can map persistently depends on the implementation, but ideally it
 *      should be RING_SIZE * BLKIF_MAX_SEGMENTS_PER_REQUEST.  Using this
 *      feature the backend doesn't need to unmap each grant, preventing
 *      costly TLB flushes.  The backend driver should only map grants
 *      persistently if the frontend supports it.  If a backend driver chooses
 *      to use the persistent protocol when the frontend doesn't support it,
 *      it will probably hit the maximum number of persistently mapped grants
 *      (because the frontend won't be reusing the same grants), and
 *      fall back to non-persistent mode.  Backend implementations may
 *      shrink or expand the number of persistently mapped grants without
 *      notifying the frontend depending on memory constraints (this might
 *      cause a performance degradation).
 *
 *      If a backend driver wants to limit the maximum number of persistently
 *      mapped grants to a value less than RING_SIZE *
 *      BLKIF_MAX_SEGMENTS_PER_REQUEST, an LRU strategy should be used to
 *      discard the grants that are less commonly used.  Using an LRU in the
 *      backend driver paired with a LIFO queue in the frontend will
 *      allow us to have better performance in this scenario.
 *
 *----------------------- Request Transport Parameters ------------------------
 *
 * max-ring-page-order
 *      Values:         <UINT32>
 *      Default Value:  0
 *      Notes:          1, 3
 *
 *      The maximum supported size of the request ring buffer in units of
 *      lb(machine pages). (e.g. 0 == 1 page, 1 == 2 pages, 2 == 4 pages,
 *      etc.).
 *
 * max-ring-pages
 *      Values:         <UINT32>
 *      Default Value:  1
 *      Notes:          DEPRECATED, 2, 3
 *
 *      The maximum supported size of the request ring buffer in units of
 *      machine pages.  The value must be a power of 2.
 *
 *------------------------- Backend Device Properties -------------------------
 *
 * discard-alignment
 *      Values:         <UINT32>
 *      Default Value:  0
 *      Notes:          4, 5
 *
 *      The offset, in bytes from the beginning of the virtual block device,
 *      to the first, addressable, discard extent on the underlying device.
 *
 * discard-granularity
 *      Values:         <UINT32>
 *      Default Value:  <"sector-size">
 *      Notes:          4
 *
 *      The size, in bytes, of the individually addressable discard extents
 *      of the underlying device.
 *
 * discard-secure
 *      Values:         0/1 (boolean)
 *      Default Value:  0
 *      Notes:          10
 *
 *      A value of "1" indicates that the backend can process BLKIF_OP_DISCARD
 *      requests with the BLKIF_DISCARD_SECURE flag set.
 *
 * info
 *      Values:         <UINT32> (bitmap)
 *
 *      A collection of bit flags describing attributes of the backing
 *      device.  The VDISK_* macros define the meaning of each bit
 *      location.
 *
 * sector-size
 *      Values:         <UINT32>
 *
 *      The logical sector size, in bytes, of the backend device.
 *
 * physical-sector-size
 *      Values:         <UINT32>
 *
 *      The physical sector size, in bytes, of the backend device.
 *
 * sectors
 *      Values:         <UINT64>
 *
 *      The size of the backend device, expressed in units of its logical
 *      sector size ("sector-size").
 *
 *****************************************************************************
 *                            Frontend XenBus Nodes
 *****************************************************************************
 *
 *----------------------- Request Transport Parameters -----------------------
 *
 * event-channel
 *      Values:         <UINT32>
 *
 *      The identifier of the Xen event channel used to signal activity
 *      in the ring buffer.
 *
 * ring-ref
 *      Values:         <UINT32>
 *      Notes:          6
 *
 *      The Xen grant reference granting permission for the backend to map
 *      the sole page in a single page sized ring buffer.
 *
 * ring-ref%u
 *      Values:         <UINT32>
 *      Notes:          6
 *
 *      For a frontend providing a multi-page ring, a "number of ring pages"
 *      sized list of nodes, each containing a Xen grant reference granting
 *      permission for the backend to map the page of the ring located
 *      at page index "%u".  Page indexes are zero based.
 *
 * protocol
 *      Values:         string (XEN_IO_PROTO_ABI_*)
 *      Default Value:  XEN_IO_PROTO_ABI_NATIVE
 *
 *      The machine ABI rules governing the format of all ring request and
 *      response structures.
 *
 * ring-page-order
 *      Values:         <UINT32>
 *      Default Value:  0
 *      Maximum Value:  MAX(ffs(max-ring-pages) - 1, max-ring-page-order)
 *      Notes:          1, 3
 *
 *      The size of the frontend allocated request ring buffer in units
 *      of lb(machine pages). (e.g. 0 == 1 page, 1 == 2 pages, 2 == 4 pages,
 *      etc.).  (An illustrative order-to-page-count conversion sketch
 *      follows this comment block.)
 *
 * num-ring-pages
 *      Values:         <UINT32>
 *      Default Value:  1
 *      Maximum Value:  MAX(max-ring-pages,(0x1 << max-ring-page-order))
 *      Notes:          DEPRECATED, 2, 3
 *
 *      The size of the frontend allocated request ring buffer in units of
 *      machine pages.  The value must be a power of 2.
 *
 * feature-persistent
 *      Values:         0/1 (boolean)
 *      Default Value:  0
 *      Notes:          7, 8, 9
 *
 *      A value of "1" indicates that the frontend will reuse the same grants
 *      for all transactions, allowing the backend to map them with write
 *      access (even when it should be read-only).  If the frontend hits the
 *      maximum number of allowed persistently mapped grants, it can fall back
 *      to non-persistent mode.  This will cause a performance degradation,
 *      since the backend driver will still try to map those grants
 *      persistently.  Since the persistent grants protocol is compatible with
 *      the previous protocol, a frontend driver can choose to work in
 *      persistent mode even when the backend doesn't support it.
 *
 *      It is recommended that the frontend driver store the persistently
 *      mapped grants in a LIFO queue, so a subset of all persistently mapped
 *      grants gets used commonly.  This is done in case the backend driver
 *      decides to limit the maximum number of persistently mapped grants
 *      to a value less than RING_SIZE * BLKIF_MAX_SEGMENTS_PER_REQUEST.
 *
 *------------------------- Virtual Device Properties -------------------------
 *
 * device-type
 *      Values:         "disk", "cdrom", "floppy", etc.
 *
 * virtual-device
 *      Values:         <UINT32>
 *
 *      A value indicating the physical device to virtualize within the
 *      frontend's domain.  (e.g. "The first ATA disk", "The third SCSI
 *      disk", etc.)
 *
 *      See docs/misc/vbd-interface.txt for details on the format of this
 *      value.
 *
 * Notes
 * -----
 * (1) Multi-page ring buffer scheme first developed in the Citrix XenServer
 *     PV drivers.
 * (2) Multi-page ring buffer scheme first used in some RedHat distributions
 *     including a distribution deployed on certain nodes of the Amazon
 *     EC2 cluster.
 * (3) Support for multi-page ring buffers was implemented independently,
 *     in slightly different forms, by both Citrix and RedHat/Amazon.
 *     For full interoperability, block front and backends should publish
 *     identical ring parameters, adjusted for unit differences, to the
 *     XenStore nodes used in both schemes.
 * (4) Devices that support discard functionality may internally allocate space
 *     (discardable extents) in units that are larger than the exported logical
 *     block size.  If the backing device has such discardable extents, the
 *     backend should provide both discard-granularity and discard-alignment.
 *     Providing just one of the two may be considered an error by the frontend.
 *     Backends supporting discard should include discard-granularity and
 *     discard-alignment even if they support discarding individual sectors.
 *     Frontends should assume discard-alignment == 0 and discard-granularity
 *     == sector size if these keys are missing.
 * (5) The discard-alignment parameter allows a physical device to be
 *     partitioned into virtual devices that do not necessarily begin or
 *     end on a discardable extent boundary.
 * (6) When there is only a single page allocated to the request ring,
 *     'ring-ref' is used to communicate the grant reference for this
 *     page to the backend.  When using a multi-page ring, the 'ring-ref'
 *     node is not created.  Instead 'ring-ref0' - 'ring-refN' are used.
 * (7) When using persistent grants, data has to be copied from/to the page
 *     where the grant is currently mapped.  The overhead of doing this copy
 *     however doesn't negate the speed improvement of not having to unmap
 *     the grants.
 * (8) The frontend driver has to allow the backend driver to map all grants
 *     with write access, even when they should be mapped read-only, since
 *     further requests may reuse these grants and require write permissions.
 * (9) The Linux implementation doesn't have a limit on the maximum number of
 *     grants that can be persistently mapped in the frontend driver, but
 *     due to the frontend driver implementation it should never be bigger
 *     than RING_SIZE * BLKIF_MAX_SEGMENTS_PER_REQUEST.
 * (10) The discard-secure property may be present and will be set to 1 if the
 *      backing device supports secure discard.
 */
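
/*
 * Illustrative sketch (not part of the interface, kept under #if 0 so it has
 * no effect on consumers of this header): one way a frontend might convert
 * the negotiated "ring-page-order" into a page count while honouring the
 * backend limits advertised via "max-ring-page-order" and the deprecated
 * "max-ring-pages" node (assumed to be a power of 2, as required above).
 * The function and parameter names are hypothetical; reading and parsing the
 * XenStore nodes themselves is left out.
 */
#if 0
STATIC UINT32
BlkifExampleNegotiatedRingPages (
  IN UINT32  BackendMaxRingPageOrder,  /* "max-ring-page-order", default 0 */
  IN UINT32  BackendMaxRingPages,      /* "max-ring-pages", default 1 (DEPRECATED) */
  IN UINT32  FrontendRingPageOrder     /* order the frontend would like to use */
  )
{
  UINT32  MaxOrder;
  UINT32  Order;

  //
  // Combine the two advertised limits, mirroring the Maximum Value rule for
  // "ring-page-order": MAX(ffs(max-ring-pages) - 1, max-ring-page-order).
  //
  MaxOrder = BackendMaxRingPageOrder;
  while ((BackendMaxRingPages >> (MaxOrder + 1)) != 0) {
    MaxOrder++;
  }

  Order = FrontendRingPageOrder;
  if (Order > MaxOrder) {
    Order = MaxOrder;
  }

  //
  // An order of N means 2^N machine pages (0 == 1 page, 1 == 2 pages, ...).
  //
  return (UINT32)(1U << Order);
}
#endif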

/*
 * STATE DIAGRAMS
 *
 *****************************************************************************
 *                                   Startup                                 *
 *****************************************************************************
 *
 * Tool stack creates front and back nodes with state XenbusStateInitialising.
 *
 * Front                                Back
 * =================================    =====================================
 * XenbusStateInitialising              XenbusStateInitialising
 *  o Query virtual device              o Query backend device identification
 *    properties.                         data.
 *  o Setup OS device instance.         o Open and validate backend device.
 *                                      o Publish backend features and
 *                                        transport parameters.
 *                                                     |
 *                                                     |
 *                                                     V
 *                                      XenbusStateInitWait
 *
 * o Query backend features and
 *   transport parameters.
 * o Allocate and initialize the
 *   request ring.
 * o Publish transport parameters
 *   that will be in effect during
 *   this connection.
 *              |
 *              |
 *              V
 * XenbusStateInitialised
 *
 *                                      o Query frontend transport parameters.
 *                                      o Connect to the request ring and
 *                                        event channel.
 *                                      o Publish backend device properties.
 *                                                     |
 *                                                     |
 *                                                     V
 *                                      XenbusStateConnected
 *
 * o Query backend device properties.
 * o Finalize OS virtual device
 *   instance.
 *              |
 *              |
 *              V
 * XenbusStateConnected
 *
 * Note: Drivers that do not support any optional features, or the negotiation
 *       of transport parameters, can skip certain states in the state machine:
 *
 *       o A frontend may transition to XenbusStateInitialised without
 *         waiting for the backend to enter XenbusStateInitWait.  In this
 *         case, default transport parameters are in effect and any
 *         transport parameters published by the frontend must contain
 *         their default values.
 *
 *       o A backend may transition to XenbusStateInitialised, bypassing
 *         XenbusStateInitWait, without waiting for the frontend to first
 *         enter the XenbusStateInitialised state.  In this case, default
 *         transport parameters are in effect and any transport parameters
 *         published by the backend must contain their default values.
 *
 *       Drivers that support optional features and/or transport parameter
 *       negotiation must tolerate these additional state transition paths.
 *       In general this means performing the work of any skipped state
 *       transition, if it has not already been performed, in addition to the
 *       work associated with entry into the current state.
 */
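
/*
 * Illustrative sketch (not part of the interface, compiled out): the shape of
 * a frontend handler reacting to backend state changes per the startup
 * diagram above.  It assumes the XenbusState* constants and enum xenbus_state
 * from the neighbouring "xenbus.h" header; the BlkifExample* helpers are
 * hypothetical stand-ins for driver-specific work.
 */
#if 0
#include "xenbus.h"

VOID BlkifExampleSetupRingAndPublishParams (VOID);
VOID BlkifExampleFinalizeDevice (VOID);

STATIC VOID
BlkifExampleFrontendOnBackendStateChange (
  IN enum xenbus_state  BackendState
  )
{
  switch (BackendState) {
    case XenbusStateInitWait:
      //
      // Backend has published its features and transport parameters: query
      // them, allocate and grant the request ring, publish the frontend
      // transport parameters, then advance to XenbusStateInitialised.
      //
      BlkifExampleSetupRingAndPublishParams ();
      break;

    case XenbusStateConnected:
      //
      // Backend has connected to the ring and published its device
      // properties: query them, finalize the OS device instance, and
      // advance to XenbusStateConnected.
      //
      BlkifExampleFinalizeDevice ();
      break;

    default:
      //
      // Other states (Closing, Closed, ...) fall outside the startup
      // diagram and are handled elsewhere.
      //
      break;
  }
}
#endif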

/*
 * REQUEST CODES.
 */
#define BLKIF_OP_READ   0
#define BLKIF_OP_WRITE  1

/*
 * All writes issued prior to a request with the BLKIF_OP_WRITE_BARRIER
 * operation code ("barrier request") must be completed prior to the
 * execution of the barrier request.  All writes issued after the barrier
 * request must not execute until after the completion of the barrier request.
 *
 * Optional.  See "feature-barrier" XenBus node documentation above.
 */
#define BLKIF_OP_WRITE_BARRIER  2

/*
 * Commit any uncommitted contents of the backing device's volatile cache
 * to stable storage.
 *
 * Optional.  See "feature-flush-cache" XenBus node documentation above.
 */
#define BLKIF_OP_FLUSH_DISKCACHE  3

/*
 * Used in SLES sources for device specific command packet
 * contained within the request.  Reserved for that purpose.
 */
#define BLKIF_OP_RESERVED_1  4

/*
 * Indicate to the backend device that a region of storage is no longer in
 * use, and may be discarded at any time without impact to the client.  If
 * the BLKIF_DISCARD_SECURE flag is set on the request, all copies of the
 * discarded region on the device must be rendered unrecoverable before the
 * command returns.
 *
 * This operation is analogous to performing a trim (ATA) or unmap (SCSI)
 * command on a native device.
 *
 * More information about trim/unmap operations can be found at:
 * http://t13.org/Documents/UploadedDocuments/docs2008/
 *     e07154r6-Data_Set_Management_Proposal_for_ATA-ACS2.doc
 * http://www.seagate.com/staticfiles/support/disc/manuals/
 *     Interface%20manuals/100293068c.pdf
 *
 * Optional.  See "feature-discard", "discard-alignment",
 * "discard-granularity", and "discard-secure" in the XenBus node
 * documentation above.
 */
#define BLKIF_OP_DISCARD  5

/*
 * Recognized if "feature-max-indirect-segments" is present in the backend
 * xenbus info.  The "feature-max-indirect-segments" node contains the maximum
 * number of segments allowed by the backend per request.  If the node is
 * present, the frontend might use blkif_request_indirect structs in order to
 * issue requests with more than BLKIF_MAX_SEGMENTS_PER_REQUEST (11).  The
 * maximum number of indirect segments is fixed by the backend, but the
 * frontend can issue requests with any number of indirect segments as long as
 * it's less than the number provided by the backend.  The indirect_grefs field
 * in blkif_request_indirect should be filled by the frontend with the
 * grant references of the pages that are holding the indirect segments.
 * These pages are filled with an array of blkif_request_segment that hold the
 * information about the segments.  The number of indirect pages to use is
 * determined by the number of segments an indirect request contains.  Every
 * indirect page can contain a maximum of
 * (PAGE_SIZE / sizeof(struct blkif_request_segment)) segments, so to
 * calculate the number of indirect pages to use we have to do
 * ceil(indirect_segments / (PAGE_SIZE / sizeof(struct blkif_request_segment))).
 *
 * If a backend does not recognize BLKIF_OP_INDIRECT, it should *not*
 * create the "feature-max-indirect-segments" node!
 */
#define BLKIF_OP_INDIRECT  6

/*
 * Maximum scatter/gather segments per request.
 * This is carefully chosen so that sizeof(blkif_ring_t) <= PAGE_SIZE.
 * NB. This could be 12 if the ring indexes weren't stored in the same page.
 */
#define BLKIF_MAX_SEGMENTS_PER_REQUEST  11

/*
 * Maximum number of indirect pages to use per request.
 */
#define BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST  8

/*
 * NB. first_sect and last_sect in blkif_request_segment, as well as
 * sector_number in blkif_request, are always expressed in 512-byte units.
 * However they must be properly aligned to the real sector size of the
 * physical disk, which is reported in the "physical-sector-size" node in
 * the backend xenbus info.  Also the xenbus "sectors" node is expressed in
 * 512-byte units.
 */
struct blkif_request_segment {
  grant_ref_t    gref;           /* reference to I/O buffer frame        */
  /* @first_sect: first sector in frame to transfer (inclusive).   */
  /* @last_sect: last sector in frame to transfer (inclusive).     */
  UINT8          first_sect, last_sect;
};

/*
 * Starting ring element for any I/O request.
 */
#if defined (MDE_CPU_IA32)
//
// pack(4) is necessary when these structs are compiled for Ia32.
// Without it, the struct will have a different alignment than the one
// a backend expects for a 32-bit guest.
//
#pragma pack(4)
#endif
struct blkif_request {
  UINT8                           operation;      /* BLKIF_OP_???                         */
  UINT8                           nr_segments;    /* number of segments                   */
  blkif_vdev_t                    handle;         /* only for read/write requests         */
  UINT64                          id;             /* private guest value, echoed in resp  */
  blkif_sector_t                  sector_number;  /* start sector idx on disk (r/w only)  */
  struct blkif_request_segment    seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
};

typedef struct blkif_request blkif_request_t;
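
/*
 * Illustrative sketch (not part of the interface, compiled out): filling in a
 * single-segment read request.  Sector values are in 512-byte units as noted
 * above, so a whole 4 KiB granted frame spans sectors 0 through 7.  The
 * function name and its parameters are hypothetical.
 */
#if 0
STATIC VOID
BlkifExampleFillReadRequest (
  OUT blkif_request_t  *Request,
  IN  blkif_vdev_t     Handle,       /* virtual device, from "virtual-device" */
  IN  grant_ref_t      BufferGref,   /* grant for the destination page */
  IN  blkif_sector_t   StartSector,  /* on-disk start, 512-byte units */
  IN  UINT64           Id            /* echoed back in the response */
  )
{
  Request->operation         = BLKIF_OP_READ;
  Request->nr_segments       = 1;
  Request->handle            = Handle;
  Request->id                = Id;
  Request->sector_number     = StartSector;
  Request->seg[0].gref       = BufferGref;
  Request->seg[0].first_sect = 0;  /* transfer the full frame: sectors ...  */
  Request->seg[0].last_sect  = 7;  /* ... 0-7, i.e. 8 * 512 = 4096 bytes    */
}
#endif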

/*
 * Cast to this structure when blkif_request.operation == BLKIF_OP_DISCARD
 * sizeof(struct blkif_request_discard) <= sizeof(struct blkif_request)
 */
struct blkif_request_discard {
  UINT8             operation;      /* BLKIF_OP_DISCARD                       */
  UINT8             flag;           /* BLKIF_DISCARD_SECURE or zero           */
  #define BLKIF_DISCARD_SECURE  (1<<0)  /* ignored if discard-secure=0        */
  blkif_vdev_t      handle;         /* same as for read/write requests        */
  UINT64            id;             /* private guest value, echoed in resp    */
  blkif_sector_t    sector_number;  /* start sector idx on disk               */
  UINT64            nr_sectors;     /* number of contiguous sectors to discard*/
};

typedef struct blkif_request_discard blkif_request_discard_t;
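
/*
 * Illustrative sketch (not part of the interface, compiled out): filling in a
 * discard request.  BLKIF_DISCARD_SECURE may only be set when the backend has
 * published discard-secure = 1 (see the XenBus notes above).  The function
 * name and its parameters are hypothetical.
 */
#if 0
STATIC VOID
BlkifExampleFillDiscardRequest (
  OUT blkif_request_discard_t  *Request,
  IN  blkif_vdev_t             Handle,
  IN  blkif_sector_t           StartSector,    /* 512-byte units */
  IN  UINT64                   NumSectors,     /* 512-byte units */
  IN  BOOLEAN                  BackendSecure,  /* "discard-secure" node value */
  IN  UINT64                   Id
  )
{
  Request->operation     = BLKIF_OP_DISCARD;
  Request->flag          = BackendSecure ? BLKIF_DISCARD_SECURE : 0;
  Request->handle        = Handle;
  Request->id            = Id;
  Request->sector_number = StartSector;
  Request->nr_sectors    = NumSectors;
}
#endif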

struct blkif_request_indirect {
  UINT8             operation;      /* BLKIF_OP_INDIRECT                      */
  UINT8             indirect_op;    /* BLKIF_OP_{READ/WRITE}                  */
  UINT16            nr_segments;    /* number of segments                     */
  UINT64            id;             /* private guest value, echoed in resp    */
  blkif_sector_t    sector_number;  /* start sector idx on disk (r/w only)    */
  blkif_vdev_t      handle;         /* same as for read/write requests        */
  grant_ref_t       indirect_grefs[BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST];
#ifdef MDE_CPU_IA32
  UINT64            pad;            /* Make it 64 byte aligned on i386        */
#endif
};

typedef struct blkif_request_indirect blkif_request_indirect_t;
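
/*
 * Illustrative sketch (not part of the interface, compiled out): the ceil()
 * computation described for BLKIF_OP_INDIRECT above, i.e. how many indirect
 * pages a request with a given number of segments needs.  A 4 KiB page size
 * is assumed here; the macro and function names are hypothetical.
 */
#if 0
#define BLKIF_EXAMPLE_PAGE_SIZE  4096
#define BLKIF_EXAMPLE_SEGS_PER_INDIRECT_PAGE  \
  (BLKIF_EXAMPLE_PAGE_SIZE / sizeof (struct blkif_request_segment))

STATIC UINT32
BlkifExampleIndirectPagesNeeded (
  IN UINT32  IndirectSegments
  )
{
  //
  // ceil(indirect_segments / (PAGE_SIZE / sizeof(struct blkif_request_segment)))
  //
  return (UINT32)((IndirectSegments + BLKIF_EXAMPLE_SEGS_PER_INDIRECT_PAGE - 1) /
                  BLKIF_EXAMPLE_SEGS_PER_INDIRECT_PAGE);
}
#endif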

struct blkif_response {
  UINT64    id;         /* copied from request                  */
  UINT8     operation;  /* copied from request                  */
  INT16     status;     /* BLKIF_RSP_???                        */
};

typedef struct blkif_response blkif_response_t;
#if defined (MDE_CPU_IA32)
#pragma pack()
#endif

/*
 * STATUS RETURN CODES.
 */
/* Operation not supported (e.g. barrier, flush, or discard not implemented). */
#define BLKIF_RSP_EOPNOTSUPP  -2
/* Operation failed for some unspecified reason (-EIO). */
#define BLKIF_RSP_ERROR       -1
/* Operation completed successfully. */
#define BLKIF_RSP_OKAY        0

/*
 * Generate blkif ring structures and types.
 */
DEFINE_RING_TYPES (blkif, struct blkif_request, struct blkif_response);
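
/*
 * Illustrative sketch (not part of the interface, compiled out): how a
 * frontend typically drives the generated ring types with the helpers from
 * "ring.h" -- queue a request, apply the notification hold-off described at
 * the top of this header, then drain responses.  The ring is assumed to have
 * already been set up with SHARED_RING_INIT()/FRONT_RING_INIT(); the
 * BlkifExample* helpers standing in for event-channel signalling and request
 * completion are hypothetical.
 */
#if 0
VOID BlkifExampleNotifyBackendViaEventChannel (VOID);
VOID BlkifExampleCompleteRequest (UINT64 Id, INT16 Status);

STATIC VOID
BlkifExampleSubmitAndPoll (
  IN OUT blkif_front_ring_t  *Ring,      /* initialized front ring */
  IN     blkif_request_t     *Template   /* request to submit */
  )
{
  blkif_request_t   *Request;
  blkif_response_t  *Response;
  INT32             Notify;
  INT32             MoreWork;

  //
  // Queue the request at the private producer index and publish it.
  // RING_PUSH_REQUESTS_AND_CHECK_NOTIFY() honours req_event, so Notify is
  // only set when the backend actually asked to be woken up.
  //
  Request  = RING_GET_REQUEST (Ring, Ring->req_prod_pvt);
  *Request = *Template;
  Ring->req_prod_pvt++;

  RING_PUSH_REQUESTS_AND_CHECK_NOTIFY (Ring, Notify);
  if (Notify) {
    BlkifExampleNotifyBackendViaEventChannel ();
  }

  //
  // Consume responses; RING_FINAL_CHECK_FOR_RESPONSES() re-arms rsp_event so
  // the backend sends a further notification once new responses arrive.
  //
  do {
    while (RING_HAS_UNCONSUMED_RESPONSES (Ring)) {
      Response = RING_GET_RESPONSE (Ring, Ring->rsp_cons);
      Ring->rsp_cons++;
      BlkifExampleCompleteRequest (Response->id, Response->status);
    }

    RING_FINAL_CHECK_FOR_RESPONSES (Ring, MoreWork);
  } while (MoreWork);
}
#endif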

#define VDISK_CDROM      0x1
#define VDISK_REMOVABLE  0x2
#define VDISK_READONLY   0x4
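
/*
 * Illustrative sketch (not part of the interface, compiled out): interpreting
 * the backend "info" XenBus node with the VDISK_* bits above.  The function
 * name is hypothetical.
 */
#if 0
STATIC BOOLEAN
BlkifExampleDiskIsReadOnly (
  IN UINT32  Info    /* value of the backend "info" node */
  )
{
  return (BOOLEAN)((Info & VDISK_READONLY) != 0);
}
#endif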

#endif /* __XEN_PUBLIC_IO_BLKIF_H__ */

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */