/*
 * QEMU Enhanced Disk Format Table I/O
 *
 * Copyright IBM, Corp. 2010
 *
 * Authors:
 *  Stefan Hajnoczi   <stefanha@linux.vnet.ibm.com>
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */
#include "qemu/osdep.h"
#include "trace.h"
#include "qemu/sockets.h" /* for EINPROGRESS on Windows */
#include "qed.h"
#include "qemu/bswap.h"
21 static int qed_read_table(BDRVQEDState
*s
, uint64_t offset
, QEDTable
*table
)
28 .iov_base
= table
->offsets
,
29 .iov_len
= s
->header
.cluster_size
* s
->header
.table_size
,
31 qemu_iovec_init_external(&qiov
, &iov
, 1);
33 trace_qed_read_table(s
, offset
, table
);
35 ret
= bdrv_preadv(s
->bs
->file
, offset
, &qiov
);
40 /* Byteswap offsets */
42 noffsets
= qiov
.size
/ sizeof(uint64_t);
43 for (i
= 0; i
< noffsets
; i
++) {
44 table
->offsets
[i
] = le64_to_cpu(table
->offsets
[i
]);
51 trace_qed_read_table_cb(s
, table
, ret
);
60 bool flush
; /* flush after write? */
66 static void qed_write_table_cb(void *opaque
, int ret
)
68 QEDWriteTableCB
*write_table_cb
= opaque
;
69 BDRVQEDState
*s
= write_table_cb
->s
;
71 trace_qed_write_table_cb(s
,
72 write_table_cb
->orig_table
,
73 write_table_cb
->flush
,
80 if (write_table_cb
->flush
) {
81 /* We still need to flush first */
82 write_table_cb
->flush
= false;
84 bdrv_aio_flush(write_table_cb
->s
->bs
, qed_write_table_cb
,
91 qemu_vfree(write_table_cb
->table
);
92 gencb_complete(&write_table_cb
->gencb
, ret
);
96 * Write out an updated part or all of a table
99 * @offset: Offset of table in image file, in bytes
101 * @index: Index of first element
102 * @n: Number of elements
103 * @flush: Whether or not to sync to disk
104 * @cb: Completion function
105 * @opaque: Argument for completion function
107 static void qed_write_table(BDRVQEDState
*s
, uint64_t offset
, QEDTable
*table
,
108 unsigned int index
, unsigned int n
, bool flush
,
109 BlockCompletionFunc
*cb
, void *opaque
)
111 QEDWriteTableCB
*write_table_cb
;
112 unsigned int sector_mask
= BDRV_SECTOR_SIZE
/ sizeof(uint64_t) - 1;
113 unsigned int start
, end
, i
;
116 trace_qed_write_table(s
, offset
, table
, index
, n
);
118 /* Calculate indices of the first and one after last elements */
119 start
= index
& ~sector_mask
;
120 end
= (index
+ n
+ sector_mask
) & ~sector_mask
;
122 len_bytes
= (end
- start
) * sizeof(uint64_t);
124 write_table_cb
= gencb_alloc(sizeof(*write_table_cb
), cb
, opaque
);
125 write_table_cb
->s
= s
;
126 write_table_cb
->orig_table
= table
;
127 write_table_cb
->flush
= flush
;
128 write_table_cb
->table
= qemu_blockalign(s
->bs
, len_bytes
);
129 write_table_cb
->iov
.iov_base
= write_table_cb
->table
->offsets
;
130 write_table_cb
->iov
.iov_len
= len_bytes
;
131 qemu_iovec_init_external(&write_table_cb
->qiov
, &write_table_cb
->iov
, 1);
134 for (i
= start
; i
< end
; i
++) {
135 uint64_t le_offset
= cpu_to_le64(table
->offsets
[i
]);
136 write_table_cb
->table
->offsets
[i
- start
] = le_offset
;
139 /* Adjust for offset into table */
140 offset
+= start
* sizeof(uint64_t);
142 bdrv_aio_writev(s
->bs
->file
, offset
/ BDRV_SECTOR_SIZE
,
143 &write_table_cb
->qiov
,
144 write_table_cb
->qiov
.size
/ BDRV_SECTOR_SIZE
,
145 qed_write_table_cb
, write_table_cb
);
/**
 * Propagate return value from async callback
 *
 * Used by the *_sync wrappers: @opaque points at an int that the caller
 * polls until it changes away from -EINPROGRESS.
 */
static void qed_sync_cb(void *opaque, int ret)
{
    int *result = opaque;

    *result = ret;
}
156 int qed_read_l1_table_sync(BDRVQEDState
*s
)
158 return qed_read_table(s
, s
->header
.l1_table_offset
, s
->l1_table
);
161 void qed_write_l1_table(BDRVQEDState
*s
, unsigned int index
, unsigned int n
,
162 BlockCompletionFunc
*cb
, void *opaque
)
164 BLKDBG_EVENT(s
->bs
->file
, BLKDBG_L1_UPDATE
);
165 qed_write_table(s
, s
->header
.l1_table_offset
,
166 s
->l1_table
, index
, n
, false, cb
, opaque
);
169 int qed_write_l1_table_sync(BDRVQEDState
*s
, unsigned int index
,
172 int ret
= -EINPROGRESS
;
174 qed_write_l1_table(s
, index
, n
, qed_sync_cb
, &ret
);
175 BDRV_POLL_WHILE(s
->bs
, ret
== -EINPROGRESS
);
180 int qed_read_l2_table(BDRVQEDState
*s
, QEDRequest
*request
, uint64_t offset
)
184 qed_unref_l2_cache_entry(request
->l2_table
);
186 /* Check for cached L2 entry */
187 request
->l2_table
= qed_find_l2_cache_entry(&s
->l2_cache
, offset
);
188 if (request
->l2_table
) {
192 request
->l2_table
= qed_alloc_l2_cache_entry(&s
->l2_cache
);
193 request
->l2_table
->table
= qed_alloc_table(s
);
195 BLKDBG_EVENT(s
->bs
->file
, BLKDBG_L2_LOAD
);
196 ret
= qed_read_table(s
, offset
, request
->l2_table
->table
);
200 /* can't trust loaded L2 table anymore */
201 qed_unref_l2_cache_entry(request
->l2_table
);
202 request
->l2_table
= NULL
;
204 request
->l2_table
->offset
= offset
;
206 qed_commit_l2_cache_entry(&s
->l2_cache
, request
->l2_table
);
208 /* This is guaranteed to succeed because we just committed the entry
211 request
->l2_table
= qed_find_l2_cache_entry(&s
->l2_cache
, offset
);
212 assert(request
->l2_table
!= NULL
);
219 int qed_read_l2_table_sync(BDRVQEDState
*s
, QEDRequest
*request
, uint64_t offset
)
221 return qed_read_l2_table(s
, request
, offset
);
224 void qed_write_l2_table(BDRVQEDState
*s
, QEDRequest
*request
,
225 unsigned int index
, unsigned int n
, bool flush
,
226 BlockCompletionFunc
*cb
, void *opaque
)
228 BLKDBG_EVENT(s
->bs
->file
, BLKDBG_L2_UPDATE
);
229 qed_write_table(s
, request
->l2_table
->offset
,
230 request
->l2_table
->table
, index
, n
, flush
, cb
, opaque
);
233 int qed_write_l2_table_sync(BDRVQEDState
*s
, QEDRequest
*request
,
234 unsigned int index
, unsigned int n
, bool flush
)
236 int ret
= -EINPROGRESS
;
238 qed_write_l2_table(s
, request
, index
, n
, flush
, qed_sync_cb
, &ret
);
239 BDRV_POLL_WHILE(s
->bs
, ret
== -EINPROGRESS
);