/*
 * QEMU Enhanced Disk Format Table I/O
 *
 * Copyright IBM, Corp. 2010
 *
 * Authors:
 *  Stefan Hajnoczi   <stefanha@linux.vnet.ibm.com>
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "trace.h"
#include "qemu/sockets.h" /* for EINPROGRESS on Windows */
#include "qed.h"
#include "qemu/bswap.h"

/**
 * Read a whole table from the image file into memory
 *
 * @s:          QED state
 * @offset:     Offset of table in image file, in bytes
 * @table:      Table buffer to fill; entries are byteswapped from
 *              little-endian to CPU byte order after reading
 *
 * Called with table_lock held.
 */
static int qed_read_table(BDRVQEDState *s, uint64_t offset, QEDTable *table)
{
    QEMUIOVector qiov;
    int noffsets;
    int i, ret;

    struct iovec iov = {
        .iov_base = table->offsets,
        .iov_len = s->header.cluster_size * s->header.table_size,
    };
    qemu_iovec_init_external(&qiov, &iov, 1);

    trace_qed_read_table(s, offset, table);

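    /* Drop table_lock across the read; bdrv_preadv() may yield the
     * coroutine and other table users must be able to make progress.
     */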
    qemu_co_mutex_unlock(&s->table_lock);
    ret = bdrv_preadv(s->bs->file, offset, &qiov);
    qemu_co_mutex_lock(&s->table_lock);
    if (ret < 0) {
        goto out;
    }

    /* Byteswap offsets */
    noffsets = qiov.size / sizeof(uint64_t);
    for (i = 0; i < noffsets; i++) {
        table->offsets[i] = le64_to_cpu(table->offsets[i]);
    }

    ret = 0;
out:
    /* Completion */
    trace_qed_read_table_cb(s, table, ret);
    return ret;
}

/**
 * Write out an updated part or all of a table
 *
 * @s:          QED state
 * @offset:     Offset of table in image file, in bytes
 * @table:      Table
 * @index:      Index of first element
 * @n:          Number of elements
 * @flush:      Whether or not to sync to disk
 *
 * Called with table_lock held.
 */
static int qed_write_table(BDRVQEDState *s, uint64_t offset, QEDTable *table,
                           unsigned int index, unsigned int n, bool flush)
{
    unsigned int sector_mask = BDRV_SECTOR_SIZE / sizeof(uint64_t) - 1;
    unsigned int start, end, i;
    QEDTable *new_table;
    struct iovec iov;
    QEMUIOVector qiov;
    size_t len_bytes;
    int ret;

    trace_qed_write_table(s, offset, table, index, n);

    /* Calculate indices of the first and one after last elements, rounded
     * out so that only whole sectors are written */
    start = index & ~sector_mask;
    end = (index + n + sector_mask) & ~sector_mask;

    len_bytes = (end - start) * sizeof(uint64_t);

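    /* Write via an aligned bounce buffer so the in-memory table, which stays
     * in CPU byte order, is not modified while converting entries to
     * little-endian.
     */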
    new_table = qemu_blockalign(s->bs, len_bytes);
    iov = (struct iovec) {
        .iov_base = new_table->offsets,
        .iov_len = len_bytes,
    };
    qemu_iovec_init_external(&qiov, &iov, 1);

    /* Byteswap table */
    for (i = start; i < end; i++) {
        uint64_t le_offset = cpu_to_le64(table->offsets[i]);
        new_table->offsets[i - start] = le_offset;
    }

    /* Adjust for offset into table */
    offset += start * sizeof(uint64_t);

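    /* As in qed_read_table(), release table_lock across the blocking write */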
    qemu_co_mutex_unlock(&s->table_lock);
    ret = bdrv_pwritev(s->bs->file, offset, &qiov);
    qemu_co_mutex_lock(&s->table_lock);
    trace_qed_write_table_cb(s, table, flush, ret);
    if (ret < 0) {
        goto out;
    }

    if (flush) {
        ret = bdrv_flush(s->bs);
        if (ret < 0) {
            goto out;
        }
    }

    ret = 0;
out:
    qemu_vfree(new_table);
    return ret;
}

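/**
 * Read the image's L1 table into memory
 *
 * A convenience wrapper around qed_read_table() using the L1 table offset
 * and buffer from the QED state.
 *
 * Called with table_lock held.
 */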
int qed_read_l1_table_sync(BDRVQEDState *s)
{
    return qed_read_table(s, s->header.l1_table_offset, s->l1_table);
}

/* Called with table_lock held. */
int qed_write_l1_table(BDRVQEDState *s, unsigned int index, unsigned int n)
{
    BLKDBG_EVENT(s->bs->file, BLKDBG_L1_UPDATE);
    return qed_write_table(s, s->header.l1_table_offset,
                           s->l1_table, index, n, false);
}

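/* Same as qed_write_l1_table(); the two behave identically in coroutine
 * context. Called with table_lock held.
 */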
int qed_write_l1_table_sync(BDRVQEDState *s, unsigned int index,
                            unsigned int n)
{
    return qed_write_l1_table(s, index, n);
}

/* Called with table_lock held. */
int qed_read_l2_table(BDRVQEDState *s, QEDRequest *request, uint64_t offset)
{
    int ret;

    qed_unref_l2_cache_entry(request->l2_table);

    /* Check for cached L2 entry */
    request->l2_table = qed_find_l2_cache_entry(&s->l2_cache, offset);
    if (request->l2_table) {
        return 0;
    }

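    /* Cache miss: allocate a fresh cache entry and load the table from the
     * image file.
     */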
    request->l2_table = qed_alloc_l2_cache_entry(&s->l2_cache);
    request->l2_table->table = qed_alloc_table(s);

    BLKDBG_EVENT(s->bs->file, BLKDBG_L2_LOAD);
    ret = qed_read_table(s, offset, request->l2_table->table);

    if (ret) {
        /* can't trust loaded L2 table anymore */
        qed_unref_l2_cache_entry(request->l2_table);
        request->l2_table = NULL;
    } else {
        request->l2_table->offset = offset;

        qed_commit_l2_cache_entry(&s->l2_cache, request->l2_table);

        /* This is guaranteed to succeed because we just committed the entry
         * to the cache.
         */
        request->l2_table = qed_find_l2_cache_entry(&s->l2_cache, offset);
        assert(request->l2_table != NULL);
    }

    return ret;
}

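/* Same as qed_read_l2_table(); the two behave identically in coroutine
 * context. Called with table_lock held.
 */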
int qed_read_l2_table_sync(BDRVQEDState *s, QEDRequest *request,
                           uint64_t offset)
{
    return qed_read_l2_table(s, request, offset);
}

/* Called with table_lock held. */
int qed_write_l2_table(BDRVQEDState *s, QEDRequest *request,
                       unsigned int index, unsigned int n, bool flush)
{
    BLKDBG_EVENT(s->bs->file, BLKDBG_L2_UPDATE);
    return qed_write_table(s, request->l2_table->offset,
                           request->l2_table->table, index, n, flush);
}

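/* Same as qed_write_l2_table(); the two behave identically in coroutine
 * context. Called with table_lock held.
 */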
int qed_write_l2_table_sync(BDRVQEDState *s, QEDRequest *request,
                            unsigned int index, unsigned int n, bool flush)
{
    return qed_write_l2_table(s, request, index, n, flush);
}