/*
 * QEMU Enhanced Disk Format Table I/O
 *
 * Copyright IBM, Corp. 2010
 *
 * Authors:
 *  Stefan Hajnoczi   <stefanha@linux.vnet.ibm.com>
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "trace.h"
#include "qemu/sockets.h" /* for EINPROGRESS on Windows */
#include "qed.h"
#include "qemu/bswap.h"

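/*
 * QED stores the L1 table and each L2 table on disk as a contiguous array
 * of little-endian 64-bit offsets, cluster_size * table_size bytes long.
 * The helpers below read and write those arrays, byteswapping between the
 * on-disk format and the native in-memory representation.
 */
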
/* Called with table_lock held. */
static int qed_read_table(BDRVQEDState *s, uint64_t offset, QEDTable *table)
{
    QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(
        qiov, table->offsets, s->header.cluster_size * s->header.table_size);
    int noffsets;
    int i, ret;

    trace_qed_read_table(s, offset, table);

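    /*
     * Drop table_lock across the blocking read so other coroutines can
     * make progress; reacquire it before touching shared state again.
     */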
    qemu_co_mutex_unlock(&s->table_lock);
    ret = bdrv_preadv(s->bs->file, offset, &qiov);
    qemu_co_mutex_lock(&s->table_lock);
    if (ret < 0) {
        goto out;
    }

    /* Byteswap offsets */
    noffsets = qiov.size / sizeof(uint64_t);
    for (i = 0; i < noffsets; i++) {
        table->offsets[i] = le64_to_cpu(table->offsets[i]);
    }

    ret = 0;
out:
    /* Completion */
    trace_qed_read_table_cb(s, table, ret);
    return ret;
}

/**
 * Write out an updated part or all of a table
 *
 * @s:          QED state
 * @offset:     Offset of table in image file, in bytes
 * @table:      Table
 * @index:      Index of first element
 * @n:          Number of elements
 * @flush:      Whether or not to sync to disk
 *
 * Called with table_lock held.
 */
static int qed_write_table(BDRVQEDState *s, uint64_t offset, QEDTable *table,
                           unsigned int index, unsigned int n, bool flush)
{
    unsigned int sector_mask = BDRV_SECTOR_SIZE / sizeof(uint64_t) - 1;
    unsigned int start, end, i;
    QEDTable *new_table;
    QEMUIOVector qiov;
    size_t len_bytes;
    int ret;

    trace_qed_write_table(s, offset, table, index, n);

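    /*
     * Each on-disk sector holds BDRV_SECTOR_SIZE / sizeof(uint64_t) table
     * entries, so the dirty range is rounded out to whole sectors to keep
     * the write sector-aligned.
     */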
    /* Calculate indices of the first and one after last elements */
    start = index & ~sector_mask;
    end = (index + n + sector_mask) & ~sector_mask;

    len_bytes = (end - start) * sizeof(uint64_t);

    new_table = qemu_blockalign(s->bs, len_bytes);
    qemu_iovec_init_buf(&qiov, new_table->offsets, len_bytes);

    /* Byteswap table */
    for (i = start; i < end; i++) {
        uint64_t le_offset = cpu_to_le64(table->offsets[i]);
        new_table->offsets[i - start] = le_offset;
    }

    /* Adjust for offset into table */
    offset += start * sizeof(uint64_t);

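    /* As in qed_read_table(), drop table_lock across the blocking write */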
    qemu_co_mutex_unlock(&s->table_lock);
    ret = bdrv_pwritev(s->bs->file, offset, &qiov);
    qemu_co_mutex_lock(&s->table_lock);
    trace_qed_write_table_cb(s, table, flush, ret);
    if (ret < 0) {
        goto out;
    }

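    /*
     * Flush so the updated table entries reach stable storage before
     * anything that depends on them is written.
     */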
    if (flush) {
        ret = bdrv_flush(s->bs);
        if (ret < 0) {
            goto out;
        }
    }

    ret = 0;
out:
    qemu_vfree(new_table);
    return ret;
}

int qed_read_l1_table_sync(BDRVQEDState *s)
{
    return qed_read_table(s, s->header.l1_table_offset, s->l1_table);
}

/* Called with table_lock held. */
int qed_write_l1_table(BDRVQEDState *s, unsigned int index, unsigned int n)
{
    BLKDBG_EVENT(s->bs->file, BLKDBG_L1_UPDATE);
    return qed_write_table(s, s->header.l1_table_offset,
                           s->l1_table, index, n, false);
}

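/*
 * The _sync variants in this file are plain wrappers today: these paths
 * always run in coroutine context with table_lock held, so no extra
 * synchronization is needed.  The naming is presumably retained from the
 * older callback-based implementation.
 */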
int qed_write_l1_table_sync(BDRVQEDState *s, unsigned int index,
                            unsigned int n)
{
    return qed_write_l1_table(s, index, n);
}

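/*
 * Load the L2 table at @offset into @request, preferring the L2 cache.
 * The table previously attached to @request (if any) is released; on
 * success the request holds its own reference to a cached entry.
 */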
/* Called with table_lock held. */
int qed_read_l2_table(BDRVQEDState *s, QEDRequest *request, uint64_t offset)
{
    int ret;

    qed_unref_l2_cache_entry(request->l2_table);

    /* Check for cached L2 entry */
    request->l2_table = qed_find_l2_cache_entry(&s->l2_cache, offset);
    if (request->l2_table) {
        return 0;
    }

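    /* Not cached: allocate a fresh entry and read the table from disk */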
    request->l2_table = qed_alloc_l2_cache_entry(&s->l2_cache);
    request->l2_table->table = qed_alloc_table(s);

    BLKDBG_EVENT(s->bs->file, BLKDBG_L2_LOAD);
    ret = qed_read_table(s, offset, request->l2_table->table);

    if (ret) {
        /* can't trust loaded L2 table anymore */
        qed_unref_l2_cache_entry(request->l2_table);
        request->l2_table = NULL;
    } else {
        request->l2_table->offset = offset;

        qed_commit_l2_cache_entry(&s->l2_cache, request->l2_table);

        /* This is guaranteed to succeed because we just committed the entry
         * to the cache.
         */
        request->l2_table = qed_find_l2_cache_entry(&s->l2_cache, offset);
        assert(request->l2_table != NULL);
    }

    return ret;
}

int qed_read_l2_table_sync(BDRVQEDState *s, QEDRequest *request, uint64_t offset)
{
    return qed_read_l2_table(s, request, offset);
}

/* Called with table_lock held. */
int qed_write_l2_table(BDRVQEDState *s, QEDRequest *request,
                       unsigned int index, unsigned int n, bool flush)
{
    BLKDBG_EVENT(s->bs->file, BLKDBG_L2_UPDATE);
    return qed_write_table(s, request->l2_table->offset,
                           request->l2_table->table, index, n, flush);
}

int qed_write_l2_table_sync(BDRVQEDState *s, QEDRequest *request,
                            unsigned int index, unsigned int n, bool flush)
{
    return qed_write_l2_table(s, request, index, n, flush);
}