]> git.proxmox.com Git - mirror_qemu.git/blame - block/qed-table.c
iotests: Check that images are in read-only mode after block-commit
[mirror_qemu.git] / block / qed-table.c
CommitLineData
298800ca
SH
1/*
2 * QEMU Enhanced Disk Format Table I/O
3 *
4 * Copyright IBM, Corp. 2010
5 *
6 * Authors:
7 * Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
8 * Anthony Liguori <aliguori@us.ibm.com>
9 *
10 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
11 * See the COPYING.LIB file in the top-level directory.
12 *
13 */
14
80c71a24 15#include "qemu/osdep.h"
298800ca 16#include "trace.h"
1de7afc9 17#include "qemu/sockets.h" /* for EINPROGRESS on Windows */
298800ca 18#include "qed.h"
58369e22 19#include "qemu/bswap.h"
298800ca 20
/*
 * Read a table (L1 or L2) from the image file into @table->offsets and
 * convert each entry from little-endian to host byte order in place.
 *
 * The read covers cluster_size * table_size bytes, i.e. the full on-disk
 * size of a QED table.
 *
 * Returns 0 on success or a negative errno from bdrv_co_pread().
 *
 * Called with table_lock held.
 */
static int qed_read_table(BDRVQEDState *s, uint64_t offset, QEDTable *table)
{
    unsigned int bytes = s->header.cluster_size * s->header.table_size;
    int noffsets;
    int i, ret;

    trace_qed_read_table(s, offset, table);

    /* Drop the lock across the blocking read so other coroutines can run.
     * NOTE(review): assumes nothing else touches @table while the lock is
     * dropped — confirm against callers. */
    qemu_co_mutex_unlock(&s->table_lock);
    ret = bdrv_co_pread(s->bs->file, offset, bytes, table->offsets, 0);
    qemu_co_mutex_lock(&s->table_lock);
    if (ret < 0) {
        goto out;
    }

    /* Byteswap offsets */
    noffsets = bytes / sizeof(uint64_t);
    for (i = 0; i < noffsets; i++) {
        table->offsets[i] = le64_to_cpu(table->offsets[i]);
    }

    ret = 0;
out:
    /* Completion */
    trace_qed_read_table_cb(s, table, ret);
    return ret;
}
50
298800ca
SH
/**
 * Write out an updated part or all of a table
 *
 * @s:      QED state
 * @offset: Offset of table in image file, in bytes
 * @table:  Table
 * @index:  Index of first element
 * @n:      Number of elements
 * @flush:  Whether or not to sync to disk
 *
 * Returns 0 on success or a negative errno from the write/flush.
 *
 * Called with table_lock held.
 */
static int qed_write_table(BDRVQEDState *s, uint64_t offset, QEDTable *table,
                           unsigned int index, unsigned int n, bool flush)
{
    /* Number of 64-bit entries per 512-byte sector, minus one, used to
     * round the update range out to whole sectors. */
    unsigned int sector_mask = BDRV_SECTOR_SIZE / sizeof(uint64_t) - 1;
    unsigned int start, end, i;
    QEDTable *new_table;    /* bounce buffer holding little-endian entries */
    size_t len_bytes;
    int ret;

    trace_qed_write_table(s, offset, table, index, n);

    /* Calculate indices of the first and one after last elements,
     * rounded down/up so the write covers whole sectors. */
    start = index & ~sector_mask;
    end = (index + n + sector_mask) & ~sector_mask;

    len_bytes = (end - start) * sizeof(uint64_t);

    new_table = qemu_blockalign(s->bs, len_bytes);

    /* Byteswap table: copy the affected entries into the bounce buffer in
     * little-endian order, leaving @table itself in host byte order. */
    for (i = start; i < end; i++) {
        uint64_t le_offset = cpu_to_le64(table->offsets[i]);
        new_table->offsets[i - start] = le_offset;
    }

    /* Adjust for offset into table */
    offset += start * sizeof(uint64_t);

    /* Drop the lock across the blocking write so other coroutines can run */
    qemu_co_mutex_unlock(&s->table_lock);
    ret = bdrv_co_pwrite(s->bs->file, offset, len_bytes, new_table->offsets, 0);
    qemu_co_mutex_lock(&s->table_lock);
    trace_qed_write_table_cb(s, table, flush, ret);
    if (ret < 0) {
        goto out;
    }

    if (flush) {
        ret = bdrv_flush(s->bs);
        if (ret < 0) {
            goto out;
        }
    }

    ret = 0;
out:
    qemu_vfree(new_table);
    return ret;
}
111
112int qed_read_l1_table_sync(BDRVQEDState *s)
113{
f6513529 114 return qed_read_table(s, s->header.l1_table_offset, s->l1_table);
298800ca
SH
115}
116
2fd61638 117/* Called with table_lock held. */
453e53e2 118int qed_write_l1_table(BDRVQEDState *s, unsigned int index, unsigned int n)
298800ca
SH
119{
120 BLKDBG_EVENT(s->bs->file, BLKDBG_L1_UPDATE);
453e53e2
KW
121 return qed_write_table(s, s->header.l1_table_offset,
122 s->l1_table, index, n, false);
298800ca
SH
123}
124
125int qed_write_l1_table_sync(BDRVQEDState *s, unsigned int index,
126 unsigned int n)
127{
453e53e2 128 return qed_write_l1_table(s, index, n);
298800ca
SH
129}
130
/*
 * Attach the L2 table at image-file @offset to @request, using the L2
 * cache when possible and reading it from disk otherwise.
 *
 * On success request->l2_table points at a referenced cache entry; on
 * failure request->l2_table is NULL.
 *
 * Returns 0 on success or a negative errno from qed_read_table().
 *
 * Called with table_lock held.
 */
int qed_read_l2_table(BDRVQEDState *s, QEDRequest *request, uint64_t offset)
{
    int ret;

    /* Release any L2 table the request currently holds */
    qed_unref_l2_cache_entry(request->l2_table);

    /* Check for cached L2 entry */
    request->l2_table = qed_find_l2_cache_entry(&s->l2_cache, offset);
    if (request->l2_table) {
        return 0;
    }

    /* Cache miss: allocate a fresh entry and read the table from disk */
    request->l2_table = qed_alloc_l2_cache_entry(&s->l2_cache);
    request->l2_table->table = qed_alloc_table(s);

    BLKDBG_EVENT(s->bs->file, BLKDBG_L2_LOAD);
    ret = qed_read_table(s, offset, request->l2_table->table);

    if (ret) {
        /* can't trust loaded L2 table anymore */
        qed_unref_l2_cache_entry(request->l2_table);
        request->l2_table = NULL;
    } else {
        request->l2_table->offset = offset;

        /* Publish the entry to the cache, then re-look it up so the
         * request holds the canonical cached entry */
        qed_commit_l2_cache_entry(&s->l2_cache, request->l2_table);

        /* This is guaranteed to succeed because we just committed the entry
         * to the cache.
         */
        request->l2_table = qed_find_l2_cache_entry(&s->l2_cache, offset);
        assert(request->l2_table != NULL);
    }

    return ret;
}
168
169int qed_read_l2_table_sync(BDRVQEDState *s, QEDRequest *request, uint64_t offset)
170{
a8165d2d 171 return qed_read_l2_table(s, request, offset);
298800ca
SH
172}
173
2fd61638 174/* Called with table_lock held. */
453e53e2
KW
175int qed_write_l2_table(BDRVQEDState *s, QEDRequest *request,
176 unsigned int index, unsigned int n, bool flush)
298800ca
SH
177{
178 BLKDBG_EVENT(s->bs->file, BLKDBG_L2_UPDATE);
453e53e2
KW
179 return qed_write_table(s, request->l2_table->offset,
180 request->l2_table->table, index, n, flush);
298800ca
SH
181}
182
183int qed_write_l2_table_sync(BDRVQEDState *s, QEDRequest *request,
184 unsigned int index, unsigned int n, bool flush)
185{
453e53e2 186 return qed_write_l2_table(s, request, index, n, flush);
298800ca 187}