From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Stefan Reiter <s.reiter@proxmox.com>
Date: Mon, 22 Jun 2020 14:54:02 +0200
Subject: [PATCH] Fix backup for not 64k-aligned storages

Zero out clusters after the end of the device; this makes restore handle
them correctly (even if it tries to write those zeros, it won't fail and
will simply ignore the out-of-bounds write to disk).

For disks that are not even 4k-aligned, there is a potential buffer overrun
in the memcpy (since a full 4k block is always copied), which leaks host
memory into VMA archives. Fix this by always zeroing the affected area in
the output buffer.

Signed-off-by: Stefan Reiter <s.reiter@proxmox.com>
Reported-by: Roland Kammerer <roland.kammerer@linbit.com>
Suggested-by: Lars Ellenberg <lars.ellenberg@linbit.com>
Tested-by: Roland Kammerer <roland.kammerer@linbit.com>
---
 vma-writer.c | 23 ++++++++++++++++++++---
 1 file changed, 20 insertions(+), 3 deletions(-)

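Note: the text from here to the "diff --git" line sits below the '---'
marker, a region git am conventionally ignores, so it is not part of the
applied change. It is a minimal, self-contained C sketch of the per-cluster
logic the commit message describes: blocks at or past the end of the device
are left out of the block mask, and the last, partially valid block has its
tail zeroed after the memcpy. BLOCK_SIZE, CLUSTER_SIZE, block_is_zero() and
write_cluster() are local stand-ins, not the real VMA_BLOCK_SIZE,
VMA_CLUSTER_SIZE, buffer_is_zero() or vmaw->outbuf from vma-writer.c.

/*
 * Minimal sketch, not part of the patch: mimics the per-cluster loop in
 * vma_writer_write() to show how blocks past the device end are handled.
 * All names and constants below are illustrative stand-ins.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BLOCK_SIZE   4096u                /* stand-in for VMA_BLOCK_SIZE */
#define CLUSTER_SIZE (16u * BLOCK_SIZE)   /* stand-in for VMA_CLUSTER_SIZE */

/* crude stand-in for qemu's buffer_is_zero() */
static bool block_is_zero(const uint8_t *p)
{
    for (size_t i = 0; i < BLOCK_SIZE; i++) {
        if (p[i]) {
            return false;
        }
    }
    return true;
}

/*
 * Copy one 64k cluster into outbuf and return the 16-bit block mask.
 * Blocks at or past dev_size are treated as zero blocks; a block that
 * straddles dev_size gets its tail zeroed right after the memcpy, so no
 * bytes beyond the device end (i.e. stray host memory) reach the output.
 */
static uint16_t write_cluster(uint8_t *outbuf, const uint8_t *buf,
                              uint64_t cluster_num, uint64_t dev_size)
{
    uint16_t mask = 0;
    uint64_t byte_offset = cluster_num * CLUSTER_SIZE;
    size_t outbuf_pos = 0;

    for (int i = 0; i < 16; i++) {
        const uint8_t *block = buf + i * BLOCK_SIZE;

        if (byte_offset < dev_size && !block_is_zero(block)) {
            mask |= 1u << i;
            memcpy(outbuf + outbuf_pos, block, BLOCK_SIZE);

            /* last, partially valid block: zero everything past dev_size */
            if (byte_offset + BLOCK_SIZE > dev_size) {
                uint64_t real_data_in_block = dev_size - byte_offset;
                memset(outbuf + outbuf_pos + real_data_in_block, 0,
                       BLOCK_SIZE - real_data_in_block);
            }
            outbuf_pos += BLOCK_SIZE;
        }
        byte_offset += BLOCK_SIZE;
    }
    return mask;
}

int main(void)
{
    static uint8_t buf[CLUSTER_SIZE], outbuf[CLUSTER_SIZE];
    memset(buf, 0xAA, sizeof(buf));              /* pretend leftover host memory */

    uint64_t dev_size = 2 * BLOCK_SIZE + 1808;   /* not even 4k-aligned */
    uint16_t mask = write_cluster(outbuf, buf, 0, dev_size);

    printf("mask = 0x%04x\n", (unsigned)mask);
    printf("last byte of block 2 in outbuf = 0x%02x\n",
           (unsigned)outbuf[3 * BLOCK_SIZE - 1]);
    return 0;
}

With dev_size = 10000 the sketch prints mask = 0x0007: blocks 0-2 carry
data, block 2 is zero-padded from byte 10000 onward, and blocks 3-15 are
recorded as zero blocks, which mirrors what the patched vma_writer_write()
does for a not 64k-aligned device.
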
diff --git a/vma-writer.c b/vma-writer.c
index 06cbc02b1e..f5d2c5d23c 100644
--- a/vma-writer.c
+++ b/vma-writer.c
@@ -633,17 +633,33 @@ vma_writer_write(VmaWriter *vmaw, uint8_t dev_id, int64_t cluster_num,
 
     DPRINTF("VMA WRITE %d %zd\n", dev_id, cluster_num);
 
+    uint64_t dev_size = vmaw->stream_info[dev_id].size;
     uint16_t mask = 0;
 
     if (buf) {
         int i;
         int bit = 1;
+        uint64_t byte_offset = cluster_num * VMA_CLUSTER_SIZE;
         for (i = 0; i < 16; i++) {
             const unsigned char *vmablock = buf + (i*VMA_BLOCK_SIZE);
-            if (!buffer_is_zero(vmablock, VMA_BLOCK_SIZE)) {
+
+            // Note: If the source is not 64k-aligned, we might reach 4k blocks
+            // after the end of the device. Always mark these as zero in the
+            // mask, so the restore handles them correctly.
+            if (byte_offset < dev_size &&
+                !buffer_is_zero(vmablock, VMA_BLOCK_SIZE))
+            {
                 mask |= bit;
                 memcpy(vmaw->outbuf + vmaw->outbuf_pos, vmablock,
                        VMA_BLOCK_SIZE);
+
+                // prevent memory leakage on unaligned last block
+                if (byte_offset + VMA_BLOCK_SIZE > dev_size) {
+                    uint64_t real_data_in_block = dev_size - byte_offset;
+                    memset(vmaw->outbuf + vmaw->outbuf_pos + real_data_in_block,
+                           0, VMA_BLOCK_SIZE - real_data_in_block);
+                }
+
                 vmaw->outbuf_pos += VMA_BLOCK_SIZE;
             } else {
                 DPRINTF("VMA WRITE %zd ZERO BLOCK %d\n", cluster_num, i);
@@ -651,6 +667,7 @@ vma_writer_write(VmaWriter *vmaw, uint8_t dev_id, int64_t cluster_num,
                 *zero_bytes += VMA_BLOCK_SIZE;
             }
 
+            byte_offset += VMA_BLOCK_SIZE;
             bit = bit << 1;
         }
     } else {
@@ -676,8 +693,8 @@ vma_writer_write(VmaWriter *vmaw, uint8_t dev_id, int64_t cluster_num,
 
     if (dev_id != vmaw->vmstate_stream) {
         uint64_t last = (cluster_num + 1) * VMA_CLUSTER_SIZE;
-        if (last > vmaw->stream_info[dev_id].size) {
-            uint64_t diff = last - vmaw->stream_info[dev_id].size;
+        if (last > dev_size) {
+            uint64_t diff = last - dev_size;
             if (diff >= VMA_CLUSTER_SIZE) {
                 vma_writer_set_error(vmaw, "vma_writer_write: "
                                      "read after last cluster");