size: usize,
chunk_size: u32,
chunk_count: u64,
+ small_chunk_count: usize, // allow 0..1 small chunks (last chunk may be smaller)
upload_stat: UploadStatistic,
}
None => bail!("fixed writer '{}' not registered", wid),
};
- if size != data.chunk_size {
- bail!("fixed writer '{}' - got unexpected chunk size ({} != {}", data.name, size, data.chunk_size);
+ if size > data.chunk_size {
+ bail!("fixed writer '{}' - got large chunk ({} > {})", data.name, size, data.chunk_size);
+ } else if size < data.chunk_size {
+ data.small_chunk_count += 1;
+ if data.small_chunk_count > 1 {
+ bail!("fixed writer '{}' - detected multiple end chunks (chunk size too small)", data.name);
+ }
}
// record statistics
let uid = state.next_uid();
state.fixed_writers.insert(uid, FixedWriterState {
- index, name, chunk_count: 0, size, chunk_size, upload_stat: UploadStatistic::new(),
+ index, name, chunk_count: 0, size, chunk_size, small_chunk_count: 0, upload_stat: UploadStatistic::new(),
});
Ok(uid)
None => bail!("fixed writer '{}' not registered", wid),
};
- data.chunk_count += 1;
+ let end = (offset as usize) + (size as usize);
+ let idx = data.index.check_chunk_alignment(end, size as usize)?;
- if size != data.chunk_size {
- bail!("fixed writer '{}' - got unexpected chunk size ({} != {}", data.name, size, data.chunk_size);
- }
+ data.chunk_count += 1;
- let pos = (offset as usize)/(data.chunk_size as usize);
- data.index.add_digest(pos, digest)?;
+ data.index.add_digest(idx, digest)?;
Ok(())
}
Ok(index_csum)
}
- // Note: We want to add data out of order, so do not assume any order here.
- pub fn add_chunk(&mut self, chunk_info: &ChunkInfo, stat: &mut ChunkStat) -> Result<(), Error> {
-
- let chunk_len = chunk_info.chunk_len as usize;
- let end = chunk_info.offset as usize;
+ pub fn check_chunk_alignment(&self, offset: usize, chunk_len: usize) -> Result<usize, Error> {
- if end < chunk_len {
- bail!("got chunk with small offset ({} < {}", end, chunk_len);
+ if offset < chunk_len {
+ bail!("got chunk with small offset ({} < {})", offset, chunk_len);
}
- let pos = end - chunk_len;
+ let pos = offset - chunk_len;
- if end > self.size {
- bail!("write chunk data exceeds size ({} >= {})", end, self.size);
+ if offset > self.size {
+ bail!("chunk data exceeds size ({} > {})", offset, self.size);
}
// last chunk can be smaller
- if ((end != self.size) && (chunk_len != self.chunk_size)) ||
+ if ((offset != self.size) && (chunk_len != self.chunk_size)) ||
(chunk_len > self.chunk_size) || (chunk_len == 0) {
- bail!("got chunk with wrong length ({} != {}", chunk_len, self.chunk_size);
+ bail!("chunk with unexpected length ({} != {})", chunk_len, self.chunk_size);
}
- if pos & (self.chunk_size-1) != 0 { bail!("add unaligned chunk (pos = {})", pos); }
-
- if (end as u64) != chunk_info.offset {
- bail!("got chunk with wrong offset ({} != {}", end, chunk_info.offset);
+ if pos & (self.chunk_size-1) != 0 {
+ bail!("got unaligned chunk (pos = {})", pos);
}
+ Ok(pos / self.chunk_size)
+ }
+
+ // Note: We want to add data out of order, so do not assume any order here.
+ pub fn add_chunk(&mut self, chunk_info: &ChunkInfo, stat: &mut ChunkStat) -> Result<(), Error> {
+
+ let chunk_len = chunk_info.chunk_len as usize;
+ let offset = chunk_info.offset as usize; // end of chunk
+
+ let idx = self.check_chunk_alignment(offset, chunk_len)?;
+
let (is_duplicate, compressed_size) = self.store.insert_chunk(&chunk_info.chunk)?;
stat.chunk_count += 1;
let digest = chunk_info.chunk.digest();
- println!("ADD CHUNK {} {} {}% {} {}", pos, chunk_len,
+ println!("ADD CHUNK {} {} {}% {} {}", idx, chunk_len,
(compressed_size*100)/(chunk_len as u64), is_duplicate, proxmox::tools::digest_to_hex(digest));
if is_duplicate {
stat.disk_size += compressed_size;
}
- self.add_digest(pos / self.chunk_size, digest)
+ self.add_digest(idx, digest)
}
pub fn add_digest(&mut self, index: usize, digest: &[u8; 32]) -> Result<(), Error> {