1 use std
::collections
::HashSet
;
4 use anyhow
::{bail, Error}
;
8 api
::section_config
::SectionConfigData
,
18 MAX_CHUNK_ARCHIVE_SIZE
,
24 SnapshotChunkIterator
,
29 tape_write_snapshot_archive
,
30 request_and_load_media
,
35 struct PoolWriterState
{
36 drive
: Box
<dyn TapeDriver
>,
37 catalog
: MediaCatalog
,
38 // tell if we already moved to EOM
40 // bytes written after the last tape flush/sync
44 impl PoolWriterState
{
46 fn commit(&mut self) -> Result
<(), Error
> {
47 self.drive
.sync()?
; // sync all data to the tape
48 self.catalog
.commit()?
; // then commit the catalog
49 self.bytes_written
= 0;
54 /// Helper to manage a backup job, writing several tapes of a pool
55 pub struct PoolWriter
{
58 status
: Option
<PoolWriterState
>,
59 media_set_catalog
: MediaSetCatalog
,
64 pub fn new(mut pool
: MediaPool
, drive_name
: &str) -> Result
<Self, Error
> {
66 let current_time
= proxmox
::tools
::time
::epoch_i64();
68 pool
.start_write_session(current_time
)?
;
70 let mut media_set_catalog
= MediaSetCatalog
::new();
72 // load all catalogs read-only at start
73 for media_uuid
in pool
.current_media_list()?
{
74 let media_catalog
= MediaCatalog
::open(
75 Path
::new(TAPE_STATUS_DIR
),
80 media_set_catalog
.append_catalog(media_catalog
)?
;
85 drive_name
: drive_name
.to_string(),
91 pub fn pool(&mut self) -> &mut MediaPool
{
95 /// Set media status to FULL (persistent - stores pool status)
96 pub fn set_media_status_full(&mut self, uuid
: &Uuid
) -> Result
<(), Error
> {
97 self.pool
.set_media_status_full(&uuid
)?
;
101 pub fn contains_snapshot(&self, snapshot
: &str) -> bool
{
102 if let Some(PoolWriterState { ref catalog, .. }
) = self.status
{
103 if catalog
.contains_snapshot(snapshot
) {
107 self.media_set_catalog
.contains_snapshot(snapshot
)
110 /// commit changes to tape and catalog
112 /// This is done automatically during a backup session, but needs to
113 /// be called explicitly before dropping the PoolWriter
114 pub fn commit(&mut self) -> Result
<(), Error
> {
115 if let Some(ref mut status
) = self.status
{
121 /// Load a writable media into the drive
122 pub fn load_writable_media(&mut self, worker
: &WorkerTask
) -> Result
<Uuid
, Error
> {
123 let last_media_uuid
= match self.status
{
124 Some(PoolWriterState { ref catalog, .. }
) => Some(catalog
.uuid().clone()),
128 let current_time
= proxmox
::tools
::time
::epoch_i64();
129 let media_uuid
= self.pool
.alloc_writable_media(current_time
)?
;
131 let media
= self.pool
.lookup_media(&media_uuid
).unwrap();
133 let media_changed
= match last_media_uuid
{
134 Some(ref last_media_uuid
) => last_media_uuid
!= &media_uuid
,
139 return Ok(media_uuid
);
142 // remove read-only catalog (we store a writable version in status)
143 self.media_set_catalog
.remove_catalog(&media_uuid
);
145 if let Some(PoolWriterState {mut drive, catalog, .. }
) = self.status
.take() {
146 self.media_set_catalog
.append_catalog(catalog
)?
;
147 drive
.eject_media()?
;
150 let (drive_config
, _digest
) = crate::config
::drive
::config()?
;
151 let (drive
, catalog
) = drive_load_and_label_media(worker
, &drive_config
, &self.drive_name
, &media
.id())?
;
152 self.status
= Some(PoolWriterState { drive, catalog, at_eom: false, bytes_written: 0 }
);
157 /// uuid of currently loaded BackupMedia
158 pub fn current_media_uuid(&self) -> Result
<&Uuid
, Error
> {
160 Some(PoolWriterState { ref catalog, ..}
) => Ok(catalog
.uuid()),
161 None
=> bail
!("PoolWriter - no media loaded"),
165 /// Move to EOM (if not already there), then create a new snapshot
166 /// archive writing specified files (as .pxar) into it. On
167 /// success, this return 'Ok(true)' and the media catalog gets
170 /// Please note that this may fail when there is not enough space
171 /// on the media (return value 'Ok((false, _))'). In that case, the
172 /// archive is marked incomplete, and we do not use it. The caller
173 /// should mark the media as full and try again using another
175 pub fn append_snapshot_archive(
177 snapshot_reader
: &SnapshotReader
,
178 ) -> Result
<(bool
, usize), Error
> {
180 let status
= match self.status
{
181 Some(ref mut status
) => status
,
182 None
=> bail
!("PoolWriter - no media loaded"),
186 status
.drive
.move_to_eom()?
;
187 status
.at_eom
= true;
190 let current_file_number
= status
.drive
.current_file_number()?
;
191 if current_file_number
< 2 {
192 bail
!("got strange file position number from drive ({})", current_file_number
);
195 let (done
, bytes_written
) = {
196 let mut writer
: Box
<dyn TapeWrite
> = status
.drive
.write_file()?
;
198 match tape_write_snapshot_archive(writer
.as_mut(), snapshot_reader
)?
{
199 Some(content_uuid
) => {
200 status
.catalog
.register_snapshot(
203 &snapshot_reader
.snapshot().to_string(),
205 (true, writer
.bytes_written())
207 None
=> (false, writer
.bytes_written()),
211 status
.bytes_written
+= bytes_written
;
213 let request_sync
= if status
.bytes_written
>= COMMIT_BLOCK_SIZE { true }
else { false }
;
215 if !done
|| request_sync
{
219 Ok((done
, bytes_written
))
222 /// Move to EOM (if not already there), then create a new chunk
223 /// archive and write chunks from 'chunk_iter'. This stops when
224 /// it detects LEOM or when we reach the max archive size
225 /// (4GB). Written chunks are registered in the media catalog.
226 pub fn append_chunk_archive(
228 datastore
: &DataStore
,
229 chunk_iter
: &mut std
::iter
::Peekable
<SnapshotChunkIterator
>,
230 ) -> Result
<(bool
, usize), Error
> {
232 let status
= match self.status
{
233 Some(ref mut status
) => status
,
234 None
=> bail
!("PoolWriter - no media loaded"),
238 status
.drive
.move_to_eom()?
;
239 status
.at_eom
= true;
242 let current_file_number
= status
.drive
.current_file_number()?
;
243 if current_file_number
< 2 {
244 bail
!("got strange file position number from drive ({})", current_file_number
);
246 let writer
= status
.drive
.write_file()?
;
248 let (saved_chunks
, content_uuid
, leom
, bytes_written
) = write_chunk_archive(
252 &self.media_set_catalog
,
254 MAX_CHUNK_ARCHIVE_SIZE
,
257 status
.bytes_written
+= bytes_written
;
259 let request_sync
= if status
.bytes_written
>= COMMIT_BLOCK_SIZE { true }
else { false }
;
261 // register chunks in media_catalog
262 status
.catalog
.start_chunk_archive(content_uuid
, current_file_number
)?
;
263 for digest
in saved_chunks
{
264 status
.catalog
.register_chunk(&digest
)?
;
266 status
.catalog
.end_chunk_archive()?
;
268 if leom
|| request_sync
{
272 Ok((leom
, bytes_written
))
276 /// write up to <max_size> of chunks
277 fn write_chunk_archive
<'a
>(
278 writer
: Box
<dyn 'a
+ TapeWrite
>,
279 datastore
: &DataStore
,
280 chunk_iter
: &mut std
::iter
::Peekable
<SnapshotChunkIterator
>,
281 media_set_catalog
: &MediaSetCatalog
,
282 media_catalog
: &MediaCatalog
,
284 ) -> Result
<(Vec
<[u8;32]>, Uuid
, bool
, usize), Error
> {
286 let (mut writer
, content_uuid
) = ChunkArchiveWriter
::new(writer
, true)?
;
288 let mut chunk_index
: HashSet
<[u8;32]> = HashSet
::new();
290 // we want to get the chunk list in correct order
291 let mut chunk_list
: Vec
<[u8;32]> = Vec
::new();
293 let mut leom
= false;
296 let digest
= match chunk_iter
.next() {
298 Some(digest
) => digest?
,
300 if media_catalog
.contains_chunk(&digest
)
301 || chunk_index
.contains(&digest
)
302 || media_set_catalog
.contains_chunk(&digest
)
307 let blob
= datastore
.load_chunk(&digest
)?
;
308 println
!("CHUNK {} size {}", proxmox
::tools
::digest_to_hex(&digest
), blob
.raw_size());
310 match writer
.try_write_chunk(&digest
, &blob
) {
312 chunk_index
.insert(digest
);
313 chunk_list
.push(digest
);
319 Err(err
) => bail
!("write chunk failed - {}", err
),
322 if writer
.bytes_written() > max_size
{
323 println
!("Chunk Archive max size reached, closing archive");
330 Ok((chunk_list
, content_uuid
, leom
, writer
.bytes_written()))
333 // Request and load 'media' into the drive. Then compare the media
334 // set label. If the tape is empty, or the existing set label does not
335 // match the expected media set, overwrite the media set label.
336 fn drive_load_and_label_media(
338 drive_config
: &SectionConfigData
,
341 ) -> Result
<(Box
<dyn TapeDriver
>, MediaCatalog
), Error
> {
343 let (mut tmp_drive
, info
) =
344 request_and_load_media(worker
, &drive_config
, &drive_name
, &media_id
.label
)?
;
348 let new_set
= match media_id
.media_set_label
{
350 bail
!("got media without media set - internal error");
352 Some(ref set
) => set
,
355 let status_path
= Path
::new(TAPE_STATUS_DIR
);
357 match &info
.media_set_label
{
359 println
!("wrinting new media set label");
360 tmp_drive
.write_media_set_label(new_set
)?
;
364 media_set_label
: Some(new_set
.clone()),
366 media_catalog
= MediaCatalog
::overwrite(status_path
, &info
, true)?
;
368 Some(media_set_label
) => {
369 if new_set
.uuid
== media_set_label
.uuid
{
370 if new_set
.seq_nr
!= media_set_label
.seq_nr
{
371 bail
!("got media with wrong media sequence number ({} != {}",
372 new_set
.seq_nr
,media_set_label
.seq_nr
);
374 media_catalog
= MediaCatalog
::open(status_path
, &media_id
.label
.uuid
, true, false)?
;
376 println
!("wrinting new media set label (overwrite '{}/{}')",
377 media_set_label
.uuid
.to_string(), media_set_label
.seq_nr
);
379 tmp_drive
.write_media_set_label(new_set
)?
;
383 media_set_label
: Some(new_set
.clone()),
385 media_catalog
= MediaCatalog
::overwrite(status_path
, &info
, true)?
;
390 // todo: verify last content/media_catalog somehow?
391 tmp_drive
.move_to_eom()?
;
393 Ok((tmp_drive
, media_catalog
))