]>
Commit | Line | Data |
---|---|---|
166a48f9 DM |
1 | mod catalog_set; |
2 | pub use catalog_set::*; | |
3 | ||
4 | mod new_chunks_iterator; | |
5 | pub use new_chunks_iterator::*; | |
6 | ||
d37da6b7 | 7 | use std::path::Path; |
32b75d36 | 8 | use std::fs::File; |
31cf625a | 9 | use std::time::SystemTime; |
5c4755ad | 10 | use std::sync::{Arc, Mutex}; |
d37da6b7 | 11 | |
166a48f9 | 12 | use anyhow::{bail, Error}; |
d37da6b7 | 13 | |
6ef1b649 | 14 | use proxmox_uuid::Uuid; |
d37da6b7 | 15 | |
1ec0d70d | 16 | use pbs_tools::{task_log, task_warn}; |
5839c469 | 17 | use pbs_config::tape_encryption_keys::load_key_configs; |
048b43af DM |
18 | use pbs_tape::{ |
19 | TapeWrite, | |
20 | sg_tape::tape_alert_flags_critical, | |
21 | }; | |
c95c1c83 | 22 | use pbs_datastore::{DataStore, SnapshotReader}; |
b9700a9f | 23 | use proxmox_rest_server::WorkerTask; |
c23192d3 | 24 | |
d37da6b7 | 25 | use crate::{ |
d37da6b7 DM |
26 | tape::{ |
27 | TAPE_STATUS_DIR, | |
28 | MAX_CHUNK_ARCHIVE_SIZE, | |
29 | COMMIT_BLOCK_SIZE, | |
d37da6b7 DM |
30 | MediaPool, |
31 | MediaId, | |
32 | MediaCatalog, | |
f47e0357 DM |
33 | file_formats::{ |
34 | MediaSetLabel, | |
35 | ChunkArchiveWriter, | |
36 | tape_write_snapshot_archive, | |
32b75d36 | 37 | tape_write_catalog, |
f47e0357 | 38 | }, |
37796ff7 DM |
39 | drive::{ |
40 | TapeDriver, | |
41 | request_and_load_media, | |
37796ff7 DM |
42 | media_changer, |
43 | }, | |
d37da6b7 DM |
44 | }, |
45 | }; | |
46 | ||
47 | ||
/// Per-loaded-media state of a [`PoolWriter`].
///
/// Exists only while a medium is loaded in the drive; dropping it
/// closes the drive handle.
struct PoolWriterState {
    /// Open handle to the tape drive.
    drive: Box<dyn TapeDriver>,
    /// Media Uuid from loaded media.
    media_uuid: Uuid,
    /// Tell if we already moved to EOM (end of media).
    at_eom: bool,
    /// Bytes written after the last tape flush/sync.
    bytes_written: usize,
}
57 | ||
d37da6b7 DM |
/// Helper to manage a backup job, writing several tapes of a pool
pub struct PoolWriter {
    /// Media pool this writer allocates media from.
    pool: MediaPool,
    /// Name of the tape drive used for writing.
    drive_name: String,
    /// Loaded-media state; `None` while no media is loaded.
    status: Option<PoolWriterState>,
    /// Catalogs of the current media set; shared (via `Arc`) with the
    /// chunk-reader thread spawned by `spawn_chunk_reader_thread`.
    catalog_set: Arc<Mutex<CatalogSet>>,
    /// Optional email address, passed on to `request_and_load_media`
    /// for operator notifications.
    notify_email: Option<String>,
}
66 | ||
67 | impl PoolWriter { | |
68 | ||
32b75d36 DM |
    /// Create a new pool writer for `drive_name`.
    ///
    /// Starts a write session on the pool (which may decide to begin a
    /// new media set, forced when `force_media_set` is true) and loads
    /// the catalogs of all media in the current set read-only.
    pub fn new(
        mut pool: MediaPool,
        drive_name: &str,
        worker: &WorkerTask,
        notify_email: Option<String>,
        force_media_set: bool,
    ) -> Result<Self, Error> {

        let current_time = proxmox_time::epoch_i64();

        let new_media_set_reason = pool.start_write_session(current_time, force_media_set)?;
        if let Some(reason) = new_media_set_reason {
            task_log!(
                worker,
                "starting new media set - reason: {}",
                reason,
            );
        }

        let media_set_uuid = pool.current_media_set().uuid();
        task_log!(worker, "media set uuid: {}", media_set_uuid);

        let mut catalog_set = CatalogSet::new();

        // load all catalogs read-only at start
        for media_uuid in pool.current_media_list()? {
            // media was returned by the pool itself, so the lookup can't fail
            let media_info = pool.lookup_media(media_uuid).unwrap();
            let media_catalog = MediaCatalog::open(
                Path::new(TAPE_STATUS_DIR),
                media_info.id(),
                false,
                false,
            )?;
            catalog_set.append_read_only_catalog(media_catalog)?;
        }

        Ok(Self {
            pool,
            drive_name: drive_name.to_string(),
            status: None, // no media loaded yet
            catalog_set: Arc::new(Mutex::new(catalog_set)),
            notify_email,
        })
    }
113 | ||
    /// Mutable access to the underlying media pool.
    pub fn pool(&mut self) -> &mut MediaPool {
        &mut self.pool
    }
117 | ||
118 | /// Set media status to FULL (persistent - stores pool status) | |
119 | pub fn set_media_status_full(&mut self, uuid: &Uuid) -> Result<(), Error> { | |
120 | self.pool.set_media_status_full(&uuid)?; | |
121 | Ok(()) | |
122 | } | |
123 | ||
54722aca | 124 | pub fn contains_snapshot(&self, store: &str, snapshot: &str) -> bool { |
1037f2bc | 125 | self.catalog_set.lock().unwrap().contains_snapshot(store, snapshot) |
d37da6b7 DM |
126 | } |
127 | ||
    /// Eject media and drop PoolWriterState (close drive)
    ///
    /// No-op when no media is loaded. With a media changer, the tape is
    /// also unloaded from the drive back into its slot.
    pub fn eject_media(&mut self, worker: &WorkerTask) -> Result<(), Error> {
        let mut status = match self.status.take() {
            Some(status) => status,
            None => return Ok(()), // no media loaded
        };

        let (drive_config, _digest) = pbs_config::drive::config()?;

        if let Some((mut changer, _)) = media_changer(&drive_config, &self.drive_name)? {
            task_log!(worker, "eject media");
            status.drive.eject_media()?; // rewind and eject early, so that unload_media is faster
            drop(status); // close drive
            task_log!(worker, "unload media");
            changer.unload_media(None)?; //eject and unload
        } else {
            // no changer configured - standalone drive, just eject
            task_log!(worker, "standalone drive - ejecting media");
            status.drive.eject_media()?;
        }

        Ok(())
    }
150 | ||
edb90f6a DM |
    /// Export current media set and drop PoolWriterState (close drive)
    ///
    /// With a media changer, every medium of the current set is moved
    /// to an import/export slot (media not online are only warned
    /// about). Standalone drives can only eject the loaded tape.
    pub fn export_media_set(&mut self, worker: &WorkerTask) -> Result<(), Error> {
        let mut status = self.status.take();

        let (drive_config, _digest) = pbs_config::drive::config()?;

        if let Some((mut changer, _)) = media_changer(&drive_config, &self.drive_name)? {

            if let Some(ref mut status) = status {
                task_log!(worker, "eject media");
                status.drive.eject_media()?; // rewind and eject early, so that unload_media is faster
            }
            drop(status); // close drive

            task_log!(worker, "unload media");
            changer.unload_media(None)?;

            for media_uuid in self.pool.current_media_list()? {
                let media = self.pool.lookup_media(media_uuid)?;
                let label_text = media.label_text();
                if let Some(slot) = changer.export_media(label_text)? {
                    task_log!(worker, "exported media '{}' to import/export slot {}", label_text, slot);
                } else {
                    // best-effort: keep exporting the remaining media
                    task_warn!(worker, "export failed - media '{}' is not online", label_text);
                }
            }

        } else if let Some(mut status) = status {
            task_log!(worker, "standalone drive - ejecting media instead of export");
            status.drive.eject_media()?;
        }

        Ok(())
    }
185 | ||
d37da6b7 DM |
    /// commit changes to tape and catalog
    ///
    /// This is done automatically during a backup session, but needs to
    /// be called explicitly before dropping the PoolWriter
    ///
    /// Order matters: the tape is synced first, so the catalog never
    /// records data that did not reach the medium.
    pub fn commit(&mut self) -> Result<(), Error> {
        if let Some(PoolWriterState {ref mut drive, .. }) = self.status {
            drive.sync()?; // sync all data to the tape
        }
        self.catalog_set.lock().unwrap().commit()?; // then commit the catalog
        Ok(())
    }
197 | ||
    /// Load a writable media into the drive
    ///
    /// Asks the pool for a writable medium; if it differs from the one
    /// currently loaded, the old medium is ejected, the new one is
    /// requested/loaded, its media set label is checked (and written if
    /// needed), its catalog is registered, and drive encryption is
    /// configured from the media set label. Returns the Uuid of the
    /// loaded medium.
    pub fn load_writable_media(&mut self, worker: &WorkerTask) -> Result<Uuid, Error> {
        let last_media_uuid = match self.status {
            Some(PoolWriterState { ref media_uuid, ..}) => Some(media_uuid.clone()),
            None => None,
        };

        let current_time = proxmox_time::epoch_i64();
        let media_uuid = self.pool.alloc_writable_media(current_time)?;

        // uuid was just allocated by the pool, so the lookup can't fail
        let media = self.pool.lookup_media(&media_uuid).unwrap();

        let media_changed = match last_media_uuid {
            Some(ref last_media_uuid) => last_media_uuid != &media_uuid,
            None => true,
        };

        if !media_changed {
            // correct medium already loaded - nothing to do
            return Ok(media_uuid);
        }

        task_log!(worker, "allocated new writable media '{}'", media.label_text());

        // close the old drive handle (and eject its medium, if any)
        if let Some(PoolWriterState {mut drive, .. }) = self.status.take() {
            if last_media_uuid.is_some() {
                task_log!(worker, "eject current media");
                drive.eject_media()?;
            }
        }

        let (drive_config, _digest) = pbs_config::drive::config()?;

        let (mut drive, old_media_id) =
            request_and_load_media(worker, &drive_config, &self.drive_name, media.label(), &self.notify_email)?;

        // test for critical tape alert flags
        if let Ok(alert_flags) = drive.tape_alert_flags() {
            if !alert_flags.is_empty() {
                task_log!(worker, "TapeAlertFlags: {:?}", alert_flags);
                if tape_alert_flags_critical(alert_flags) {
                    // persistently mark the medium so it is not reused
                    self.pool.set_media_status_damaged(&media_uuid)?;
                    bail!("aborting due to critical tape alert flags: {:?}", alert_flags);
                }
            }
        }

        let (catalog, is_new_media) = update_media_set_label(
            worker,
            drive.as_mut(),
            old_media_id.media_set_label,
            media.id(),
        )?;

        self.catalog_set.lock().unwrap().append_catalog(catalog)?;

        // media set label was just written/verified above, so it exists
        let media_set = media.media_set_label().clone().unwrap();

        let encrypt_fingerprint = media_set
            .encryption_key_fingerprint
            .clone()
            .map(|fp| (fp, media_set.uuid.clone()));

        drive.set_encryption(encrypt_fingerprint)?;

        self.status = Some(PoolWriterState {
            drive,
            media_uuid: media_uuid.clone(),
            at_eom: false,
            bytes_written: 0,
        });

        if is_new_media {
            // add catalogs from previous media
            self.append_media_set_catalogs(worker)?;
        }

        Ok(media_uuid)
    }
276 | ||
32b75d36 DM |
277 | fn open_catalog_file(uuid: &Uuid) -> Result<File, Error> { |
278 | ||
279 | let status_path = Path::new(TAPE_STATUS_DIR); | |
280 | let mut path = status_path.to_owned(); | |
281 | path.push(uuid.to_string()); | |
282 | path.set_extension("log"); | |
283 | ||
284 | let file = std::fs::OpenOptions::new() | |
285 | .read(true) | |
286 | .open(&path)?; | |
287 | ||
288 | Ok(file) | |
289 | } | |
290 | ||
eac1beef DM |
    // Check if tape is loaded, then move to EOM (if not already there)
    //
    // Returns the tape position at EOM.
    fn prepare_tape_write(
        status: &mut PoolWriterState,
        worker: &WorkerTask,
    ) -> Result<u64, Error> {

        if !status.at_eom {
            task_log!(worker, "moving to end of media");
            status.drive.move_to_eom(true)?;
            // remember the position so we only seek once per loaded media
            status.at_eom = true;
        }

        let current_file_number = status.drive.current_file_number()?;
        if current_file_number < 2 {
            // NOTE(review): presumably the first two files are reserved for
            // label data, so a position below 2 indicates a drive problem -
            // confirm against the media format documentation
            bail!("got strange file position number from drive ({})", current_file_number);
        }

        Ok(current_file_number)
    }
312 | ||
32b75d36 DM |
    /// Move to EOM (if not already there), then write the current
    /// catalog to the tape. On success, this returns 'Ok(true)'.
    ///
    /// Please note that this may fail when there is not enough space
    /// on the media (return value 'Ok(false)'). In that case, the
    /// archive is marked incomplete. The caller should mark the media
    /// as full and try again using another media.
    pub fn append_catalog_archive(
        &mut self,
        worker: &WorkerTask,
    ) -> Result<bool, Error> {

        let status = match self.status {
            Some(ref mut status) => status,
            None => bail!("PoolWriter - no media loaded"),
        };

        Self::prepare_tape_write(status, worker)?;

        let catalog_set = self.catalog_set.lock().unwrap();

        let catalog = match catalog_set.catalog {
            None => bail!("append_catalog_archive failed: no catalog - internal error"),
            Some(ref catalog) => catalog,
        };

        let media_set = self.pool.current_media_set();

        // sanity check: the writable catalog must belong to the last
        // medium of the current media set
        let media_list = media_set.media_list();
        let uuid = match media_list.last() {
            None => bail!("got empty media list - internal error"),
            Some(None) => bail!("got incomplete media list - internal error"),
            Some(Some(last_uuid)) => {
                if last_uuid != catalog.uuid() {
                    bail!("got wrong media - internal error");
                }
                last_uuid
            }
        };

        // sequence number of the last medium within the set
        let seq_nr = media_list.len() - 1;

        let mut writer: Box<dyn TapeWrite> = status.drive.write_file()?;

        let mut file = Self::open_catalog_file(uuid)?;

        // `None` from tape_write_catalog means we hit EOM mid-write
        let done = tape_write_catalog(
            writer.as_mut(),
            uuid,
            media_set.uuid(),
            seq_nr,
            &mut file,
        )?.is_some();

        Ok(done)
    }
369 | ||
    // Append catalogs for all previous media in set (without last)
    //
    // Called when a new medium joins the set, so readers of this tape
    // can restore the catalogs of the earlier media. Any EOM during
    // these writes is a hard error (the fresh tape should have space).
    fn append_media_set_catalogs(
        &mut self,
        worker: &WorkerTask,
    ) -> Result<(), Error> {

        let media_set = self.pool.current_media_set();

        let mut media_list = &media_set.media_list()[..];
        if media_list.len() < 2 {
            return Ok(()); // single-media set - no previous catalogs
        }
        // drop the last entry - that is the currently loaded medium
        media_list = &media_list[..(media_list.len()-1)];

        let status = match self.status {
            Some(ref mut status) => status,
            None => bail!("PoolWriter - no media loaded"),
        };

        Self::prepare_tape_write(status, worker)?;

        for (seq_nr, uuid) in media_list.iter().enumerate() {

            let uuid = match uuid {
                None => bail!("got incomplete media list - internal error"),
                Some(uuid) => uuid,
            };

            let mut writer: Box<dyn TapeWrite> = status.drive.write_file()?;

            let mut file = Self::open_catalog_file(uuid)?;

            task_log!(worker, "write catalog for previous media: {}", uuid);

            if tape_write_catalog(
                writer.as_mut(),
                uuid,
                media_set.uuid(),
                seq_nr,
                &mut file,
            )?.is_none() {
                bail!("got EOM while writing start catalog");
            }
        }

        Ok(())
    }
417 | ||
    /// Move to EOM (if not already there), then creates a new snapshot
    /// archive writing specified files (as .pxar) into it. On
    /// success, this returns 'Ok(true)' and the media catalog gets
    /// updated.
    ///
    /// Please note that this may fail when there is not enough space
    /// on the media (return value 'Ok(false, _)'). In that case, the
    /// archive is marked incomplete, and we do not use it. The caller
    /// should mark the media as full and try again using another
    /// media.
    pub fn append_snapshot_archive(
        &mut self,
        worker: &WorkerTask,
        snapshot_reader: &SnapshotReader,
    ) -> Result<(bool, usize), Error> {

        let status = match self.status {
            Some(ref mut status) => status,
            None => bail!("PoolWriter - no media loaded"),
        };

        let current_file_number = Self::prepare_tape_write(status, worker)?;

        let (done, bytes_written) = {
            let mut writer: Box<dyn TapeWrite> = status.drive.write_file()?;

            match tape_write_snapshot_archive(writer.as_mut(), snapshot_reader)? {
                Some(content_uuid) => {
                    // archive fully written - register the snapshot
                    // under the file number it was written at
                    self.catalog_set.lock().unwrap().register_snapshot(
                        content_uuid,
                        current_file_number,
                        &snapshot_reader.datastore_name().to_string(),
                        &snapshot_reader.snapshot().to_string(),
                    )?;
                    (true, writer.bytes_written())
                }
                // `None` means we hit EOM - archive is incomplete
                None => (false, writer.bytes_written()),
            }
        };

        status.bytes_written += bytes_written;

        // periodically sync/commit once enough data was written
        let request_sync = status.bytes_written >= COMMIT_BLOCK_SIZE;

        if !done || request_sync {
            self.commit()?;
        }

        Ok((done, bytes_written))
    }
468 | ||
    /// Move to EOM (if not already there), then creates a new chunk
    /// archive and writes chunks from 'chunk_iter'. This stops when
    /// it detects LEOM or when we reach max archive size
    /// (4GB). Written chunks are registered in the media catalog.
    pub fn append_chunk_archive(
        &mut self,
        worker: &WorkerTask,
        chunk_iter: &mut std::iter::Peekable<NewChunksIterator>,
        store: &str,
    ) -> Result<(bool, usize), Error> {

        let status = match self.status {
            Some(ref mut status) => status,
            None => bail!("PoolWriter - no media loaded"),
        };

        let current_file_number = Self::prepare_tape_write(status, worker)?;

        let writer = status.drive.write_file()?;

        let start_time = SystemTime::now();

        let (saved_chunks, content_uuid, leom, bytes_written) = write_chunk_archive(
            worker,
            writer,
            chunk_iter,
            store,
            MAX_CHUNK_ARCHIVE_SIZE,
        )?;

        status.bytes_written += bytes_written;

        let elapsed = start_time.elapsed()?.as_secs_f64();
        task_log!(
            worker,
            "wrote {} chunks ({:.2} MB at {:.2} MB/s)",
            saved_chunks.len(),
            bytes_written as f64 /1_000_000.0,
            (bytes_written as f64)/(1_000_000.0*elapsed),
        );

        // periodically sync/commit once enough data was written
        let request_sync = status.bytes_written >= COMMIT_BLOCK_SIZE;

        // register chunks in media_catalog
        self.catalog_set.lock().unwrap()
            .register_chunk_archive(content_uuid, current_file_number, store, &saved_chunks)?;

        if leom || request_sync {
            self.commit()?;
        }

        Ok((leom, bytes_written))
    }
5c4755ad DM |
522 | |
523 | pub fn spawn_chunk_reader_thread( | |
524 | &self, | |
525 | datastore: Arc<DataStore>, | |
526 | snapshot_reader: Arc<Mutex<SnapshotReader>>, | |
527 | ) -> Result<(std::thread::JoinHandle<()>, NewChunksIterator), Error> { | |
528 | NewChunksIterator::spawn( | |
529 | datastore, | |
530 | snapshot_reader, | |
1037f2bc | 531 | Arc::clone(&self.catalog_set), |
5c4755ad DM |
532 | ) |
533 | } | |
d37da6b7 DM |
534 | } |
535 | ||
/// write up to <max_size> of chunks
///
/// Peeks each chunk before writing so that a chunk rejected at LEOM is
/// NOT consumed from the iterator and can be retried on the next
/// medium. Returns (written digests, archive uuid, leom flag, bytes).
fn write_chunk_archive<'a>(
    _worker: &WorkerTask,
    writer: Box<dyn 'a + TapeWrite>,
    chunk_iter: &mut std::iter::Peekable<NewChunksIterator>,
    store: &str,
    max_size: usize,
) -> Result<(Vec<[u8;32]>, Uuid, bool, usize), Error> {

    let (mut writer, content_uuid) = ChunkArchiveWriter::new(writer, store, true)?;

    // we want to get the chunk list in correct order
    let mut chunk_list: Vec<[u8;32]> = Vec::new();

    let mut leom = false;

    loop {
        // peek first - only consume after a successful write
        let (digest, blob) = match chunk_iter.peek() {
            None => break,
            Some(Ok((digest, blob))) => (digest, blob),
            Some(Err(err)) => bail!("{}", err),
        };

        //println!("CHUNK {} size {}", proxmox::tools::digest_to_hex(digest), blob.raw_size());

        match writer.try_write_chunk(&digest, &blob) {
            Ok(true) => {
                chunk_list.push(*digest);
                chunk_iter.next(); // consume
            }
            Ok(false) => {
                // Note; we do not consume the chunk (no chunk_iter.next())
                leom = true;
                break;
            }
            Err(err) => bail!("write chunk failed - {}", err),
        }

        if writer.bytes_written() > max_size {
            //task_log!(worker, "Chunk Archive max size reached, closing archive");
            break;
        }
    }

    writer.finish()?;

    Ok((chunk_list, content_uuid, leom, writer.bytes_written()))
}
584 | ||
66e42bec DM |
585 | // Compare the media set label. If the media is empty, or the existing |
586 | // set label does not match the expected media set, overwrite the | |
587 | // media set label. | |
588 | fn update_media_set_label( | |
ff58c519 | 589 | worker: &WorkerTask, |
66e42bec DM |
590 | drive: &mut dyn TapeDriver, |
591 | old_set: Option<MediaSetLabel>, | |
d37da6b7 | 592 | media_id: &MediaId, |
32b75d36 | 593 | ) -> Result<(MediaCatalog, bool), Error> { |
d37da6b7 DM |
594 | |
595 | let media_catalog; | |
596 | ||
597 | let new_set = match media_id.media_set_label { | |
66e42bec | 598 | None => bail!("got media without media set - internal error"), |
d37da6b7 DM |
599 | Some(ref set) => set, |
600 | }; | |
601 | ||
feb1645f DM |
602 | let key_config = if let Some(ref fingerprint) = new_set.encryption_key_fingerprint { |
603 | let (config_map, _digest) = load_key_configs()?; | |
604 | match config_map.get(fingerprint) { | |
82a103c8 | 605 | Some(key_config) => Some(key_config.clone()), |
feb1645f DM |
606 | None => { |
607 | bail!("unable to find tape encryption key config '{}'", fingerprint); | |
608 | } | |
609 | } | |
610 | } else { | |
611 | None | |
612 | }; | |
613 | ||
d37da6b7 DM |
614 | let status_path = Path::new(TAPE_STATUS_DIR); |
615 | ||
32b75d36 | 616 | let new_media = match old_set { |
d37da6b7 | 617 | None => { |
1ec0d70d | 618 | task_log!(worker, "writing new media set label"); |
feb1645f | 619 | drive.write_media_set_label(new_set, key_config.as_ref())?; |
31cf625a | 620 | media_catalog = MediaCatalog::overwrite(status_path, media_id, false)?; |
32b75d36 | 621 | true |
d37da6b7 DM |
622 | } |
623 | Some(media_set_label) => { | |
624 | if new_set.uuid == media_set_label.uuid { | |
625 | if new_set.seq_nr != media_set_label.seq_nr { | |
626 | bail!("got media with wrong media sequence number ({} != {}", | |
627 | new_set.seq_nr,media_set_label.seq_nr); | |
628 | } | |
8a0046f5 DM |
629 | if new_set.encryption_key_fingerprint != media_set_label.encryption_key_fingerprint { |
630 | bail!("detected changed encryption fingerprint - internal error"); | |
631 | } | |
237314ad | 632 | media_catalog = MediaCatalog::open(status_path, &media_id, true, false)?; |
32b75d36 DM |
633 | |
634 | // todo: verify last content/media_catalog somehow? | |
635 | ||
636 | false | |
d37da6b7 | 637 | } else { |
1ec0d70d DM |
638 | task_log!( |
639 | worker, | |
640 | "writing new media set label (overwrite '{}/{}')", | |
641 | media_set_label.uuid.to_string(), | |
642 | media_set_label.seq_nr, | |
66e42bec | 643 | ); |
d37da6b7 | 644 | |
feb1645f | 645 | drive.write_media_set_label(new_set, key_config.as_ref())?; |
31cf625a | 646 | media_catalog = MediaCatalog::overwrite(status_path, media_id, false)?; |
32b75d36 | 647 | true |
d37da6b7 DM |
648 | } |
649 | } | |
32b75d36 | 650 | }; |
d37da6b7 | 651 | |
32b75d36 | 652 | Ok((media_catalog, new_media)) |
d37da6b7 | 653 | } |