// proxmox-backup: src/api2/tape/backup.rs
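//! Tape backup API: list configured tape backup jobs, run them, and
//! back up a datastore to a tape media pool.
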
use std::path::Path;
use std::sync::{Mutex, Arc};

use anyhow::{bail, format_err, Error};

use serde_json::Value;

use proxmox::{
    try_block,
    api::{
        api,
        RpcEnvironment,
        RpcEnvironmentType,
        Router,
        Permission,
    },
};

use pbs_api_types::{
    Authid, Userid, TapeBackupJobConfig, TapeBackupJobSetup, TapeBackupJobStatus, MediaPoolConfig,
    UPID_SCHEMA, JOB_ID_SCHEMA, PRIV_DATASTORE_READ, PRIV_TAPE_AUDIT, PRIV_TAPE_WRITE,
};

use pbs_datastore::StoreProgress;
use pbs_datastore::backup_info::{BackupDir, BackupInfo};
use pbs_tools::{task_log, task_warn, task::WorkerTaskContext};
use pbs_config::CachedUserInfo;
use proxmox_rest_server::WorkerTask;

use crate::{
    server::{
        lookup_user_email,
        TapeBackupJobSummary,
        jobstate::{
            Job,
            JobState,
            compute_schedule_status,
        },
    },
    backup::{DataStore, SnapshotReader},
    tape::{
        TAPE_STATUS_DIR,
        Inventory,
        PoolWriter,
        MediaPool,
        drive::{
            media_changer,
            lock_tape_device,
            TapeLockError,
            set_tape_device_state,
        },
        changer::update_changer_online_status,
    },
};

const TAPE_BACKUP_JOB_ROUTER: Router = Router::new()
    .post(&API_METHOD_RUN_TAPE_BACKUP_JOB);

pub const ROUTER: Router = Router::new()
    .get(&API_METHOD_LIST_TAPE_BACKUP_JOBS)
    .post(&API_METHOD_BACKUP)
    .match_all("id", &TAPE_BACKUP_JOB_ROUTER);

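/// Check that `auth_id` may run a tape backup: this requires the
/// Datastore.Read privilege on the datastore and Tape.Write on both
/// the target drive and the media pool.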
fn check_backup_permission(
    auth_id: &Authid,
    store: &str,
    pool: &str,
    drive: &str,
) -> Result<(), Error> {
    let user_info = CachedUserInfo::new()?;

    let privs = user_info.lookup_privs(auth_id, &["datastore", store]);
    if (privs & PRIV_DATASTORE_READ) == 0 {
        bail!("no permissions on /datastore/{}", store);
    }

    let privs = user_info.lookup_privs(auth_id, &["tape", "drive", drive]);
    if (privs & PRIV_TAPE_WRITE) == 0 {
        bail!("no permissions on /tape/drive/{}", drive);
    }

    let privs = user_info.lookup_privs(auth_id, &["tape", "pool", pool]);
    if (privs & PRIV_TAPE_WRITE) == 0 {
        bail!("no permissions on /tape/pool/{}", pool);
    }

    Ok(())
}

#[api(
    returns: {
        description: "List configured tape backup jobs and their status",
        type: Array,
        items: { type: TapeBackupJobStatus },
    },
    access: {
        description: "List configured tape jobs filtered by Tape.Audit privileges",
        permission: &Permission::Anybody,
    },
)]
/// List all tape backup jobs
pub fn list_tape_backup_jobs(
    _param: Value,
    mut rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<TapeBackupJobStatus>, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;

    let (job_config, digest) = pbs_config::tape_job::config()?;
    let (pool_config, _pool_digest) = pbs_config::media_pool::config()?;
    let (drive_config, _digest) = pbs_config::drive::config()?;

    let job_list_iter = job_config
        .convert_to_typed_array("backup")?
        .into_iter()
        .filter(|_job: &TapeBackupJobConfig| {
            // fixme: check access permission
            true
        });

    let mut list = Vec::new();
    let status_path = Path::new(TAPE_STATUS_DIR);
    let current_time = proxmox::tools::time::epoch_i64();

    for job in job_list_iter {
        let privs = user_info.lookup_privs(&auth_id, &["tape", "job", &job.id]);
        if (privs & PRIV_TAPE_AUDIT) == 0 {
            continue;
        }

        let last_state = JobState::load("tape-backup-job", &job.id)
            .map_err(|err| format_err!("could not open statefile for {}: {}", &job.id, err))?;

        let status = compute_schedule_status(&last_state, job.schedule.as_deref())?;

        let next_run = status.next_run.unwrap_or(current_time);

        let mut next_media_label = None;

        if let Ok(pool) = pool_config.lookup::<MediaPoolConfig>("pool", &job.setup.pool) {
            let mut changer_name = None;
            if let Ok(Some((_, name))) = media_changer(&drive_config, &job.setup.drive) {
                changer_name = Some(name);
            }
            if let Ok(mut pool) = MediaPool::with_config(status_path, &pool, changer_name, true) {
                if pool.start_write_session(next_run, false).is_ok() {
                    if let Ok(media_id) = pool.guess_next_writable_media(next_run) {
                        next_media_label = Some(media_id.label.label_text);
                    }
                }
            }
        }

        list.push(TapeBackupJobStatus { config: job, status, next_media_label });
    }

    rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();

    Ok(list)
}

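/// Run a tape backup job in a newly spawned worker thread.
///
/// The worker id has the form `<store>:<pool>:<drive>:<jobname>`.
/// Scheduled runs (`schedule` is `Some`) acquire the drive lock inside
/// the worker and wait for it indefinitely; manual runs lock the drive
/// up front. When the job finishes, a status notification is mailed to
/// the configured notify user (root@pam by default).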
pub fn do_tape_backup_job(
    mut job: Job,
    setup: TapeBackupJobSetup,
    auth_id: &Authid,
    schedule: Option<String>,
    to_stdout: bool,
) -> Result<String, Error> {
    let job_id = format!("{}:{}:{}:{}",
                         setup.store,
                         setup.pool,
                         setup.drive,
                         job.jobname());

    let worker_type = job.jobtype().to_string();

    let datastore = DataStore::lookup_datastore(&setup.store)?;

    let (config, _digest) = pbs_config::media_pool::config()?;
    let pool_config: MediaPoolConfig = config.lookup("pool", &setup.pool)?;

    let (drive_config, _digest) = pbs_config::drive::config()?;

    // for scheduled jobs we acquire the lock later in the worker
    let drive_lock = if schedule.is_some() {
        None
    } else {
        Some(lock_tape_device(&drive_config, &setup.drive)?)
    };

    let notify_user = setup.notify_user.as_ref().unwrap_or_else(|| &Userid::root_userid());
    let email = lookup_user_email(notify_user);

    let upid_str = WorkerTask::new_thread(
        &worker_type,
        Some(job_id.clone()),
        auth_id.to_string(),
        to_stdout,
        move |worker| {
            job.start(&worker.upid().to_string())?;
            let mut drive_lock = drive_lock;

            let mut summary = Default::default();
            let job_result = try_block!({
                if schedule.is_some() {
                    // for scheduled tape backup jobs, we wait indefinitely for the lock
                    task_log!(worker, "waiting for drive lock...");
                    loop {
                        worker.check_abort()?;
                        match lock_tape_device(&drive_config, &setup.drive) {
                            Ok(lock) => {
                                drive_lock = Some(lock);
                                break;
                            }
                            Err(TapeLockError::TimeOut) => continue,
                            Err(TapeLockError::Other(err)) => return Err(err),
                        }
                    }
                }
                set_tape_device_state(&setup.drive, &worker.upid().to_string())?;

                task_log!(worker, "Starting tape backup job '{}'", job_id);
                if let Some(event_str) = schedule {
                    task_log!(worker, "task triggered by schedule '{}'", event_str);
                }

                backup_worker(
                    &worker,
                    datastore,
                    &pool_config,
                    &setup,
                    email.clone(),
                    &mut summary,
                    false,
                )
            });

            let status = worker.create_state(&job_result);

            if let Some(email) = email {
                if let Err(err) = crate::server::send_tape_backup_status(
                    &email,
                    Some(job.jobname()),
                    &setup,
                    &job_result,
                    summary,
                ) {
                    eprintln!("send tape backup notification failed: {}", err);
                }
            }

            if let Err(err) = job.finish(status) {
                eprintln!(
                    "could not finish job state for {}: {}",
                    job.jobtype().to_string(),
                    err
                );
            }

            if let Err(err) = set_tape_device_state(&setup.drive, "") {
                eprintln!(
                    "could not unset drive state for {}: {}",
                    setup.drive,
                    err
                );
            }

            job_result
        }
    )?;

    Ok(upid_str)
}

#[api(
    input: {
        properties: {
            id: {
                schema: JOB_ID_SCHEMA,
            },
        },
    },
    access: {
        // Note: parameters come from the job config, so we need to check permissions inside the function body
        description: "The user needs Tape.Write privilege on /tape/pool/{pool} \
            and /tape/drive/{drive}, Datastore.Read privilege on /datastore/{store}.",
        permission: &Permission::Anybody,
    },
)]
/// Runs a tape backup job manually.
pub fn run_tape_backup_job(
    id: String,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let (config, _digest) = pbs_config::tape_job::config()?;
    let backup_job: TapeBackupJobConfig = config.lookup("backup", &id)?;

    check_backup_permission(
        &auth_id,
        &backup_job.setup.store,
        &backup_job.setup.pool,
        &backup_job.setup.drive,
    )?;

    let job = Job::new("tape-backup-job", &id)?;

    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

    let upid_str = do_tape_backup_job(job, backup_job.setup, &auth_id, None, to_stdout)?;

    Ok(upid_str)
}

#[api(
    input: {
        properties: {
            setup: {
                type: TapeBackupJobSetup,
                flatten: true,
            },
            "force-media-set": {
                description: "Ignore the allocation policy and start a new media-set.",
                optional: true,
                type: bool,
                default: false,
            },
        },
    },
    returns: {
        schema: UPID_SCHEMA,
    },
    access: {
        // Note: parameters are not URI parameters, so we need to check permissions inside the function body
        description: "The user needs Tape.Write privilege on /tape/pool/{pool} \
            and /tape/drive/{drive}, Datastore.Read privilege on /datastore/{store}.",
        permission: &Permission::Anybody,
    },
)]
/// Backup datastore to tape media pool
pub fn backup(
    setup: TapeBackupJobSetup,
    force_media_set: bool,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    check_backup_permission(
        &auth_id,
        &setup.store,
        &setup.pool,
        &setup.drive,
    )?;

    let datastore = DataStore::lookup_datastore(&setup.store)?;

    let (config, _digest) = pbs_config::media_pool::config()?;
    let pool_config: MediaPoolConfig = config.lookup("pool", &setup.pool)?;

    let (drive_config, _digest) = pbs_config::drive::config()?;

    // early check/lock before starting worker
    let drive_lock = lock_tape_device(&drive_config, &setup.drive)?;

    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

    let job_id = format!("{}:{}:{}", setup.store, setup.pool, setup.drive);

    let notify_user = setup.notify_user.as_ref().unwrap_or_else(|| &Userid::root_userid());
    let email = lookup_user_email(notify_user);

    let upid_str = WorkerTask::new_thread(
        "tape-backup",
        Some(job_id),
        auth_id.to_string(),
        to_stdout,
        move |worker| {
            let _drive_lock = drive_lock; // keep lock guard
            set_tape_device_state(&setup.drive, &worker.upid().to_string())?;

            let mut summary = Default::default();
            let job_result = backup_worker(
                &worker,
                datastore,
                &pool_config,
                &setup,
                email.clone(),
                &mut summary,
                force_media_set,
            );

            if let Some(email) = email {
                if let Err(err) = crate::server::send_tape_backup_status(
                    &email,
                    None,
                    &setup,
                    &job_result,
                    summary,
                ) {
                    eprintln!("send tape backup notification failed: {}", err);
                }
            }

            // ignore errors
            let _ = set_tape_device_state(&setup.drive, "");
            job_result
        }
    )?;

    Ok(upid_str.into())
}

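/// The backup worker: walks all backup groups of the datastore (only
/// the latest snapshot per group if `latest-only` is set), writes every
/// finished snapshot that is not yet part of the media set to tape,
/// appends the media catalog afterwards, and finally exports or ejects
/// the media set if the setup requests it.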
fn backup_worker(
    worker: &WorkerTask,
    datastore: Arc<DataStore>,
    pool_config: &MediaPoolConfig,
    setup: &TapeBackupJobSetup,
    email: Option<String>,
    summary: &mut TapeBackupJobSummary,
    force_media_set: bool,
) -> Result<(), Error> {
    let status_path = Path::new(TAPE_STATUS_DIR);
    let start = std::time::Instant::now();

    task_log!(worker, "update media online status");
    let changer_name = update_media_online_status(&setup.drive)?;

    let pool = MediaPool::with_config(status_path, &pool_config, changer_name, false)?;

    let mut pool_writer = PoolWriter::new(
        pool,
        &setup.drive,
        worker,
        email,
        force_media_set,
    )?;

    let mut group_list = BackupInfo::list_backup_groups(&datastore.base_path())?;

    group_list.sort_unstable();

    let group_count = group_list.len();
    task_log!(worker, "found {} groups", group_count);

    let mut progress = StoreProgress::new(group_count as u64);

    let latest_only = setup.latest_only.unwrap_or(false);

    if latest_only {
        task_log!(worker, "latest-only: true (only considering latest snapshots)");
    }

    let datastore_name = datastore.name();

    let mut errors = false;

    let mut need_catalog = false; // avoid writing catalog for empty jobs

    for (group_number, group) in group_list.into_iter().enumerate() {
        progress.done_groups = group_number as u64;
        progress.done_snapshots = 0;
        progress.group_snapshots = 0;

        let snapshot_list = group.list_backups(&datastore.base_path())?;

        // filter out unfinished backups
        let mut snapshot_list: Vec<_> = snapshot_list
            .into_iter()
            .filter(|item| item.is_finished())
            .collect();

        if snapshot_list.is_empty() {
            task_log!(worker, "group {} was empty", group);
            continue;
        }

        BackupInfo::sort_list(&mut snapshot_list, true); // oldest first

        if latest_only {
            progress.group_snapshots = 1;
            if let Some(info) = snapshot_list.pop() {
                if pool_writer.contains_snapshot(datastore_name, &info.backup_dir.to_string()) {
                    task_log!(worker, "skip snapshot {}", info.backup_dir);
                    continue;
                }

                need_catalog = true;

                let snapshot_name = info.backup_dir.to_string();
                if !backup_snapshot(worker, &mut pool_writer, datastore.clone(), info.backup_dir)? {
                    errors = true;
                } else {
                    summary.snapshot_list.push(snapshot_name);
                }
                progress.done_snapshots = 1;
                task_log!(
                    worker,
                    "percentage done: {}",
                    progress
                );
            }
        } else {
            progress.group_snapshots = snapshot_list.len() as u64;
            for (snapshot_number, info) in snapshot_list.into_iter().enumerate() {
                if pool_writer.contains_snapshot(datastore_name, &info.backup_dir.to_string()) {
                    task_log!(worker, "skip snapshot {}", info.backup_dir);
                    continue;
                }

                need_catalog = true;

                let snapshot_name = info.backup_dir.to_string();
                if !backup_snapshot(worker, &mut pool_writer, datastore.clone(), info.backup_dir)? {
                    errors = true;
                } else {
                    summary.snapshot_list.push(snapshot_name);
                }
                progress.done_snapshots = snapshot_number as u64 + 1;
                task_log!(
                    worker,
                    "percentage done: {}",
                    progress
                );
            }
        }
    }

    pool_writer.commit()?;

    if need_catalog {
        task_log!(worker, "append media catalog");

        let uuid = pool_writer.load_writable_media(worker)?;
        let done = pool_writer.append_catalog_archive(worker)?;
        if !done {
            task_log!(worker, "catalog does not fit on tape, writing to next volume");
            pool_writer.set_media_status_full(&uuid)?;
            pool_writer.load_writable_media(worker)?;
            let done = pool_writer.append_catalog_archive(worker)?;
            if !done {
                bail!("write_catalog_archive failed on second media");
            }
        }
    }

    if setup.export_media_set.unwrap_or(false) {
        pool_writer.export_media_set(worker)?;
    } else if setup.eject_media.unwrap_or(false) {
        pool_writer.eject_media(worker)?;
    }

    if errors {
        bail!("Tape backup finished with some errors. Please check the task log.");
    }

    summary.duration = start.elapsed();

    Ok(())
}

// Try to update the media online status
fn update_media_online_status(drive: &str) -> Result<Option<String>, Error> {
    let (config, _digest) = pbs_config::drive::config()?;

    if let Ok(Some((mut changer, changer_name))) = media_changer(&config, drive) {
        let label_text_list = changer.online_media_label_texts()?;

        let status_path = Path::new(TAPE_STATUS_DIR);
        let mut inventory = Inventory::load(status_path)?;

        update_changer_online_status(
            &config,
            &mut inventory,
            &changer_name,
            &label_text_list,
        )?;

        Ok(Some(changer_name))
    } else {
        Ok(None)
    }
}

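/// Write a single snapshot to tape.
///
/// Chunks are streamed from a separate reader thread and appended as
/// chunk archives first; the snapshot archive itself is written last.
/// Returns `Ok(false)` when the snapshot could not be opened (the
/// caller counts this as an error but continues), `Ok(true)` on
/// success.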
pub fn backup_snapshot(
    worker: &WorkerTask,
    pool_writer: &mut PoolWriter,
    datastore: Arc<DataStore>,
    snapshot: BackupDir,
) -> Result<bool, Error> {
    task_log!(worker, "backup snapshot {}", snapshot);

    let snapshot_reader = match SnapshotReader::new(datastore.clone(), snapshot.clone()) {
        Ok(reader) => reader,
        Err(err) => {
            // ignore missing snapshots and continue
            task_warn!(worker, "failed opening snapshot '{}': {}", snapshot, err);
            return Ok(false);
        }
    };

    let snapshot_reader = Arc::new(Mutex::new(snapshot_reader));

    let (reader_thread, chunk_iter) = pool_writer.spawn_chunk_reader_thread(
        datastore.clone(),
        snapshot_reader.clone(),
    )?;

    let mut chunk_iter = chunk_iter.peekable();

    loop {
        worker.check_abort()?;

        // test if we have remaining chunks
        match chunk_iter.peek() {
            None => break,
            Some(Ok(_)) => { /* Ok */ },
            Some(Err(err)) => bail!("{}", err),
        }

        let uuid = pool_writer.load_writable_media(worker)?;

        worker.check_abort()?;

        let (leom, _bytes) = pool_writer.append_chunk_archive(worker, &mut chunk_iter, datastore.name())?;

        if leom { // logical end of media reached, mark tape as full
            pool_writer.set_media_status_full(&uuid)?;
        }
    }

    if reader_thread.join().is_err() {
        bail!("chunk reader thread failed");
    }

    worker.check_abort()?;

    let uuid = pool_writer.load_writable_media(worker)?;

    worker.check_abort()?;

    let snapshot_reader = snapshot_reader.lock().unwrap();

    let (done, _bytes) = pool_writer.append_snapshot_archive(worker, &snapshot_reader)?;

    if !done {
        // does not fit on tape, so we try on next volume
        pool_writer.set_media_status_full(&uuid)?;

        worker.check_abort()?;

        pool_writer.load_writable_media(worker)?;
        let (done, _bytes) = pool_writer.append_snapshot_archive(worker, &snapshot_reader)?;

        if !done {
            bail!("write_snapshot_archive failed on second media");
        }
    }

    task_log!(worker, "end backup {}:{}", datastore.name(), snapshot);

    Ok(true)
}