use std::sync::{Arc, Mutex};

use anyhow::{bail, format_err, Error};
use serde_json::Value;

use proxmox_lang::try_block;
use proxmox_router::{Permission, Router, RpcEnvironment, RpcEnvironmentType};
use proxmox_schema::api;
use proxmox_sys::{task_log, task_warn, WorkerTaskContext};

use pbs_api_types::{
    print_ns_and_snapshot, print_store_and_ns, Authid, MediaPoolConfig, Operation,
    TapeBackupJobConfig, TapeBackupJobSetup, TapeBackupJobStatus, Userid, JOB_ID_SCHEMA,
    PRIV_DATASTORE_READ, PRIV_TAPE_AUDIT, PRIV_TAPE_WRITE, UPID_SCHEMA,
};

use pbs_config::CachedUserInfo;
use pbs_datastore::backup_info::{BackupDir, BackupInfo};
use pbs_datastore::{DataStore, StoreProgress};
use proxmox_rest_server::WorkerTask;

use crate::{
    server::{
        jobstate::{compute_schedule_status, Job, JobState},
        lookup_user_email, TapeBackupJobSummary,
    },
    tape::{
        changer::update_changer_online_status,
        drive::{lock_tape_device, media_changer, set_tape_device_state, TapeLockError},
        Inventory, MediaPool, PoolWriter, TAPE_STATUS_DIR,
    },
};

const TAPE_BACKUP_JOB_ROUTER: Router = Router::new().post(&API_METHOD_RUN_TAPE_BACKUP_JOB);

pub const ROUTER: Router = Router::new()
    .get(&API_METHOD_LIST_TAPE_BACKUP_JOBS)
    .post(&API_METHOD_BACKUP)
    .match_all("id", &TAPE_BACKUP_JOB_ROUTER);

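// Checked manually because store, pool and drive come from the request body
// (or the job config) rather than the URI: the caller needs Datastore.Read on
// the datastore and Tape.Write on both the drive and the pool.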
fn check_backup_permission(
    auth_id: &Authid,
    store: &str,
    pool: &str,
    drive: &str,
) -> Result<(), Error> {
    let user_info = CachedUserInfo::new()?;

    user_info.check_privs(auth_id, &["datastore", store], PRIV_DATASTORE_READ, false)?;

    user_info.check_privs(auth_id, &["tape", "drive", drive], PRIV_TAPE_WRITE, false)?;

    user_info.check_privs(auth_id, &["tape", "pool", pool], PRIV_TAPE_WRITE, false)?;

    Ok(())
}

#[api(
    returns: {
        description: "List configured tape backup jobs and their status",
        type: Array,
        items: { type: TapeBackupJobStatus },
    },
    access: {
        description: "List configured tape jobs filtered by Tape.Audit privileges",
        permission: &Permission::Anybody,
    },
)]
/// List all tape backup jobs
pub fn list_tape_backup_jobs(
    _param: Value,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<TapeBackupJobStatus>, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;

    let (job_config, digest) = pbs_config::tape_job::config()?;
    let (pool_config, _pool_digest) = pbs_config::media_pool::config()?;
    let (drive_config, _digest) = pbs_config::drive::config()?;

    let job_list_iter = job_config
        .convert_to_typed_array("backup")?
        .into_iter()
        .filter(|_job: &TapeBackupJobConfig| {
            // fixme: check access permission
            true
        });

    let mut list = Vec::new();
    let current_time = proxmox_time::epoch_i64();

    for job in job_list_iter {
        let privs = user_info.lookup_privs(&auth_id, &["tape", "job", &job.id]);
        if (privs & PRIV_TAPE_AUDIT) == 0 {
            continue;
        }

        let last_state = JobState::load("tape-backup-job", &job.id)
            .map_err(|err| format_err!("could not open statefile for {}: {}", &job.id, err))?;

        let status = compute_schedule_status(&last_state, job.schedule.as_deref())?;

        let next_run = status.next_run.unwrap_or(current_time);

        let mut next_media_label = None;

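        // guess the label of the next writable medium by simulating a pool
        // write session at the time of the next scheduled run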
        if let Ok(pool) = pool_config.lookup::<MediaPoolConfig>("pool", &job.setup.pool) {
            let mut changer_name = None;
            if let Ok(Some((_, name))) = media_changer(&drive_config, &job.setup.drive) {
                changer_name = Some(name);
            }
            if let Ok(mut pool) = MediaPool::with_config(TAPE_STATUS_DIR, &pool, changer_name, true)
            {
                if pool.start_write_session(next_run, false).is_ok() {
                    if let Ok(media_id) = pool.guess_next_writable_media(next_run) {
                        next_media_label = Some(media_id.label.label_text);
                    }
                }
            }
        }

        list.push(TapeBackupJobStatus {
            config: job,
            status,
            next_media_label,
        });
    }

    rpcenv["digest"] = hex::encode(digest).into();

    Ok(list)
}

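// Run a tape backup job in a new worker thread: acquire the drive lock
// (immediately for manual runs, waiting indefinitely for scheduled runs),
// call backup_worker(), send the notification mail and record the job state.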
pub fn do_tape_backup_job(
    mut job: Job,
    setup: TapeBackupJobSetup,
    auth_id: &Authid,
    schedule: Option<String>,
    to_stdout: bool,
) -> Result<String, Error> {
    let job_id = format!(
        "{}:{}:{}:{}",
        setup.store,
        setup.pool,
        setup.drive,
        job.jobname()
    );

    let worker_type = job.jobtype().to_string();

    let datastore = DataStore::lookup_datastore(&setup.store, Some(Operation::Read))?;

    let (config, _digest) = pbs_config::media_pool::config()?;
    let pool_config: MediaPoolConfig = config.lookup("pool", &setup.pool)?;

    let (drive_config, _digest) = pbs_config::drive::config()?;

    // for scheduled jobs we acquire the lock later in the worker
    let drive_lock = if schedule.is_some() {
        None
    } else {
        Some(lock_tape_device(&drive_config, &setup.drive)?)
    };

    let notify_user = setup
        .notify_user
        .as_ref()
        .unwrap_or_else(|| Userid::root_userid());
    let email = lookup_user_email(notify_user);

    let upid_str = WorkerTask::new_thread(
        &worker_type,
        Some(job_id.clone()),
        auth_id.to_string(),
        to_stdout,
        move |worker| {
            job.start(&worker.upid().to_string())?;
            let mut drive_lock = drive_lock;

            let mut summary = Default::default();
            let job_result = try_block!({
                if schedule.is_some() {
                    // for scheduled tape backup jobs, we wait indefinitely for the lock
                    task_log!(worker, "waiting for drive lock...");
                    loop {
                        worker.check_abort()?;
                        match lock_tape_device(&drive_config, &setup.drive) {
                            Ok(lock) => {
                                drive_lock = Some(lock);
                                break;
                            }
                            Err(TapeLockError::TimeOut) => continue,
                            Err(TapeLockError::Other(err)) => return Err(err),
                        }
                    }
                }
                set_tape_device_state(&setup.drive, &worker.upid().to_string())?;

                task_log!(worker, "Starting tape backup job '{}'", job_id);
                if let Some(event_str) = schedule {
                    task_log!(worker, "task triggered by schedule '{}'", event_str);
                }

                backup_worker(
                    &worker,
                    datastore,
                    &pool_config,
                    &setup,
                    email.clone(),
                    &mut summary,
                    false,
                )
            });

            let status = worker.create_state(&job_result);

            if let Some(email) = email {
                if let Err(err) = crate::server::send_tape_backup_status(
                    &email,
                    Some(job.jobname()),
                    &setup,
                    &job_result,
                    summary,
                ) {
                    eprintln!("send tape backup notification failed: {}", err);
                }
            }

            if let Err(err) = job.finish(status) {
                eprintln!("could not finish job state for {}: {}", job.jobtype(), err);
            }

            if let Err(err) = set_tape_device_state(&setup.drive, "") {
                eprintln!("could not unset drive state for {}: {}", setup.drive, err);
            }

            job_result
        },
    )?;

    Ok(upid_str)
}

#[api(
    input: {
        properties: {
            id: {
                schema: JOB_ID_SCHEMA,
            },
        },
    },
    access: {
        // Note: parameters are from job config, so we need to test inside function body
        description: "The user needs Tape.Write privilege on /tape/pool/{pool} \
            and /tape/drive/{drive}, Datastore.Read privilege on /datastore/{store}.",
        permission: &Permission::Anybody,
    },
)]
/// Runs a tape backup job manually.
pub fn run_tape_backup_job(id: String, rpcenv: &mut dyn RpcEnvironment) -> Result<String, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let (config, _digest) = pbs_config::tape_job::config()?;
    let backup_job: TapeBackupJobConfig = config.lookup("backup", &id)?;

    check_backup_permission(
        &auth_id,
        &backup_job.setup.store,
        &backup_job.setup.pool,
        &backup_job.setup.drive,
    )?;

    let job = Job::new("tape-backup-job", &id)?;

    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

    let upid_str = do_tape_backup_job(job, backup_job.setup, &auth_id, None, to_stdout)?;

    Ok(upid_str)
}

#[api(
    input: {
        properties: {
            setup: {
                type: TapeBackupJobSetup,
                flatten: true,
            },
            "force-media-set": {
                description: "Ignore the allocation policy and start a new media-set.",
                optional: true,
                type: bool,
                default: false,
            },
        },
    },
    returns: {
        schema: UPID_SCHEMA,
    },
    access: {
        // Note: parameters are not URI parameters, so we need to test inside the function body
        description: "The user needs Tape.Write privilege on /tape/pool/{pool} \
            and /tape/drive/{drive}, Datastore.Read privilege on /datastore/{store}.",
        permission: &Permission::Anybody,
    },
)]
/// Backup datastore to tape media pool
pub fn backup(
    setup: TapeBackupJobSetup,
    force_media_set: bool,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    check_backup_permission(&auth_id, &setup.store, &setup.pool, &setup.drive)?;

    let datastore = DataStore::lookup_datastore(&setup.store, Some(Operation::Read))?;

    let (config, _digest) = pbs_config::media_pool::config()?;
    let pool_config: MediaPoolConfig = config.lookup("pool", &setup.pool)?;

    let (drive_config, _digest) = pbs_config::drive::config()?;

    // early check/lock before starting worker
    let drive_lock = lock_tape_device(&drive_config, &setup.drive)?;

    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

    let job_id = format!("{}:{}:{}", setup.store, setup.pool, setup.drive);

    let notify_user = setup
        .notify_user
        .as_ref()
        .unwrap_or_else(|| Userid::root_userid());
    let email = lookup_user_email(notify_user);

    let upid_str = WorkerTask::new_thread(
        "tape-backup",
        Some(job_id),
        auth_id.to_string(),
        to_stdout,
        move |worker| {
            let _drive_lock = drive_lock; // keep lock guard
            set_tape_device_state(&setup.drive, &worker.upid().to_string())?;

            let mut summary = Default::default();
            let job_result = backup_worker(
                &worker,
                datastore,
                &pool_config,
                &setup,
                email.clone(),
                &mut summary,
                force_media_set,
            );

            if let Some(email) = email {
                if let Err(err) = crate::server::send_tape_backup_status(
                    &email,
                    None,
                    &setup,
                    &job_result,
                    summary,
                ) {
                    eprintln!("send tape backup notification failed: {}", err);
                }
            }

            // ignore errors
            let _ = set_tape_device_state(&setup.drive, "");
            job_result
        },
    )?;

    Ok(upid_str.into())
}

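/// Outcome of backing up a single snapshot; `Ignored` means the snapshot
/// vanished before it could be read and is not counted as an error.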
enum SnapshotBackupResult {
    Success,
    Error,
    Ignored,
}

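// The actual backup loop: collect all backup groups below the configured
// namespace, apply the group filter, write the selected snapshots to the
// media pool and finally append the media catalog.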
fn backup_worker(
    worker: &WorkerTask,
    datastore: Arc<DataStore>,
    pool_config: &MediaPoolConfig,
    setup: &TapeBackupJobSetup,
    email: Option<String>,
    summary: &mut TapeBackupJobSummary,
    force_media_set: bool,
) -> Result<(), Error> {
    let start = std::time::Instant::now();

    task_log!(worker, "update media online status");
    let changer_name = update_media_online_status(&setup.drive)?;

    let root_namespace = setup.ns.clone().unwrap_or_default();
    let ns_magic = !root_namespace.is_root() || setup.max_depth != Some(0);

    let pool = MediaPool::with_config(TAPE_STATUS_DIR, pool_config, changer_name, false)?;

    let mut pool_writer =
        PoolWriter::new(pool, &setup.drive, worker, email, force_media_set, ns_magic)?;

    let mut group_list = Vec::new();
    let namespaces = datastore.recursive_iter_backup_ns_ok(root_namespace, setup.max_depth)?;
    for ns in namespaces {
        group_list.extend(datastore.list_backup_groups(ns)?);
    }

    group_list.sort_unstable_by(|a, b| a.group().cmp(b.group()));

    let group_count_full = group_list.len();

    let group_list = match &setup.group_filter {
        Some(f) => group_list
            .into_iter()
            .filter(|group| group.group().apply_filters(f))
            .collect(),
        None => group_list,
    };

    task_log!(
        worker,
        "found {} groups (out of {} total)",
        group_list.len(),
        group_count_full
    );

    let mut progress = StoreProgress::new(group_list.len() as u64);

    let latest_only = setup.latest_only.unwrap_or(false);

    if latest_only {
        task_log!(
            worker,
            "latest-only: true (only considering latest snapshots)"
        );
    }

    let datastore_name = datastore.name();

    let mut errors = false;

    let mut need_catalog = false; // avoid writing catalog for empty jobs

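    // Walk the groups: within each group, write either only the latest
    // finished snapshot (latest-only) or every finished snapshot that is not
    // already part of the current media set.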
    for (group_number, group) in group_list.into_iter().enumerate() {
        progress.done_groups = group_number as u64;
        progress.done_snapshots = 0;
        progress.group_snapshots = 0;

        let snapshot_list = group.list_backups()?;

        // filter out unfinished backups
        let mut snapshot_list: Vec<_> = snapshot_list
            .into_iter()
            .filter(|item| item.is_finished())
            .collect();

        if snapshot_list.is_empty() {
            task_log!(
                worker,
                "{}, group {} was empty",
                print_store_and_ns(datastore_name, group.backup_ns()),
                group.group()
            );
            continue;
        }

        BackupInfo::sort_list(&mut snapshot_list, true); // oldest first

        if latest_only {
            progress.group_snapshots = 1;
            if let Some(info) = snapshot_list.pop() {
                let rel_path =
                    print_ns_and_snapshot(info.backup_dir.backup_ns(), info.backup_dir.as_ref());
                if pool_writer.contains_snapshot(
                    datastore_name,
                    info.backup_dir.backup_ns(),
                    info.backup_dir.as_ref(),
                ) {
                    task_log!(worker, "skip snapshot {}", rel_path);
                    continue;
                }

                need_catalog = true;

                match backup_snapshot(worker, &mut pool_writer, datastore.clone(), info.backup_dir)?
                {
                    SnapshotBackupResult::Success => summary.snapshot_list.push(rel_path),
                    SnapshotBackupResult::Error => errors = true,
                    SnapshotBackupResult::Ignored => {}
                }
                progress.done_snapshots = 1;
                task_log!(worker, "percentage done: {}", progress);
            }
        } else {
            progress.group_snapshots = snapshot_list.len() as u64;
            for (snapshot_number, info) in snapshot_list.into_iter().enumerate() {
                let rel_path =
                    print_ns_and_snapshot(info.backup_dir.backup_ns(), info.backup_dir.as_ref());

                if pool_writer.contains_snapshot(
                    datastore_name,
                    info.backup_dir.backup_ns(),
                    info.backup_dir.as_ref(),
                ) {
                    task_log!(worker, "skip snapshot {}", rel_path);
                    continue;
                }

                need_catalog = true;

                match backup_snapshot(worker, &mut pool_writer, datastore.clone(), info.backup_dir)?
                {
                    SnapshotBackupResult::Success => summary.snapshot_list.push(rel_path),
                    SnapshotBackupResult::Error => errors = true,
                    SnapshotBackupResult::Ignored => {}
                }
                progress.done_snapshots = snapshot_number as u64 + 1;
                task_log!(worker, "percentage done: {}", progress);
            }
        }
    }

    pool_writer.commit()?;

    if need_catalog {
        task_log!(worker, "append media catalog");

        let uuid = pool_writer.load_writable_media(worker)?;
        let done = pool_writer.append_catalog_archive(worker)?;
        if !done {
            task_log!(
                worker,
                "catalog does not fit on tape, writing to next volume"
            );
            pool_writer.set_media_status_full(&uuid)?;
            pool_writer.load_writable_media(worker)?;
            let done = pool_writer.append_catalog_archive(worker)?;
            if !done {
                bail!("write_catalog_archive failed on second media");
            }
        }
    }

    if setup.export_media_set.unwrap_or(false) {
        pool_writer.export_media_set(worker)?;
    } else if setup.eject_media.unwrap_or(false) {
        pool_writer.eject_media(worker)?;
    }

    if errors {
        bail!("Tape backup finished with some errors. Please check the task log.");
    }

    summary.used_tapes = match pool_writer.get_used_media_labels() {
        Ok(tapes) => Some(tapes),
        Err(err) => {
            task_warn!(worker, "could not collect list of used tapes: {err}");
            None
        }
    };

    summary.duration = start.elapsed();

    Ok(())
}

// Try to update the media online status
fn update_media_online_status(drive: &str) -> Result<Option<String>, Error> {
    let (config, _digest) = pbs_config::drive::config()?;

    if let Ok(Some((mut changer, changer_name))) = media_changer(&config, drive) {
        let label_text_list = changer.online_media_label_texts()?;

        let mut inventory = Inventory::load(TAPE_STATUS_DIR)?;

        update_changer_online_status(&config, &mut inventory, &changer_name, &label_text_list)?;

        Ok(Some(changer_name))
    } else {
        Ok(None)
    }
}

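// Write a single snapshot to tape: stream its chunks through a reader thread
// into chunk archives, then append the snapshot archive itself, moving on to
// the next volume whenever the current medium reports end-of-media.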
4a2bb3e6 588fn backup_snapshot(
88356646
DM
589 worker: &WorkerTask,
590 pool_writer: &mut PoolWriter,
591 datastore: Arc<DataStore>,
592 snapshot: BackupDir,
4a2bb3e6 593) -> Result<SnapshotBackupResult, Error> {
5ae393af
FG
594 let snapshot_path = snapshot.relative_path();
595 task_log!(worker, "backup snapshot {:?}", snapshot_path);
88356646 596
133d718f 597 let snapshot_reader = match snapshot.locked_reader() {
2a06e086
DC
598 Ok(reader) => reader,
599 Err(err) => {
4a2bb3e6
DC
600 if !snapshot.full_path().exists() {
601 // we got an error and the dir does not exist,
602 // it probably just vanished, so continue
603 task_log!(worker, "snapshot {:?} vanished, skipping", snapshot_path);
604 return Ok(SnapshotBackupResult::Ignored);
605 }
5ae393af
FG
606 task_warn!(
607 worker,
608 "failed opening snapshot {:?}: {}",
609 snapshot_path,
610 err
611 );
4a2bb3e6 612 return Ok(SnapshotBackupResult::Error);
2a06e086
DC
613 }
614 };
88356646 615
5c4755ad
DM
616 let snapshot_reader = Arc::new(Mutex::new(snapshot_reader));
617
085ae873
TL
618 let (reader_thread, chunk_iter) =
619 pool_writer.spawn_chunk_reader_thread(datastore.clone(), snapshot_reader.clone())?;
5c4755ad
DM
620
621 let mut chunk_iter = chunk_iter.peekable();
88356646
DM
622
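    // append chunk archives until the reader thread runs out of chunks; on
    // logical end-of-media (leom), mark the medium full so the next iteration
    // loads a fresh writable medium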
    loop {
        worker.check_abort()?;

        // test if we have remaining chunks
        match chunk_iter.peek() {
            None => break,
            Some(Ok(_)) => { /* Ok */ }
            Some(Err(err)) => bail!("{}", err),
        }

        let uuid = pool_writer.load_writable_media(worker)?;

        worker.check_abort()?;

        let (leom, _bytes) =
            pool_writer.append_chunk_archive(worker, &mut chunk_iter, datastore.name())?;

        if leom {
            pool_writer.set_media_status_full(&uuid)?;
        }
    }

    if reader_thread.join().is_err() {
        bail!("chunk reader thread failed");
    }

    worker.check_abort()?;

    let uuid = pool_writer.load_writable_media(worker)?;

    worker.check_abort()?;

    let snapshot_reader = snapshot_reader.lock().unwrap();

    let (done, _bytes) = pool_writer.append_snapshot_archive(worker, &snapshot_reader)?;

    if !done {
        // does not fit on tape, so we try on next volume
        pool_writer.set_media_status_full(&uuid)?;

        worker.check_abort()?;

        pool_writer.load_writable_media(worker)?;
        let (done, _bytes) = pool_writer.append_snapshot_archive(worker, &snapshot_reader)?;

        if !done {
            bail!("write_snapshot_archive failed on second media");
        }
    }

    task_log!(
        worker,
        "end backup {}:{:?}",
        datastore.name(),
        snapshot_path
    );

    Ok(SnapshotBackupResult::Success)
}