2 use std
::sync
::{Mutex, Arc}
;
4 use anyhow
::{bail, format_err, Error}
;
23 cached_user_info
::CachedUserInfo
,
41 compute_schedule_status
,
68 set_tape_device_state
,
70 changer
::update_changer_online_status
,
// Router for one configured tape backup job (selected via "id" below):
// POST runs that job manually (API_METHOD_RUN_TAPE_BACKUP_JOB).
74 const TAPE_BACKUP_JOB_ROUTER
: Router
= Router
::new()
75 .post(&API_METHOD_RUN_TAPE_BACKUP_JOB
);
// Module entry router:
// GET lists all configured tape backup jobs, POST starts an ad-hoc backup,
// and "{id}" sub-paths are delegated to TAPE_BACKUP_JOB_ROUTER above.
77 pub const ROUTER
: Router
= Router
::new()
78 .get(&API_METHOD_LIST_TAPE_BACKUP_JOBS
)
79 .post(&API_METHOD_BACKUP
)
80 .match_all("id", &TAPE_BACKUP_JOB_ROUTER
);
// Check that the calling user may run a tape backup with the given
// datastore, drive and media pool; bails with a permission error otherwise.
// NOTE(review): the parameter list (orig. lines 83-86) is not visible in
// this chunk; the body reads `auth_id`, `store`, `drive` and `pool`.
82 fn check_backup_permission(
87 ) -> Result
<(), Error
> {
89 let user_info
= CachedUserInfo
::new()?
;
// Datastore.Read is required on the source datastore.
91 let privs
= user_info
.lookup_privs(auth_id
, &["datastore", store
]);
92 if (privs
& PRIV_DATASTORE_READ
) == 0 {
93 bail
!("no permissions on /datastore/{}", store
);
// Tape.Write is required on the target drive ...
96 let privs
= user_info
.lookup_privs(auth_id
, &["tape", "drive", drive
]);
97 if (privs
& PRIV_TAPE_WRITE
) == 0 {
98 bail
!("no permissions on /tape/drive/{}", drive
);
// ... and on the target media pool.
101 let privs
= user_info
.lookup_privs(auth_id
, &["tape", "pool", pool
]);
102 if (privs
& PRIV_TAPE_WRITE
) == 0 {
103 bail
!("no permissions on /tape/pool/{}", pool
);
// #[api] metadata fragment for list_tape_backup_jobs.
// FIXME: typo in the description string literal: "thape" should be "tape"
// (string left untouched here — it is runtime/API-visible text).
111 description
: "List configured thape backup jobs and their status",
113 items
: { type: TapeBackupJobStatus }
,
116 description
: "List configured tape jobs filtered by Tape.Audit privileges",
// Endpoint is callable by anybody; per-job visibility is enforced below
// via a Tape.Audit privilege check on each job id.
117 permission
: &Permission
::Anybody
,
120 /// List all tape backup jobs
121 pub fn list_tape_backup_jobs(
123 mut rpcenv
: &mut dyn RpcEnvironment
,
124 ) -> Result
<Vec
<TapeBackupJobStatus
>, Error
> {
// Resolve the calling user and its cached privilege information.
125 let auth_id
: Authid
= rpcenv
.get_auth_id().unwrap().parse()?
;
126 let user_info
= CachedUserInfo
::new()?
;
// Load the tape job configuration; the digest is reported to the caller.
128 let (config
, digest
) = config
::tape_job
::config()?
;
130 let job_list_iter
= config
131 .convert_to_typed_array("backup")?
133 .filter(|_job
: &TapeBackupJobConfig
| {
134 // fixme: check access permission
138 let mut list
= Vec
::new();
140 for job
in job_list_iter
{
// Skip jobs the caller may not audit (/tape/job/{id}).
141 let privs
= user_info
.lookup_privs(&auth_id
, &["tape", "job", &job
.id
]);
142 if (privs
& PRIV_TAPE_AUDIT
) == 0 {
// Load the persisted job state to compute the schedule status.
146 let last_state
= JobState
::load("tape-backup-job", &job
.id
)
147 .map_err(|err
| format_err
!("could not open statefile for {}: {}", &job
.id
, err
))?
;
149 let status
= compute_schedule_status(&last_state
, job
.schedule
.as_deref())?
;
151 list
.push(TapeBackupJobStatus { config: job, status }
);
// Report the config digest so clients can detect concurrent changes.
154 rpcenv
["digest"] = proxmox
::tools
::digest_to_hex(&digest
).into();
// Run a tape backup job inside a newly spawned worker task and return the
// worker's UPID string. For scheduled runs the drive lock is acquired
// inside the worker (waiting indefinitely); otherwise it is taken up-front.
159 pub fn do_tape_backup_job(
161 setup
: TapeBackupJobSetup
,
163 schedule
: Option
<String
>,
164 ) -> Result
<String
, Error
> {
166 let job_id
= format
!("{}:{}:{}:{}",
172 let worker_type
= job
.jobtype().to_string();
// Resolve datastore, media pool and drive configuration up-front so that
// configuration errors fail before the worker is spawned.
174 let datastore
= DataStore
::lookup_datastore(&setup
.store
)?
;
176 let (config
, _digest
) = config
::media_pool
::config()?
;
177 let pool_config
: MediaPoolConfig
= config
.lookup("pool", &setup
.pool
)?
;
179 let (drive_config
, _digest
) = config
::drive
::config()?
;
181 // for scheduled jobs we acquire the lock later in the worker
// NOTE(review): the scheduled-case branch of this `if` (orig. lines
// 183-184) is not visible in this chunk.
182 let drive_lock
= if schedule
.is_some() {
185 Some(lock_tape_device(&drive_config
, &setup
.drive
)?
)
// Notification target: the configured user, or root by default.
188 let notify_user
= setup
.notify_user
.as_ref().unwrap_or_else(|| &Userid
::root_userid());
189 let email
= lookup_user_email(notify_user
);
191 let upid_str
= WorkerTask
::new_thread(
193 Some(job_id
.clone()),
197 job
.start(&worker
.upid().to_string())?
;
// Move the (possibly absent) lock guard into the worker scope.
198 let mut drive_lock
= drive_lock
;
200 let (job_result
, summary
) = match try_block
!({
201 if schedule
.is_some() {
202 // for scheduled tape backup jobs, we wait indefinitely for the lock
203 task_log
!(worker
, "waiting for drive lock...");
205 if let Ok(lock
) = lock_tape_device(&drive_config
, &setup
.drive
) {
206 drive_lock
= Some(lock
);
210 worker
.check_abort()?
;
// Publish the worker UPID as the drive's current state.
213 set_tape_device_state(&setup
.drive
, &worker
.upid().to_string())?
;
215 task_log
!(worker
,"Starting tape backup job '{}'", job_id
);
216 if let Some(event_str
) = schedule
{
217 task_log
!(worker
,"task triggered by schedule '{}'", event_str
);
228 Ok(summary
) => (Ok(()), summary
),
229 Err(err
) => (Err(err
), Default
::default()),
// Persist the final job state and (best effort) notify via email.
232 let status
= worker
.create_state(&job_result
);
234 if let Some(email
) = email
{
235 if let Err(err
) = crate::server
::send_tape_backup_status(
242 eprintln
!("send tape backup notification failed: {}", err
);
// Best-effort cleanup: finish the job state file ...
246 if let Err(err
) = job
.finish(status
) {
248 "could not finish job state for {}: {}",
249 job
.jobtype().to_string(),
// ... and clear the drive state again.
254 if let Err(err
) = set_tape_device_state(&setup
.drive
, "") {
256 "could not unset drive state for {}: {}",
// #[api] metadata fragment for run_tape_backup_job.
273 schema
: JOB_ID_SCHEMA
,
278 // Note: parameters are from job config, so we need to test inside function body
279 description
: "The user needs Tape.Write privilege on /tape/pool/{pool} \
280 and /tape/drive/{drive}, Datastore.Read privilege on /datastore/{store}.",
281 permission
: &Permission
::Anybody
,
284 /// Runs a tape backup job manually.
285 pub fn run_tape_backup_job(
287 rpcenv
: &mut dyn RpcEnvironment
,
288 ) -> Result
<String
, Error
> {
289 let auth_id
: Authid
= rpcenv
.get_auth_id().unwrap().parse()?
;
// Look up the referenced job configuration by id.
291 let (config
, _digest
) = config
::tape_job
::config()?
;
292 let backup_job
: TapeBackupJobConfig
= config
.lookup("backup", &id
)?
;
// Enforce the privileges documented above before doing anything.
294 check_backup_permission(
296 &backup_job
.setup
.store
,
297 &backup_job
.setup
.pool
,
298 &backup_job
.setup
.drive
,
// Acquire the per-job lock, then hand off to the shared job runner
// (schedule = None marks this as a manual run).
301 let job
= Job
::new("tape-backup-job", &id
)?
;
303 let upid_str
= do_tape_backup_job(job
, backup_job
.setup
, &auth_id
, None
)?
;
// #[api] metadata fragment for the ad-hoc backup endpoint.
312 type: TapeBackupJobSetup
,
321 // Note: parameters are no uri parameter, so we need to test inside function body
322 description
: "The user needs Tape.Write privilege on /tape/pool/{pool} \
323 and /tape/drive/{drive}, Datastore.Read privilege on /datastore/{store}.",
324 permission
: &Permission
::Anybody
,
327 /// Backup datastore to tape media pool
329 setup
: TapeBackupJobSetup
,
330 rpcenv
: &mut dyn RpcEnvironment
,
331 ) -> Result
<Value
, Error
> {
333 let auth_id
: Authid
= rpcenv
.get_auth_id().unwrap().parse()?
;
// Enforce Datastore.Read / Tape.Write privileges (see description above).
335 check_backup_permission(
// Resolve datastore, pool and drive configuration before spawning work.
342 let datastore
= DataStore
::lookup_datastore(&setup
.store
)?
;
344 let (config
, _digest
) = config
::media_pool
::config()?
;
345 let pool_config
: MediaPoolConfig
= config
.lookup("pool", &setup
.pool
)?
;
347 let (drive_config
, _digest
) = config
::drive
::config()?
;
349 // early check/lock before starting worker
350 let drive_lock
= lock_tape_device(&drive_config
, &setup
.drive
)?
;
352 let to_stdout
= rpcenv
.env_type() == RpcEnvironmentType
::CLI
;
// Worker task id: "<store>:<pool>:<drive>".
354 let job_id
= format
!("{}:{}:{}", setup
.store
, setup
.pool
, setup
.drive
);
// Notification target: the configured user, or root by default.
356 let notify_user
= setup
.notify_user
.as_ref().unwrap_or_else(|| &Userid
::root_userid());
357 let email
= lookup_user_email(notify_user
);
359 let upid_str
= WorkerTask
::new_thread(
365 let _drive_lock
= drive_lock
; // keep lock guard
// Publish the worker UPID as the drive's current state.
366 set_tape_device_state(&setup
.drive
, &worker
.upid().to_string())?
;
368 let (job_result
, summary
) = match backup_worker(
375 Ok(summary
) => (Ok(()), summary
),
376 Err(err
) => (Err(err
), Default
::default()),
// Best-effort email notification; failures only go to stderr.
379 if let Some(email
) = email
{
380 if let Err(err
) = crate::server
::send_tape_backup_status(
387 eprintln
!("send tape backup notification failed: {}", err
);
// Always try to clear the drive state again; errors are ignored here.
392 let _
= set_tape_device_state(&setup
.drive
, "");
// Worker body of a tape backup: writes snapshots of `datastore` to tape
// media of the pool described by `pool_config`, then appends the media
// catalog, and returns a summary of what was written.
// NOTE(review): the `fn backup_worker(` line itself (orig. ~line 400/401)
// is not visible in this chunk; these are its parameters.
402 datastore
: Arc
<DataStore
>,
403 pool_config
: &MediaPoolConfig
,
404 setup
: &TapeBackupJobSetup
,
405 email
: Option
<String
>,
406 ) -> Result
<TapeBackupJobSummary
, Error
> {
408 let status_path
= Path
::new(TAPE_STATUS_DIR
);
// Wall-clock start for the duration reported in the summary.
409 let start
= std
::time
::Instant
::now();
410 let mut summary
: TapeBackupJobSummary
= Default
::default();
412 task_log
!(worker
, "update media online status");
413 let changer_name
= update_media_online_status(&setup
.drive
)?
;
415 let pool
= MediaPool
::with_config(status_path
, &pool_config
, changer_name
, false)?
;
417 let mut pool_writer
= PoolWriter
::new(pool
, &setup
.drive
, worker
, email
)?
;
// Enumerate and sort all backup groups of the datastore.
419 let mut group_list
= BackupInfo
::list_backup_groups(&datastore
.base_path())?
;
421 group_list
.sort_unstable();
423 let group_count
= group_list
.len();
424 task_log
!(worker
, "found {} groups", group_count
);
426 let mut progress
= StoreProgress
::new(group_count
as u64);
// latest-only mode: back up just the newest snapshot of each group.
428 let latest_only
= setup
.latest_only
.unwrap_or(false);
431 task_log
!(worker
, "latest-only: true (only considering latest snapshots)");
434 let datastore_name
= datastore
.name();
436 let mut errors
= false;
438 for (group_number
, group
) in group_list
.into_iter().enumerate() {
439 progress
.done_groups
= group_number
as u64;
440 progress
.done_snapshots
= 0;
441 progress
.group_snapshots
= 0;
// Snapshots of this group, sorted oldest first.
443 let mut snapshot_list
= group
.list_backups(&datastore
.base_path())?
;
445 BackupInfo
::sort_list(&mut snapshot_list
, true); // oldest first
// latest-only branch: pop() takes the newest (last) snapshot.
448 progress
.group_snapshots
= 1;
449 if let Some(info
) = snapshot_list
.pop() {
// Skip snapshots already contained in the current media set.
450 if pool_writer
.contains_snapshot(datastore_name
, &info
.backup_dir
.to_string()) {
451 task_log
!(worker
, "skip snapshot {}", info
.backup_dir
);
454 let snapshot_name
= info
.backup_dir
.to_string();
455 if !backup_snapshot(worker
, &mut pool_writer
, datastore
.clone(), info
.backup_dir
)?
{
458 summary
.snapshot_list
.push(snapshot_name
);
460 progress
.done_snapshots
= 1;
463 "percentage done: {}",
// Full branch: back up every snapshot of the group, oldest first.
468 progress
.group_snapshots
= snapshot_list
.len() as u64;
469 for (snapshot_number
, info
) in snapshot_list
.into_iter().enumerate() {
470 if pool_writer
.contains_snapshot(datastore_name
, &info
.backup_dir
.to_string()) {
471 task_log
!(worker
, "skip snapshot {}", info
.backup_dir
);
474 let snapshot_name
= info
.backup_dir
.to_string();
475 if !backup_snapshot(worker
, &mut pool_writer
, datastore
.clone(), info
.backup_dir
)?
{
478 summary
.snapshot_list
.push(snapshot_name
);
480 progress
.done_snapshots
= snapshot_number
as u64 + 1;
483 "percentage done: {}",
// Flush any pending chunk data to tape before writing the catalog.
490 pool_writer
.commit()?
;
492 task_log
!(worker
, "append media catalog");
// Write the media catalog; if it does not fit, mark the medium full and
// retry exactly once on the next volume.
494 let uuid
= pool_writer
.load_writable_media(worker
)?
;
495 let done
= pool_writer
.append_catalog_archive(worker
)?
;
497 task_log
!(worker
, "catalog does not fit on tape, writing to next volume");
498 pool_writer
.set_media_status_full(&uuid
)?
;
499 pool_writer
.load_writable_media(worker
)?
;
500 let done
= pool_writer
.append_catalog_archive(worker
)?
;
502 bail
!("write_catalog_archive failed on second media");
// Post-processing: export or eject media if requested in the setup.
506 if setup
.export_media_set
.unwrap_or(false) {
507 pool_writer
.export_media_set(worker
)?
;
508 } else if setup
.eject_media
.unwrap_or(false) {
509 pool_writer
.eject_media(worker
)?
;
513 bail
!("Tape backup finished with some errors. Please check the task log.");
516 summary
.duration
= start
.elapsed();
// Returns the changer name when the drive is changer-backed; the
// non-changer fallthrough (orig. lines past 540) is not visible in this
// chunk.
521 // Try to update the media online status
522 fn update_media_online_status(drive
: &str) -> Result
<Option
<String
>, Error
> {
524 let (config
, _digest
) = config
::drive
::config()?
;
// Only drives attached to a media changer have an online status to update.
526 if let Ok(Some((mut changer
, changer_name
))) = media_changer(&config
, drive
) {
// Labels of media currently present (online) in the changer.
528 let label_text_list
= changer
.online_media_label_texts()?
;
530 let status_path
= Path
::new(TAPE_STATUS_DIR
);
531 let mut inventory
= Inventory
::load(status_path
)?
;
533 update_changer_online_status(
540 Ok(Some(changer_name
))
// Write one snapshot to tape: stream its chunks via a spawned reader
// thread, then append the snapshot archive itself. Per the visible early
// return path, a snapshot that cannot be opened is skipped (not an error).
546 pub fn backup_snapshot(
548 pool_writer
: &mut PoolWriter
,
549 datastore
: Arc
<DataStore
>,
551 ) -> Result
<bool
, Error
> {
553 task_log
!(worker
, "backup snapshot {}", snapshot
);
555 let snapshot_reader
= match SnapshotReader
::new(datastore
.clone(), snapshot
.clone()) {
556 Ok(reader
) => reader
,
558 // ignore missing snapshots and continue
559 task_warn
!(worker
, "failed opening snapshot '{}': {}", snapshot
, err
);
// Share the reader with the chunk reader thread.
564 let snapshot_reader
= Arc
::new(Mutex
::new(snapshot_reader
));
566 let (reader_thread
, chunk_iter
) = pool_writer
.spawn_chunk_reader_thread(
568 snapshot_reader
.clone(),
571 let mut chunk_iter
= chunk_iter
.peekable();
// Chunk loop: append chunk archives until the iterator is exhausted,
// switching to a new volume whenever the medium reports it is full.
574 worker
.check_abort()?
;
576 // test if we have remaining chunks
577 match chunk_iter
.peek() {
579 Some(Ok(_
)) => { /* Ok */ }
,
580 Some(Err(err
)) => bail
!("{}", err
),
583 let uuid
= pool_writer
.load_writable_media(worker
)?
;
585 worker
.check_abort()?
;
587 let (leom
, _bytes
) = pool_writer
.append_chunk_archive(worker
, &mut chunk_iter
, datastore
.name())?
;
// leom: logical end-of-media was reached -> mark this medium full.
590 pool_writer
.set_media_status_full(&uuid
)?
;
// Join the reader thread; a failure there aborts the snapshot backup.
594 if let Err(_
) = reader_thread
.join() {
595 bail
!("chunk reader thread failed");
598 worker
.check_abort()?
;
600 let uuid
= pool_writer
.load_writable_media(worker
)?
;
602 worker
.check_abort()?
;
604 let snapshot_reader
= snapshot_reader
.lock().unwrap();
// Append the snapshot archive; retry once on the next volume if it does
// not fit on the current one.
606 let (done
, _bytes
) = pool_writer
.append_snapshot_archive(worker
, &snapshot_reader
)?
;
609 // does not fit on tape, so we try on next volume
610 pool_writer
.set_media_status_full(&uuid
)?
;
612 worker
.check_abort()?
;
614 pool_writer
.load_writable_media(worker
)?
;
615 let (done
, _bytes
) = pool_writer
.append_snapshot_archive(worker
, &snapshot_reader
)?
;
618 bail
!("write_snapshot_archive failed on second media");
622 task_log
!(worker
, "end backup {}:{}", datastore
.name(), snapshot
);