4 use anyhow
::{bail, format_err, Error}
;
29 compute_schedule_status
,
40 MEDIA_POOL_NAME_SCHEMA
,
58 changer
::update_changer_online_status
,
62 const TAPE_BACKUP_JOB_ROUTER
: Router
= Router
::new()
63 .post(&API_METHOD_RUN_TAPE_BACKUP_JOB
);
65 pub const ROUTER
: Router
= Router
::new()
66 .get(&API_METHOD_LIST_TAPE_BACKUP_JOBS
)
67 .post(&API_METHOD_BACKUP
)
68 .match_all("id", &TAPE_BACKUP_JOB_ROUTER
);
// Fix: typo in the user-facing API description ("thape" -> "tape").
// NOTE(review): this chunk is fragmented with missing lines; code tokens
// are otherwise preserved as-is.
72 description
: "List configured tape backup jobs and their status",
74 items
: { type: TapeBackupJobStatus }
,
77 /// List all tape backup jobs
78 pub fn list_tape_backup_jobs(
80 mut rpcenv
: &mut dyn RpcEnvironment
,
81 ) -> Result
<Vec
<TapeBackupJobStatus
>, Error
> {
// Load the tape-job section config together with its digest; the digest
// is exposed to the caller below (presumably for optimistic locking on
// later updates — confirm against the update handler).
83 let (config
, digest
) = config
::tape_job
::config()?
;
// Typed iterator over all "backup" section entries.
85 let job_list_iter
= config
86 .convert_to_typed_array("backup")?
88 .filter(|_job
: &TapeBackupJobConfig
| {
89 // fixme: check access permission
93 let mut list
= Vec
::new();
// For each configured job, load its persisted state file and compute
// the schedule status from it.
95 for job
in job_list_iter
{
96 let last_state
= JobState
::load("tape-backup-job", &job
.id
)
97 .map_err(|err
| format_err
!("could not open statefile for {}: {}", &job
.id
, err
))?
;
99 let status
= compute_schedule_status(&last_state
, job
.schedule
.as_deref())?
;
101 list
.push(TapeBackupJobStatus { config: job, status }
);
// Report the config digest so clients can detect concurrent changes.
104 rpcenv
["digest"] = proxmox
::tools
::digest_to_hex(&digest
).into();
/// Run a configured tape backup job inside a newly spawned worker task.
///
/// NOTE(review): this chunk is fragmented (lines missing); the full
/// parameter list (a `job` handle is used below but not visible) and the
/// final return are not visible. The `Result<String, Error>` presumably
/// carries the worker UPID string — confirm in the full source.
109 pub fn do_tape_backup_job(
111 tape_job
: TapeBackupJobConfig
,
113 schedule
: Option
<String
>,
114 ) -> Result
<String
, Error
> {
116 let job_id
= format
!("{}:{}:{}:{}",
122 let worker_type
= job
.jobtype().to_string();
// Resolve the target datastore plus the media-pool and drive configs
// referenced by the job before starting any work.
124 let datastore
= DataStore
::lookup_datastore(&tape_job
.store
)?
;
126 let (config
, _digest
) = config
::media_pool
::config()?
;
127 let pool_config
: MediaPoolConfig
= config
.lookup("pool", &tape_job
.pool
)?
;
129 let (drive_config
, _digest
) = config
::drive
::config()?
;
131 // early check/lock before starting worker
132 let drive_lock
= lock_tape_device(&drive_config
, &tape_job
.drive
)?
;
134 let upid_str
= WorkerTask
::new_thread(
136 Some(job_id
.clone()),
// The drive lock is moved into the worker closure so the device stays
// locked for the whole lifetime of the task.
140 let _drive_lock
= drive_lock
; // keep lock guard
142 job
.start(&worker
.upid().to_string())?
;
// This code path never ejects or exports media automatically.
144 let eject_media
= false;
145 let export_media_set
= false;
147 task_log
!(worker
,"Starting tape backup job '{}'", job_id
);
// `schedule` is only used for logging which event triggered the run.
148 if let Some(event_str
) = schedule
{
149 task_log
!(worker
,"task triggered by schedule '{}'", event_str
);
152 let job_result
= backup_worker(
// Persist the job result so schedule-status computation sees the
// last run's outcome.
161 let status
= worker
.create_state(&job_result
);
163 if let Err(err
) = job
.finish(status
) {
165 "could not finish job state for {}: {}",
166 job
.jobtype().to_string(),
182 schema
: JOB_ID_SCHEMA
,
187 /// Runs a tape backup job manually.
188 pub fn run_tape_backup_job(
190 rpcenv
: &mut dyn RpcEnvironment
,
191 ) -> Result
<String
, Error
> {
// NOTE(review): `.unwrap()` assumes an authenticated request always
// carries an auth id — confirm this invariant holds for this endpoint.
192 let auth_id
: Authid
= rpcenv
.get_auth_id().unwrap().parse()?
;
// Look up the job's config entry by id.
194 let (config
, _digest
) = config
::tape_job
::config()?
;
195 let backup_job
: TapeBackupJobConfig
= config
.lookup("backup", &id
)?
;
// `Job::new` presumably sets up (and guards) the per-job state — confirm.
197 let job
= Job
::new("tape-backup-job", &id
)?
;
// Manual run: no triggering schedule event, hence `None`.
199 let upid_str
= do_tape_backup_job(job
, backup_job
, &auth_id
, None
)?
;
208 schema
: DATASTORE_SCHEMA
,
211 schema
: MEDIA_POOL_NAME_SCHEMA
,
214 schema
: DRIVE_NAME_SCHEMA
,
217 description
: "Eject media upon job completion.",
221 "export-media-set": {
222 description
: "Export media set upon job completion.",
232 /// Backup datastore to tape media pool
237 eject_media
: Option
<bool
>,
238 export_media_set
: Option
<bool
>,
239 rpcenv
: &mut dyn RpcEnvironment
,
240 ) -> Result
<Value
, Error
> {
// NOTE(review): `.unwrap()` assumes the request always carries an auth
// id — confirm for this endpoint.
242 let auth_id
: Authid
= rpcenv
.get_auth_id().unwrap().parse()?
;
// Resolve datastore, media-pool config and drive config up front.
244 let datastore
= DataStore
::lookup_datastore(&store
)?
;
246 let (config
, _digest
) = config
::media_pool
::config()?
;
247 let pool_config
: MediaPoolConfig
= config
.lookup("pool", &pool
)?
;
249 let (drive_config
, _digest
) = config
::drive
::config()?
;
251 // early check/lock before starting worker
252 let drive_lock
= lock_tape_device(&drive_config
, &drive
)?
;
// CLI invocations log to stdout instead of only the task log.
254 let to_stdout
= rpcenv
.env_type() == RpcEnvironmentType
::CLI
;
// Both flags default to false when omitted by the caller.
256 let eject_media
= eject_media
.unwrap_or(false);
257 let export_media_set
= export_media_set
.unwrap_or(false);
259 let job_id
= format
!("{}:{}:{}", store
, pool
, drive
);
261 let upid_str
= WorkerTask
::new_thread(
// Keep the drive locked for the whole lifetime of the worker task.
267 let _drive_lock
= drive_lock
; // keep lock guard
268 backup_worker(&worker
, datastore
, &drive
, &pool_config
, eject_media
, export_media_set
)?
;
// NOTE(review): the `backup_worker` signature header is missing from this
// fragmented chunk; visible parameters are the datastore, pool config and
// the eject/export flags. Writes all not-yet-archived snapshots of the
// datastore to the media pool via a `PoolWriter`.
278 datastore
: Arc
<DataStore
>,
280 pool_config
: &MediaPoolConfig
,
282 export_media_set
: bool
,
283 ) -> Result
<(), Error
> {
285 let status_path
= Path
::new(TAPE_STATUS_DIR
);
// Serialize access to this pool's on-disk state.
287 let _lock
= MediaPool
::lock(status_path
, &pool_config
.name
)?
;
289 task_log
!(worker
, "update media online status");
290 let changer_name
= update_media_online_status(drive
)?
;
292 let pool
= MediaPool
::with_config(status_path
, &pool_config
, changer_name
)?
;
294 let mut pool_writer
= PoolWriter
::new(pool
, drive
)?
;
// Enumerate all backup groups; within each group, snapshots are written
// oldest first.
296 let mut group_list
= BackupInfo
::list_backup_groups(&datastore
.base_path())?
;
298 group_list
.sort_unstable();
300 for group
in group_list
{
301 let mut snapshot_list
= group
.list_backups(&datastore
.base_path())?
;
302 BackupInfo
::sort_list(&mut snapshot_list
, true); // oldest first
304 for info
in snapshot_list
{
// Snapshot already on tape for this pool? (branch body not visible in
// this chunk — presumably skipped)
305 if pool_writer
.contains_snapshot(&info
.backup_dir
.to_string()) {
308 task_log
!(worker
, "backup snapshot {}", info
.backup_dir
);
309 backup_snapshot(worker
, &mut pool_writer
, datastore
.clone(), info
.backup_dir
)?
;
// Flush outstanding data to tape before the optional export/eject.
313 pool_writer
.commit()?
;
// Export takes precedence over eject when both flags are set.
315 if export_media_set
{
316 pool_writer
.export_media_set(worker
)?
;
317 } else if eject_media
{
318 pool_writer
.eject_media(worker
)?
;
324 // Try to update the media online status
// Returns the changer name when the drive is attached to a media
// changer, `None`-path otherwise (the non-changer branch is not visible
// in this fragmented chunk).
325 fn update_media_online_status(drive
: &str) -> Result
<Option
<String
>, Error
> {
327 let (config
, _digest
) = config
::drive
::config()?
;
// Errors from `media_changer` are deliberately swallowed here (best
// effort): only an Ok(Some(..)) result triggers the status update.
329 if let Ok(Some((mut changer
, changer_name
))) = media_changer(&config
, drive
) {
// Ask the changer which media labels are currently online.
331 let label_text_list
= changer
.online_media_label_texts()?
;
333 let status_path
= Path
::new(TAPE_STATUS_DIR
);
334 let mut inventory
= Inventory
::load(status_path
)?
;
336 update_changer_online_status(
343 Ok(Some(changer_name
))
/// Write a single snapshot (its chunk archives followed by the snapshot
/// archive) to tape via the pool writer.
///
/// NOTE(review): chunk is fragmented; the loop/branch structure around
/// the chunk-archive writes is only partially visible here.
349 pub fn backup_snapshot(
351 pool_writer
: &mut PoolWriter
,
352 datastore
: Arc
<DataStore
>,
354 ) -> Result
<(), Error
> {
356 task_log
!(worker
, "start backup {}:{}", datastore
.name(), snapshot
);
358 let snapshot_reader
= SnapshotReader
::new(datastore
.clone(), snapshot
.clone())?
;
// Peekable so we can detect "no chunks left" without consuming one.
360 let mut chunk_iter
= snapshot_reader
.chunk_iterator()?
.peekable();
// Abort checks are sprinkled before each potentially long operation so
// task cancellation is honored promptly.
363 worker
.check_abort()?
;
365 // test if we have remaining chunks
366 if chunk_iter
.peek().is_none() {
370 let uuid
= pool_writer
.load_writable_media(worker
)?
;
372 worker
.check_abort()?
;
374 let (leom
, _bytes
) = pool_writer
.append_chunk_archive(worker
, &datastore
, &mut chunk_iter
)?
;
// Presumably guarded by `leom` (logical end-of-media) — condition line
// not visible. Mark the medium full so the next load picks a new one.
377 pool_writer
.set_media_status_full(&uuid
)?
;
381 worker
.check_abort()?
;
383 let uuid
= pool_writer
.load_writable_media(worker
)?
;
385 worker
.check_abort()?
;
387 let (done
, _bytes
) = pool_writer
.append_snapshot_archive(worker
, &snapshot_reader
)?
;
390 // does not fit on tape, so we try on next volume
391 pool_writer
.set_media_status_full(&uuid
)?
;
393 worker
.check_abort()?
;
// Retry the snapshot archive once on a fresh medium.
395 pool_writer
.load_writable_media(worker
)?
;
396 let (done
, _bytes
) = pool_writer
.append_snapshot_archive(worker
, &snapshot_reader
)?
;
// Second attempt on a fresh medium also failed — give up.
399 bail
!("write_snapshot_archive failed on second media");
403 task_log
!(worker
, "end backup {}:{}", datastore
.name(), snapshot
);