4 use anyhow
::{bail, Error}
;
19 drive
::check_drive_exists
,
29 MEDIA_POOL_NAME_SCHEMA
,
41 update_changer_online_status
,
49 schema
: DATASTORE_SCHEMA
,
52 schema
: MEDIA_POOL_NAME_SCHEMA
,
55 description
: "Eject media upon job completion.",
60 description
: "Export media set upon job completion.",
70 /// Backup datastore to tape media pool
// NOTE(review): mangled extraction — original line numbers are embedded in the
// text, and the handler's opening signature with the `store` and `pool`
// parameters used below (orig. lines 71-73) is missing from this view.
74 eject_media
: Option
<bool
>,
75 export_media_set
: Option
<bool
>,
76 rpcenv
: &mut dyn RpcEnvironment
,
77 ) -> Result
<Value
, Error
> {
// Identity of the caller starting this job; not used in the visible lines —
// presumably consumed by the omitted WorkerTask::new_thread arguments (verify).
79 let auth_id
: Authid
= rpcenv
.get_auth_id().unwrap().parse()?
;
// Resolve the target datastore by name.
81 let datastore
= DataStore
::lookup_datastore(&store
)?
;
// Load the media-pool config file and look up the requested pool entry
// (digest unused here).
83 let (config
, _digest
) = config
::media_pool
::config()?
;
84 let pool_config
: MediaPoolConfig
= config
.lookup("pool", &pool
)?
;
// Load the drive config so we can validate the pool's drive up front.
86 let (drive_config
, _digest
) = config
::drive
::config()?
;
87 // early check before starting worker
88 check_drive_exists(&drive_config
, &pool_config
.drive
)?
;
// Mirror task log to stdout when invoked from the CLI environment.
// NOTE(review): `if cond { true } else { false }` is just `cond` — clippy
// flags this (needless_bool); left untouched in this doc-only pass.
90 let to_stdout
= if rpcenv
.env_type() == RpcEnvironmentType
::CLI { true }
else { false }
;
// Both optional flags default to false when the caller omits them.
92 let eject_media
= eject_media
.unwrap_or(false);
93 let export_media_set
= export_media_set
.unwrap_or(false);
// Spawn the worker thread that performs the actual tape backup.
// NOTE(review): the closure header and remaining new_thread arguments
// (orig. lines 96-100) are missing from this view.
95 let upid_str
= WorkerTask
::new_thread(
101 backup_worker(&worker
, datastore
, &pool_config
, eject_media
, export_media_set
)?
;
109 pub const ROUTER
: Router
= Router
::new()
110 .post(&API_METHOD_BACKUP
);
// NOTE(review): fragment of the tape backup worker — the `fn` header and the
// `worker` / `eject_media` parameters (orig. lines 113-114, 117) are missing
// from this view; embedded numbers are extraction artifacts.
115 datastore
: Arc
<DataStore
>,
116 pool_config
: &MediaPoolConfig
,
118 export_media_set
: bool
,
119 ) -> Result
<(), Error
> {
121 let status_path
= Path
::new(TAPE_STATUS_DIR
);
// Take the pool lock so no other task mutates this media pool concurrently;
// held for the rest of the function via the `_lock` guard.
123 let _lock
= MediaPool
::lock(status_path
, &pool_config
.name
)?
;
125 worker
.log("update media online status");
// Ask the drive's changer (if any) which media are online.
126 let has_changer
= update_media_online_status(&pool_config
.drive
)?
;
// Without a changer we cannot see what is loaded, so allow offline media.
128 let use_offline_media
= !has_changer
;
130 let pool
= MediaPool
::with_config(status_path
, &pool_config
, use_offline_media
)?
;
132 let mut pool_writer
= PoolWriter
::new(pool
, &pool_config
.drive
)?
;
// Enumerate all backup groups in the datastore; sort for deterministic order.
134 let mut group_list
= BackupInfo
::list_backup_groups(&datastore
.base_path())?
;
136 group_list
.sort_unstable();
138 for group
in group_list
{
139 let mut snapshot_list
= group
.list_backups(&datastore
.base_path())?
;
140 BackupInfo
::sort_list(&mut snapshot_list
, true); // oldest first
142 for info
in snapshot_list
{
// Skip snapshots already stored in this media pool.
// NOTE(review): the `if` body (orig. lines 144-145) is missing — presumably
// a `continue`; verify against the full file.
143 if pool_writer
.contains_snapshot(&info
.backup_dir
.to_string()) {
146 worker
.log(format
!("backup snapshot {}", info
.backup_dir
));
147 backup_snapshot(worker
, &mut pool_writer
, datastore
.clone(), info
.backup_dir
)?
;
// Flush any buffered data to tape once all snapshots are written.
151 pool_writer
.commit()?
;
// Post-job media handling: export the whole media set, or just eject.
153 if export_media_set
{
154 worker
.log(format
!("exporting current media set"));
155 pool_writer
.export_media_set(worker
)?
;
156 } else if eject_media
{
// NOTE(review): log text "ejection backup media" reads like a typo for
// "ejecting backup media" — runtime string, so left unchanged here.
157 worker
.log(format
!("ejection backup media"));
158 pool_writer
.eject_media()?
;
164 // Try to update the media online status
// NOTE(review): fragment — the arguments of `update_changer_online_status`
// and the function tail (orig. lines 181-189, presumably including where
// `has_changer` becomes true and the final `Ok(has_changer)`) are missing.
165 fn update_media_online_status(drive
: &str) -> Result
<bool
, Error
> {
// Read the drive configuration file (digest unused here).
167 let (config
, _digest
) = config
::drive
::config()?
;
169 let mut has_changer
= false;
// Only drives attached to a media changer can report which media are online;
// a failed/absent changer lookup is tolerated (best-effort, no error).
171 if let Ok(Some((mut changer
, changer_name
))) = media_changer(&config
, drive
) {
175 let changer_id_list
= changer
.online_media_changer_ids()?
;
177 let status_path
= Path
::new(TAPE_STATUS_DIR
);
178 let mut inventory
= Inventory
::load(status_path
)?
;
180 update_changer_online_status(
// NOTE(review): fragment — the `worker` and `snapshot` parameters (orig.
// lines 192, 195) and several body lines (loop/branch structure) are missing
// from this view; embedded numbers are extraction artifacts.
191 pub fn backup_snapshot(
193 pool_writer
: &mut PoolWriter
,
194 datastore
: Arc
<DataStore
>,
196 ) -> Result
<(), Error
> {
198 worker
.log(format
!("start backup {}:{}", datastore
.name(), snapshot
));
// Reader over the snapshot's contents; cheap Arc clones keep handles usable.
200 let snapshot_reader
= SnapshotReader
::new(datastore
.clone(), snapshot
.clone())?
;
// Peekable so we can detect "no chunks left" without consuming one.
202 let mut chunk_iter
= snapshot_reader
.chunk_iterator()?
.peekable();
205 // test if we have remaining chunks
206 if chunk_iter
.peek().is_none() {
// Make sure a writable tape is loaded before appending chunk data.
210 let uuid
= pool_writer
.load_writable_media(worker
)?
;
// NOTE(review): `leom` presumably signals logical end-of-media; the branch
// consuming it (orig. lines 213-214) is missing — verify against the full file.
212 let (leom
, _bytes
) = pool_writer
.append_chunk_archive(&datastore
, &mut chunk_iter
)?
;
// Current tape is full: mark it so a new volume is loaded next time.
215 pool_writer
.set_media_status_full(&uuid
)?
;
// Append the snapshot archive itself (chunk data was written above).
219 let uuid
= pool_writer
.load_writable_media(worker
)?
;
221 let (done
, _bytes
) = pool_writer
.append_snapshot_archive(&snapshot_reader
)?
;
224 // does not fit on tape, so we try on next volume
225 pool_writer
.set_media_status_full(&uuid
)?
;
227 pool_writer
.load_writable_media(worker
)?
;
228 let (done
, _bytes
) = pool_writer
.append_snapshot_archive(&snapshot_reader
)?
;
// Second volume also failed to take the archive — give up on this snapshot.
231 bail
!("write_snapshot_archive failed on second media");
235 worker
.log(format
!("end backup {}:{}", datastore
.name(), snapshot
));