use std::path::Path;
use std::sync::{Arc, Mutex};

use anyhow::{bail, format_err, Error};
use serde_json::Value;

use proxmox_lang::try_block;
use proxmox_router::{Permission, Router, RpcEnvironment, RpcEnvironmentType};
use proxmox_schema::api;
use proxmox_sys::{task_log, task_warn, WorkerTaskContext};

use pbs_api_types::{
    Authid, GroupFilter, MediaPoolConfig, Operation, TapeBackupJobConfig, TapeBackupJobSetup,
    TapeBackupJobStatus, Userid, JOB_ID_SCHEMA, PRIV_DATASTORE_READ, PRIV_TAPE_AUDIT,
    PRIV_TAPE_WRITE, UPID_SCHEMA,
};

use pbs_config::CachedUserInfo;
use pbs_datastore::backup_info::{BackupDir, BackupGroup, BackupInfo};
use pbs_datastore::{DataStore, SnapshotReader, StoreProgress};
use proxmox_rest_server::WorkerTask;

use crate::{
    server::{
        jobstate::{compute_schedule_status, Job, JobState},
        lookup_user_email, TapeBackupJobSummary,
    },
    tape::{
        changer::update_changer_online_status,
        drive::{lock_tape_device, media_changer, set_tape_device_state, TapeLockError},
        Inventory, MediaPool, PoolWriter, TAPE_STATUS_DIR,
    },
};

const TAPE_BACKUP_JOB_ROUTER: Router = Router::new().post(&API_METHOD_RUN_TAPE_BACKUP_JOB);

pub const ROUTER: Router = Router::new()
    .get(&API_METHOD_LIST_TAPE_BACKUP_JOBS)
    .post(&API_METHOD_BACKUP)
    .match_all("id", &TAPE_BACKUP_JOB_ROUTER);

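/// Check that the user is allowed to run a tape backup with the given
/// parameters: Datastore.Read on the datastore, Tape.Write on both the
/// drive and the media pool.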
fn check_backup_permission(
    auth_id: &Authid,
    store: &str,
    pool: &str,
    drive: &str,
) -> Result<(), Error> {
    let user_info = CachedUserInfo::new()?;

    let privs = user_info.lookup_privs(auth_id, &["datastore", store]);
    if (privs & PRIV_DATASTORE_READ) == 0 {
        bail!("no permissions on /datastore/{}", store);
    }

    let privs = user_info.lookup_privs(auth_id, &["tape", "drive", drive]);
    if (privs & PRIV_TAPE_WRITE) == 0 {
        bail!("no permissions on /tape/drive/{}", drive);
    }

    let privs = user_info.lookup_privs(auth_id, &["tape", "pool", pool]);
    if (privs & PRIV_TAPE_WRITE) == 0 {
        bail!("no permissions on /tape/pool/{}", pool);
    }

    Ok(())
}

#[api(
    returns: {
        description: "List configured tape backup jobs and their status",
        type: Array,
        items: { type: TapeBackupJobStatus },
    },
    access: {
        description: "List configured tape jobs filtered by Tape.Audit privileges",
        permission: &Permission::Anybody,
    },
)]
/// List all tape backup jobs
pub fn list_tape_backup_jobs(
    _param: Value,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<TapeBackupJobStatus>, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;

    let (job_config, digest) = pbs_config::tape_job::config()?;
    let (pool_config, _pool_digest) = pbs_config::media_pool::config()?;
    let (drive_config, _digest) = pbs_config::drive::config()?;

    let job_list_iter = job_config
        .convert_to_typed_array("backup")?
        .into_iter()
        .filter(|_job: &TapeBackupJobConfig| {
            // fixme: check access permission
            true
        });

    let mut list = Vec::new();
    let status_path = Path::new(TAPE_STATUS_DIR);
    let current_time = proxmox_time::epoch_i64();

    for job in job_list_iter {
        let privs = user_info.lookup_privs(&auth_id, &["tape", "job", &job.id]);
        if (privs & PRIV_TAPE_AUDIT) == 0 {
            continue;
        }

        let last_state = JobState::load("tape-backup-job", &job.id)
            .map_err(|err| format_err!("could not open statefile for {}: {}", &job.id, err))?;

        let status = compute_schedule_status(&last_state, job.schedule.as_deref())?;

        let next_run = status.next_run.unwrap_or(current_time);

        let mut next_media_label = None;

        if let Ok(pool) = pool_config.lookup::<MediaPoolConfig>("pool", &job.setup.pool) {
            let mut changer_name = None;
            if let Ok(Some((_, name))) = media_changer(&drive_config, &job.setup.drive) {
                changer_name = Some(name);
            }
            if let Ok(mut pool) = MediaPool::with_config(status_path, &pool, changer_name, true) {
                if pool.start_write_session(next_run, false).is_ok() {
                    if let Ok(media_id) = pool.guess_next_writable_media(next_run) {
                        next_media_label = Some(media_id.label.label_text);
                    }
                }
            }
        }

        list.push(TapeBackupJobStatus {
            config: job,
            status,
            next_media_label,
        });
    }

    rpcenv["digest"] = hex::encode(&digest).into();

    Ok(list)
}

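/// Run a tape backup job inside a background worker task and return the
/// UPID of that task. Used for both manually triggered and scheduled jobs;
/// for scheduled jobs (`schedule` is `Some`) the drive lock is acquired
/// inside the worker instead of up front.
///
/// A minimal usage sketch, mirroring the call in `run_tape_backup_job()`
/// below (the job id "job1" is a placeholder):
/// ```ignore
/// let job = Job::new("tape-backup-job", "job1")?;
/// let upid_str = do_tape_backup_job(job, backup_job.setup, &auth_id, None, to_stdout)?;
/// ```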
pub fn do_tape_backup_job(
    mut job: Job,
    setup: TapeBackupJobSetup,
    auth_id: &Authid,
    schedule: Option<String>,
    to_stdout: bool,
) -> Result<String, Error> {
    let job_id = format!(
        "{}:{}:{}:{}",
        setup.store,
        setup.pool,
        setup.drive,
        job.jobname()
    );

    let worker_type = job.jobtype().to_string();

    let datastore = DataStore::lookup_datastore(&setup.store, Some(Operation::Read))?;

    let (config, _digest) = pbs_config::media_pool::config()?;
    let pool_config: MediaPoolConfig = config.lookup("pool", &setup.pool)?;

    let (drive_config, _digest) = pbs_config::drive::config()?;

    // for scheduled jobs we acquire the lock later in the worker
    let drive_lock = if schedule.is_some() {
        None
    } else {
        Some(lock_tape_device(&drive_config, &setup.drive)?)
    };

    let notify_user = setup
        .notify_user
        .as_ref()
        .unwrap_or_else(|| Userid::root_userid());
    let email = lookup_user_email(notify_user);

    let upid_str = WorkerTask::new_thread(
        &worker_type,
        Some(job_id.clone()),
        auth_id.to_string(),
        to_stdout,
        move |worker| {
            job.start(&worker.upid().to_string())?;
            let mut drive_lock = drive_lock;

            let mut summary = Default::default();
            let job_result = try_block!({
                if schedule.is_some() {
                    // for scheduled tape backup jobs, we wait indefinitely for the lock
                    task_log!(worker, "waiting for drive lock...");
                    loop {
                        worker.check_abort()?;
                        match lock_tape_device(&drive_config, &setup.drive) {
                            Ok(lock) => {
                                drive_lock = Some(lock);
                                break;
                            }
                            Err(TapeLockError::TimeOut) => continue,
                            Err(TapeLockError::Other(err)) => return Err(err),
                        }
                    }
                }
                set_tape_device_state(&setup.drive, &worker.upid().to_string())?;

                task_log!(worker, "Starting tape backup job '{}'", job_id);
                if let Some(event_str) = schedule {
                    task_log!(worker, "task triggered by schedule '{}'", event_str);
                }

                backup_worker(
                    &worker,
                    datastore,
                    &pool_config,
                    &setup,
                    email.clone(),
                    &mut summary,
                    false,
                )
            });

            let status = worker.create_state(&job_result);

            if let Some(email) = email {
                if let Err(err) = crate::server::send_tape_backup_status(
                    &email,
                    Some(job.jobname()),
                    &setup,
                    &job_result,
                    summary,
                ) {
                    eprintln!("send tape backup notification failed: {}", err);
                }
            }

            if let Err(err) = job.finish(status) {
                eprintln!(
                    "could not finish job state for {}: {}",
                    job.jobtype().to_string(),
                    err
                );
            }

            if let Err(err) = set_tape_device_state(&setup.drive, "") {
                eprintln!("could not unset drive state for {}: {}", setup.drive, err);
            }

            job_result
        },
    )?;

    Ok(upid_str)
}

#[api(
    input: {
        properties: {
            id: {
                schema: JOB_ID_SCHEMA,
            },
        },
    },
    access: {
        // Note: parameters are from job config, so we need to test inside function body
        description: "The user needs Tape.Write privilege on /tape/pool/{pool} \
            and /tape/drive/{drive}, Datastore.Read privilege on /datastore/{store}.",
        permission: &Permission::Anybody,
    },
)]
/// Runs a tape backup job manually.
pub fn run_tape_backup_job(id: String, rpcenv: &mut dyn RpcEnvironment) -> Result<String, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let (config, _digest) = pbs_config::tape_job::config()?;
    let backup_job: TapeBackupJobConfig = config.lookup("backup", &id)?;

    check_backup_permission(
        &auth_id,
        &backup_job.setup.store,
        &backup_job.setup.pool,
        &backup_job.setup.drive,
    )?;

    let job = Job::new("tape-backup-job", &id)?;

    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

    let upid_str = do_tape_backup_job(job, backup_job.setup, &auth_id, None, to_stdout)?;

    Ok(upid_str)
}

#[api(
    input: {
        properties: {
            setup: {
                type: TapeBackupJobSetup,
                flatten: true,
            },
            "force-media-set": {
                description: "Ignore the allocation policy and start a new media-set.",
                optional: true,
                type: bool,
                default: false,
            },
        },
    },
    returns: {
        schema: UPID_SCHEMA,
    },
    access: {
        // Note: parameters are not URI parameters, so we need to test inside function body
        description: "The user needs Tape.Write privilege on /tape/pool/{pool} \
            and /tape/drive/{drive}, Datastore.Read privilege on /datastore/{store}.",
        permission: &Permission::Anybody,
    },
)]
/// Backup datastore to tape media pool
pub fn backup(
    setup: TapeBackupJobSetup,
    force_media_set: bool,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    check_backup_permission(&auth_id, &setup.store, &setup.pool, &setup.drive)?;

    let datastore = DataStore::lookup_datastore(&setup.store, Some(Operation::Read))?;

    let (config, _digest) = pbs_config::media_pool::config()?;
    let pool_config: MediaPoolConfig = config.lookup("pool", &setup.pool)?;

    let (drive_config, _digest) = pbs_config::drive::config()?;

    // early check/lock before starting worker
    let drive_lock = lock_tape_device(&drive_config, &setup.drive)?;

    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

    let job_id = format!("{}:{}:{}", setup.store, setup.pool, setup.drive);

    let notify_user = setup
        .notify_user
        .as_ref()
        .unwrap_or_else(|| Userid::root_userid());
    let email = lookup_user_email(notify_user);

    let upid_str = WorkerTask::new_thread(
        "tape-backup",
        Some(job_id),
        auth_id.to_string(),
        to_stdout,
        move |worker| {
            let _drive_lock = drive_lock; // keep lock guard
            set_tape_device_state(&setup.drive, &worker.upid().to_string())?;

            let mut summary = Default::default();
            let job_result = backup_worker(
                &worker,
                datastore,
                &pool_config,
                &setup,
                email.clone(),
                &mut summary,
                force_media_set,
            );

            if let Some(email) = email {
                if let Err(err) = crate::server::send_tape_backup_status(
                    &email,
                    None,
                    &setup,
                    &job_result,
                    summary,
                ) {
                    eprintln!("send tape backup notification failed: {}", err);
                }
            }

            // ignore errors
            let _ = set_tape_device_state(&setup.drive, "");
            job_result
        },
    )?;

    Ok(upid_str.into())
}

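/// Worker body shared by `backup()` and `do_tape_backup_job()`: iterates
/// the datastore's backup groups (optionally group-filtered, optionally
/// latest snapshot only), writes each finished snapshot to tape, then
/// appends the media catalog and optionally exports or ejects the media.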
fn backup_worker(
    worker: &WorkerTask,
    datastore: Arc<DataStore>,
    pool_config: &MediaPoolConfig,
    setup: &TapeBackupJobSetup,
    email: Option<String>,
    summary: &mut TapeBackupJobSummary,
    force_media_set: bool,
) -> Result<(), Error> {
    let status_path = Path::new(TAPE_STATUS_DIR);
    let start = std::time::Instant::now();

    task_log!(worker, "update media online status");
    let changer_name = update_media_online_status(&setup.drive)?;

    let pool = MediaPool::with_config(status_path, pool_config, changer_name, false)?;

    let mut pool_writer = PoolWriter::new(pool, &setup.drive, worker, email, force_media_set)?;

    let mut group_list = datastore.list_backup_groups()?;

    group_list.sort_unstable_by(|a, b| a.group().cmp(b.group()));

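    // Apply the job's optional group filters; a group is kept if any one
    // of the configured filters matches it.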
    let (group_list, group_count) = if let Some(group_filters) = &setup.group_filter {
        let filter_fn = |group: &BackupGroup, group_filters: &[GroupFilter]| {
            group_filters.iter().any(|filter| group.matches(filter))
        };

        let group_count_full = group_list.len();
        let list: Vec<BackupGroup> = group_list
            .into_iter()
            .filter(|group| filter_fn(group, group_filters))
            .collect();
        let group_count = list.len();
        task_log!(
            worker,
            "found {} groups (out of {} total)",
            group_count,
            group_count_full
        );
        (list, group_count)
    } else {
        let group_count = group_list.len();
        task_log!(worker, "found {} groups", group_count);
        (group_list, group_count)
    };

    let mut progress = StoreProgress::new(group_count as u64);

    let latest_only = setup.latest_only.unwrap_or(false);

    if latest_only {
        task_log!(
            worker,
            "latest-only: true (only considering latest snapshots)"
        );
    }

    let datastore_name = datastore.name();

    let mut errors = false;

    let mut need_catalog = false; // avoid writing catalog for empty jobs

    for (group_number, group) in group_list.into_iter().enumerate() {
        progress.done_groups = group_number as u64;
        progress.done_snapshots = 0;
        progress.group_snapshots = 0;

        let snapshot_list = group.list_backups()?;

        // filter out unfinished backups
        let mut snapshot_list: Vec<_> = snapshot_list
            .into_iter()
            .filter(|item| item.is_finished())
            .collect();

        if snapshot_list.is_empty() {
            task_log!(worker, "group {} was empty", group);
            continue;
        }

        BackupInfo::sort_list(&mut snapshot_list, true); // oldest first

        if latest_only {
            progress.group_snapshots = 1;
            if let Some(info) = snapshot_list.pop() {
                if pool_writer.contains_snapshot(datastore_name, &info.backup_dir.to_string()) {
                    task_log!(worker, "skip snapshot {}", info.backup_dir);
                    continue;
                }

                need_catalog = true;

                let snapshot_name = info.backup_dir.to_string();
                if !backup_snapshot(worker, &mut pool_writer, datastore.clone(), info.backup_dir)? {
                    errors = true;
                } else {
                    summary.snapshot_list.push(snapshot_name);
                }
                progress.done_snapshots = 1;
                task_log!(worker, "percentage done: {}", progress);
            }
        } else {
            progress.group_snapshots = snapshot_list.len() as u64;
            for (snapshot_number, info) in snapshot_list.into_iter().enumerate() {
                if pool_writer.contains_snapshot(datastore_name, &info.backup_dir.to_string()) {
                    task_log!(worker, "skip snapshot {}", info.backup_dir);
                    continue;
                }

                need_catalog = true;

                let snapshot_name = info.backup_dir.to_string();
                if !backup_snapshot(worker, &mut pool_writer, datastore.clone(), info.backup_dir)? {
                    errors = true;
                } else {
                    summary.snapshot_list.push(snapshot_name);
                }
                progress.done_snapshots = snapshot_number as u64 + 1;
                task_log!(worker, "percentage done: {}", progress);
            }
        }
    }

    pool_writer.commit()?;

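    // Append the media catalog (the index of the written content) so it is
    // stored on tape as well; if it does not fit on the current volume,
    // retry once on the next one.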
    if need_catalog {
        task_log!(worker, "append media catalog");

        let uuid = pool_writer.load_writable_media(worker)?;
        let done = pool_writer.append_catalog_archive(worker)?;
        if !done {
            task_log!(
                worker,
                "catalog does not fit on tape, writing to next volume"
            );
            pool_writer.set_media_status_full(&uuid)?;
            pool_writer.load_writable_media(worker)?;
            let done = pool_writer.append_catalog_archive(worker)?;
            if !done {
                bail!("write_catalog_archive failed on second media");
            }
        }
    }

    if setup.export_media_set.unwrap_or(false) {
        pool_writer.export_media_set(worker)?;
    } else if setup.eject_media.unwrap_or(false) {
        pool_writer.eject_media(worker)?;
    }

    if errors {
        bail!("Tape backup finished with some errors. Please check the task log.");
    }

    summary.duration = start.elapsed();

    Ok(())
}

// Try to update the media online status
fn update_media_online_status(drive: &str) -> Result<Option<String>, Error> {
    let (config, _digest) = pbs_config::drive::config()?;

    if let Ok(Some((mut changer, changer_name))) = media_changer(&config, drive) {
        let label_text_list = changer.online_media_label_texts()?;

        let status_path = Path::new(TAPE_STATUS_DIR);
        let mut inventory = Inventory::load(status_path)?;

        update_changer_online_status(&config, &mut inventory, &changer_name, &label_text_list)?;

        Ok(Some(changer_name))
    } else {
        Ok(None)
    }
}

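/// Write a single snapshot to tape. Returns `Ok(false)` if the snapshot
/// could not be opened (e.g. it vanished in the meantime) so the caller
/// records an error but continues with the remaining snapshots; returns
/// `Ok(true)` on success.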
pub fn backup_snapshot(
    worker: &WorkerTask,
    pool_writer: &mut PoolWriter,
    datastore: Arc<DataStore>,
    snapshot: BackupDir,
) -> Result<bool, Error> {
    task_log!(worker, "backup snapshot {}", snapshot);

    let snapshot_reader = match SnapshotReader::new(datastore.clone(), (&snapshot).into()) {
        Ok(reader) => reader,
        Err(err) => {
            // ignore missing snapshots and continue
            task_warn!(worker, "failed opening snapshot '{}': {}", snapshot, err);
            return Ok(false);
        }
    };

    let snapshot_reader = Arc::new(Mutex::new(snapshot_reader));

    let (reader_thread, chunk_iter) =
        pool_writer.spawn_chunk_reader_thread(datastore.clone(), snapshot_reader.clone())?;

    let mut chunk_iter = chunk_iter.peekable();

    loop {
        worker.check_abort()?;

        // test if we have remaining chunks
        match chunk_iter.peek() {
            None => break,
            Some(Ok(_)) => { /* Ok */ }
            Some(Err(err)) => bail!("{}", err),
        }

        let uuid = pool_writer.load_writable_media(worker)?;

        worker.check_abort()?;

        let (leom, _bytes) =
            pool_writer.append_chunk_archive(worker, &mut chunk_iter, datastore.name())?;

        if leom {
            // reached logical end-of-media, mark this tape as full
            pool_writer.set_media_status_full(&uuid)?;
        }
    }

    if reader_thread.join().is_err() {
        bail!("chunk reader thread failed");
    }

    worker.check_abort()?;

    let uuid = pool_writer.load_writable_media(worker)?;

    worker.check_abort()?;

    let snapshot_reader = snapshot_reader.lock().unwrap();

    let (done, _bytes) = pool_writer.append_snapshot_archive(worker, &snapshot_reader)?;

    if !done {
        // does not fit on tape, so we try on next volume
        pool_writer.set_media_status_full(&uuid)?;

        worker.check_abort()?;

        pool_writer.load_writable_media(worker)?;
        let (done, _bytes) = pool_writer.append_snapshot_archive(worker, &snapshot_reader)?;

        if !done {
            bail!("write_snapshot_archive failed on second media");
        }
    }

    task_log!(worker, "end backup {}:{}", datastore.name(), snapshot);

    Ok(true)
}