use std::path::Path;
use std::sync::{Mutex, Arc};

use anyhow::{bail, format_err, Error};
use serde_json::Value;

use proxmox_lang::try_block;
use proxmox_router::{Permission, Router, RpcEnvironment, RpcEnvironmentType};
use proxmox_schema::api;
use proxmox_sys::{task_log, task_warn, WorkerTaskContext};

use pbs_api_types::{
    Authid, Userid, TapeBackupJobConfig, TapeBackupJobSetup, TapeBackupJobStatus, MediaPoolConfig,
    UPID_SCHEMA, JOB_ID_SCHEMA, PRIV_DATASTORE_READ, PRIV_TAPE_AUDIT, PRIV_TAPE_WRITE,
    GroupFilter,
};

use pbs_datastore::{DataStore, StoreProgress, SnapshotReader};
use pbs_datastore::backup_info::{BackupDir, BackupInfo, BackupGroup};
use pbs_config::CachedUserInfo;
use proxmox_rest_server::WorkerTask;

use crate::{
    server::{
        lookup_user_email,
        TapeBackupJobSummary,
        jobstate::{
            Job,
            JobState,
            compute_schedule_status,
        },
    },
    tape::{
        TAPE_STATUS_DIR,
        Inventory,
        PoolWriter,
        MediaPool,
        drive::{
            media_changer,
            lock_tape_device,
            TapeLockError,
            set_tape_device_state,
        },
        changer::update_changer_online_status,
    },
};

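// Routing: the `id` sub-router triggers manual runs of configured jobs,
// while the top-level router lists jobs (GET) and starts ad-hoc backups (POST).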
const TAPE_BACKUP_JOB_ROUTER: Router = Router::new()
    .post(&API_METHOD_RUN_TAPE_BACKUP_JOB);

pub const ROUTER: Router = Router::new()
    .get(&API_METHOD_LIST_TAPE_BACKUP_JOBS)
    .post(&API_METHOD_BACKUP)
    .match_all("id", &TAPE_BACKUP_JOB_ROUTER);

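/// Manual permission check shared by both backup endpoints: the caller needs
/// Datastore.Read on the datastore plus Tape.Write on the drive and the pool.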
fn check_backup_permission(
    auth_id: &Authid,
    store: &str,
    pool: &str,
    drive: &str,
) -> Result<(), Error> {

    let user_info = CachedUserInfo::new()?;

    let privs = user_info.lookup_privs(auth_id, &["datastore", store]);
    if (privs & PRIV_DATASTORE_READ) == 0 {
        bail!("no permissions on /datastore/{}", store);
    }

    let privs = user_info.lookup_privs(auth_id, &["tape", "drive", drive]);
    if (privs & PRIV_TAPE_WRITE) == 0 {
        bail!("no permissions on /tape/drive/{}", drive);
    }

    let privs = user_info.lookup_privs(auth_id, &["tape", "pool", pool]);
    if (privs & PRIV_TAPE_WRITE) == 0 {
        bail!("no permissions on /tape/pool/{}", pool);
    }

    Ok(())
}

#[api(
    returns: {
        description: "List configured tape backup jobs and their status",
        type: Array,
        items: { type: TapeBackupJobStatus },
    },
    access: {
        description: "List configured tape jobs filtered by Tape.Audit privileges",
        permission: &Permission::Anybody,
    },
)]
/// List all tape backup jobs
pub fn list_tape_backup_jobs(
    _param: Value,
    mut rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<TapeBackupJobStatus>, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;

    let (job_config, digest) = pbs_config::tape_job::config()?;
    let (pool_config, _pool_digest) = pbs_config::media_pool::config()?;
    let (drive_config, _digest) = pbs_config::drive::config()?;

    let job_list_iter = job_config
        .convert_to_typed_array("backup")?
        .into_iter()
        .filter(|_job: &TapeBackupJobConfig| {
            // fixme: check access permission
            true
        });

    let mut list = Vec::new();
    let status_path = Path::new(TAPE_STATUS_DIR);
    let current_time = proxmox_time::epoch_i64();

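    // Per-job access filter: only report jobs the caller may audit
    // (Tape.Audit on /tape/job/{id}).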
    for job in job_list_iter {
        let privs = user_info.lookup_privs(&auth_id, &["tape", "job", &job.id]);
        if (privs & PRIV_TAPE_AUDIT) == 0 {
            continue;
        }

        let last_state = JobState::load("tape-backup-job", &job.id)
            .map_err(|err| format_err!("could not open statefile for {}: {}", &job.id, err))?;

        let status = compute_schedule_status(&last_state, job.schedule.as_deref())?;

        let next_run = status.next_run.unwrap_or(current_time);

        let mut next_media_label = None;

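        // Best-effort guess of the next writable medium: simulate a write
        // session at the next scheduled run time; any failure simply leaves
        // `next_media_label` empty.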
        if let Ok(pool) = pool_config.lookup::<MediaPoolConfig>("pool", &job.setup.pool) {
            let mut changer_name = None;
            if let Ok(Some((_, name))) = media_changer(&drive_config, &job.setup.drive) {
                changer_name = Some(name);
            }
            if let Ok(mut pool) = MediaPool::with_config(status_path, &pool, changer_name, true) {
                if pool.start_write_session(next_run, false).is_ok() {
                    if let Ok(media_id) = pool.guess_next_writable_media(next_run) {
                        next_media_label = Some(media_id.label.label_text);
                    }
                }
            }
        }

        list.push(TapeBackupJobStatus { config: job, status, next_media_label });
    }

    rpcenv["digest"] = hex::encode(&digest).into();

    Ok(list)
}

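/// Run a tape backup job inside a new worker task.
///
/// Manual runs lock the drive up front; scheduled runs wait for the lock
/// inside the worker. On completion the job state file is updated and, if a
/// notification address is configured, a status mail is sent.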
pub fn do_tape_backup_job(
    mut job: Job,
    setup: TapeBackupJobSetup,
    auth_id: &Authid,
    schedule: Option<String>,
    to_stdout: bool,
) -> Result<String, Error> {

    let job_id = format!("{}:{}:{}:{}",
        setup.store,
        setup.pool,
        setup.drive,
        job.jobname());

    let worker_type = job.jobtype().to_string();

    let datastore = DataStore::lookup_datastore(&setup.store)?;

    let (config, _digest) = pbs_config::media_pool::config()?;
    let pool_config: MediaPoolConfig = config.lookup("pool", &setup.pool)?;

    let (drive_config, _digest) = pbs_config::drive::config()?;

    // for scheduled jobs we acquire the lock later in the worker
    let drive_lock = if schedule.is_some() {
        None
    } else {
        Some(lock_tape_device(&drive_config, &setup.drive)?)
    };

    let notify_user = setup.notify_user.as_ref().unwrap_or_else(|| Userid::root_userid());
    let email = lookup_user_email(notify_user);

    let upid_str = WorkerTask::new_thread(
        &worker_type,
        Some(job_id.clone()),
        auth_id.to_string(),
        to_stdout,
        move |worker| {
            job.start(&worker.upid().to_string())?;
            let mut drive_lock = drive_lock;

            let mut summary = Default::default();
            let job_result = try_block!({
                if schedule.is_some() {
                    // for scheduled tape backup jobs, we wait indefinitely for the lock
                    task_log!(worker, "waiting for drive lock...");
                    loop {
                        worker.check_abort()?;
                        match lock_tape_device(&drive_config, &setup.drive) {
                            Ok(lock) => {
                                drive_lock = Some(lock);
                                break;
                            }
                            Err(TapeLockError::TimeOut) => continue,
                            Err(TapeLockError::Other(err)) => return Err(err),
                        }
                    }
                }
                set_tape_device_state(&setup.drive, &worker.upid().to_string())?;

                task_log!(worker, "Starting tape backup job '{}'", job_id);
                if let Some(event_str) = schedule {
                    task_log!(worker, "task triggered by schedule '{}'", event_str);
                }

                backup_worker(
                    &worker,
                    datastore,
                    &pool_config,
                    &setup,
                    email.clone(),
                    &mut summary,
                    false,
                )
            });

            let status = worker.create_state(&job_result);

            if let Some(email) = email {
                if let Err(err) = crate::server::send_tape_backup_status(
                    &email,
                    Some(job.jobname()),
                    &setup,
                    &job_result,
                    summary,
                ) {
                    eprintln!("send tape backup notification failed: {}", err);
                }
            }

            if let Err(err) = job.finish(status) {
                eprintln!(
                    "could not finish job state for {}: {}",
                    job.jobtype().to_string(),
                    err
                );
            }

            if let Err(err) = set_tape_device_state(&setup.drive, "") {
                eprintln!(
                    "could not unset drive state for {}: {}",
                    setup.drive,
                    err
                );
            }

            job_result
        }
    )?;

    Ok(upid_str)
}

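// Illustrative call (assuming this router is mounted at /api2/json/tape/backup,
// as in Proxmox Backup Server, and a job "job1" exists in the tape job config):
//   POST /api2/json/tape/backup/job1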
#[api(
    input: {
        properties: {
            id: {
                schema: JOB_ID_SCHEMA,
            },
        },
    },
    access: {
        // Note: parameters are from job config, so we need to test inside function body
        description: "The user needs Tape.Write privilege on /tape/pool/{pool} \
            and /tape/drive/{drive}, Datastore.Read privilege on /datastore/{store}.",
        permission: &Permission::Anybody,
    },
)]
/// Runs a tape backup job manually.
pub fn run_tape_backup_job(
    id: String,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let (config, _digest) = pbs_config::tape_job::config()?;
    let backup_job: TapeBackupJobConfig = config.lookup("backup", &id)?;

    check_backup_permission(
        &auth_id,
        &backup_job.setup.store,
        &backup_job.setup.pool,
        &backup_job.setup.drive,
    )?;

    let job = Job::new("tape-backup-job", &id)?;

    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

    let upid_str = do_tape_backup_job(job, backup_job.setup, &auth_id, None, to_stdout)?;

    Ok(upid_str)
}

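// Illustrative request (hypothetical values; `store`, `pool` and `drive` come
// from the flattened TapeBackupJobSetup, "force-media-set" is declared below):
//   POST /api2/json/tape/backup
//   { "store": "store1", "pool": "daily", "drive": "drive0", "force-media-set": false }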
#[api(
    input: {
        properties: {
            setup: {
                type: TapeBackupJobSetup,
                flatten: true,
            },
            "force-media-set": {
                description: "Ignore the allocation policy and start a new media-set.",
                optional: true,
                type: bool,
                default: false,
            },
        },
    },
    returns: {
        schema: UPID_SCHEMA,
    },
    access: {
        // Note: parameters are not URI parameters, so we need to check them inside the function body
        description: "The user needs Tape.Write privilege on /tape/pool/{pool} \
            and /tape/drive/{drive}, Datastore.Read privilege on /datastore/{store}.",
        permission: &Permission::Anybody,
    },
)]
/// Backup datastore to tape media pool
pub fn backup(
    setup: TapeBackupJobSetup,
    force_media_set: bool,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    check_backup_permission(
        &auth_id,
        &setup.store,
        &setup.pool,
        &setup.drive,
    )?;

    let datastore = DataStore::lookup_datastore(&setup.store)?;

    let (config, _digest) = pbs_config::media_pool::config()?;
    let pool_config: MediaPoolConfig = config.lookup("pool", &setup.pool)?;

    let (drive_config, _digest) = pbs_config::drive::config()?;

    // early check/lock before starting worker
    let drive_lock = lock_tape_device(&drive_config, &setup.drive)?;

    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

    let job_id = format!("{}:{}:{}", setup.store, setup.pool, setup.drive);

    let notify_user = setup.notify_user.as_ref().unwrap_or_else(|| Userid::root_userid());
    let email = lookup_user_email(notify_user);

    let upid_str = WorkerTask::new_thread(
        "tape-backup",
        Some(job_id),
        auth_id.to_string(),
        to_stdout,
        move |worker| {
            let _drive_lock = drive_lock; // keep lock guard
            set_tape_device_state(&setup.drive, &worker.upid().to_string())?;

            let mut summary = Default::default();
            let job_result = backup_worker(
                &worker,
                datastore,
                &pool_config,
                &setup,
                email.clone(),
                &mut summary,
                force_media_set,
            );

            if let Some(email) = email {
                if let Err(err) = crate::server::send_tape_backup_status(
                    &email,
                    None,
                    &setup,
                    &job_result,
                    summary,
                ) {
                    eprintln!("send tape backup notification failed: {}", err);
                }
            }

            // ignore errors
            let _ = set_tape_device_state(&setup.drive, "");
            job_result
        }
    )?;

    Ok(upid_str.into())
}

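/// The actual backup loop: refreshes the media online status, opens a
/// PoolWriter for the target pool, writes every matching snapshot group to
/// tape, appends the media catalog, and finally exports or ejects the media
/// if requested in the setup.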
fn backup_worker(
    worker: &WorkerTask,
    datastore: Arc<DataStore>,
    pool_config: &MediaPoolConfig,
    setup: &TapeBackupJobSetup,
    email: Option<String>,
    summary: &mut TapeBackupJobSummary,
    force_media_set: bool,
) -> Result<(), Error> {

    let status_path = Path::new(TAPE_STATUS_DIR);
    let start = std::time::Instant::now();

    task_log!(worker, "update media online status");
    let changer_name = update_media_online_status(&setup.drive)?;

    let pool = MediaPool::with_config(status_path, pool_config, changer_name, false)?;

    let mut pool_writer = PoolWriter::new(
        pool,
        &setup.drive,
        worker,
        email,
        force_media_set
    )?;

    let mut group_list = BackupInfo::list_backup_groups(&datastore.base_path())?;

    group_list.sort_unstable();

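    // Apply the job's group filters (if any); a group is selected as soon as
    // a single filter matches.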
    let (group_list, group_count) = if let Some(group_filters) = &setup.group_filter {
        let filter_fn = |group: &BackupGroup, group_filters: &[GroupFilter]| {
            group_filters.iter().any(|filter| group.matches(filter))
        };

        let group_count_full = group_list.len();
        let list: Vec<BackupGroup> = group_list.into_iter().filter(|group| filter_fn(group, group_filters)).collect();
        let group_count = list.len();
        task_log!(worker, "found {} groups (out of {} total)", group_count, group_count_full);
        (list, group_count)
    } else {
        let group_count = group_list.len();
        task_log!(worker, "found {} groups", group_count);
        (group_list, group_count)
    };

    let mut progress = StoreProgress::new(group_count as u64);

    let latest_only = setup.latest_only.unwrap_or(false);

    if latest_only {
        task_log!(worker, "latest-only: true (only considering latest snapshots)");
    }

    let datastore_name = datastore.name();

    let mut errors = false;

    let mut need_catalog = false; // avoid writing catalog for empty jobs

    for (group_number, group) in group_list.into_iter().enumerate() {
        progress.done_groups = group_number as u64;
        progress.done_snapshots = 0;
        progress.group_snapshots = 0;

        let snapshot_list = group.list_backups(&datastore.base_path())?;

        // filter out unfinished backups
        let mut snapshot_list: Vec<_> = snapshot_list
            .into_iter()
            .filter(|item| item.is_finished())
            .collect();

        if snapshot_list.is_empty() {
            task_log!(worker, "group {} was empty", group);
            continue;
        }

        BackupInfo::sort_list(&mut snapshot_list, true); // oldest first

        if latest_only {
            progress.group_snapshots = 1;
            if let Some(info) = snapshot_list.pop() {
                if pool_writer.contains_snapshot(datastore_name, &info.backup_dir.to_string()) {
                    task_log!(worker, "skip snapshot {}", info.backup_dir);
                    continue;
                }

                need_catalog = true;

                let snapshot_name = info.backup_dir.to_string();
                if !backup_snapshot(worker, &mut pool_writer, datastore.clone(), info.backup_dir)? {
                    errors = true;
                } else {
                    summary.snapshot_list.push(snapshot_name);
                }
                progress.done_snapshots = 1;
                task_log!(
                    worker,
                    "percentage done: {}",
                    progress
                );
            }
        } else {
            progress.group_snapshots = snapshot_list.len() as u64;
            for (snapshot_number, info) in snapshot_list.into_iter().enumerate() {
                if pool_writer.contains_snapshot(datastore_name, &info.backup_dir.to_string()) {
                    task_log!(worker, "skip snapshot {}", info.backup_dir);
                    continue;
                }

                need_catalog = true;

                let snapshot_name = info.backup_dir.to_string();
                if !backup_snapshot(worker, &mut pool_writer, datastore.clone(), info.backup_dir)? {
                    errors = true;
                } else {
                    summary.snapshot_list.push(snapshot_name);
                }
                progress.done_snapshots = snapshot_number as u64 + 1;
                task_log!(
                    worker,
                    "percentage done: {}",
                    progress
                );
            }
        }
    }

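    // Commit pending writes before appending the catalog and before any
    // export/eject of the media set.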
    pool_writer.commit()?;

    if need_catalog {
        task_log!(worker, "append media catalog");

        let uuid = pool_writer.load_writable_media(worker)?;
        let done = pool_writer.append_catalog_archive(worker)?;
        if !done {
            task_log!(worker, "catalog does not fit on tape, writing to next volume");
            pool_writer.set_media_status_full(&uuid)?;
            pool_writer.load_writable_media(worker)?;
            let done = pool_writer.append_catalog_archive(worker)?;
            if !done {
                bail!("write_catalog_archive failed on second media");
            }
        }
    }

    if setup.export_media_set.unwrap_or(false) {
        pool_writer.export_media_set(worker)?;
    } else if setup.eject_media.unwrap_or(false) {
        pool_writer.eject_media(worker)?;
    }

    if errors {
        bail!("Tape backup finished with some errors. Please check the task log.");
    }

    summary.duration = start.elapsed();

    Ok(())
}

// Try to update the media online status
fn update_media_online_status(drive: &str) -> Result<Option<String>, Error> {

    let (config, _digest) = pbs_config::drive::config()?;

    if let Ok(Some((mut changer, changer_name))) = media_changer(&config, drive) {

        let label_text_list = changer.online_media_label_texts()?;

        let status_path = Path::new(TAPE_STATUS_DIR);
        let mut inventory = Inventory::load(status_path)?;

        update_changer_online_status(
            &config,
            &mut inventory,
            &changer_name,
            &label_text_list,
        )?;

        Ok(Some(changer_name))
    } else {
        Ok(None)
    }
}

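/// Write a single snapshot to tape.
///
/// Chunks are streamed from a separate reader thread and written as chunk
/// archives first; the snapshot archive itself is appended last. Returns
/// Ok(false) if the snapshot could not be opened (it is skipped), Ok(true)
/// on success.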
pub fn backup_snapshot(
    worker: &WorkerTask,
    pool_writer: &mut PoolWriter,
    datastore: Arc<DataStore>,
    snapshot: BackupDir,
) -> Result<bool, Error> {

    task_log!(worker, "backup snapshot {}", snapshot);

    let snapshot_reader = match SnapshotReader::new(datastore.clone(), snapshot.clone()) {
        Ok(reader) => reader,
        Err(err) => {
            // ignore missing snapshots and continue
            task_warn!(worker, "failed opening snapshot '{}': {}", snapshot, err);
            return Ok(false);
        }
    };

    let snapshot_reader = Arc::new(Mutex::new(snapshot_reader));

    let (reader_thread, chunk_iter) = pool_writer.spawn_chunk_reader_thread(
        datastore.clone(),
        snapshot_reader.clone(),
    )?;

    let mut chunk_iter = chunk_iter.peekable();

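    // Write chunk archives until the iterator is exhausted, switching to a
    // new volume whenever the drive reports logical end-of-media (LEOM).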
    loop {
        worker.check_abort()?;

        // test if we have remaining chunks
        match chunk_iter.peek() {
            None => break,
            Some(Ok(_)) => { /* Ok */ },
            Some(Err(err)) => bail!("{}", err),
        }

        let uuid = pool_writer.load_writable_media(worker)?;

        worker.check_abort()?;

        let (leom, _bytes) = pool_writer.append_chunk_archive(worker, &mut chunk_iter, datastore.name())?;

        if leom {
            pool_writer.set_media_status_full(&uuid)?;
        }
    }

    if reader_thread.join().is_err() {
        bail!("chunk reader thread failed");
    }

    worker.check_abort()?;

    let uuid = pool_writer.load_writable_media(worker)?;

    worker.check_abort()?;

    let snapshot_reader = snapshot_reader.lock().unwrap();

    let (done, _bytes) = pool_writer.append_snapshot_archive(worker, &snapshot_reader)?;

    if !done {
        // does not fit on tape, so we try on next volume
        pool_writer.set_media_status_full(&uuid)?;

        worker.check_abort()?;

        pool_writer.load_writable_media(worker)?;
        let (done, _bytes) = pool_writer.append_snapshot_archive(worker, &snapshot_reader)?;

        if !done {
            bail!("write_snapshot_archive failed on second media");
        }
    }

    task_log!(worker, "end backup {}:{}", datastore.name(), snapshot);

    Ok(true)
}