]>
Commit | Line | Data |
---|---|---|
88356646 | 1 | use std::path::Path; |
5c4755ad | 2 | use std::sync::{Mutex, Arc}; |
88356646 | 3 | |
7690a8e7 | 4 | use anyhow::{bail, format_err, Error}; |
88356646 DM |
5 | use serde_json::Value; |
6 | ||
6ef1b649 WB |
7 | use proxmox_lang::try_block; |
8 | use proxmox_router::{Permission, Router, RpcEnvironment, RpcEnvironmentType}; | |
9 | use proxmox_schema::api; | |
88356646 | 10 | |
e3619d41 DM |
11 | use pbs_api_types::{ |
12 | Authid, Userid, TapeBackupJobConfig, TapeBackupJobSetup, TapeBackupJobStatus, MediaPoolConfig, | |
8cc3760e | 13 | UPID_SCHEMA, JOB_ID_SCHEMA, PRIV_DATASTORE_READ, PRIV_TAPE_AUDIT, PRIV_TAPE_WRITE, |
e3619d41 DM |
14 | }; |
15 | ||
c95c1c83 | 16 | use pbs_datastore::{DataStore, StoreProgress, SnapshotReader}; |
b2065dc7 | 17 | use pbs_datastore::backup_info::{BackupDir, BackupInfo}; |
619cd5cb | 18 | use pbs_tools::{task_log, task_warn, task::WorkerTaskContext}; |
ba3d7e19 | 19 | use pbs_config::CachedUserInfo; |
b9700a9f | 20 | use proxmox_rest_server::WorkerTask; |
c23192d3 | 21 | |
88356646 | 22 | use crate::{ |
7690a8e7 | 23 | server::{ |
c9793d47 | 24 | lookup_user_email, |
4abd4dbe | 25 | TapeBackupJobSummary, |
7690a8e7 DM |
26 | jobstate::{ |
27 | Job, | |
28 | JobState, | |
29 | compute_schedule_status, | |
30 | }, | |
8513626b | 31 | }, |
88356646 DM |
32 | tape::{ |
33 | TAPE_STATUS_DIR, | |
34 | Inventory, | |
88356646 DM |
35 | PoolWriter, |
36 | MediaPool, | |
25aa55b5 DM |
37 | drive::{ |
38 | media_changer, | |
39 | lock_tape_device, | |
e5950360 | 40 | TapeLockError, |
926d05ef | 41 | set_tape_device_state, |
25aa55b5 | 42 | }, |
37796ff7 | 43 | changer::update_changer_online_status, |
88356646 DM |
44 | }, |
45 | }; | |
46 | ||
41a8db35 DM |
47 | const TAPE_BACKUP_JOB_ROUTER: Router = Router::new() |
48 | .post(&API_METHOD_RUN_TAPE_BACKUP_JOB); | |
49 | ||
50 | pub const ROUTER: Router = Router::new() | |
7690a8e7 | 51 | .get(&API_METHOD_LIST_TAPE_BACKUP_JOBS) |
41a8db35 DM |
52 | .post(&API_METHOD_BACKUP) |
53 | .match_all("id", &TAPE_BACKUP_JOB_ROUTER); | |
54 | ||
b4975d31 DM |
55 | fn check_backup_permission( |
56 | auth_id: &Authid, | |
57 | store: &str, | |
58 | pool: &str, | |
59 | drive: &str, | |
60 | ) -> Result<(), Error> { | |
61 | ||
62 | let user_info = CachedUserInfo::new()?; | |
63 | ||
64 | let privs = user_info.lookup_privs(auth_id, &["datastore", store]); | |
65 | if (privs & PRIV_DATASTORE_READ) == 0 { | |
66 | bail!("no permissions on /datastore/{}", store); | |
67 | } | |
68 | ||
69 | let privs = user_info.lookup_privs(auth_id, &["tape", "drive", drive]); | |
70 | if (privs & PRIV_TAPE_WRITE) == 0 { | |
71 | bail!("no permissions on /tape/drive/{}", drive); | |
72 | } | |
73 | ||
74 | let privs = user_info.lookup_privs(auth_id, &["tape", "pool", pool]); | |
75 | if (privs & PRIV_TAPE_WRITE) == 0 { | |
76 | bail!("no permissions on /tape/pool/{}", pool); | |
77 | } | |
78 | ||
79 | Ok(()) | |
80 | } | |
81 | ||
7690a8e7 DM |
82 | #[api( |
83 | returns: { | |
84 | description: "List configured thape backup jobs and their status", | |
85 | type: Array, | |
86 | items: { type: TapeBackupJobStatus }, | |
87 | }, | |
396fd747 DM |
88 | access: { |
89 | description: "List configured tape jobs filtered by Tape.Audit privileges", | |
90 | permission: &Permission::Anybody, | |
91 | }, | |
7690a8e7 DM |
92 | )] |
93 | /// List all tape backup jobs | |
94 | pub fn list_tape_backup_jobs( | |
95 | _param: Value, | |
96 | mut rpcenv: &mut dyn RpcEnvironment, | |
97 | ) -> Result<Vec<TapeBackupJobStatus>, Error> { | |
396fd747 DM |
98 | let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; |
99 | let user_info = CachedUserInfo::new()?; | |
7690a8e7 | 100 | |
e3619d41 | 101 | let (job_config, digest) = pbs_config::tape_job::config()?; |
aad2d162 | 102 | let (pool_config, _pool_digest) = pbs_config::media_pool::config()?; |
1ce8e905 | 103 | let (drive_config, _digest) = pbs_config::drive::config()?; |
7690a8e7 | 104 | |
efe96ec0 | 105 | let job_list_iter = job_config |
7690a8e7 DM |
106 | .convert_to_typed_array("backup")? |
107 | .into_iter() | |
108 | .filter(|_job: &TapeBackupJobConfig| { | |
109 | // fixme: check access permission | |
110 | true | |
111 | }); | |
112 | ||
113 | let mut list = Vec::new(); | |
efe96ec0 | 114 | let status_path = Path::new(TAPE_STATUS_DIR); |
6ef1b649 | 115 | let current_time = proxmox_time::epoch_i64(); |
7690a8e7 DM |
116 | |
117 | for job in job_list_iter { | |
396fd747 DM |
118 | let privs = user_info.lookup_privs(&auth_id, &["tape", "job", &job.id]); |
119 | if (privs & PRIV_TAPE_AUDIT) == 0 { | |
120 | continue; | |
121 | } | |
122 | ||
7690a8e7 DM |
123 | let last_state = JobState::load("tape-backup-job", &job.id) |
124 | .map_err(|err| format_err!("could not open statefile for {}: {}", &job.id, err))?; | |
125 | ||
126 | let status = compute_schedule_status(&last_state, job.schedule.as_deref())?; | |
127 | ||
efe96ec0 DM |
128 | let next_run = status.next_run.unwrap_or(current_time); |
129 | ||
130 | let mut next_media_label = None; | |
131 | ||
132 | if let Ok(pool) = pool_config.lookup::<MediaPoolConfig>("pool", &job.setup.pool) { | |
133 | let mut changer_name = None; | |
134 | if let Ok(Some((_, name))) = media_changer(&drive_config, &job.setup.drive) { | |
135 | changer_name = Some(name); | |
136 | } | |
137 | if let Ok(mut pool) = MediaPool::with_config(status_path, &pool, changer_name, true) { | |
138 | if pool.start_write_session(next_run, false).is_ok() { | |
139 | if let Ok(media_id) = pool.guess_next_writable_media(next_run) { | |
140 | next_media_label = Some(media_id.label.label_text); | |
141 | } | |
142 | } | |
143 | } | |
144 | } | |
145 | ||
146 | list.push(TapeBackupJobStatus { config: job, status, next_media_label }); | |
7690a8e7 DM |
147 | } |
148 | ||
149 | rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into(); | |
150 | ||
151 | Ok(list) | |
152 | } | |
153 | ||
8513626b DM |
/// Run a tape backup job inside a new worker task.
///
/// Drive-lock ordering: for manual runs (`schedule` is `None`) the drive
/// lock is acquired *here*, before the worker is spawned, so lock errors
/// surface directly to the caller. For scheduled runs the lock is
/// acquired inside the worker, retrying forever on lock timeouts.
///
/// Returns the UPID string of the spawned worker task.
pub fn do_tape_backup_job(
    mut job: Job,
    setup: TapeBackupJobSetup,
    auth_id: &Authid,
    schedule: Option<String>,
    to_stdout: bool,
) -> Result<String, Error> {

    let job_id = format!("{}:{}:{}:{}",
                         setup.store,
                         setup.pool,
                         setup.drive,
                         job.jobname());

    let worker_type = job.jobtype().to_string();

    let datastore = DataStore::lookup_datastore(&setup.store)?;

    let (config, _digest) = pbs_config::media_pool::config()?;
    let pool_config: MediaPoolConfig = config.lookup("pool", &setup.pool)?;

    let (drive_config, _digest) = pbs_config::drive::config()?;

    // for scheduled jobs we acquire the lock later in the worker
    let drive_lock = if schedule.is_some() {
        None
    } else {
        Some(lock_tape_device(&drive_config, &setup.drive)?)
    };

    // fall back to notifying root@pam when the job has no notify user
    let notify_user = setup.notify_user.as_ref().unwrap_or_else(|| &Userid::root_userid());
    let email = lookup_user_email(notify_user);

    let upid_str = WorkerTask::new_thread(
        &worker_type,
        Some(job_id.clone()),
        auth_id.to_string(),
        to_stdout,
        move |worker| {
            // record the job as started in its state file
            job.start(&worker.upid().to_string())?;
            // move the (possibly empty) lock guard into the worker scope
            let mut drive_lock = drive_lock;

            let mut summary = Default::default();
            let job_result = try_block!({
                if schedule.is_some() {
                    // for scheduled tape backup jobs, we wait indefinitely for the lock
                    task_log!(worker, "waiting for drive lock...");
                    loop {
                        worker.check_abort()?;
                        match lock_tape_device(&drive_config, &setup.drive) {
                            Ok(lock) => {
                                drive_lock = Some(lock);
                                break;
                            }
                            // timeout -> keep polling; anything else is fatal
                            Err(TapeLockError::TimeOut) => continue,
                            Err(TapeLockError::Other(err)) => return Err(err),
                        }
                    }
                }
                // publish our UPID as the drive state
                set_tape_device_state(&setup.drive, &worker.upid().to_string())?;

                task_log!(worker,"Starting tape backup job '{}'", job_id);
                if let Some(event_str) = schedule {
                    task_log!(worker,"task triggered by schedule '{}'", event_str);
                }


                // email is cloned: backup_worker gets its own copy, the
                // original is still needed for the status mail below
                backup_worker(
                    &worker,
                    datastore,
                    &pool_config,
                    &setup,
                    email.clone(),
                    &mut summary,
                    false,
                )
            });

            let status = worker.create_state(&job_result);

            if let Some(email) = email {
                if let Err(err) = crate::server::send_tape_backup_status(
                    &email,
                    Some(job.jobname()),
                    &setup,
                    &job_result,
                    summary,
                ) {
                    eprintln!("send tape backup notification failed: {}", err);
                }
            }

            // best effort: failing to persist the final job state must not
            // clobber the actual job result
            if let Err(err) = job.finish(status) {
                eprintln!(
                    "could not finish job state for {}: {}",
                    job.jobtype().to_string(),
                    err
                );
            }

            // clear the drive state again; also best effort
            if let Err(err) = set_tape_device_state(&setup.drive, "") {
                eprintln!(
                    "could not unset drive state for {}: {}",
                    setup.drive,
                    err
                );
            }

            job_result
        }
    )?;

    Ok(upid_str)
}
268 | ||
41a8db35 DM |
269 | #[api( |
270 | input: { | |
271 | properties: { | |
272 | id: { | |
273 | schema: JOB_ID_SCHEMA, | |
274 | }, | |
275 | }, | |
276 | }, | |
b4975d31 DM |
277 | access: { |
278 | // Note: parameters are from job config, so we need to test inside function body | |
279 | description: "The user needs Tape.Write privilege on /tape/pool/{pool} \ | |
280 | and /tape/drive/{drive}, Datastore.Read privilege on /datastore/{store}.", | |
281 | permission: &Permission::Anybody, | |
282 | }, | |
41a8db35 DM |
283 | )] |
284 | /// Runs a tape backup job manually. | |
285 | pub fn run_tape_backup_job( | |
286 | id: String, | |
287 | rpcenv: &mut dyn RpcEnvironment, | |
288 | ) -> Result<String, Error> { | |
289 | let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; | |
290 | ||
e3619d41 | 291 | let (config, _digest) = pbs_config::tape_job::config()?; |
41a8db35 DM |
292 | let backup_job: TapeBackupJobConfig = config.lookup("backup", &id)?; |
293 | ||
b4975d31 DM |
294 | check_backup_permission( |
295 | &auth_id, | |
296 | &backup_job.setup.store, | |
297 | &backup_job.setup.pool, | |
298 | &backup_job.setup.drive, | |
299 | )?; | |
300 | ||
41a8db35 DM |
301 | let job = Job::new("tape-backup-job", &id)?; |
302 | ||
bfa942c0 DC |
303 | let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI; |
304 | ||
305 | let upid_str = do_tape_backup_job(job, backup_job.setup, &auth_id, None, to_stdout)?; | |
41a8db35 DM |
306 | |
307 | Ok(upid_str) | |
308 | } | |
309 | ||
#[api(
    input: {
        properties: {
            setup: {
                type: TapeBackupJobSetup,
                flatten: true,
            },
            "force-media-set": {
                description: "Ignore the allocation policy and start a new media-set.",
                optional: true,
                type: bool,
                default: false,
            },
        },
    },
    returns: {
        schema: UPID_SCHEMA,
    },
    access: {
        // Note: parameters are no uri parameter, so we need to test inside function body
        description: "The user needs Tape.Write privilege on /tape/pool/{pool} \
            and /tape/drive/{drive}, Datastore.Read privilege on /datastore/{store}.",
        permission: &Permission::Anybody,
    },
)]
/// Backup datastore to tape media pool
pub fn backup(
    setup: TapeBackupJobSetup,
    force_media_set: bool,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    // parameters come from the flattened setup, so check privileges here
    check_backup_permission(
        &auth_id,
        &setup.store,
        &setup.pool,
        &setup.drive,
    )?;

    let datastore = DataStore::lookup_datastore(&setup.store)?;

    let (config, _digest) = pbs_config::media_pool::config()?;
    let pool_config: MediaPoolConfig = config.lookup("pool", &setup.pool)?;

    let (drive_config, _digest) = pbs_config::drive::config()?;

    // early check/lock before starting worker
    let drive_lock = lock_tape_device(&drive_config, &setup.drive)?;

    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

    let job_id = format!("{}:{}:{}", setup.store, setup.pool, setup.drive);

    // fall back to notifying root@pam when no notify user is configured
    let notify_user = setup.notify_user.as_ref().unwrap_or_else(|| &Userid::root_userid());
    let email = lookup_user_email(notify_user);

    let upid_str = WorkerTask::new_thread(
        "tape-backup",
        Some(job_id),
        auth_id.to_string(),
        to_stdout,
        move |worker| {
            let _drive_lock = drive_lock; // keep lock guard
            // publish our UPID as the drive state
            set_tape_device_state(&setup.drive, &worker.upid().to_string())?;

            let mut summary = Default::default();
            // email is cloned: backup_worker gets its own copy, the
            // original is still needed for the status mail below
            let job_result = backup_worker(
                &worker,
                datastore,
                &pool_config,
                &setup,
                email.clone(),
                &mut summary,
                force_media_set,
            );

            if let Some(email) = email {
                if let Err(err) = crate::server::send_tape_backup_status(
                    &email,
                    None,
                    &setup,
                    &job_result,
                    summary,
                ) {
                    eprintln!("send tape backup notification failed: {}", err);
                }
            }

            // ignore errors
            let _ = set_tape_device_state(&setup.drive, "");
            job_result
        }
    )?;

    Ok(upid_str.into())
}
408 | ||
88356646 DM |
/// Core tape backup loop, executed inside a worker task.
///
/// Walks all backup groups of `datastore` (all finished snapshots, or only
/// the latest one per group when `setup.latest_only` is set), writes each
/// snapshot not already on the media-set to tape, then appends the media
/// catalog and optionally exports/ejects the media.
///
/// Per-snapshot failures are collected (`errors`) and turned into a single
/// final error so the remaining snapshots still get written.
fn backup_worker(
    worker: &WorkerTask,
    datastore: Arc<DataStore>,
    pool_config: &MediaPoolConfig,
    setup: &TapeBackupJobSetup,
    email: Option<String>,
    summary: &mut TapeBackupJobSummary,
    force_media_set: bool,
) -> Result<(), Error> {

    let status_path = Path::new(TAPE_STATUS_DIR);
    let start = std::time::Instant::now();

    task_log!(worker, "update media online status");
    let changer_name = update_media_online_status(&setup.drive)?;

    let pool = MediaPool::with_config(status_path, &pool_config, changer_name, false)?;

    let mut pool_writer = PoolWriter::new(
        pool,
        &setup.drive,
        worker,
        email,
        force_media_set
    )?;

    let mut group_list = BackupInfo::list_backup_groups(&datastore.base_path())?;

    group_list.sort_unstable();

    let group_count = group_list.len();
    task_log!(worker, "found {} groups", group_count);

    let mut progress = StoreProgress::new(group_count as u64);

    let latest_only = setup.latest_only.unwrap_or(false);

    if latest_only {
        task_log!(worker, "latest-only: true (only considering latest snapshots)");
    }

    let datastore_name = datastore.name();

    let mut errors = false;

    let mut need_catalog = false; // avoid writing catalog for empty jobs

    for (group_number, group) in group_list.into_iter().enumerate() {
        progress.done_groups = group_number as u64;
        progress.done_snapshots = 0;
        progress.group_snapshots = 0;

        let snapshot_list = group.list_backups(&datastore.base_path())?;

        // filter out unfinished backups
        let mut snapshot_list: Vec<_> = snapshot_list
            .into_iter()
            .filter(|item| item.is_finished())
            .collect();

        if snapshot_list.is_empty() {
            task_log!(worker, "group {} was empty", group);
            continue;
        }

        BackupInfo::sort_list(&mut snapshot_list, true); // oldest first

        if latest_only {
            // after the oldest-first sort, pop() yields the newest snapshot
            progress.group_snapshots = 1;
            if let Some(info) = snapshot_list.pop() {
                if pool_writer.contains_snapshot(datastore_name, &info.backup_dir.to_string()) {
                    task_log!(worker, "skip snapshot {}", info.backup_dir);
                    continue;
                }

                need_catalog = true;

                let snapshot_name = info.backup_dir.to_string();
                // a `false` return means the snapshot was skipped/failed,
                // not a hard error -- record it and carry on
                if !backup_snapshot(worker, &mut pool_writer, datastore.clone(), info.backup_dir)? {
                    errors = true;
                } else {
                    summary.snapshot_list.push(snapshot_name);
                }
                progress.done_snapshots = 1;
                task_log!(
                    worker,
                    "percentage done: {}",
                    progress
                );
            }
        } else {
            progress.group_snapshots = snapshot_list.len() as u64;
            for (snapshot_number, info) in snapshot_list.into_iter().enumerate() {
                if pool_writer.contains_snapshot(datastore_name, &info.backup_dir.to_string()) {
                    task_log!(worker, "skip snapshot {}", info.backup_dir);
                    continue;
                }

                need_catalog = true;

                let snapshot_name = info.backup_dir.to_string();
                // see note above: `false` marks a soft failure
                if !backup_snapshot(worker, &mut pool_writer, datastore.clone(), info.backup_dir)? {
                    errors = true;
                } else {
                    summary.snapshot_list.push(snapshot_name);
                }
                progress.done_snapshots = snapshot_number as u64 + 1;
                task_log!(
                    worker,
                    "percentage done: {}",
                    progress
                );
            }
        }
    }

    pool_writer.commit()?;

    if need_catalog {
        task_log!(worker, "append media catalog");

        let uuid = pool_writer.load_writable_media(worker)?;
        let done = pool_writer.append_catalog_archive(worker)?;
        if !done {
            // catalog did not fit: mark media full and retry once on the
            // next volume
            task_log!(worker, "catalog does not fit on tape, writing to next volume");
            pool_writer.set_media_status_full(&uuid)?;
            pool_writer.load_writable_media(worker)?;
            let done = pool_writer.append_catalog_archive(worker)?;
            if !done {
                bail!("write_catalog_archive failed on second media");
            }
        }
    }

    // export takes precedence over a plain eject
    if setup.export_media_set.unwrap_or(false) {
        pool_writer.export_media_set(worker)?;
    } else if setup.eject_media.unwrap_or(false) {
        pool_writer.eject_media(worker)?;
    }

    if errors {
        bail!("Tape backup finished with some errors. Please check the task log.");
    }

    summary.duration = start.elapsed();

    Ok(())
}
557 | ||
// Try to update the media online status
//
// If the drive has an associated changer, query its online media labels
// and update the changer online status in the inventory, returning the
// changer name; without a changer this is a no-op returning `Ok(None)`.
fn update_media_online_status(drive: &str) -> Result<Option<String>, Error> {

    let (config, _digest) = pbs_config::drive::config()?;

    if let Ok(Some((mut changer, changer_name))) = media_changer(&config, drive) {

        let label_text_list = changer.online_media_label_texts()?;

        let status_path = Path::new(TAPE_STATUS_DIR);
        let mut inventory = Inventory::load(status_path)?;

        update_changer_online_status(
            &config,
            &mut inventory,
            &changer_name,
            &label_text_list,
        )?;

        Ok(Some(changer_name))
    } else {
        Ok(None)
    }
}
582 | ||
583 | pub fn backup_snapshot( | |
584 | worker: &WorkerTask, | |
585 | pool_writer: &mut PoolWriter, | |
586 | datastore: Arc<DataStore>, | |
587 | snapshot: BackupDir, | |
2a06e086 | 588 | ) -> Result<bool, Error> { |
88356646 | 589 | |
2c10410b | 590 | task_log!(worker, "backup snapshot {}", snapshot); |
88356646 | 591 | |
2a06e086 DC |
592 | let snapshot_reader = match SnapshotReader::new(datastore.clone(), snapshot.clone()) { |
593 | Ok(reader) => reader, | |
594 | Err(err) => { | |
595 | // ignore missing snapshots and continue | |
596 | task_warn!(worker, "failed opening snapshot '{}': {}", snapshot, err); | |
597 | return Ok(false); | |
598 | } | |
599 | }; | |
88356646 | 600 | |
5c4755ad DM |
601 | let snapshot_reader = Arc::new(Mutex::new(snapshot_reader)); |
602 | ||
603 | let (reader_thread, chunk_iter) = pool_writer.spawn_chunk_reader_thread( | |
604 | datastore.clone(), | |
605 | snapshot_reader.clone(), | |
606 | )?; | |
607 | ||
608 | let mut chunk_iter = chunk_iter.peekable(); | |
88356646 DM |
609 | |
610 | loop { | |
271764de DM |
611 | worker.check_abort()?; |
612 | ||
88356646 | 613 | // test is we have remaining chunks |
5c4755ad DM |
614 | match chunk_iter.peek() { |
615 | None => break, | |
616 | Some(Ok(_)) => { /* Ok */ }, | |
617 | Some(Err(err)) => bail!("{}", err), | |
88356646 DM |
618 | } |
619 | ||
ff58c519 | 620 | let uuid = pool_writer.load_writable_media(worker)?; |
88356646 | 621 | |
1f2c4713 DM |
622 | worker.check_abort()?; |
623 | ||
54722aca | 624 | let (leom, _bytes) = pool_writer.append_chunk_archive(worker, &mut chunk_iter, datastore.name())?; |
88356646 DM |
625 | |
626 | if leom { | |
627 | pool_writer.set_media_status_full(&uuid)?; | |
628 | } | |
629 | } | |
630 | ||
5c4755ad DM |
631 | if let Err(_) = reader_thread.join() { |
632 | bail!("chunk reader thread failed"); | |
633 | } | |
634 | ||
271764de DM |
635 | worker.check_abort()?; |
636 | ||
ff58c519 | 637 | let uuid = pool_writer.load_writable_media(worker)?; |
88356646 | 638 | |
1f2c4713 DM |
639 | worker.check_abort()?; |
640 | ||
5c4755ad DM |
641 | let snapshot_reader = snapshot_reader.lock().unwrap(); |
642 | ||
5654d8ce | 643 | let (done, _bytes) = pool_writer.append_snapshot_archive(worker, &snapshot_reader)?; |
88356646 DM |
644 | |
645 | if !done { | |
646 | // does not fit on tape, so we try on next volume | |
647 | pool_writer.set_media_status_full(&uuid)?; | |
648 | ||
271764de DM |
649 | worker.check_abort()?; |
650 | ||
ff58c519 | 651 | pool_writer.load_writable_media(worker)?; |
5654d8ce | 652 | let (done, _bytes) = pool_writer.append_snapshot_archive(worker, &snapshot_reader)?; |
88356646 DM |
653 | |
654 | if !done { | |
655 | bail!("write_snapshot_archive failed on second media"); | |
656 | } | |
657 | } | |
658 | ||
271764de | 659 | task_log!(worker, "end backup {}:{}", datastore.name(), snapshot); |
88356646 | 660 | |
2a06e086 | 661 | Ok(true) |
88356646 | 662 | } |