use std::path::Path;
use std::sync::Arc;

use anyhow::{bail, format_err, Error};
use serde_json::Value;

use proxmox::{
    api::{
        api,
        RpcEnvironment,
        RpcEnvironmentType,
        Router,
    },
};

use crate::{
    task_log,
    config::{
        self,
        tape_job::{
            TapeBackupJobConfig,
            TapeBackupJobStatus,
        },
    },
    server::{
        jobstate::{
            Job,
            JobState,
            compute_schedule_status,
        },
    },
    backup::{
        DataStore,
        BackupDir,
        BackupInfo,
    },
    api2::types::{
        Authid,
        DATASTORE_SCHEMA,
        MEDIA_POOL_NAME_SCHEMA,
        DRIVE_NAME_SCHEMA,
        UPID_SCHEMA,
        JOB_ID_SCHEMA,
        MediaPoolConfig,
    },
    server::WorkerTask,
    task::TaskState,
    tape::{
        TAPE_STATUS_DIR,
        Inventory,
        PoolWriter,
        MediaPool,
        SnapshotReader,
        drive::{
            media_changer,
            lock_tape_device,
            set_tape_device_state,
        },
        changer::update_changer_online_status,
    },
};

const TAPE_BACKUP_JOB_ROUTER: Router = Router::new()
    .post(&API_METHOD_RUN_TAPE_BACKUP_JOB);

pub const ROUTER: Router = Router::new()
    .get(&API_METHOD_LIST_TAPE_BACKUP_JOBS)
    .post(&API_METHOD_BACKUP)
    .match_all("id", &TAPE_BACKUP_JOB_ROUTER);

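// Resulting route layout:
//   GET  /     -> list_tape_backup_jobs
//   POST /     -> backup (start a one-off tape backup)
//   POST /{id} -> run_tape_backup_job (trigger a configured job by id)
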
#[api(
    returns: {
        description: "List configured tape backup jobs and their status",
        type: Array,
        items: { type: TapeBackupJobStatus },
    },
)]
/// List all tape backup jobs
pub fn list_tape_backup_jobs(
    _param: Value,
    mut rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<TapeBackupJobStatus>, Error> {

    let (config, digest) = config::tape_job::config()?;

    let job_list_iter = config
        .convert_to_typed_array("backup")?
        .into_iter()
        .filter(|_job: &TapeBackupJobConfig| {
            // fixme: check access permission
            true
        });

    let mut list = Vec::new();

    for job in job_list_iter {
        let last_state = JobState::load("tape-backup-job", &job.id)
            .map_err(|err| format_err!("could not open statefile for {}: {}", &job.id, err))?;

        let status = compute_schedule_status(&last_state, job.schedule.as_deref())?;

        list.push(TapeBackupJobStatus { config: job, status });
    }

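    // Return the config digest so clients can detect concurrent modifications.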
    rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();

    Ok(list)
}

pub fn do_tape_backup_job(
    mut job: Job,
    tape_job: TapeBackupJobConfig,
    auth_id: &Authid,
    schedule: Option<String>,
) -> Result<String, Error> {

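    // Compose the worker task id from store, pool, drive and job name.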
    let job_id = format!("{}:{}:{}:{}",
        tape_job.store,
        tape_job.pool,
        tape_job.drive,
        job.jobname());

    let worker_type = job.jobtype().to_string();

    let datastore = DataStore::lookup_datastore(&tape_job.store)?;

    let (config, _digest) = config::media_pool::config()?;
    let pool_config: MediaPoolConfig = config.lookup("pool", &tape_job.pool)?;

    let (drive_config, _digest) = config::drive::config()?;

    // early check/lock before starting worker
    let drive_lock = lock_tape_device(&drive_config, &tape_job.drive)?;
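    // The lock guard is moved into the worker closure below, so the drive
    // stays locked for the lifetime of the task.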

    let upid_str = WorkerTask::new_thread(
        &worker_type,
        Some(job_id.clone()),
        auth_id.clone(),
        false,
        move |worker| {
            let _drive_lock = drive_lock; // keep lock guard

            set_tape_device_state(&tape_job.drive, &worker.upid().to_string())?;
            job.start(&worker.upid().to_string())?;

            let eject_media = false;
            let export_media_set = false;

            task_log!(worker, "Starting tape backup job '{}'", job_id);
            if let Some(event_str) = schedule {
                task_log!(worker, "task triggered by schedule '{}'", event_str);
            }

            let job_result = backup_worker(
                &worker,
                datastore,
                &tape_job.drive,
                &pool_config,
                eject_media,
                export_media_set,
            );

            let status = worker.create_state(&job_result);

            if let Err(err) = job.finish(status) {
                eprintln!(
                    "could not finish job state for {}: {}",
                    job.jobtype().to_string(),
                    err
                );
            }

            if let Err(err) = set_tape_device_state(&tape_job.drive, "") {
                eprintln!(
                    "could not unset drive state for {}: {}",
                    tape_job.drive,
                    err
                );
            }

            job_result
        }
    )?;

    Ok(upid_str)
}

#[api(
    input: {
        properties: {
            id: {
                schema: JOB_ID_SCHEMA,
            },
        },
    },
)]
/// Runs a tape backup job manually.
pub fn run_tape_backup_job(
    id: String,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let (config, _digest) = config::tape_job::config()?;
    let backup_job: TapeBackupJobConfig = config.lookup("backup", &id)?;

    let job = Job::new("tape-backup-job", &id)?;

    let upid_str = do_tape_backup_job(job, backup_job, &auth_id, None)?;

    Ok(upid_str)
}

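// Hypothetical client call, assuming this router is mounted under
// /api2/json/tape/backup: POST /api2/json/tape/backup/{id} triggers the
// configured job `id` and returns the UPID of the spawned worker task.
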
#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            pool: {
                schema: MEDIA_POOL_NAME_SCHEMA,
            },
            drive: {
                schema: DRIVE_NAME_SCHEMA,
            },
            "eject-media": {
                description: "Eject media upon job completion.",
                type: bool,
                optional: true,
            },
            "export-media-set": {
                description: "Export media set upon job completion.",
                type: bool,
                optional: true,
            },
        },
    },
    returns: {
        schema: UPID_SCHEMA,
    },
)]
/// Backup datastore to tape media pool
pub fn backup(
    store: String,
    pool: String,
    drive: String,
    eject_media: Option<bool>,
    export_media_set: Option<bool>,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let datastore = DataStore::lookup_datastore(&store)?;

    let (config, _digest) = config::media_pool::config()?;
    let pool_config: MediaPoolConfig = config.lookup("pool", &pool)?;

    let (drive_config, _digest) = config::drive::config()?;

    // early check/lock before starting worker
    let drive_lock = lock_tape_device(&drive_config, &drive)?;

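    // When invoked from the CLI, also print the task log to stdout.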
    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

    let eject_media = eject_media.unwrap_or(false);
    let export_media_set = export_media_set.unwrap_or(false);

    let job_id = format!("{}:{}:{}", store, pool, drive);

    let upid_str = WorkerTask::new_thread(
        "tape-backup",
        Some(job_id),
        auth_id,
        to_stdout,
        move |worker| {
            let _drive_lock = drive_lock; // keep lock guard
            set_tape_device_state(&drive, &worker.upid().to_string())?;
            backup_worker(&worker, datastore, &drive, &pool_config, eject_media, export_media_set)?;
            // ignore errors
            let _ = set_tape_device_state(&drive, "");
            Ok(())
        }
    )?;

    Ok(upid_str.into())
}

fn backup_worker(
    worker: &WorkerTask,
    datastore: Arc<DataStore>,
    drive: &str,
    pool_config: &MediaPoolConfig,
    eject_media: bool,
    export_media_set: bool,
) -> Result<(), Error> {

    let status_path = Path::new(TAPE_STATUS_DIR);

    let _lock = MediaPool::lock(status_path, &pool_config.name)?;

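    // The pool lock is held for the whole backup, so concurrent jobs cannot
    // allocate media from the same pool.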
    task_log!(worker, "update media online status");
    let changer_name = update_media_online_status(drive)?;

    let pool = MediaPool::with_config(status_path, &pool_config, changer_name)?;

    let mut pool_writer = PoolWriter::new(pool, drive)?;

    let mut group_list = BackupInfo::list_backup_groups(&datastore.base_path())?;

    group_list.sort_unstable();

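    // Walk groups in sorted order; within each group, write snapshots oldest
    // first and skip snapshots already stored on media of this pool.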
    for group in group_list {
        let mut snapshot_list = group.list_backups(&datastore.base_path())?;
        BackupInfo::sort_list(&mut snapshot_list, true); // oldest first

        for info in snapshot_list {
            if pool_writer.contains_snapshot(&info.backup_dir.to_string()) {
                continue;
            }
            task_log!(worker, "backup snapshot {}", info.backup_dir);
            backup_snapshot(worker, &mut pool_writer, datastore.clone(), info.backup_dir)?;
        }
    }

    pool_writer.commit()?;

    if export_media_set {
        pool_writer.export_media_set(worker)?;
    } else if eject_media {
        pool_writer.eject_media(worker)?;
    }

    Ok(())
}

// Try to update the media online status. Returns the changer name if the
// drive has an associated media changer, or `None` for standalone drives.
fn update_media_online_status(drive: &str) -> Result<Option<String>, Error> {

    let (config, _digest) = config::drive::config()?;

    if let Ok(Some((mut changer, changer_name))) = media_changer(&config, drive) {

        let label_text_list = changer.online_media_label_texts()?;

        let status_path = Path::new(TAPE_STATUS_DIR);
        let mut inventory = Inventory::load(status_path)?;

        update_changer_online_status(
            &config,
            &mut inventory,
            &changer_name,
            &label_text_list,
        )?;

        Ok(Some(changer_name))
    } else {
        Ok(None)
    }
}

pub fn backup_snapshot(
    worker: &WorkerTask,
    pool_writer: &mut PoolWriter,
    datastore: Arc<DataStore>,
    snapshot: BackupDir,
) -> Result<(), Error> {

    task_log!(worker, "start backup {}:{}", datastore.name(), snapshot);

    let snapshot_reader = SnapshotReader::new(datastore.clone(), snapshot.clone())?;

    let mut chunk_iter = snapshot_reader.chunk_iterator()?.peekable();

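    // Write chunk archives until the iterator is exhausted. When the drive
    // reports logical end-of-media (LEOM), mark the current medium as full so
    // the next load_writable_media() call switches to a new one.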
    loop {
        worker.check_abort()?;

        // test if we have remaining chunks
        if chunk_iter.peek().is_none() {
            break;
        }

        let uuid = pool_writer.load_writable_media(worker)?;

        worker.check_abort()?;

        let (leom, _bytes) = pool_writer.append_chunk_archive(worker, &datastore, &mut chunk_iter)?;

        if leom {
            pool_writer.set_media_status_full(&uuid)?;
        }
    }

    worker.check_abort()?;

    let uuid = pool_writer.load_writable_media(worker)?;

    worker.check_abort()?;

    let (done, _bytes) = pool_writer.append_snapshot_archive(worker, &snapshot_reader)?;

    if !done {
        // does not fit on this tape, so we try again on the next volume
        pool_writer.set_media_status_full(&uuid)?;

        worker.check_abort()?;

        pool_writer.load_writable_media(worker)?;
        let (done, _bytes) = pool_writer.append_snapshot_archive(worker, &snapshot_reader)?;

        if !done {
            bail!("write_snapshot_archive failed on second media");
        }
    }

    task_log!(worker, "end backup {}:{}", datastore.name(), snapshot);

    Ok(())
}