]> git.proxmox.com Git - proxmox-backup.git/blob - src/api2/tape/backup.rs
f10a71e3a76dc13f3e02d27f8c67233674da1760
[proxmox-backup.git] / src / api2 / tape / backup.rs
1 use std::path::Path;
2 use std::sync::Arc;
3
4 use anyhow::{bail, format_err, Error};
5 use serde_json::Value;
6
7 use proxmox::{
8 api::{
9 api,
10 RpcEnvironment,
11 RpcEnvironmentType,
12 Router,
13 },
14 };
15
16 use crate::{
17 task_log,
18 config::{
19 self,
20 tape_job::{
21 TapeBackupJobConfig,
22 TapeBackupJobStatus,
23 },
24 },
25 server::{
26 jobstate::{
27 Job,
28 JobState,
29 compute_schedule_status,
30 },
31 },
32 backup::{
33 DataStore,
34 BackupDir,
35 BackupInfo,
36 },
37 api2::types::{
38 Authid,
39 DATASTORE_SCHEMA,
40 MEDIA_POOL_NAME_SCHEMA,
41 DRIVE_NAME_SCHEMA,
42 UPID_SCHEMA,
43 JOB_ID_SCHEMA,
44 MediaPoolConfig,
45 },
46 server::WorkerTask,
47 task::TaskState,
48 tape::{
49 TAPE_STATUS_DIR,
50 Inventory,
51 PoolWriter,
52 MediaPool,
53 SnapshotReader,
54 drive::{
55 media_changer,
56 lock_tape_device,
57 },
58 changer::update_changer_online_status,
59 },
60 };
61
/// Sub-router for a single configured tape backup job (POST runs the job).
const TAPE_BACKUP_JOB_ROUTER: Router = Router::new()
    .post(&API_METHOD_RUN_TAPE_BACKUP_JOB);

/// Tape backup API router:
/// - GET:  list configured tape backup jobs with status
/// - POST: run an ad-hoc datastore-to-tape backup
/// - `{id}`: run a specific configured job (see sub-router above)
pub const ROUTER: Router = Router::new()
    .get(&API_METHOD_LIST_TAPE_BACKUP_JOBS)
    .post(&API_METHOD_BACKUP)
    .match_all("id", &TAPE_BACKUP_JOB_ROUTER);
69
70 #[api(
71 returns: {
72 description: "List configured thape backup jobs and their status",
73 type: Array,
74 items: { type: TapeBackupJobStatus },
75 },
76 )]
77 /// List all tape backup jobs
78 pub fn list_tape_backup_jobs(
79 _param: Value,
80 mut rpcenv: &mut dyn RpcEnvironment,
81 ) -> Result<Vec<TapeBackupJobStatus>, Error> {
82
83 let (config, digest) = config::tape_job::config()?;
84
85 let job_list_iter = config
86 .convert_to_typed_array("backup")?
87 .into_iter()
88 .filter(|_job: &TapeBackupJobConfig| {
89 // fixme: check access permission
90 true
91 });
92
93 let mut list = Vec::new();
94
95 for job in job_list_iter {
96 let last_state = JobState::load("tape-backup-job", &job.id)
97 .map_err(|err| format_err!("could not open statefile for {}: {}", &job.id, err))?;
98
99 let status = compute_schedule_status(&last_state, job.schedule.as_deref())?;
100
101 list.push(TapeBackupJobStatus { config: job, status });
102 }
103
104 rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
105
106 Ok(list)
107 }
108
/// Run a configured tape backup job inside a newly spawned worker task.
///
/// Resolves the datastore, media pool and drive from the job config,
/// acquires the tape drive lock *before* spawning the worker (so a busy
/// drive fails fast for the caller), then starts a worker thread that
/// performs the backup and persists the resulting job state.
///
/// Returns the UPID string of the spawned worker task.
pub fn do_tape_backup_job(
    mut job: Job,
    tape_job: TapeBackupJobConfig,
    auth_id: &Authid,
    schedule: Option<String>,
) -> Result<String, Error> {

    // worker task id: "<store>:<pool>:<drive>:<jobname>"
    let job_id = format!("{}:{}:{}:{}",
        tape_job.store,
        tape_job.pool,
        tape_job.drive,
        job.jobname());

    let worker_type = job.jobtype().to_string();

    let datastore = DataStore::lookup_datastore(&tape_job.store)?;

    let (config, _digest) = config::media_pool::config()?;
    let pool_config: MediaPoolConfig = config.lookup("pool", &tape_job.pool)?;

    let (drive_config, _digest) = config::drive::config()?;

    // early check/lock before starting worker
    let drive_lock = lock_tape_device(&drive_config, &tape_job.drive)?;

    let upid_str = WorkerTask::new_thread(
        &worker_type,
        Some(job_id.clone()),
        auth_id.clone(),
        false,
        move |worker| {
            let _drive_lock = drive_lock; // keep lock guard

            // record the worker UPID in the job state file
            job.start(&worker.upid().to_string())?;

            // job runs never eject or export media here (only the
            // ad-hoc `backup` API exposes those flags)
            let eject_media = false;
            let export_media_set = false;

            task_log!(worker,"Starting tape backup job '{}'", job_id);
            if let Some(event_str) = schedule {
                task_log!(worker,"task triggered by schedule '{}'", event_str);
            }

            let job_result = backup_worker(
                &worker,
                datastore,
                &tape_job.drive,
                &pool_config,
                eject_media,
                export_media_set,
            );

            let status = worker.create_state(&job_result);

            // a failure to persist the job state is only logged; it does
            // not change the job result
            if let Err(err) = job.finish(status) {
                eprintln!(
                    "could not finish job state for {}: {}",
                    job.jobtype().to_string(),
                    err
                );
            }

            job_result
        }
    )?;

    Ok(upid_str)
}
177
178 #[api(
179 input: {
180 properties: {
181 id: {
182 schema: JOB_ID_SCHEMA,
183 },
184 },
185 },
186 )]
187 /// Runs a tape backup job manually.
188 pub fn run_tape_backup_job(
189 id: String,
190 rpcenv: &mut dyn RpcEnvironment,
191 ) -> Result<String, Error> {
192 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
193
194 let (config, _digest) = config::tape_job::config()?;
195 let backup_job: TapeBackupJobConfig = config.lookup("backup", &id)?;
196
197 let job = Job::new("tape-backup-job", &id)?;
198
199 let upid_str = do_tape_backup_job(job, backup_job, &auth_id, None)?;
200
201 Ok(upid_str)
202 }
203
204 #[api(
205 input: {
206 properties: {
207 store: {
208 schema: DATASTORE_SCHEMA,
209 },
210 pool: {
211 schema: MEDIA_POOL_NAME_SCHEMA,
212 },
213 drive: {
214 schema: DRIVE_NAME_SCHEMA,
215 },
216 "eject-media": {
217 description: "Eject media upon job completion.",
218 type: bool,
219 optional: true,
220 },
221 "export-media-set": {
222 description: "Export media set upon job completion.",
223 type: bool,
224 optional: true,
225 },
226 },
227 },
228 returns: {
229 schema: UPID_SCHEMA,
230 },
231 )]
232 /// Backup datastore to tape media pool
233 pub fn backup(
234 store: String,
235 pool: String,
236 drive: String,
237 eject_media: Option<bool>,
238 export_media_set: Option<bool>,
239 rpcenv: &mut dyn RpcEnvironment,
240 ) -> Result<Value, Error> {
241
242 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
243
244 let datastore = DataStore::lookup_datastore(&store)?;
245
246 let (config, _digest) = config::media_pool::config()?;
247 let pool_config: MediaPoolConfig = config.lookup("pool", &pool)?;
248
249 let (drive_config, _digest) = config::drive::config()?;
250
251 // early check/lock before starting worker
252 let drive_lock = lock_tape_device(&drive_config, &drive)?;
253
254 let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;
255
256 let eject_media = eject_media.unwrap_or(false);
257 let export_media_set = export_media_set.unwrap_or(false);
258
259 let job_id = format!("{}:{}:{}", store, pool, drive);
260
261 let upid_str = WorkerTask::new_thread(
262 "tape-backup",
263 Some(job_id),
264 auth_id,
265 to_stdout,
266 move |worker| {
267 let _drive_lock = drive_lock; // keep lock guard
268 backup_worker(&worker, datastore, &drive, &pool_config, eject_media, export_media_set)?;
269 Ok(())
270 }
271 )?;
272
273 Ok(upid_str.into())
274 }
275
276 fn backup_worker(
277 worker: &WorkerTask,
278 datastore: Arc<DataStore>,
279 drive: &str,
280 pool_config: &MediaPoolConfig,
281 eject_media: bool,
282 export_media_set: bool,
283 ) -> Result<(), Error> {
284
285 let status_path = Path::new(TAPE_STATUS_DIR);
286
287 let _lock = MediaPool::lock(status_path, &pool_config.name)?;
288
289 task_log!(worker, "update media online status");
290 let changer_name = update_media_online_status(drive)?;
291
292 let pool = MediaPool::with_config(status_path, &pool_config, changer_name)?;
293
294 let mut pool_writer = PoolWriter::new(pool, drive)?;
295
296 let mut group_list = BackupInfo::list_backup_groups(&datastore.base_path())?;
297
298 group_list.sort_unstable();
299
300 for group in group_list {
301 let mut snapshot_list = group.list_backups(&datastore.base_path())?;
302 BackupInfo::sort_list(&mut snapshot_list, true); // oldest first
303
304 for info in snapshot_list {
305 if pool_writer.contains_snapshot(&info.backup_dir.to_string()) {
306 continue;
307 }
308 task_log!(worker, "backup snapshot {}", info.backup_dir);
309 backup_snapshot(worker, &mut pool_writer, datastore.clone(), info.backup_dir)?;
310 }
311 }
312
313 pool_writer.commit()?;
314
315 if export_media_set {
316 pool_writer.export_media_set(worker)?;
317 } else if eject_media {
318 pool_writer.eject_media(worker)?;
319 }
320
321 Ok(())
322 }
323
/// Try to update the media online status.
///
/// If the drive is attached to a media changer, query the changer for
/// the currently online media label texts, record them in the inventory,
/// and return the changer name. Without a changer this is a no-op
/// returning `None`.
fn update_media_online_status(drive: &str) -> Result<Option<String>, Error> {

    let (config, _digest) = config::drive::config()?;

    // NOTE: an error from media_changer() is deliberately swallowed here
    // (treated like "no changer", best effort); errors while actually
    // talking to the changer below are propagated.
    if let Ok(Some((mut changer, changer_name))) = media_changer(&config, drive) {

        let label_text_list = changer.online_media_label_texts()?;

        let status_path = Path::new(TAPE_STATUS_DIR);
        let mut inventory = Inventory::load(status_path)?;

        update_changer_online_status(
            &config,
            &mut inventory,
            &changer_name,
            &label_text_list,
        )?;

        Ok(Some(changer_name))
    } else {
        Ok(None)
    }
}
348
/// Write a single snapshot to tape.
///
/// First streams all chunk archives, loading a new writable medium
/// whenever the current one is reported full, then appends the snapshot
/// archive itself. If the snapshot archive does not fit on the current
/// volume it is retried once on the next volume; a second failure
/// aborts with an error.
pub fn backup_snapshot(
    worker: &WorkerTask,
    pool_writer: &mut PoolWriter,
    datastore: Arc<DataStore>,
    snapshot: BackupDir,
) -> Result<(), Error> {

    task_log!(worker, "start backup {}:{}", datastore.name(), snapshot);

    let snapshot_reader = SnapshotReader::new(datastore.clone(), snapshot.clone())?;

    let mut chunk_iter = snapshot_reader.chunk_iterator()?.peekable();

    loop {
        worker.check_abort()?;

        // test if we have remaining chunks
        if chunk_iter.peek().is_none() {
            break;
        }

        let uuid = pool_writer.load_writable_media(worker)?;

        worker.check_abort()?;

        let (leom, _bytes) = pool_writer.append_chunk_archive(worker, &datastore, &mut chunk_iter)?;

        // `leom` (presumably "logical end of media") signals the medium
        // is full: mark it so the next iteration loads a fresh one
        if leom {
            pool_writer.set_media_status_full(&uuid)?;
        }
    }

    worker.check_abort()?;

    let uuid = pool_writer.load_writable_media(worker)?;

    worker.check_abort()?;

    let (done, _bytes) = pool_writer.append_snapshot_archive(worker, &snapshot_reader)?;

    if !done {
        // does not fit on tape, so we try on next volume
        pool_writer.set_media_status_full(&uuid)?;

        worker.check_abort()?;

        pool_writer.load_writable_media(worker)?;
        let (done, _bytes) = pool_writer.append_snapshot_archive(worker, &snapshot_reader)?;

        if !done {
            bail!("write_snapshot_archive failed on second media");
        }
    }

    task_log!(worker, "end backup {}:{}", datastore.name(), snapshot);

    Ok(())
}