// src/api2/tape/backup.rs (proxmox-backup)

use std::path::Path;
use std::sync::Arc;

use anyhow::{bail, format_err, Error};
use serde_json::Value;

use proxmox::{
    api::{
        api,
        RpcEnvironment,
        RpcEnvironmentType,
        Router,
    },
};

use crate::{
    task_log,
    config::{
        self,
        tape_job::{
            TapeBackupJobConfig,
            TapeBackupJobStatus,
        },
    },
    server::{
        jobstate::{
            Job,
            JobState,
            compute_schedule_status,
        },
    },
    backup::{
        DataStore,
        BackupDir,
        BackupInfo,
    },
    api2::types::{
        Authid,
        DATASTORE_SCHEMA,
        MEDIA_POOL_NAME_SCHEMA,
        DRIVE_NAME_SCHEMA,
        UPID_SCHEMA,
        JOB_ID_SCHEMA,
        MediaPoolConfig,
    },
    server::WorkerTask,
    task::TaskState,
    tape::{
        TAPE_STATUS_DIR,
        Inventory,
        PoolWriter,
        MediaPool,
        SnapshotReader,
        drive::{
            media_changer,
            lock_tape_device,
        },
        changer::update_changer_online_status,
    },
};

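// Router for a single configured tape backup job (matched via its `id`); POST runs that job.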
const TAPE_BACKUP_JOB_ROUTER: Router = Router::new()
    .post(&API_METHOD_RUN_TAPE_BACKUP_JOB);

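// Top-level router: GET lists the configured jobs, POST starts an ad-hoc backup,
// and requests carrying an `id` are dispatched to TAPE_BACKUP_JOB_ROUTER.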
pub const ROUTER: Router = Router::new()
    .get(&API_METHOD_LIST_TAPE_BACKUP_JOBS)
    .post(&API_METHOD_BACKUP)
    .match_all("id", &TAPE_BACKUP_JOB_ROUTER);

#[api(
    returns: {
        description: "List configured tape backup jobs and their status",
        type: Array,
        items: { type: TapeBackupJobStatus },
    },
)]
/// List all tape backup jobs
pub fn list_tape_backup_jobs(
    _param: Value,
    mut rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<TapeBackupJobStatus>, Error> {

    let (config, digest) = config::tape_job::config()?;

    let job_list_iter = config
        .convert_to_typed_array("backup")?
        .into_iter()
        .filter(|_job: &TapeBackupJobConfig| {
            // fixme: check access permission
            true
        });

    let mut list = Vec::new();

    for job in job_list_iter {
        let last_state = JobState::load("tape-backup-job", &job.id)
            .map_err(|err| format_err!("could not open statefile for {}: {}", &job.id, err))?;

        let status = compute_schedule_status(&last_state, job.schedule.as_deref())?;

        list.push(TapeBackupJobStatus { config: job, status });
    }

    rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();

    Ok(list)
}

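/// Create a worker task that runs the given tape backup job and updates its job state.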
pub fn do_tape_backup_job(
    mut job: Job,
    tape_job: TapeBackupJobConfig,
    auth_id: &Authid,
    schedule: Option<String>,
) -> Result<String, Error> {

    let job_id = format!("{}:{}:{}:{}",
                         tape_job.store,
                         tape_job.pool,
                         tape_job.drive,
                         job.jobname());

    let worker_type = job.jobtype().to_string();

    let datastore = DataStore::lookup_datastore(&tape_job.store)?;

    let (config, _digest) = config::media_pool::config()?;
    let pool_config: MediaPoolConfig = config.lookup("pool", &tape_job.pool)?;

    let (drive_config, _digest) = config::drive::config()?;

    // early check/lock before starting worker
    let drive_lock = lock_tape_device(&drive_config, &tape_job.drive)?;

    let upid_str = WorkerTask::new_thread(
        &worker_type,
        Some(job_id.clone()),
        auth_id.clone(),
        false,
        move |worker| {
            let _drive_lock = drive_lock; // keep lock guard

            job.start(&worker.upid().to_string())?;

            let eject_media = false;
            let export_media_set = false;

            task_log!(worker, "Starting tape backup job '{}'", job_id);
            if let Some(event_str) = schedule {
                task_log!(worker, "task triggered by schedule '{}'", event_str);
            }

            let job_result = backup_worker(
                &worker,
                datastore,
                &tape_job.drive,
                &pool_config,
                eject_media,
                export_media_set,
            );

            let status = worker.create_state(&job_result);

            if let Err(err) = job.finish(status) {
                eprintln!(
                    "could not finish job state for {}: {}",
                    job.jobtype().to_string(),
                    err
                );
            }

            job_result
        }
    )?;

    Ok(upid_str)
}

#[api(
    input: {
        properties: {
            id: {
                schema: JOB_ID_SCHEMA,
            },
        },
    },
)]
/// Runs a tape backup job manually.
pub fn run_tape_backup_job(
    id: String,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let (config, _digest) = config::tape_job::config()?;
    let backup_job: TapeBackupJobConfig = config.lookup("backup", &id)?;

    let job = Job::new("tape-backup-job", &id)?;

    let upid_str = do_tape_backup_job(job, backup_job, &auth_id, None)?;

    Ok(upid_str)
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            pool: {
                schema: MEDIA_POOL_NAME_SCHEMA,
            },
            drive: {
                schema: DRIVE_NAME_SCHEMA,
            },
            "eject-media": {
                description: "Eject media upon job completion.",
                type: bool,
                optional: true,
            },
            "export-media-set": {
                description: "Export media set upon job completion.",
                type: bool,
                optional: true,
            },
        },
    },
    returns: {
        schema: UPID_SCHEMA,
    },
)]
/// Backup datastore to tape media pool
pub fn backup(
    store: String,
    pool: String,
    drive: String,
    eject_media: Option<bool>,
    export_media_set: Option<bool>,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let datastore = DataStore::lookup_datastore(&store)?;

    let (config, _digest) = config::media_pool::config()?;
    let pool_config: MediaPoolConfig = config.lookup("pool", &pool)?;

    let (drive_config, _digest) = config::drive::config()?;

    // early check/lock before starting worker
    let drive_lock = lock_tape_device(&drive_config, &drive)?;

    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

    let eject_media = eject_media.unwrap_or(false);
    let export_media_set = export_media_set.unwrap_or(false);

    let job_id = format!("{}:{}:{}", store, pool, drive);

    let upid_str = WorkerTask::new_thread(
        "tape-backup",
        Some(job_id),
        auth_id,
        to_stdout,
        move |worker| {
            let _drive_lock = drive_lock; // keep lock guard
            backup_worker(&worker, datastore, &drive, &pool_config, eject_media, export_media_set)?;
            Ok(())
        }
    )?;

    Ok(upid_str.into())
}

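// Back up all snapshots of the datastore that are not yet on the current media set, oldest first.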
fn backup_worker(
    worker: &WorkerTask,
    datastore: Arc<DataStore>,
    drive: &str,
    pool_config: &MediaPoolConfig,
    eject_media: bool,
    export_media_set: bool,
) -> Result<(), Error> {

    let status_path = Path::new(TAPE_STATUS_DIR);

    let _lock = MediaPool::lock(status_path, &pool_config.name)?;

    task_log!(worker, "update media online status");
    let changer_name = update_media_online_status(drive)?;

    let pool = MediaPool::with_config(status_path, &pool_config, changer_name)?;

    let mut pool_writer = PoolWriter::new(pool, drive)?;

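    // iterate over all backup groups; within each group, write snapshots oldest first,
    // skipping snapshots that are already contained in the media set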
    let mut group_list = BackupInfo::list_backup_groups(&datastore.base_path())?;

    group_list.sort_unstable();

    for group in group_list {
        let mut snapshot_list = group.list_backups(&datastore.base_path())?;
        BackupInfo::sort_list(&mut snapshot_list, true); // oldest first

        for info in snapshot_list {
            if pool_writer.contains_snapshot(&info.backup_dir.to_string()) {
                continue;
            }
            task_log!(worker, "backup snapshot {}", info.backup_dir);
            backup_snapshot(worker, &mut pool_writer, datastore.clone(), info.backup_dir)?;
        }
    }

    pool_writer.commit()?;

    if export_media_set {
        pool_writer.export_media_set(worker)?;
    } else if eject_media {
        pool_writer.eject_media(worker)?;
    }

    Ok(())
}

// Try to update the media online status.
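// Returns the changer name if the drive has an associated media changer, else None.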
fn update_media_online_status(drive: &str) -> Result<Option<String>, Error> {

    let (config, _digest) = config::drive::config()?;

    if let Ok(Some((mut changer, changer_name))) = media_changer(&config, drive) {

        let label_text_list = changer.online_media_label_texts()?;

        let status_path = Path::new(TAPE_STATUS_DIR);
        let mut inventory = Inventory::load(status_path)?;

        update_changer_online_status(
            &config,
            &mut inventory,
            &changer_name,
            &label_text_list,
        )?;

        Ok(Some(changer_name))
    } else {
        Ok(None)
    }
}

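/// Write a single snapshot to tape: first all its chunk archives, then the snapshot
/// archive itself, marking media as full and loading new media whenever a tape runs out.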
pub fn backup_snapshot(
    worker: &WorkerTask,
    pool_writer: &mut PoolWriter,
    datastore: Arc<DataStore>,
    snapshot: BackupDir,
) -> Result<(), Error> {

    task_log!(worker, "start backup {}:{}", datastore.name(), snapshot);

    let snapshot_reader = SnapshotReader::new(datastore.clone(), snapshot.clone())?;

    let mut chunk_iter = snapshot_reader.chunk_iterator()?.peekable();

    loop {
        worker.check_abort()?;

        // test if we have remaining chunks
        if chunk_iter.peek().is_none() {
            break;
        }

        let uuid = pool_writer.load_writable_media(worker)?;

        worker.check_abort()?;

        let (leom, _bytes) = pool_writer.append_chunk_archive(worker, &datastore, &mut chunk_iter)?;

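        // LEOM (logical end of media): mark this tape as full so that the next
        // load_writable_media() call switches to a new writable medium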
        if leom {
            pool_writer.set_media_status_full(&uuid)?;
        }
    }

    worker.check_abort()?;

    let uuid = pool_writer.load_writable_media(worker)?;

    worker.check_abort()?;

    let (done, _bytes) = pool_writer.append_snapshot_archive(worker, &snapshot_reader)?;

    if !done {
        // does not fit on tape, so we try on next volume
        pool_writer.set_media_status_full(&uuid)?;

        worker.check_abort()?;

        pool_writer.load_writable_media(worker)?;
        let (done, _bytes) = pool_writer.append_snapshot_archive(worker, &snapshot_reader)?;

        if !done {
            bail!("write_snapshot_archive failed on second media");
        }
    }

    task_log!(worker, "end backup {}:{}", datastore.name(), snapshot);

    Ok(())
}