]> git.proxmox.com Git - proxmox-backup.git/blob - src/api2/tape/backup.rs
tape: backup - implement export-media-set option
[proxmox-backup.git] / src / api2 / tape / backup.rs
1 use std::path::Path;
2 use std::sync::Arc;
3
4 use anyhow::{bail, Error};
5 use serde_json::Value;
6
7 use proxmox::{
8 api::{
9 api,
10 RpcEnvironment,
11 RpcEnvironmentType,
12 Router,
13 },
14 };
15
16 use crate::{
17 config::{
18 self,
19 drive::check_drive_exists,
20 },
21 backup::{
22 DataStore,
23 BackupDir,
24 BackupInfo,
25 },
26 api2::types::{
27 Authid,
28 DATASTORE_SCHEMA,
29 MEDIA_POOL_NAME_SCHEMA,
30 UPID_SCHEMA,
31 MediaPoolConfig,
32 },
33 server::WorkerTask,
34 tape::{
35 TAPE_STATUS_DIR,
36 Inventory,
37 PoolWriter,
38 MediaPool,
39 SnapshotReader,
40 media_changer,
41 update_changer_online_status,
42 },
43 };
44
45 #[api(
46 input: {
47 properties: {
48 store: {
49 schema: DATASTORE_SCHEMA,
50 },
51 pool: {
52 schema: MEDIA_POOL_NAME_SCHEMA,
53 },
54 "eject-media": {
55 description: "Eject media upon job completion.",
56 type: bool,
57 optional: true,
58 },
59 "export-media-set": {
60 description: "Export media set upon job completion.",
61 type: bool,
62 optional: true,
63 },
64 },
65 },
66 returns: {
67 schema: UPID_SCHEMA,
68 },
69 )]
70 /// Backup datastore to tape media pool
71 pub fn backup(
72 store: String,
73 pool: String,
74 eject_media: Option<bool>,
75 export_media_set: Option<bool>,
76 rpcenv: &mut dyn RpcEnvironment,
77 ) -> Result<Value, Error> {
78
79 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
80
81 let datastore = DataStore::lookup_datastore(&store)?;
82
83 let (config, _digest) = config::media_pool::config()?;
84 let pool_config: MediaPoolConfig = config.lookup("pool", &pool)?;
85
86 let (drive_config, _digest) = config::drive::config()?;
87 // early check before starting worker
88 check_drive_exists(&drive_config, &pool_config.drive)?;
89
90 let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
91
92 let eject_media = eject_media.unwrap_or(false);
93 let export_media_set = export_media_set.unwrap_or(false);
94
95 let upid_str = WorkerTask::new_thread(
96 "tape-backup",
97 Some(store.clone()),
98 auth_id,
99 to_stdout,
100 move |worker| {
101 backup_worker(&worker, datastore, &pool_config, eject_media, export_media_set)?;
102 Ok(())
103 }
104 )?;
105
106 Ok(upid_str.into())
107 }
108
/// Tape backup API router: `POST` starts a datastore-to-tape backup task.
pub const ROUTER: Router = Router::new()
    .post(&API_METHOD_BACKUP);
111
112
113 fn backup_worker(
114 worker: &WorkerTask,
115 datastore: Arc<DataStore>,
116 pool_config: &MediaPoolConfig,
117 eject_media: bool,
118 export_media_set: bool,
119 ) -> Result<(), Error> {
120
121 let status_path = Path::new(TAPE_STATUS_DIR);
122
123 let _lock = MediaPool::lock(status_path, &pool_config.name)?;
124
125 worker.log("update media online status");
126 let has_changer = update_media_online_status(&pool_config.drive)?;
127
128 let use_offline_media = !has_changer;
129
130 let pool = MediaPool::with_config(status_path, &pool_config, use_offline_media)?;
131
132 let mut pool_writer = PoolWriter::new(pool, &pool_config.drive)?;
133
134 let mut group_list = BackupInfo::list_backup_groups(&datastore.base_path())?;
135
136 group_list.sort_unstable();
137
138 for group in group_list {
139 let mut snapshot_list = group.list_backups(&datastore.base_path())?;
140 BackupInfo::sort_list(&mut snapshot_list, true); // oldest first
141
142 for info in snapshot_list {
143 if pool_writer.contains_snapshot(&info.backup_dir.to_string()) {
144 continue;
145 }
146 worker.log(format!("backup snapshot {}", info.backup_dir));
147 backup_snapshot(worker, &mut pool_writer, datastore.clone(), info.backup_dir)?;
148 }
149 }
150
151 pool_writer.commit()?;
152
153 if export_media_set {
154 worker.log(format!("exporting current media set"));
155 pool_writer.export_media_set(worker)?;
156 } else if eject_media {
157 worker.log(format!("ejection backup media"));
158 pool_writer.eject_media()?;
159 }
160
161 Ok(())
162 }
163
// Try to update the media online status.
//
// If the drive has an associated media changer, query the changer for the
// list of online media and record that in the tape inventory. Returns
// `true` when a changer was found (caller uses this to decide whether
// offline media may be used), `false` otherwise.
fn update_media_online_status(drive: &str) -> Result<bool, Error> {

    let (config, _digest) = config::drive::config()?;

    let mut has_changer = false;

    // NOTE: errors from media_changer() are silently ignored here
    // (best-effort status update); only a found changer is processed
    if let Ok(Some((mut changer, changer_name))) = media_changer(&config, drive) {

        has_changer = true;

        let changer_id_list = changer.online_media_changer_ids()?;

        let status_path = Path::new(TAPE_STATUS_DIR);
        let mut inventory = Inventory::load(status_path)?;

        update_changer_online_status(
            &config,
            &mut inventory,
            &changer_name,
            &changer_id_list,
        )?;
    }

    Ok(has_changer)
}
190
191 pub fn backup_snapshot(
192 worker: &WorkerTask,
193 pool_writer: &mut PoolWriter,
194 datastore: Arc<DataStore>,
195 snapshot: BackupDir,
196 ) -> Result<(), Error> {
197
198 worker.log(format!("start backup {}:{}", datastore.name(), snapshot));
199
200 let snapshot_reader = SnapshotReader::new(datastore.clone(), snapshot.clone())?;
201
202 let mut chunk_iter = snapshot_reader.chunk_iterator()?.peekable();
203
204 loop {
205 // test is we have remaining chunks
206 if chunk_iter.peek().is_none() {
207 break;
208 }
209
210 let uuid = pool_writer.load_writable_media(worker)?;
211
212 let (leom, _bytes) = pool_writer.append_chunk_archive(&datastore, &mut chunk_iter)?;
213
214 if leom {
215 pool_writer.set_media_status_full(&uuid)?;
216 }
217 }
218
219 let uuid = pool_writer.load_writable_media(worker)?;
220
221 let (done, _bytes) = pool_writer.append_snapshot_archive(&snapshot_reader)?;
222
223 if !done {
224 // does not fit on tape, so we try on next volume
225 pool_writer.set_media_status_full(&uuid)?;
226
227 pool_writer.load_writable_media(worker)?;
228 let (done, _bytes) = pool_writer.append_snapshot_archive(&snapshot_reader)?;
229
230 if !done {
231 bail!("write_snapshot_archive failed on second media");
232 }
233 }
234
235 worker.log(format!("end backup {}:{}", datastore.name(), snapshot));
236
237 Ok(())
238 }