// proxmox-backup: src/api2/tape/backup.rs
use std::path::Path;
use std::sync::Arc;

use anyhow::{bail, Error};
use serde_json::Value;

use proxmox::{
    api::{
        api,
        RpcEnvironment,
        RpcEnvironmentType,
        Router,
    },
};

use crate::{
    config::{
        self,
        drive::check_drive_exists,
    },
    backup::{
        DataStore,
        BackupDir,
        BackupInfo,
    },
    api2::types::{
        Authid,
        DATASTORE_SCHEMA,
        MEDIA_POOL_NAME_SCHEMA,
        UPID_SCHEMA,
        MediaPoolConfig,
    },
    server::WorkerTask,
    tape::{
        TAPE_STATUS_DIR,
        Inventory,
        PoolWriter,
        MediaPool,
        SnapshotReader,
        media_changer,
        update_changer_online_status,
    },
};

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            pool: {
                schema: MEDIA_POOL_NAME_SCHEMA,
            },
        },
    },
    returns: {
        schema: UPID_SCHEMA,
    },
)]
/// Backup datastore to tape media pool
pub fn backup(
    store: String,
    pool: String,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let datastore = DataStore::lookup_datastore(&store)?;

    let (config, _digest) = config::media_pool::config()?;
    let pool_config: MediaPoolConfig = config.lookup("pool", &pool)?;

    let (drive_config, _digest) = config::drive::config()?;
    // early check before starting worker
    check_drive_exists(&drive_config, &pool_config.drive)?;

    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

    let upid_str = WorkerTask::new_thread(
        "tape-backup",
        Some(store.clone()),
        auth_id,
        to_stdout,
        move |worker| {
            backup_worker(&worker, datastore, &pool_config)?;
            Ok(())
        }
    )?;

    Ok(upid_str.into())
}

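// The #[api] macro above generates `API_METHOD_BACKUP`, which the router
// below registers as the POST handler for this endpoint. The actual work
// runs in the spawned "tape-backup" worker task; the call itself only
// returns the task's UPID.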
pub const ROUTER: Router = Router::new()
    .post(&API_METHOD_BACKUP);

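/// Run the actual tape backup (called from the worker task spawned in
/// `backup`).
///
/// The worker locks the media pool, updates the online status of media
/// found in the changer, then iterates over all backup groups of the
/// datastore. For each group, snapshots are written oldest first, skipping
/// those the pool already contains. Without a changer, offline media may
/// also be used (`use_offline_media`). Finally the pool writer is
/// committed.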
fn backup_worker(
    worker: &WorkerTask,
    datastore: Arc<DataStore>,
    pool_config: &MediaPoolConfig,
) -> Result<(), Error> {

    let status_path = Path::new(TAPE_STATUS_DIR);

    let _lock = MediaPool::lock(status_path, &pool_config.name)?;

    worker.log("update media online status");
    let has_changer = update_media_online_status(&pool_config.drive)?;

    let use_offline_media = !has_changer;

    let pool = MediaPool::with_config(status_path, &pool_config, use_offline_media)?;

    let mut pool_writer = PoolWriter::new(pool, &pool_config.drive)?;

    let mut group_list = BackupInfo::list_backup_groups(&datastore.base_path())?;

    group_list.sort_unstable();

    for group in group_list {
        let mut snapshot_list = group.list_backups(&datastore.base_path())?;
        BackupInfo::sort_list(&mut snapshot_list, true); // oldest first

        for info in snapshot_list {
            if pool_writer.contains_snapshot(&info.backup_dir.to_string()) {
                continue;
            }
            worker.log(format!("backup snapshot {}", info.backup_dir));
            backup_snapshot(worker, &mut pool_writer, datastore.clone(), info.backup_dir)?;
        }
    }

    pool_writer.commit()?;

    Ok(())
}

// Try to update the media online status
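//
// Returns `Ok(true)` if the drive has an associated media changer (in
// which case the changer is queried and the inventory updated with the
// list of online media), `Ok(false)` if no changer is available.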
fn update_media_online_status(drive: &str) -> Result<bool, Error> {

    let (config, _digest) = config::drive::config()?;

    let mut has_changer = false;

    if let Ok(Some((mut changer, changer_name))) = media_changer(&config, drive) {

        has_changer = true;

        let changer_id_list = changer.online_media_changer_ids()?;

        let status_path = Path::new(TAPE_STATUS_DIR);
        let mut inventory = Inventory::load(status_path)?;

        update_changer_online_status(
            &config,
            &mut inventory,
            &changer_name,
            &changer_id_list,
        )?;
    }

    Ok(has_changer)
}

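/// Write a single snapshot to the media of the pool writer.
///
/// All chunks of the snapshot are appended as chunk archives first; when
/// `append_chunk_archive` signals `leom` (logical end of media), the
/// current medium is marked full and the next writable medium is loaded.
/// The snapshot archive itself is written last. If it does not fit on the
/// current medium, that medium is marked full and the write is retried
/// once on the next volume.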
pub fn backup_snapshot(
    worker: &WorkerTask,
    pool_writer: &mut PoolWriter,
    datastore: Arc<DataStore>,
    snapshot: BackupDir,
) -> Result<(), Error> {

    worker.log(format!("start backup {}:{}", datastore.name(), snapshot));

    let snapshot_reader = SnapshotReader::new(datastore.clone(), snapshot.clone())?;

    let mut chunk_iter = snapshot_reader.chunk_iterator()?.peekable();

    loop {
        // test if we have remaining chunks
        if chunk_iter.peek().is_none() {
            break;
        }

        let uuid = pool_writer.load_writable_media(worker)?;

        let (leom, _bytes) = pool_writer.append_chunk_archive(&datastore, &mut chunk_iter)?;

        if leom {
            pool_writer.set_media_status_full(&uuid)?;
        }
    }

    let uuid = pool_writer.load_writable_media(worker)?;

    let (done, _bytes) = pool_writer.append_snapshot_archive(&snapshot_reader)?;

    if !done {
        // does not fit on tape, so we try on the next volume
        pool_writer.set_media_status_full(&uuid)?;

        pool_writer.load_writable_media(worker)?;
        let (done, _bytes) = pool_writer.append_snapshot_archive(&snapshot_reader)?;

        if !done {
            bail!("write_snapshot_archive failed on second media");
        }
    }

    worker.log(format!("end backup {}:{}", datastore.name(), snapshot));

    Ok(())
}