// src/backup/datastore.rs

use std::collections::HashMap;
use std::io;
use std::path::{Path, PathBuf};
use std::sync::{Arc, Mutex};

use failure::*;
use lazy_static::lazy_static;

use super::backup_info::BackupDir;
use super::chunk_store::{ChunkStore, GarbageCollectionStatus};
use super::dynamic_index::{DynamicIndexReader, DynamicIndexWriter};
use super::fixed_index::{FixedIndexReader, FixedIndexWriter};
use super::index::*;
use super::{DataBlob, ArchiveType, archive_type};
use crate::config::datastore;
use crate::server::WorkerTask;
use crate::tools;

lazy_static! {
    static ref DATASTORE_MAP: Mutex<HashMap<String, Arc<DataStore>>> = Mutex::new(HashMap::new());
}

/// Datastore Management
///
/// A Datastore can store several backups, and provides the
/// management interface for backups.
pub struct DataStore {
    chunk_store: Arc<ChunkStore>,
    gc_mutex: Mutex<bool>,
    last_gc_status: Mutex<GarbageCollectionStatus>,
}

impl DataStore {

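    /// Look up a datastore by name in the datastore config and return a shared
    /// handle cached in `DATASTORE_MAP` (a new `DataStore` is opened if the
    /// configured path changed).
    ///
    /// A minimal usage sketch; the store name "store1" is a hypothetical entry
    /// in the datastore configuration:
    ///
    /// ```ignore
    /// let store = DataStore::lookup_datastore("store1")?;
    /// println!("datastore base path: {:?}", store.base_path());
    /// ```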
    pub fn lookup_datastore(name: &str) -> Result<Arc<DataStore>, Error> {

        let config = datastore::config()?;
        let (_, store_config) = config.sections.get(name)
            .ok_or(format_err!("no such datastore '{}'", name))?;

        let path = store_config["path"].as_str().unwrap();

        let mut map = DATASTORE_MAP.lock().unwrap();

        if let Some(datastore) = map.get(name) {
            // Compare Config - if changed, create new Datastore object!
            if datastore.chunk_store.base == PathBuf::from(path) {
                return Ok(datastore.clone());
            }
        }

        let datastore = DataStore::open(name)?;

        let datastore = Arc::new(datastore);
        map.insert(name.to_string(), datastore.clone());

        Ok(datastore)
    }

    pub fn open(store_name: &str) -> Result<Self, Error> {

        let config = datastore::config()?;
        let (_, store_config) = config.sections.get(store_name)
            .ok_or(format_err!("no such datastore '{}'", store_name))?;

        let path = store_config["path"].as_str().unwrap();

        let chunk_store = ChunkStore::open(store_name, path)?;

        let gc_status = GarbageCollectionStatus::default();

        Ok(Self {
            chunk_store: Arc::new(chunk_store),
            gc_mutex: Mutex::new(false),
            last_gc_status: Mutex::new(gc_status),
        })
    }

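    /// Iterate over the entries of the on-disk chunk store (the hidden
    /// `.chunks` directory).
    ///
    /// A minimal sketch of walking the iterator; treating the second tuple
    /// element as a progress value is an assumption:
    ///
    /// ```ignore
    /// let mut count = 0;
    /// for (entry, _progress) in store.get_chunk_iterator()? {
    ///     let _entry = entry?;
    ///     count += 1;
    /// }
    /// println!("{} chunk directory entries", count);
    /// ```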
    pub fn get_chunk_iterator(
        &self,
    ) -> Result<
        impl Iterator<Item = (Result<tools::fs::ReadDirEntry, Error>, usize)>,
        Error
    > {
        self.chunk_store.get_chunk_iterator()
    }

    pub fn create_fixed_writer<P: AsRef<Path>>(&self, filename: P, size: usize, chunk_size: usize) -> Result<FixedIndexWriter, Error> {

        let index = FixedIndexWriter::create(self.chunk_store.clone(), filename.as_ref(), size, chunk_size)?;

        Ok(index)
    }

    pub fn open_fixed_reader<P: AsRef<Path>>(&self, filename: P) -> Result<FixedIndexReader, Error> {

        let full_path = self.chunk_store.relative_path(filename.as_ref());

        let index = FixedIndexReader::open(&full_path)?;

        Ok(index)
    }

    pub fn create_dynamic_writer<P: AsRef<Path>>(
        &self, filename: P,
    ) -> Result<DynamicIndexWriter, Error> {

        let index = DynamicIndexWriter::create(
            self.chunk_store.clone(), filename.as_ref())?;

        Ok(index)
    }

    pub fn open_dynamic_reader<P: AsRef<Path>>(&self, filename: P) -> Result<DynamicIndexReader, Error> {

        let full_path = self.chunk_store.relative_path(filename.as_ref());

        let index = DynamicIndexReader::open(&full_path)?;

        Ok(index)
    }

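    /// Open either a fixed or a dynamic index file, selected by its archive
    /// type, behind a common `IndexFile` trait object.
    ///
    /// A minimal sketch; the file name is hypothetical and assumes an index
    /// extension recognized by `archive_type()`:
    ///
    /// ```ignore
    /// let index = store.open_index("vm-100-disk-0.img.fidx")?;
    /// println!("index references {} chunks", index.index_count());
    /// ```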
    pub fn open_index<P>(&self, filename: P) -> Result<Box<dyn IndexFile + Send>, Error>
    where
        P: AsRef<Path>,
    {
        let filename = filename.as_ref();
        let out: Box<dyn IndexFile + Send> =
            match archive_type(filename)? {
                ArchiveType::DynamicIndex => Box::new(self.open_dynamic_reader(filename)?),
                ArchiveType::FixedIndex => Box::new(self.open_fixed_reader(filename)?),
                _ => bail!("cannot open index file of unknown type: {:?}", filename),
            };
        Ok(out)
    }

    pub fn base_path(&self) -> PathBuf {
        self.chunk_store.base_path()
    }

    /// Remove a backup directory including all content
    pub fn remove_backup_dir(&self, backup_dir: &BackupDir,
    ) -> Result<(), io::Error> {

        let relative_path = backup_dir.relative_path();
        let mut full_path = self.base_path();
        full_path.push(&relative_path);

        log::info!("removing backup {:?}", full_path);
        std::fs::remove_dir_all(full_path)?;

        Ok(())
    }

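    /// Create the directory for a backup snapshot, including the intermediate
    /// backup group path. Returns the relative snapshot path together with a
    /// flag that is `true` if the final component was newly created and
    /// `false` if it already existed.
    ///
    /// A minimal sketch of how a caller might use the result; `backup_dir` is
    /// assumed to be a `BackupDir` obtained elsewhere:
    ///
    /// ```ignore
    /// let (relative_path, is_new) = store.create_backup_dir(&backup_dir)?;
    /// if !is_new {
    ///     // the snapshot directory already existed
    /// }
    /// ```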
    pub fn create_backup_dir(&self, backup_dir: &BackupDir) -> Result<(PathBuf, bool), io::Error> {

        // create intermediate path first:
        let mut full_path = self.base_path();
        full_path.push(backup_dir.group().group_path());
        std::fs::create_dir_all(&full_path)?;

        let relative_path = backup_dir.relative_path();
        let mut full_path = self.base_path();
        full_path.push(&relative_path);

        // create the last component now
        match std::fs::create_dir(&full_path) {
            Ok(_) => Ok((relative_path, true)),
            Err(ref e) if e.kind() == io::ErrorKind::AlreadyExists => Ok((relative_path, false)),
            Err(e) => Err(e)
        }
    }

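    /// Walk the datastore base directory and collect the paths of all fixed
    /// and dynamic index files, skipping hidden entries such as `.chunks`.
    ///
    /// A minimal usage sketch:
    ///
    /// ```ignore
    /// for image in store.list_images()? {
    ///     println!("found index file: {:?}", image);
    /// }
    /// ```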
    pub fn list_images(&self) -> Result<Vec<PathBuf>, Error> {
        let base = self.base_path();

        let mut list = vec![];

        use walkdir::WalkDir;

        let walker = WalkDir::new(&base).same_file_system(true).into_iter();

        // make sure we skip .chunks (and other hidden files to keep it simple)
        fn is_hidden(entry: &walkdir::DirEntry) -> bool {
            entry.file_name()
                .to_str()
                .map(|s| s.starts_with("."))
                .unwrap_or(false)
        }

        for entry in walker.filter_entry(|e| !is_hidden(e)) {
            let path = entry?.into_path();
            if let Ok(archive_type) = archive_type(&path) {
                if archive_type == ArchiveType::FixedIndex || archive_type == ArchiveType::DynamicIndex {
                    list.push(path);
                }
            }
        }

        Ok(list)
    }

    // mark chunks used by ``index`` as used
    fn index_mark_used_chunks<I: IndexFile>(
        &self,
        index: I,
        file_name: &Path, // only used for error reporting
        status: &mut GarbageCollectionStatus,
    ) -> Result<(), Error> {

        status.index_file_count += 1;
        status.index_data_bytes += index.index_bytes();

        for pos in 0..index.index_count() {
            tools::fail_on_shutdown()?;
            let digest = index.index_digest(pos).unwrap();
            if let Err(err) = self.chunk_store.touch_chunk(digest) {
                bail!("unable to access chunk {}, required by {:?} - {}",
                      proxmox::tools::digest_to_hex(digest), file_name, err);
            }
        }
        Ok(())
    }

    fn mark_used_chunks(&self, status: &mut GarbageCollectionStatus) -> Result<(), Error> {

        let image_list = self.list_images()?;

        for path in image_list {

            tools::fail_on_shutdown()?;

            if let Ok(archive_type) = archive_type(&path) {
                if archive_type == ArchiveType::FixedIndex {
                    let index = self.open_fixed_reader(&path)?;
                    self.index_mark_used_chunks(index, &path, status)?;
                } else if archive_type == ArchiveType::DynamicIndex {
                    let index = self.open_dynamic_reader(&path)?;
                    self.index_mark_used_chunks(index, &path, status)?;
                }
            }
        }

        Ok(())
    }

    pub fn last_gc_status(&self) -> GarbageCollectionStatus {
        self.last_gc_status.lock().unwrap().clone()
    }

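    /// Run garbage collection: phase 1 marks (touches) all chunks referenced
    /// by the index files, phase 2 sweeps unused chunks, taking the oldest
    /// active writer into account. Fails if a collection is already running.
    ///
    /// A minimal sketch; `worker` is assumed to be the `Arc<WorkerTask>` of
    /// the task driving the collection:
    ///
    /// ```ignore
    /// if let Err(err) = store.garbage_collection(worker.clone()) {
    ///     worker.log(&format!("GC failed: {}", err));
    /// }
    /// ```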
    pub fn garbage_collection(&self, worker: Arc<WorkerTask>) -> Result<(), Error> {

        if let Ok(ref mut _mutex) = self.gc_mutex.try_lock() {

            let _exclusive_lock = self.chunk_store.try_exclusive_lock()?;

            let oldest_writer = self.chunk_store.oldest_writer();

            let mut gc_status = GarbageCollectionStatus::default();
            gc_status.upid = Some(worker.to_string());

            worker.log("Start GC phase1 (mark used chunks)");

            self.mark_used_chunks(&mut gc_status)?;

            worker.log("Start GC phase2 (sweep unused chunks)");
            self.chunk_store.sweep_unused_chunks(oldest_writer, &mut gc_status, worker.clone())?;

            worker.log(&format!("Removed bytes: {}", gc_status.removed_bytes));
            worker.log(&format!("Removed chunks: {}", gc_status.removed_chunks));
            worker.log(&format!("Original data bytes: {}", gc_status.index_data_bytes));

            if gc_status.index_data_bytes > 0 {
                let comp_per = (gc_status.disk_bytes*100)/gc_status.index_data_bytes;
                worker.log(&format!("Disk bytes: {} ({} %)", gc_status.disk_bytes, comp_per));
            }

            worker.log(&format!("Disk chunks: {}", gc_status.disk_chunks));

            if gc_status.disk_chunks > 0 {
                let avg_chunk = gc_status.disk_bytes/(gc_status.disk_chunks as u64);
                worker.log(&format!("Average chunk size: {}", avg_chunk));
            }

            *self.last_gc_status.lock().unwrap() = gc_status;

        } else {
            bail!("Start GC failed - (already running/locked)");
        }

        Ok(())
    }

    pub fn chunk_path(&self, digest: &[u8; 32]) -> (PathBuf, String) {
        self.chunk_store.chunk_path(digest)
    }

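    /// Insert a chunk blob into the underlying chunk store, addressed by its
    /// digest.
    ///
    /// A minimal sketch; `chunk` and `digest` are assumed to come from the
    /// backup writer, and reading the returned pair as (already present,
    /// size in bytes) is an assumption:
    ///
    /// ```ignore
    /// let (is_duplicate, size) = store.insert_chunk(&chunk, &digest)?;
    /// if is_duplicate {
    ///     // chunk was already stored, nothing new written
    /// }
    /// ```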
    pub fn insert_chunk(
        &self,
        chunk: &DataBlob,
        digest: &[u8; 32],
    ) -> Result<(bool, u64), Error> {
        self.chunk_store.insert_chunk(chunk, digest)
    }
}