]> git.proxmox.com Git - proxmox-backup.git/blame - src/backup/datastore.rs
src/bin/proxmox-backup-client.rs: implement restore using BackupReader
[proxmox-backup.git] / src / backup / datastore.rs
CommitLineData
529de6c7
DM
1use failure::*;
2
8731e40a 3use std::io;
3d5c11e5 4use std::path::{PathBuf, Path};
2c32fdde
DM
5use std::collections::HashMap;
6use lazy_static::lazy_static;
7use std::sync::{Mutex, Arc};
529de6c7 8
e25736b4 9use crate::tools;
529de6c7
DM
10use crate::config::datastore;
11use super::chunk_store::*;
91a905b6 12use super::fixed_index::*;
93d5d779 13use super::dynamic_index::*;
5de2bced 14use super::index::*;
b3483782 15use super::backup_info::*;
f98ac774 16use super::DataChunk;
d4b59ae0 17use crate::server::WorkerTask;
529de6c7 18
lazy_static!{
    // Process-wide cache of open datastores, keyed by datastore name.
    // Filled and consulted by DataStore::lookup_datastore().
    static ref DATASTORE_MAP: Mutex<HashMap<String, Arc<DataStore>>> = Mutex::new(HashMap::new());
}
ff3d3100 22
e5064ba6
DM
/// Datastore Management
///
/// A Datastore can store several backups, and provides the
/// management interface for backup.
pub struct DataStore {
    // On-disk chunk store; shared via Arc with index readers/writers
    chunk_store: Arc<ChunkStore>,
    // Acquired with try_lock() by garbage_collection(), so at most one
    // GC can run per datastore at a time
    gc_mutex: Mutex<bool>,
    // Status of the most recent garbage collection run
    last_gc_status: Mutex<GarbageCollectionStatus>,
}
32
33impl DataStore {
34
2c32fdde
DM
35 pub fn lookup_datastore(name: &str) -> Result<Arc<DataStore>, Error> {
36
37 let config = datastore::config()?;
38 let (_, store_config) = config.sections.get(name)
39 .ok_or(format_err!("no such datastore '{}'", name))?;
40
41 let path = store_config["path"].as_str().unwrap();
42
515688d1 43 let mut map = DATASTORE_MAP.lock().unwrap();
2c32fdde
DM
44
45 if let Some(datastore) = map.get(name) {
46 // Compare Config - if changed, create new Datastore object!
a198d74f 47 if datastore.chunk_store.base == PathBuf::from(path) {
2c32fdde
DM
48 return Ok(datastore.clone());
49 }
50 }
51
f0a61124
DM
52 let datastore = DataStore::open(name)?;
53
54 let datastore = Arc::new(datastore);
55 map.insert(name.to_string(), datastore.clone());
2c32fdde 56
f0a61124 57 Ok(datastore)
2c32fdde
DM
58 }
59
af6f80d3 60 pub fn open(store_name: &str) -> Result<Self, Error> {
529de6c7
DM
61
62 let config = datastore::config()?;
63 let (_, store_config) = config.sections.get(store_name)
64 .ok_or(format_err!("no such datastore '{}'", store_name))?;
65
66 let path = store_config["path"].as_str().unwrap();
67
277fc5a3 68 let chunk_store = ChunkStore::open(store_name, path)?;
529de6c7 69
f2b99c34
DM
70 let gc_status = GarbageCollectionStatus::default();
71
529de6c7 72 Ok(Self {
1629d2ad 73 chunk_store: Arc::new(chunk_store),
64e53b28 74 gc_mutex: Mutex::new(false),
f2b99c34 75 last_gc_status: Mutex::new(gc_status),
529de6c7
DM
76 })
77 }
78
d59397e6
WB
79 pub fn get_chunk_iterator(
80 &self,
eff25eca 81 print_percentage: bool,
d59397e6
WB
82 ) -> Result<
83 impl Iterator<Item = Result<tools::fs::ReadDirEntry, Error>>,
84 Error
85 > {
eff25eca 86 self.chunk_store.get_chunk_iterator(print_percentage)
d59397e6
WB
87 }
88
91a905b6 89 pub fn create_fixed_writer<P: AsRef<Path>>(&self, filename: P, size: usize, chunk_size: usize) -> Result<FixedIndexWriter, Error> {
529de6c7 90
91a905b6 91 let index = FixedIndexWriter::create(self.chunk_store.clone(), filename.as_ref(), size, chunk_size)?;
529de6c7
DM
92
93 Ok(index)
94 }
95
91a905b6 96 pub fn open_fixed_reader<P: AsRef<Path>>(&self, filename: P) -> Result<FixedIndexReader, Error> {
529de6c7 97
91a905b6 98 let index = FixedIndexReader::open(self.chunk_store.clone(), filename.as_ref())?;
529de6c7
DM
99
100 Ok(index)
101 }
3d5c11e5 102
93d5d779 103 pub fn create_dynamic_writer<P: AsRef<Path>>(
0433db19 104 &self, filename: P,
93d5d779 105 ) -> Result<DynamicIndexWriter, Error> {
0433db19 106
93d5d779 107 let index = DynamicIndexWriter::create(
976595e1 108 self.chunk_store.clone(), filename.as_ref())?;
0433db19
DM
109
110 Ok(index)
111 }
ff3d3100 112
93d5d779 113 pub fn open_dynamic_reader<P: AsRef<Path>>(&self, filename: P) -> Result<DynamicIndexReader, Error> {
77703d95 114
d48a9955
DM
115 let full_path = self.chunk_store.relative_path(filename.as_ref());
116
117 let index = DynamicIndexReader::open(&full_path)?;
77703d95
DM
118
119 Ok(index)
120 }
121
5de2bced
WB
122 pub fn open_index<P>(&self, filename: P) -> Result<Box<dyn IndexFile + Send>, Error>
123 where
124 P: AsRef<Path>,
125 {
126 let filename = filename.as_ref();
127 let out: Box<dyn IndexFile + Send> =
128 match filename.extension().and_then(|ext| ext.to_str()) {
129 Some("didx") => Box::new(self.open_dynamic_reader(filename)?),
130 Some("fidx") => Box::new(self.open_fixed_reader(filename)?),
131 _ => bail!("cannot open index file of unknown type: {:?}", filename),
132 };
133 Ok(out)
134 }
135
ff3d3100
DM
136 pub fn base_path(&self) -> PathBuf {
137 self.chunk_store.base_path()
138 }
139
8f579717 140 /// Remove a backup directory including all content
38b0dfa5 141 pub fn remove_backup_dir(&self, backup_dir: &BackupDir,
8f579717
DM
142 ) -> Result<(), io::Error> {
143
38b0dfa5 144 let relative_path = backup_dir.relative_path();
8f579717
DM
145 let mut full_path = self.base_path();
146 full_path.push(&relative_path);
147
148 log::info!("removing backup {:?}", full_path);
149 std::fs::remove_dir_all(full_path)?;
150
151 Ok(())
152 }
153
b3483782 154 pub fn create_backup_dir(&self, backup_dir: &BackupDir) -> Result<(PathBuf, bool), io::Error> {
ff3d3100 155
8731e40a
WB
156 // create intermediate path first:
157 let mut full_path = self.base_path();
b3483782 158 full_path.push(backup_dir.group().group_path());
8731e40a
WB
159 std::fs::create_dir_all(&full_path)?;
160
b3483782
DM
161 let relative_path = backup_dir.relative_path();
162 let mut full_path = self.base_path();
163 full_path.push(&relative_path);
ff3d3100 164
8731e40a
WB
165 // create the last component now
166 match std::fs::create_dir(&full_path) {
167 Ok(_) => Ok((relative_path, true)),
168 Err(ref e) if e.kind() == io::ErrorKind::AlreadyExists => Ok((relative_path, false)),
169 Err(e) => Err(e)
170 }
ff3d3100
DM
171 }
172
3d5c11e5 173 pub fn list_images(&self) -> Result<Vec<PathBuf>, Error> {
ff3d3100 174 let base = self.base_path();
3d5c11e5
DM
175
176 let mut list = vec![];
177
95cea65b
DM
178 use walkdir::WalkDir;
179
180 let walker = WalkDir::new(&base).same_file_system(true).into_iter();
181
182 // make sure we skip .chunks (and other hidden files to keep it simple)
183 fn is_hidden(entry: &walkdir::DirEntry) -> bool {
184 entry.file_name()
185 .to_str()
186 .map(|s| s.starts_with("."))
187 .unwrap_or(false)
188 }
189
190 for entry in walker.filter_entry(|e| !is_hidden(e)) {
191 let path = entry?.into_path();
192 if let Some(ext) = path.extension() {
91a905b6 193 if ext == "fidx" {
95cea65b 194 list.push(path);
93d5d779 195 } else if ext == "didx" {
95cea65b 196 list.push(path);
3d5c11e5
DM
197 }
198 }
199 }
200
201 Ok(list)
202 }
203
64e53b28 204 fn mark_used_chunks(&self, status: &mut GarbageCollectionStatus) -> Result<(), Error> {
3d5c11e5
DM
205
206 let image_list = self.list_images()?;
207
208 for path in image_list {
92da93b2
DM
209
210 tools::fail_on_shutdown()?;
211
77703d95 212 if let Some(ext) = path.extension() {
91a905b6
DM
213 if ext == "fidx" {
214 let index = self.open_fixed_reader(&path)?;
77703d95 215 index.mark_used_chunks(status)?;
93d5d779
DM
216 } else if ext == "didx" {
217 let index = self.open_dynamic_reader(&path)?;
77703d95
DM
218 index.mark_used_chunks(status)?;
219 }
220 }
3d5c11e5
DM
221 }
222
223 Ok(())
f2b99c34
DM
224 }
225
226 pub fn last_gc_status(&self) -> GarbageCollectionStatus {
227 self.last_gc_status.lock().unwrap().clone()
228 }
3d5c11e5 229
d4b59ae0 230 pub fn garbage_collection(&self, worker: Arc<WorkerTask>) -> Result<(), Error> {
3d5c11e5 231
a198d74f 232 if let Ok(ref mut _mutex) = self.gc_mutex.try_lock() {
e95950e4 233
43b13033
DM
234 let _exclusive_lock = self.chunk_store.try_exclusive_lock()?;
235
11861a48
DM
236 let oldest_writer = self.chunk_store.oldest_writer();
237
64e53b28 238 let mut gc_status = GarbageCollectionStatus::default();
f2b99c34 239 gc_status.upid = Some(worker.to_string());
6ea3a0b7 240
d4b59ae0 241 worker.log("Start GC phase1 (mark chunks)");
64e53b28
DM
242
243 self.mark_used_chunks(&mut gc_status)?;
244
d4b59ae0 245 worker.log("Start GC phase2 (sweep unused chunks)");
11861a48 246 self.chunk_store.sweep_unused_chunks(oldest_writer, &mut gc_status)?;
64e53b28 247
d4b59ae0
DM
248 worker.log(&format!("Used bytes: {}", gc_status.used_bytes));
249 worker.log(&format!("Used chunks: {}", gc_status.used_chunks));
250 worker.log(&format!("Disk bytes: {}", gc_status.disk_bytes));
251 worker.log(&format!("Disk chunks: {}", gc_status.disk_chunks));
64e53b28 252
f2b99c34
DM
253 *self.last_gc_status.lock().unwrap() = gc_status;
254
64e53b28 255 } else {
d4b59ae0 256 bail!("Start GC failed - (already running/locked)");
64e53b28 257 }
3d5c11e5
DM
258
259 Ok(())
260 }
3b7ade9e 261
d48a9955
DM
262 pub fn chunk_path(&self, digest:&[u8; 32]) -> (PathBuf, String) {
263 self.chunk_store.chunk_path(digest)
264 }
265
f98ac774 266 pub fn insert_chunk(
3b7ade9e 267 &self,
f98ac774 268 chunk: &DataChunk,
3b7ade9e 269 ) -> Result<(bool, u64), Error> {
f98ac774 270 self.chunk_store.insert_chunk(chunk)
3b7ade9e 271 }
529de6c7 272}