[proxmox-backup.git] / src / backup / datastore.rs
use failure::*;

use std::io;
use std::path::{PathBuf, Path};
use std::collections::HashMap;
use lazy_static::lazy_static;
use std::sync::{Mutex, Arc};

use crate::tools;
use crate::config::datastore;
use super::chunk_store::*;
use super::fixed_index::*;
use super::dynamic_index::*;
use super::index::*;
use super::backup_info::*;

lazy_static!{
    static ref datastore_map: Mutex<HashMap<String, Arc<DataStore>>> = Mutex::new(HashMap::new());
}

/// Datastore Management
///
/// A Datastore can store several backups, and provides the
/// management interface for backup.
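///
/// # Example
///
/// A minimal usage sketch (the datastore name "store1" and the crate
/// path below are illustrative assumptions, not part of this file):
///
/// ```ignore
/// use proxmox_backup::backup::DataStore;
///
/// // look up (and cache) the configured datastore named "store1"
/// let store = DataStore::lookup_datastore("store1")?;
///
/// // list the backups below its base path
/// let backups = store.list_backups()?;
/// println!("found {} backups", backups.len());
/// ```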
529de6c7 25pub struct DataStore {
1629d2ad 26 chunk_store: Arc<ChunkStore>,
64e53b28 27 gc_mutex: Mutex<bool>,
529de6c7
DM
28}
29
30impl DataStore {
31
    pub fn lookup_datastore(name: &str) -> Result<Arc<DataStore>, Error> {

        let config = datastore::config()?;
        let (_, store_config) = config.sections.get(name)
            .ok_or(format_err!("no such datastore '{}'", name))?;

        let path = store_config["path"].as_str().unwrap();

        let mut map = datastore_map.lock().unwrap();

        if let Some(datastore) = map.get(name) {
            // Compare Config - if changed, create new Datastore object!
            if datastore.chunk_store.base == PathBuf::from(path) {
                return Ok(datastore.clone());
            }
        }

        let datastore = DataStore::open(name)?;

        let datastore = Arc::new(datastore);
        map.insert(name.to_string(), datastore.clone());

        Ok(datastore)
    }

    pub fn open(store_name: &str) -> Result<Self, Error> {

        let config = datastore::config()?;
        let (_, store_config) = config.sections.get(store_name)
            .ok_or(format_err!("no such datastore '{}'", store_name))?;

        let path = store_config["path"].as_str().unwrap();

        let chunk_store = ChunkStore::open(store_name, path)?;

        Ok(Self {
            chunk_store: Arc::new(chunk_store),
            gc_mutex: Mutex::new(false),
        })
    }

    pub fn get_chunk_iterator(
        &self,
        print_percentage: bool,
    ) -> Result<
        impl Iterator<Item = Result<tools::fs::ReadDirEntry, Error>>,
        Error
    > {
        self.chunk_store.get_chunk_iterator(print_percentage)
    }

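    /// Create a writer for a new fixed index file (".fidx").
    ///
    /// Fixed indexes reference equally sized chunks, while dynamic indexes
    /// (".didx", see below) reference variable sized chunks. A minimal
    /// sketch, with made-up file name and sizes (64 MiB image, 4 MiB chunks):
    ///
    /// ```ignore
    /// let writer = store.create_fixed_writer("image.fidx", 64*1024*1024, 4*1024*1024)?;
    /// ```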
    pub fn create_fixed_writer<P: AsRef<Path>>(&self, filename: P, size: usize, chunk_size: usize) -> Result<FixedIndexWriter, Error> {

        let index = FixedIndexWriter::create(self.chunk_store.clone(), filename.as_ref(), size, chunk_size)?;

        Ok(index)
    }

    pub fn open_fixed_reader<P: AsRef<Path>>(&self, filename: P) -> Result<FixedIndexReader, Error> {

        let index = FixedIndexReader::open(self.chunk_store.clone(), filename.as_ref())?;

        Ok(index)
    }

    pub fn create_dynamic_writer<P: AsRef<Path>>(
        &self, filename: P,
        chunk_size: usize
    ) -> Result<DynamicIndexWriter, Error> {

        let index = DynamicIndexWriter::create(
            self.chunk_store.clone(), filename.as_ref(), chunk_size)?;

        Ok(index)
    }

    pub fn open_dynamic_reader<P: AsRef<Path>>(&self, filename: P) -> Result<DynamicIndexReader, Error> {

        let index = DynamicIndexReader::open(self.chunk_store.clone(), filename.as_ref())?;

        Ok(index)
    }

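    /// Open an index file, selecting the reader type from the file
    /// extension ("didx" or "fidx").
    ///
    /// A minimal sketch (the file name is a made-up example):
    ///
    /// ```ignore
    /// let index: Box<dyn IndexFile + Send> = store.open_index("catalog.didx")?;
    /// ```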
    pub fn open_index<P>(&self, filename: P) -> Result<Box<dyn IndexFile + Send>, Error>
    where
        P: AsRef<Path>,
    {
        let filename = filename.as_ref();
        let out: Box<dyn IndexFile + Send> =
            match filename.extension().and_then(|ext| ext.to_str()) {
                Some("didx") => Box::new(self.open_dynamic_reader(filename)?),
                Some("fidx") => Box::new(self.open_fixed_reader(filename)?),
                _ => bail!("cannot open index file of unknown type: {:?}", filename),
            };
        Ok(out)
    }

    pub fn base_path(&self) -> PathBuf {
        self.chunk_store.base_path()
    }

    /// Remove a backup directory including all content
    pub fn remove_backup_dir(&self, backup_dir: &BackupDir,
    ) -> Result<(), io::Error> {

        let relative_path = backup_dir.relative_path();
        let mut full_path = self.base_path();
        full_path.push(&relative_path);

        log::info!("removing backup {:?}", full_path);
        std::fs::remove_dir_all(full_path)?;

        Ok(())
    }

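    /// Create the directory for `backup_dir` below the datastore base path.
    ///
    /// Intermediate (group) directories are created as needed. Returns the
    /// relative path of the directory together with a flag that is `true`
    /// if the directory was newly created, and `false` if it already existed.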
    pub fn create_backup_dir(&self, backup_dir: &BackupDir) -> Result<(PathBuf, bool), io::Error> {

        // create intermediate path first:
        let mut full_path = self.base_path();
        full_path.push(backup_dir.group().group_path());
        std::fs::create_dir_all(&full_path)?;

        let relative_path = backup_dir.relative_path();
        let mut full_path = self.base_path();
        full_path.push(&relative_path);

        // create the last component now
        match std::fs::create_dir(&full_path) {
            Ok(_) => Ok((relative_path, true)),
            Err(ref e) if e.kind() == io::ErrorKind::AlreadyExists => Ok((relative_path, false)),
            Err(e) => Err(e)
        }
    }

    pub fn list_backups(&self) -> Result<Vec<BackupInfo>, Error> {
        let path = self.base_path();
        BackupInfo::list_backups(&path)
    }

    pub fn list_images(&self) -> Result<Vec<PathBuf>, Error> {
        let base = self.base_path();

        let mut list = vec![];

        use walkdir::WalkDir;

        let walker = WalkDir::new(&base).same_file_system(true).into_iter();

        // make sure we skip .chunks (and other hidden files to keep it simple)
        fn is_hidden(entry: &walkdir::DirEntry) -> bool {
            entry.file_name()
                .to_str()
                .map(|s| s.starts_with("."))
                .unwrap_or(false)
        }

        for entry in walker.filter_entry(|e| !is_hidden(e)) {
            let path = entry?.into_path();
            if let Some(ext) = path.extension() {
                if ext == "fidx" {
                    list.push(path);
                } else if ext == "didx" {
                    list.push(path);
                }
            }
        }

        Ok(list)
    }

    fn mark_used_chunks(&self, status: &mut GarbageCollectionStatus) -> Result<(), Error> {

        let image_list = self.list_images()?;

        for path in image_list {
            if let Some(ext) = path.extension() {
                if ext == "fidx" {
                    let index = self.open_fixed_reader(&path)?;
                    index.mark_used_chunks(status)?;
                } else if ext == "didx" {
                    let index = self.open_dynamic_reader(&path)?;
                    index.mark_used_chunks(status)?;
                }
            }
        }

        Ok(())
    }

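    /// Run garbage collection on the chunk store.
    ///
    /// Phase 1 marks all chunks referenced by the index files found below
    /// the base path; phase 2 sweeps unreferenced chunks from the chunk
    /// store. If another collection is already running (`gc_mutex` is
    /// locked), the call just prints a message and returns.
    ///
    /// A minimal usage sketch (the datastore name "store1" is hypothetical):
    ///
    /// ```ignore
    /// let store = DataStore::lookup_datastore("store1")?;
    /// store.garbage_collection()?;
    /// ```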
    pub fn garbage_collection(&self) -> Result<(), Error> {

        if let Ok(ref mut _mutex) = self.gc_mutex.try_lock() {

            let mut gc_status = GarbageCollectionStatus::default();
            gc_status.used_bytes = 0;

            println!("Start GC phase1 (mark chunks)");

            self.mark_used_chunks(&mut gc_status)?;

            println!("Start GC phase2 (sweep unused chunks)");
            self.chunk_store.sweep_unused_chunks(&mut gc_status)?;

            println!("Used bytes: {}", gc_status.used_bytes);
            println!("Used chunks: {}", gc_status.used_chunks);
            println!("Disk bytes: {}", gc_status.disk_bytes);
            println!("Disk chunks: {}", gc_status.disk_chunks);

        } else {
            println!("Start GC failed - (already running/locked)");
        }

        Ok(())
    }
}