use failure::*;

use chrono::prelude::*;

use std::path::{PathBuf, Path};
use std::collections::HashMap;
use lazy_static::lazy_static;
use std::sync::{Mutex, Arc};

use crate::tools;
use crate::config::datastore;
use super::chunk_store::*;
use super::fixed_index::*;
use super::dynamic_index::*;

use chrono::{Utc, TimeZone};

/// Datastore Management
///
/// A datastore can store several backups, and provides the
/// management interface for backups.
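///
/// # Example
///
/// A minimal usage sketch; the store name "store1" is an assumption and
/// not part of this file:
///
/// ```ignore
/// let store = DataStore::lookup_datastore("store1")?;
/// store.garbage_collection()?;
/// ```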
pub struct DataStore {
    chunk_store: Arc<ChunkStore>,
    gc_mutex: Mutex<bool>,
}

/// Detailed Backup Information
#[derive(Debug)]
pub struct BackupInfo {
    /// Type of backup
    pub backup_type: String,
    /// Unique (for this type) ID
    pub backup_id: String,
    /// Backup timestamp
    pub backup_time: DateTime<Utc>,
}

lazy_static!{
    static ref datastore_map: Mutex<HashMap<String, Arc<DataStore>>> = Mutex::new(HashMap::new());
}

impl DataStore {

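    /// Look up a shared `DataStore` by its name in the datastore config.
    ///
    /// Instances are cached in a process-wide map; repeated lookups return
    /// the same `Arc` as long as the configured path has not changed.
    ///
    /// # Example
    ///
    /// A minimal sketch; the store name "store1" is an assumption:
    ///
    /// ```ignore
    /// let a = DataStore::lookup_datastore("store1")?;
    /// let b = DataStore::lookup_datastore("store1")?;
    /// assert!(Arc::ptr_eq(&a, &b)); // second lookup hits the cache
    /// ```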
    pub fn lookup_datastore(name: &str) -> Result<Arc<DataStore>, Error> {

        let config = datastore::config()?;
        let (_, store_config) = config.sections.get(name)
            .ok_or(format_err!("no such datastore '{}'", name))?;

        let path = store_config["path"].as_str().unwrap();

        let mut map = datastore_map.lock().unwrap();

        if let Some(datastore) = map.get(name) {
            // Compare Config - if changed, create new Datastore object!
            if datastore.chunk_store.base == PathBuf::from(path) {
                return Ok(datastore.clone());
            }
        }

        if let Ok(datastore) = DataStore::open(name) {
            let datastore = Arc::new(datastore);
            map.insert(name.to_string(), datastore.clone());
            return Ok(datastore);
        }

        bail!("store not found");
    }

    pub fn open(store_name: &str) -> Result<Self, Error> {

        let config = datastore::config()?;
        let (_, store_config) = config.sections.get(store_name)
            .ok_or(format_err!("no such datastore '{}'", store_name))?;

        let path = store_config["path"].as_str().unwrap();

        let chunk_store = ChunkStore::open(store_name, path)?;

        Ok(Self {
            chunk_store: Arc::new(chunk_store),
            gc_mutex: Mutex::new(false),
        })
    }

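    /// Create a writer for a new fixed-size index (`.fidx`) file.
    ///
    /// # Example
    ///
    /// A sketch with assumed values (file name, an 8 GiB image, 4 MiB chunks):
    ///
    /// ```ignore
    /// let writer = store.create_fixed_writer(
    ///     "vm/100/2019-03-05T08:12:00/disk-0.fidx",
    ///     8 * 1024 * 1024 * 1024,
    ///     4 * 1024 * 1024,
    /// )?;
    /// ```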
    pub fn create_fixed_writer<P: AsRef<Path>>(&self, filename: P, size: usize, chunk_size: usize) -> Result<FixedIndexWriter, Error> {

        let index = FixedIndexWriter::create(self.chunk_store.clone(), filename.as_ref(), size, chunk_size)?;

        Ok(index)
    }

    pub fn open_fixed_reader<P: AsRef<Path>>(&self, filename: P) -> Result<FixedIndexReader, Error> {

        let index = FixedIndexReader::open(self.chunk_store.clone(), filename.as_ref())?;

        Ok(index)
    }

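    /// Create a writer for a new dynamic index (`.didx`) file, used for
    /// dynamically sized chunks.
    ///
    /// # Example
    ///
    /// A sketch with assumed values (file name, 4 MiB chunk size):
    ///
    /// ```ignore
    /// let writer = store.create_dynamic_writer(
    ///     "host/pve01/2019-03-05T08:12:00/root.didx",
    ///     4 * 1024 * 1024,
    /// )?;
    /// ```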
    pub fn create_dynamic_writer<P: AsRef<Path>>(
        &self, filename: P,
        chunk_size: usize
    ) -> Result<DynamicIndexWriter, Error> {

        let index = DynamicIndexWriter::create(
            self.chunk_store.clone(), filename.as_ref(), chunk_size)?;

        Ok(index)
    }

    pub fn open_dynamic_reader<P: AsRef<Path>>(&self, filename: P) -> Result<DynamicIndexReader, Error> {

        let index = DynamicIndexReader::open(self.chunk_store.clone(), filename.as_ref())?;

        Ok(index)
    }

    pub fn base_path(&self) -> PathBuf {
        self.chunk_store.base_path()
    }

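    /// Compute the relative path of a backup snapshot directory:
    /// `<backup_type>/<backup_id>/<timestamp>`.
    ///
    /// # Example
    ///
    /// A sketch with made-up values:
    ///
    /// ```ignore
    /// let time = Utc.ymd(2019, 3, 5).and_hms(8, 12, 0);
    /// let dir = store.get_backup_dir("host", "pve01", time);
    /// assert_eq!(dir, PathBuf::from("host/pve01/2019-03-05T08:12:00"));
    /// ```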
    pub fn get_backup_dir(
        &self,
        backup_type: &str,
        backup_id: &str,
        backup_time: DateTime<Utc>,
    ) -> PathBuf {

        let mut relative_path = PathBuf::new();

        relative_path.push(backup_type);

        relative_path.push(backup_id);

        let date_str = backup_time.format("%Y-%m-%dT%H:%M:%S").to_string();

        relative_path.push(&date_str);

        relative_path
    }

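    /// Create the snapshot directory (and any missing parents) below the
    /// datastore base path and return its relative path.
    ///
    /// # Example
    ///
    /// A sketch with made-up values; the Unix timestamp corresponds to
    /// 2019-03-05T08:12:00 UTC:
    ///
    /// ```ignore
    /// let relative = store.create_backup_dir("vm", "100", 1551773520)?;
    /// ```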
    pub fn create_backup_dir(
        &self,
        backup_type: &str,
        backup_id: &str,
        backup_time: i64,
    ) -> Result<PathBuf, Error> {
        let mut relative_path = PathBuf::new();

        relative_path.push(backup_type);

        relative_path.push(backup_id);

        let dt = Utc.timestamp(backup_time, 0);
        let date_str = dt.format("%Y-%m-%dT%H:%M:%S").to_string();

        println!("date: {}", date_str);

        relative_path.push(&date_str);

        let mut full_path = self.base_path();
        full_path.push(&relative_path);

        std::fs::create_dir_all(&full_path)?;

        Ok(relative_path)
    }

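    /// List all backups found below the datastore base path.
    ///
    /// Scans `<type>/<id>/<timestamp>` directories, where the type is one of
    /// `host`, `vm` or `ct`.
    ///
    /// # Example
    ///
    /// A minimal sketch:
    ///
    /// ```ignore
    /// for info in store.list_backups()? {
    ///     println!("{}/{} at {}", info.backup_type, info.backup_id, info.backup_time);
    /// }
    /// ```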
    pub fn list_backups(&self) -> Result<Vec<BackupInfo>, Error> {
        let path = self.base_path();

        let mut list = vec![];

        lazy_static! {
            static ref BACKUP_TYPE_REGEX: regex::Regex = regex::Regex::new(r"^(host|vm|ct)$").unwrap();
            static ref BACKUP_ID_REGEX: regex::Regex = regex::Regex::new(r"^[A-Za-z][A-Za-z0-9_-]+$").unwrap();
            static ref BACKUP_DATE_REGEX: regex::Regex = regex::Regex::new(
                r"^[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}$").unwrap();
        }

        tools::scandir(libc::AT_FDCWD, &path, &BACKUP_TYPE_REGEX, |l0_fd, backup_type, file_type| {
            if file_type != nix::dir::Type::Directory { return Ok(()); }
            tools::scandir(l0_fd, backup_type, &BACKUP_ID_REGEX, |l1_fd, backup_id, file_type| {
                if file_type != nix::dir::Type::Directory { return Ok(()); }
                tools::scandir(l1_fd, backup_id, &BACKUP_DATE_REGEX, |_, backup_time, file_type| {
                    if file_type != nix::dir::Type::Directory { return Ok(()); }

                    let dt = Utc.datetime_from_str(backup_time, "%Y-%m-%dT%H:%M:%S")?;

                    list.push(BackupInfo {
                        backup_type: backup_type.to_owned(),
                        backup_id: backup_id.to_owned(),
                        backup_time: dt,
                    });

                    Ok(())
                })
            })
        })?;

        Ok(list)
    }

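    /// List all index files (`.fidx` and `.didx`) below the datastore base
    /// path, skipping hidden directories such as `.chunks`.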
    pub fn list_images(&self) -> Result<Vec<PathBuf>, Error> {
        let base = self.base_path();

        let mut list = vec![];

        use walkdir::WalkDir;

        let walker = WalkDir::new(&base).same_file_system(true).into_iter();

        // make sure we skip .chunks (and other hidden files to keep it simple)
        fn is_hidden(entry: &walkdir::DirEntry) -> bool {
            entry.file_name()
                .to_str()
                .map(|s| s.starts_with("."))
                .unwrap_or(false)
        }

        for entry in walker.filter_entry(|e| !is_hidden(e)) {
            let path = entry?.into_path();
            if let Some(ext) = path.extension() {
                if ext == "fidx" {
                    list.push(path);
                } else if ext == "didx" {
                    list.push(path);
                }
            }
        }

        Ok(list)
    }

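    /// GC phase 1: walk all index files and mark the chunks they reference
    /// as used in `status`.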
    fn mark_used_chunks(&self, status: &mut GarbageCollectionStatus) -> Result<(), Error> {

        let image_list = self.list_images()?;

        for path in image_list {
            if let Some(ext) = path.extension() {
                if ext == "fidx" {
                    let index = self.open_fixed_reader(&path)?;
                    index.mark_used_chunks(status)?;
                } else if ext == "didx" {
                    let index = self.open_dynamic_reader(&path)?;
                    index.mark_used_chunks(status)?;
                }
            }
        }

        Ok(())
    }

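    /// Run garbage collection: mark all chunks referenced by index files
    /// (phase 1), then sweep unreferenced chunks from the chunk store
    /// (phase 2). If a garbage collection is already running, the call
    /// returns without doing anything.
    ///
    /// # Example
    ///
    /// A minimal sketch:
    ///
    /// ```ignore
    /// if let Err(err) = store.garbage_collection() {
    ///     eprintln!("GC failed: {}", err);
    /// }
    /// ```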
    pub fn garbage_collection(&self) -> Result<(), Error> {

        if let Ok(ref mut _mutex) = self.gc_mutex.try_lock() {

            let mut gc_status = GarbageCollectionStatus::default();
            gc_status.used_bytes = 0;

            println!("Start GC phase1 (mark chunks)");

            self.mark_used_chunks(&mut gc_status)?;

            println!("Start GC phase2 (sweep unused chunks)");
            self.chunk_store.sweep_unused_chunks(&mut gc_status)?;

            println!("Used bytes: {}", gc_status.used_bytes);
            println!("Used chunks: {}", gc_status.used_chunks);
            println!("Disk bytes: {}", gc_status.disk_bytes);
            println!("Disk chunks: {}", gc_status.disk_chunks);

        } else {
            println!("Start GC failed - (already running/locked)");
        }

        Ok(())
    }
}