// proxmox-backup: src/backup/datastore.rs
use failure::*;

use chrono::prelude::*;

use std::path::{PathBuf, Path};
use std::collections::HashMap;
use lazy_static::lazy_static;
use std::sync::{Mutex, Arc};

use crate::tools;
use crate::config::datastore;
use super::chunk_store::*;
use super::fixed_index::*;
use super::dynamic_index::*;

/// Datastore Management
///
/// A datastore can store several backups and provides the
/// management interface for them.
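///
/// A minimal usage sketch (the store name "local" is a hypothetical
/// example, not part of this module):
///
/// ```ignore
/// let store = DataStore::lookup_datastore("local")?;
/// for info in store.list_backups()? {
///     println!("{}/{} @ {}", info.backup_type, info.backup_id, info.backup_time);
/// }
/// ```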
pub struct DataStore {
    chunk_store: Arc<ChunkStore>,
    gc_mutex: Mutex<bool>, // held while a garbage collection run is active
}

/// Detailed Backup Information
#[derive(Debug)]
pub struct BackupInfo {
    /// Type of backup
    pub backup_type: String,
    /// Unique (for this type) ID
    pub backup_id: String,
    /// Backup timestamp
    pub backup_time: DateTime<Utc>,
}

lazy_static! {
    // process-wide cache of already opened datastores
    static ref DATASTORE_MAP: Mutex<HashMap<String, Arc<DataStore>>> = Mutex::new(HashMap::new());
}

impl DataStore {

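    /// Look up a datastore by name, re-using a cached instance if its
    /// configured path is unchanged, and opening (and caching) a fresh
    /// instance otherwise.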
    pub fn lookup_datastore(name: &str) -> Result<Arc<DataStore>, Error> {

        let config = datastore::config()?;
        let (_, store_config) = config.sections.get(name)
            .ok_or_else(|| format_err!("no such datastore '{}'", name))?;

        let path = store_config["path"].as_str().unwrap();

        let mut map = DATASTORE_MAP.lock().unwrap();

        if let Some(datastore) = map.get(name) {
            // compare the configured path - if it changed, create a new DataStore object below
            if datastore.chunk_store.base == PathBuf::from(path) {
                return Ok(datastore.clone());
            }
        }

        let datastore = Arc::new(DataStore::open(name)?);
        map.insert(name.to_string(), datastore.clone());

        Ok(datastore)
    }

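    /// Open a datastore by looking up its path in the datastore config.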
    pub fn open(store_name: &str) -> Result<Self, Error> {

        let config = datastore::config()?;
        let (_, store_config) = config.sections.get(store_name)
            .ok_or_else(|| format_err!("no such datastore '{}'", store_name))?;

        let path = store_config["path"].as_str().unwrap();

        let chunk_store = ChunkStore::open(store_name, path)?;

        Ok(Self {
            chunk_store: Arc::new(chunk_store),
            gc_mutex: Mutex::new(false),
        })
    }

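    /// Create a writer for a fixed-size chunk index (`.fidx`) with the
    /// given total size and chunk size.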
    pub fn create_fixed_writer<P: AsRef<Path>>(&self, filename: P, size: usize, chunk_size: usize) -> Result<FixedIndexWriter, Error> {

        let index = FixedIndexWriter::create(self.chunk_store.clone(), filename.as_ref(), size, chunk_size)?;

        Ok(index)
    }

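    /// Open a reader for an existing fixed-size chunk index (`.fidx`).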
    pub fn open_fixed_reader<P: AsRef<Path>>(&self, filename: P) -> Result<FixedIndexReader, Error> {

        let index = FixedIndexReader::open(self.chunk_store.clone(), filename.as_ref())?;

        Ok(index)
    }

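    /// Create a writer for a dynamic (variable chunk size) index (`.didx`).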
    pub fn create_dynamic_writer<P: AsRef<Path>>(
        &self, filename: P,
        chunk_size: usize
    ) -> Result<DynamicIndexWriter, Error> {

        let index = DynamicIndexWriter::create(
            self.chunk_store.clone(), filename.as_ref(), chunk_size)?;

        Ok(index)
    }

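    /// Open a reader for an existing dynamic chunk index (`.didx`).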
    pub fn open_dynamic_reader<P: AsRef<Path>>(&self, filename: P) -> Result<DynamicIndexReader, Error> {

        let index = DynamicIndexReader::open(self.chunk_store.clone(), filename.as_ref())?;

        Ok(index)
    }

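    /// Return the filesystem base path of this datastore.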
    pub fn base_path(&self) -> PathBuf {
        self.chunk_store.base_path()
    }

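    /// Compute the relative path of a backup directory:
    /// `<backup_type>/<backup_id>/<YYYY-MM-DDTHH:MM:SS>`.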
    pub fn get_backup_dir(
        &self,
        backup_type: &str,
        backup_id: &str,
        backup_time: DateTime<Utc>,
    ) -> PathBuf {

        let mut relative_path = PathBuf::new();

        relative_path.push(backup_type);
        relative_path.push(backup_id);

        let date_str = backup_time.format("%Y-%m-%dT%H:%M:%S").to_string();
        relative_path.push(&date_str);

        relative_path
    }

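    /// Create the directory for a new backup (from a unix epoch
    /// `backup_time`) and return its path relative to the datastore base.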
    pub fn create_backup_dir(
        &self,
        backup_type: &str,
        backup_id: &str,
        backup_time: i64,
    ) -> Result<PathBuf, Error> {

        let mut relative_path = PathBuf::new();

        relative_path.push(backup_type);
        relative_path.push(backup_id);

        let dt = Utc.timestamp(backup_time, 0);
        let date_str = dt.format("%Y-%m-%dT%H:%M:%S").to_string();
        relative_path.push(&date_str);

        let mut full_path = self.base_path();
        full_path.push(&relative_path);

        std::fs::create_dir_all(&full_path)?;

        Ok(relative_path)
    }

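    /// List all backups in this datastore by scanning the
    /// `<type>/<id>/<timestamp>` directory hierarchy.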
    pub fn list_backups(&self) -> Result<Vec<BackupInfo>, Error> {
        let path = self.base_path();

        let mut list = vec![];

        lazy_static! {
            static ref BACKUP_TYPE_REGEX: regex::Regex = regex::Regex::new(r"^(host|vm|ct)$").unwrap();
            static ref BACKUP_ID_REGEX: regex::Regex = regex::Regex::new(r"^[A-Za-z][A-Za-z0-9_-]+$").unwrap();
            static ref BACKUP_DATE_REGEX: regex::Regex = regex::Regex::new(
                r"^[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}$").unwrap();
        }

        tools::scandir(libc::AT_FDCWD, &path, &BACKUP_TYPE_REGEX, |l0_fd, backup_type, file_type| {
            if file_type != nix::dir::Type::Directory { return Ok(()); }
            tools::scandir(l0_fd, backup_type, &BACKUP_ID_REGEX, |l1_fd, backup_id, file_type| {
                if file_type != nix::dir::Type::Directory { return Ok(()); }
                tools::scandir(l1_fd, backup_id, &BACKUP_DATE_REGEX, |_, backup_time, file_type| {
                    if file_type != nix::dir::Type::Directory { return Ok(()); }

                    let dt = Utc.datetime_from_str(backup_time, "%Y-%m-%dT%H:%M:%S")?;

                    list.push(BackupInfo {
                        backup_type: backup_type.to_owned(),
                        backup_id: backup_id.to_owned(),
                        backup_time: dt,
                    });

                    Ok(())
                })
            })
        })?;

        Ok(list)
    }

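    /// List all index files (`.fidx` and `.didx`) below the datastore base,
    /// skipping hidden entries such as the `.chunks` directory.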
    pub fn list_images(&self) -> Result<Vec<PathBuf>, Error> {
        let base = self.base_path();

        let mut list = vec![];

        use walkdir::WalkDir;

        let walker = WalkDir::new(&base).same_file_system(true).into_iter();

        // make sure we skip .chunks (and other hidden files, to keep it simple)
        fn is_hidden(entry: &walkdir::DirEntry) -> bool {
            entry.file_name()
                .to_str()
                .map(|s| s.starts_with("."))
                .unwrap_or(false)
        }

        for entry in walker.filter_entry(|e| !is_hidden(e)) {
            let path = entry?.into_path();
            if let Some(ext) = path.extension() {
                if ext == "fidx" || ext == "didx" {
                    list.push(path);
                }
            }
        }

        Ok(list)
    }

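    /// GC phase 1: walk all index files and mark their chunks as in use.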
    fn mark_used_chunks(&self, status: &mut GarbageCollectionStatus) -> Result<(), Error> {

        let image_list = self.list_images()?;

        for path in image_list {
            if let Some(ext) = path.extension() {
                if ext == "fidx" {
                    let index = self.open_fixed_reader(&path)?;
                    index.mark_used_chunks(status)?;
                } else if ext == "didx" {
                    let index = self.open_dynamic_reader(&path)?;
                    index.mark_used_chunks(status)?;
                }
            }
        }

        Ok(())
    }

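    /// Run garbage collection: mark all chunks referenced by an index
    /// (phase 1), then sweep unreferenced chunks (phase 2). Only one GC
    /// run may be active at a time; concurrent calls are skipped.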
    pub fn garbage_collection(&self) -> Result<(), Error> {

        if let Ok(ref mut _mutex) = self.gc_mutex.try_lock() {

            let mut gc_status = GarbageCollectionStatus::default();

            println!("Start GC phase1 (mark chunks)");

            self.mark_used_chunks(&mut gc_status)?;

            println!("Start GC phase2 (sweep unused chunks)");
            self.chunk_store.sweep_unused_chunks(&mut gc_status)?;

            println!("Used bytes: {}", gc_status.used_bytes);
            println!("Used chunks: {}", gc_status.used_chunks);
            println!("Disk bytes: {}", gc_status.disk_bytes);
            println!("Disk chunks: {}", gc_status.disk_chunks);

        } else {
            println!("Start GC failed - already running (locked)");
        }

        Ok(())
    }
}