// proxmox-backup.git - src/backup/datastore.rs
use std::collections::HashMap;
use std::os::unix::io::AsRawFd;
use std::path::{Path, PathBuf};
use std::sync::{Arc, Mutex};

use chrono::prelude::*;
use failure::*;
use lazy_static::lazy_static;

use crate::config::datastore;
use crate::tools;

use super::archive_index::*;
use super::chunk_store::*;
use super::image_index::*;

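/// A datastore: a chunk store plus the backup directory hierarchy
/// stored on top of it. The `gc_mutex` ensures that at most one
/// garbage collection runs at a time.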
pub struct DataStore {
    chunk_store: Arc<ChunkStore>,
    gc_mutex: Mutex<bool>,
}

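/// Basic information about a single backup: its type, ID and
/// creation time.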
#[derive(Debug)]
pub struct BackupInfo {
    pub backup_type: String,
    pub backup_id: String,
    pub backup_time: DateTime<Utc>,
}

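// Process-wide cache of open datastores, keyed by store name
// (populated by lookup_datastore()).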
lazy_static! {
    static ref datastore_map: Mutex<HashMap<String, Arc<DataStore>>> = Mutex::new(HashMap::new());
}

impl DataStore {

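    /// Look up a datastore by name, reusing a cached instance when its
    /// configured path is unchanged. On a cache miss (or after a
    /// configuration change) the store is opened anew and the cache is
    /// updated.
    ///
    /// # Example
    ///
    /// ```ignore
    /// // Illustrative sketch - assumes a datastore named "store1"
    /// // exists in the datastore configuration.
    /// let store = DataStore::lookup_datastore("store1")?;
    /// println!("base path: {:?}", store.base_path());
    /// ```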
    pub fn lookup_datastore(name: &str) -> Result<Arc<DataStore>, Error> {

        let config = datastore::config()?;
        let (_, store_config) = config.sections.get(name)
            .ok_or_else(|| format_err!("no such datastore '{}'", name))?;

        let path = store_config["path"].as_str()
            .ok_or_else(|| format_err!("datastore '{}' has no path", name))?;

        let mut map = datastore_map.lock().unwrap();

        if let Some(datastore) = map.get(name) {
            // Compare the configuration - if it changed, create a new DataStore object!
            if datastore.chunk_store.base == PathBuf::from(path) {
                return Ok(datastore.clone());
            }
        }

        let datastore = Arc::new(DataStore::open(name)?);
        map.insert(name.to_string(), datastore.clone());

        Ok(datastore)
    }

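    /// Open the datastore named `store_name`: read its path from the
    /// datastore configuration and open the chunk store below it.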
    pub fn open(store_name: &str) -> Result<Self, Error> {

        let config = datastore::config()?;
        let (_, store_config) = config.sections.get(store_name)
            .ok_or_else(|| format_err!("no such datastore '{}'", store_name))?;

        let path = store_config["path"].as_str()
            .ok_or_else(|| format_err!("datastore '{}' has no path", store_name))?;

        let chunk_store = ChunkStore::open(store_name, path)?;

        Ok(Self {
            chunk_store: Arc::new(chunk_store),
            gc_mutex: Mutex::new(false),
        })
    }

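    /// Create a writer for an image index file covering `size` bytes,
    /// split into chunks of `chunk_size` bytes.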
    pub fn create_image_writer<P: AsRef<Path>>(
        &self,
        filename: P,
        size: usize,
        chunk_size: usize,
    ) -> Result<ImageIndexWriter, Error> {
        ImageIndexWriter::create(self.chunk_store.clone(), filename.as_ref(), size, chunk_size)
    }

    pub fn open_image_reader<P: AsRef<Path>>(&self, filename: P) -> Result<ImageIndexReader, Error> {
        ImageIndexReader::open(self.chunk_store.clone(), filename.as_ref())
    }

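    /// Create a writer for an archive index file with the given chunk size.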
    pub fn create_archive_writer<P: AsRef<Path>>(
        &self,
        filename: P,
        chunk_size: usize,
    ) -> Result<ArchiveIndexWriter, Error> {
        ArchiveIndexWriter::create(self.chunk_store.clone(), filename.as_ref(), chunk_size)
    }

    pub fn open_archive_reader<P: AsRef<Path>>(&self, filename: P) -> Result<ArchiveIndexReader, Error> {
        ArchiveIndexReader::open(self.chunk_store.clone(), filename.as_ref())
    }

    pub fn base_path(&self) -> PathBuf {
        self.chunk_store.base_path()
    }

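    /// Build the relative path of a backup directory:
    /// `<backup_type>/<backup_id>/<%Y-%m-%dT%H:%M:%S>`.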
    pub fn get_backup_dir(
        &self,
        backup_type: &str,
        backup_id: &str,
        backup_time: DateTime<Utc>,
    ) -> PathBuf {

        let mut relative_path = PathBuf::new();
        relative_path.push(backup_type);
        relative_path.push(backup_id);

        let date_str = backup_time.format("%Y-%m-%dT%H:%M:%S").to_string();
        relative_path.push(&date_str);

        relative_path
    }

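    /// Create the directory for a new backup (including all missing
    /// parent directories) and return its path relative to the
    /// datastore base. `backup_time` is a Unix epoch timestamp.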
    pub fn create_backup_dir(
        &self,
        backup_type: &str,
        backup_id: &str,
        backup_time: i64,
    ) -> Result<PathBuf, Error> {

        let mut relative_path = PathBuf::new();
        relative_path.push(backup_type);
        relative_path.push(backup_id);

        let dt = Utc.timestamp(backup_time, 0);
        let date_str = dt.format("%Y-%m-%dT%H:%M:%S").to_string();
        relative_path.push(&date_str);

        let mut full_path = self.base_path();
        full_path.push(&relative_path);

        std::fs::create_dir_all(&full_path)?;

        Ok(relative_path)
    }

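    /// List all backups on this datastore by scanning the directory
    /// hierarchy `<backup_type>/<backup_id>/<timestamp>`, matching each
    /// level against a fixed regular expression.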
    pub fn list_backups(&self) -> Result<Vec<BackupInfo>, Error> {

        let path = self.base_path();

        let mut list = vec![];

        lazy_static! {
            static ref BACKUP_TYPE_REGEX: regex::Regex = regex::Regex::new(r"^(host|vm|ct)$").unwrap();
            static ref BACKUP_ID_REGEX: regex::Regex = regex::Regex::new(r"^[A-Za-z][A-Za-z0-9_-]+$").unwrap();
            static ref BACKUP_DATE_REGEX: regex::Regex = regex::Regex::new(
                r"^[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}$").unwrap();
        }

        tools::scandir(libc::AT_FDCWD, &path, &BACKUP_TYPE_REGEX, |l0_fd, backup_type, file_type| {
            if file_type != nix::dir::Type::Directory { return Ok(()); }
            tools::scandir(l0_fd, backup_type, &BACKUP_ID_REGEX, |l1_fd, backup_id, file_type| {
                if file_type != nix::dir::Type::Directory { return Ok(()); }
                tools::scandir(l1_fd, backup_id, &BACKUP_DATE_REGEX, |_, backup_time, file_type| {
                    if file_type != nix::dir::Type::Directory { return Ok(()); }

                    let dt = Utc.datetime_from_str(backup_time, "%Y-%m-%dT%H:%M:%S")?;

                    list.push(BackupInfo {
                        backup_type: backup_type.to_owned(),
                        backup_id: backup_id.to_owned(),
                        backup_time: dt,
                    });

                    Ok(())
                })
            })
        })?;

        Ok(list)
    }

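    /// Recursively list all index files (`.iidx` and `.aidx`) on this
    /// datastore, staying on the same file system and skipping hidden
    /// entries such as the `.chunks` directory.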
    pub fn list_images(&self) -> Result<Vec<PathBuf>, Error> {

        let base = self.base_path();

        let mut list = vec![];

        use walkdir::WalkDir;

        let walker = WalkDir::new(&base).same_file_system(true).into_iter();

        // skip .chunks (and, to keep it simple, any other hidden file or directory)
        fn is_hidden(entry: &walkdir::DirEntry) -> bool {
            entry.file_name()
                .to_str()
                .map(|s| s.starts_with("."))
                .unwrap_or(false)
        }

        for entry in walker.filter_entry(|e| !is_hidden(e)) {
            let path = entry?.into_path();
            if let Some(ext) = path.extension() {
                if ext == "iidx" || ext == "aidx" {
                    list.push(path);
                }
            }
        }

        Ok(list)
    }

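    /// Garbage collection, phase 1: open every index file on the store
    /// and mark all chunks it references as used.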
    fn mark_used_chunks(&self, status: &mut GarbageCollectionStatus) -> Result<(), Error> {

        let image_list = self.list_images()?;

        for path in image_list {
            if let Some(ext) = path.extension() {
                if ext == "iidx" {
                    let index = self.open_image_reader(&path)?;
                    index.mark_used_chunks(status)?;
                } else if ext == "aidx" {
                    let index = self.open_archive_reader(&path)?;
                    index.mark_used_chunks(status)?;
                }
            }
        }

        Ok(())
    }

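    /// Run garbage collection: mark all chunks referenced from any index
    /// file (phase 1), then sweep unreferenced chunks from the chunk
    /// store (phase 2). If another garbage collection is already running,
    /// return immediately without doing anything.
    ///
    /// # Example
    ///
    /// ```ignore
    /// // Illustrative sketch - assumes a configured datastore "store1".
    /// let store = DataStore::lookup_datastore("store1")?;
    /// store.garbage_collection()?;
    /// ```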
    pub fn garbage_collection(&self) -> Result<(), Error> {

        if let Ok(_guard) = self.gc_mutex.try_lock() {

            let mut gc_status = GarbageCollectionStatus::default();

            println!("Start GC phase1 (mark chunks)");
            self.mark_used_chunks(&mut gc_status)?;

            println!("Start GC phase2 (sweep unused chunks)");
            self.chunk_store.sweep_unused_chunks(&mut gc_status)?;

            println!("Used bytes: {}", gc_status.used_bytes);
            println!("Used chunks: {}", gc_status.used_chunks);
            println!("Disk bytes: {}", gc_status.disk_bytes);
            println!("Disk chunks: {}", gc_status.disk_chunks);

        } else {
            println!("Start GC failed - already running (locked)");
        }

        Ok(())
    }
}