]> git.proxmox.com Git - proxmox-backup.git/blob - src/backup/datastore.rs
backup/datastore.rs: list all index files using walkdir crate
[proxmox-backup.git] / src / backup / datastore.rs
1 use failure::*;
2
3 use std::path::{PathBuf, Path};
4 use std::collections::HashMap;
5 use lazy_static::lazy_static;
6 use std::sync::{Mutex, Arc};
7
8 use crate::config::datastore;
9 use super::chunk_store::*;
10 use super::image_index::*;
11 use super::archive_index::*;
12
13 use chrono::{Utc, TimeZone};
14
/// A single configured datastore: wraps the on-disk chunk store and
/// serializes garbage collection runs.
pub struct DataStore {
    // Shared chunk store; `Arc` because index readers/writers created by the
    // methods below each hold their own reference to it.
    chunk_store: Arc<ChunkStore>,
    // Acquired with `try_lock` in `garbage_collection()` so that at most one
    // GC runs at a time; the bool payload itself is unused.
    gc_mutex: Mutex<bool>,
}
19
lazy_static!{
    // Process-wide cache of open datastores, keyed by datastore name.
    // Entries are replaced when the configured path changes (see
    // `DataStore::lookup_datastore`).
    static ref datastore_map: Mutex<HashMap<String, Arc<DataStore>>> = Mutex::new(HashMap::new());
}
23
24 impl DataStore {
25
26 pub fn lookup_datastore(name: &str) -> Result<Arc<DataStore>, Error> {
27
28 let config = datastore::config()?;
29 let (_, store_config) = config.sections.get(name)
30 .ok_or(format_err!("no such datastore '{}'", name))?;
31
32 let path = store_config["path"].as_str().unwrap();
33
34 let mut map = datastore_map.lock().unwrap();
35
36 if let Some(datastore) = map.get(name) {
37 // Compare Config - if changed, create new Datastore object!
38 if datastore.chunk_store.base == PathBuf::from(path) {
39 return Ok(datastore.clone());
40 }
41 }
42
43 if let Ok(datastore) = DataStore::open(name) {
44 let datastore = Arc::new(datastore);
45 map.insert(name.to_string(), datastore.clone());
46 return Ok(datastore);
47 }
48
49 bail!("store not found");
50 }
51
52 pub fn open(store_name: &str) -> Result<Self, Error> {
53
54 let config = datastore::config()?;
55 let (_, store_config) = config.sections.get(store_name)
56 .ok_or(format_err!("no such datastore '{}'", store_name))?;
57
58 let path = store_config["path"].as_str().unwrap();
59
60 let chunk_store = ChunkStore::open(store_name, path)?;
61
62 Ok(Self {
63 chunk_store: Arc::new(chunk_store),
64 gc_mutex: Mutex::new(false),
65 })
66 }
67
68 pub fn create_image_writer<P: AsRef<Path>>(&self, filename: P, size: usize, chunk_size: usize) -> Result<ImageIndexWriter, Error> {
69
70 let index = ImageIndexWriter::create(self.chunk_store.clone(), filename.as_ref(), size, chunk_size)?;
71
72 Ok(index)
73 }
74
75 pub fn open_image_reader<P: AsRef<Path>>(&self, filename: P) -> Result<ImageIndexReader, Error> {
76
77 let index = ImageIndexReader::open(self.chunk_store.clone(), filename.as_ref())?;
78
79 Ok(index)
80 }
81
82 pub fn create_archive_writer<P: AsRef<Path>>(
83 &self, filename: P,
84 chunk_size: usize
85 ) -> Result<ArchiveIndexWriter, Error> {
86
87 let index = ArchiveIndexWriter::create(
88 self.chunk_store.clone(), filename.as_ref(), chunk_size)?;
89
90 Ok(index)
91 }
92
93 pub fn open_archive_reader<P: AsRef<Path>>(&self, filename: P) -> Result<ArchiveIndexReader, Error> {
94
95 let index = ArchiveIndexReader::open(self.chunk_store.clone(), filename.as_ref())?;
96
97 Ok(index)
98 }
99
100 pub fn base_path(&self) -> PathBuf {
101 self.chunk_store.base_path()
102 }
103
104 pub fn create_backup_dir(
105 &self,
106 backup_type: &str,
107 backup_id: &str,
108 backup_time: i64,
109 ) -> Result<PathBuf, Error> {
110 let mut relative_path = PathBuf::new();
111
112 relative_path.push(backup_type);
113
114 relative_path.push(backup_id);
115
116 let dt = Utc.timestamp(backup_time, 0);
117 let date_str = dt.format("%Y-%m-%dT%H:%M:%S").to_string();
118
119 println!("date: {}", date_str);
120
121 relative_path.push(&date_str);
122
123
124 let mut full_path = self.base_path();
125 full_path.push(&relative_path);
126
127 std::fs::create_dir_all(&full_path)?;
128
129 Ok(relative_path)
130 }
131
132 pub fn list_images(&self) -> Result<Vec<PathBuf>, Error> {
133 let base = self.base_path();
134
135 let mut list = vec![];
136
137 use walkdir::WalkDir;
138
139 let walker = WalkDir::new(&base).same_file_system(true).into_iter();
140
141 // make sure we skip .chunks (and other hidden files to keep it simple)
142 fn is_hidden(entry: &walkdir::DirEntry) -> bool {
143 entry.file_name()
144 .to_str()
145 .map(|s| s.starts_with("."))
146 .unwrap_or(false)
147 }
148
149 for entry in walker.filter_entry(|e| !is_hidden(e)) {
150 let path = entry?.into_path();
151 if let Some(ext) = path.extension() {
152 if ext == "iidx" {
153 list.push(path);
154 } else if ext == "aidx" {
155 list.push(path);
156 }
157 }
158 }
159
160 Ok(list)
161 }
162
163 fn mark_used_chunks(&self, status: &mut GarbageCollectionStatus) -> Result<(), Error> {
164
165 let image_list = self.list_images()?;
166
167 for path in image_list {
168 if let Some(ext) = path.extension() {
169 if ext == "iidx" {
170 let index = self.open_image_reader(&path)?;
171 index.mark_used_chunks(status)?;
172 } else if ext == "aidx" {
173 let index = self.open_archive_reader(&path)?;
174 index.mark_used_chunks(status)?;
175 }
176 }
177 }
178
179 Ok(())
180 }
181
182 pub fn garbage_collection(&self) -> Result<(), Error> {
183
184 if let Ok(ref mut _mutex) = self.gc_mutex.try_lock() {
185
186 let mut gc_status = GarbageCollectionStatus::default();
187 gc_status.used_bytes = 0;
188
189 println!("Start GC phase1 (mark chunks)");
190
191 self.mark_used_chunks(&mut gc_status)?;
192
193 println!("Start GC phase2 (sweep unused chunks)");
194 self.chunk_store.sweep_unused_chunks(&mut gc_status)?;
195
196 println!("Used bytes: {}", gc_status.used_bytes);
197 println!("Used chunks: {}", gc_status.used_chunks);
198 println!("Disk bytes: {}", gc_status.disk_bytes);
199 println!("Disk chunks: {}", gc_status.disk_chunks);
200
201 } else {
202 println!("Start GC failed - (already running/locked)");
203 }
204
205 Ok(())
206 }
207 }