// src/backup/datastore.rs (proxmox-backup)
use std::path::{PathBuf, Path};
use std::io;
use std::collections::HashMap;
use std::sync::{Mutex, Arc};

use failure::*;
use lazy_static::lazy_static;

use crate::tools;
use crate::config::datastore;
use super::chunk_store::*;
use super::fixed_index::*;
use super::dynamic_index::*;
use super::index::*;
use super::backup_info::*;
lazy_static! {
    // Process-wide cache of opened datastores, keyed by datastore name.
    static ref DATASTORE_MAP: Mutex<HashMap<String, Arc<DataStore>>> = Mutex::new(HashMap::new());
}
/// Datastore Management
///
/// A Datastore can store several backups, and provides the
/// management interface for them.
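///
/// # Example
///
/// A minimal usage sketch (not part of the original file). It assumes a
/// datastore named "store1" exists in the datastore config, and that the
/// crate re-exports `DataStore` from its `backup` module:
///
/// ```no_run
/// # use proxmox_backup::backup::DataStore;
/// # fn main() -> Result<(), failure::Error> {
/// let store = DataStore::lookup_datastore("store1")?;
/// let backups = store.list_backups()?;
/// println!("found {} backups", backups.len());
/// # Ok(())
/// # }
/// ```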
pub struct DataStore {
    chunk_store: Arc<ChunkStore>,
    gc_mutex: Mutex<bool>,
}

impl DataStore {
    pub fn lookup_datastore(name: &str) -> Result<Arc<DataStore>, Error> {

        let config = datastore::config()?;
        let (_, store_config) = config.sections.get(name)
            .ok_or(format_err!("no such datastore '{}'", name))?;

        let path = store_config["path"].as_str().unwrap();

        let mut map = DATASTORE_MAP.lock().unwrap();

        if let Some(datastore) = map.get(name) {
            // compare the configured path - if it changed, create a new DataStore object!
            if datastore.chunk_store.base == PathBuf::from(path) {
                return Ok(datastore.clone());
            }
        }

        let datastore = DataStore::open(name)?;

        let datastore = Arc::new(datastore);
        map.insert(name.to_string(), datastore.clone());

        Ok(datastore)
    }
    pub fn open(store_name: &str) -> Result<Self, Error> {

        let config = datastore::config()?;
        let (_, store_config) = config.sections.get(store_name)
            .ok_or(format_err!("no such datastore '{}'", store_name))?;

        let path = store_config["path"].as_str().unwrap();

        let chunk_store = ChunkStore::open(store_name, path)?;

        Ok(Self {
            chunk_store: Arc::new(chunk_store),
            gc_mutex: Mutex::new(false),
        })
    }
    pub fn get_chunk_iterator(
        &self,
        print_percentage: bool,
    ) -> Result<
        impl Iterator<Item = Result<tools::fs::ReadDirEntry, Error>>,
        Error
    > {
        self.chunk_store.get_chunk_iterator(print_percentage)
    }
    pub fn create_fixed_writer<P: AsRef<Path>>(
        &self,
        filename: P,
        size: usize,
        chunk_size: usize,
    ) -> Result<FixedIndexWriter, Error> {

        let index = FixedIndexWriter::create(
            self.chunk_store.clone(), filename.as_ref(), size, chunk_size)?;

        Ok(index)
    }
    pub fn open_fixed_reader<P: AsRef<Path>>(&self, filename: P) -> Result<FixedIndexReader, Error> {

        let index = FixedIndexReader::open(self.chunk_store.clone(), filename.as_ref())?;

        Ok(index)
    }
    pub fn create_dynamic_writer<P: AsRef<Path>>(
        &self,
        filename: P,
        chunk_size: usize,
    ) -> Result<DynamicIndexWriter, Error> {

        let index = DynamicIndexWriter::create(
            self.chunk_store.clone(), filename.as_ref(), chunk_size)?;

        Ok(index)
    }
    pub fn open_dynamic_reader<P: AsRef<Path>>(&self, filename: P) -> Result<DynamicIndexReader, Error> {

        let index = DynamicIndexReader::open(self.chunk_store.clone(), filename.as_ref())?;

        Ok(index)
    }
    pub fn open_index<P>(&self, filename: P) -> Result<Box<dyn IndexFile + Send>, Error>
    where
        P: AsRef<Path>,
    {
        let filename = filename.as_ref();
        let out: Box<dyn IndexFile + Send> =
            match filename.extension().and_then(|ext| ext.to_str()) {
                Some("didx") => Box::new(self.open_dynamic_reader(filename)?),
                Some("fidx") => Box::new(self.open_fixed_reader(filename)?),
                _ => bail!("cannot open index file of unknown type: {:?}", filename),
            };
        Ok(out)
    }
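    // Usage sketch (hypothetical file names, given a `store: &DataStore`):
    // `open_index` dispatches on the file extension, so callers can read
    // either index flavor through a single entry point:
    //
    //     let idx = store.open_index("drive-scsi0.img.fidx")?; // fixed index
    //     let idx = store.open_index("root.pxar.didx")?;       // dynamic index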
    pub fn base_path(&self) -> PathBuf {
        self.chunk_store.base_path()
    }
    /// Remove a backup directory including all content
    pub fn remove_backup_dir(&self, backup_dir: &BackupDir) -> Result<(), io::Error> {

        let relative_path = backup_dir.relative_path();
        let mut full_path = self.base_path();
        full_path.push(&relative_path);

        log::info!("removing backup {:?}", full_path);
        std::fs::remove_dir_all(full_path)?;

        Ok(())
    }
    pub fn create_backup_dir(&self, backup_dir: &BackupDir) -> Result<(PathBuf, bool), io::Error> {

        // create intermediate path first:
        let mut full_path = self.base_path();
        full_path.push(backup_dir.group().group_path());
        std::fs::create_dir_all(&full_path)?;

        let relative_path = backup_dir.relative_path();
        let mut full_path = self.base_path();
        full_path.push(&relative_path);

        // create the last component now
        match std::fs::create_dir(&full_path) {
            Ok(_) => Ok((relative_path, true)),
            Err(ref e) if e.kind() == io::ErrorKind::AlreadyExists => Ok((relative_path, false)),
            Err(e) => Err(e),
        }
    }
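    // Usage sketch (illustrative only): the returned bool tells the caller
    // whether the snapshot directory was freshly created, which is how a
    // duplicate backup can be rejected:
    //
    //     let (relative_path, is_new) = store.create_backup_dir(&backup_dir)?;
    //     if !is_new { bail!("backup dir {:?} already exists", relative_path); }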
    pub fn list_backups(&self) -> Result<Vec<BackupInfo>, Error> {
        let path = self.base_path();
        BackupInfo::list_backups(&path)
    }
    pub fn list_files(&self, backup_dir: &BackupDir) -> Result<Vec<String>, Error> {
        let path = self.base_path();
        BackupInfo::list_files(&path, backup_dir)
    }
    pub fn list_images(&self) -> Result<Vec<PathBuf>, Error> {
        let base = self.base_path();

        let mut list = vec![];

        use walkdir::WalkDir;

        let walker = WalkDir::new(&base).same_file_system(true).into_iter();

        // make sure we skip .chunks (and other hidden files, to keep it simple)
        fn is_hidden(entry: &walkdir::DirEntry) -> bool {
            entry.file_name()
                .to_str()
                .map(|s| s.starts_with("."))
                .unwrap_or(false)
        }

        for entry in walker.filter_entry(|e| !is_hidden(e)) {
            let path = entry?.into_path();
            if let Some(ext) = path.extension() {
                if ext == "fidx" {
                    list.push(path);
                } else if ext == "didx" {
                    list.push(path);
                }
            }
        }

        Ok(list)
    }
    fn mark_used_chunks(&self, status: &mut GarbageCollectionStatus) -> Result<(), Error> {

        let image_list = self.list_images()?;

        for path in image_list {
            if let Some(ext) = path.extension() {
                if ext == "fidx" {
                    let index = self.open_fixed_reader(&path)?;
                    index.mark_used_chunks(status)?;
                } else if ext == "didx" {
                    let index = self.open_dynamic_reader(&path)?;
                    index.mark_used_chunks(status)?;
                }
            }
        }

        Ok(())
    }
    pub fn garbage_collection(&self) -> Result<(), Error> {

        if let Ok(ref mut _mutex) = self.gc_mutex.try_lock() {

            let _exclusive_lock = self.chunk_store.try_exclusive_lock()?;

            let oldest_writer = self.chunk_store.oldest_writer();

            let mut gc_status = GarbageCollectionStatus::default();
            gc_status.used_bytes = 0;

            println!("Start GC phase1 (mark chunks)");

            self.mark_used_chunks(&mut gc_status)?;

            println!("Start GC phase2 (sweep unused chunks)");
            self.chunk_store.sweep_unused_chunks(oldest_writer, &mut gc_status)?;

            println!("Used bytes: {}", gc_status.used_bytes);
            println!("Used chunks: {}", gc_status.used_chunks);
            println!("Disk bytes: {}", gc_status.disk_bytes);
            println!("Disk chunks: {}", gc_status.disk_chunks);

        } else {
            println!("Start GC failed - (already running/locked)");
        }

        Ok(())
    }
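    // The two phases above form a classic mark-and-sweep (a summary inferred
    // from the calls, not original source text): phase 1 opens every
    // .fidx/.didx index and marks each chunk it references; phase 2 removes
    // unmarked chunks, using `oldest_writer` as a cutoff so chunks just
    // inserted by a still-running backup writer are never swept.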
    pub fn insert_chunk(&self, chunk: &[u8]) -> Result<(bool, [u8; 32], u64), Error> {
        self.chunk_store.insert_chunk(chunk)
    }
    pub fn insert_chunk_noverify(
        &self,
        digest: &[u8; 32],
        chunk: &[u8],
    ) -> Result<(bool, u64), Error> {
        self.chunk_store.insert_chunk_noverify(digest, chunk)
    }
}
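
// A minimal smoke-test sketch (not part of the original file). It assumes a
// datastore named "store1" is configured on the test machine, so it is
// ignored by default and must be run manually.
#[cfg(test)]
mod test {
    use super::*;

    #[test]
    #[ignore] // requires a configured datastore
    fn lookup_and_list() -> Result<(), Error> {
        let store = DataStore::lookup_datastore("store1")?;
        let backups = store.list_backups()?;
        println!("found {} backups", backups.len());
        Ok(())
    }
}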