3 use chrono
::prelude
::*;
5 use std
::path
::{PathBuf, Path}
;
6 use std
::collections
::HashMap
;
7 use lazy_static
::lazy_static
;
8 use std
::sync
::{Mutex, Arc}
;
11 use crate::config
::datastore
;
12 use super::chunk_store
::*;
13 use super::fixed_index
::*;
14 use super::dynamic_index
::*;
16 use chrono
::{Utc, TimeZone}
;
/// Datastore Management
///
/// A Datastore can store several backups, and provides the
/// management interface for backup.
pub struct DataStore {
    // Shared handle to the underlying chunk store; wrapped in `Arc` so the
    // index writers/readers created below can hold their own reference.
    chunk_store: Arc<ChunkStore>,
    // Taken via try_lock() in garbage_collection() so at most one GC runs
    // at a time; the bool payload itself is never read in this chunk.
    gc_mutex: Mutex<bool>,
}
/// Detailed Backup Information
pub struct BackupInfo {
    /// Backup type ("host", "vm" or "ct" — see BACKUP_TYPE_REGEX in list_backups)
    pub backup_type: String,
    /// Unique (for this type) ID
    pub backup_id: String,
    /// Backup timestamp (parsed from the "%Y-%m-%dT%H:%M:%S" directory name, UTC)
    pub backup_time: DateTime<Utc>,
}
lazy_static! {
    // Process-wide cache of opened datastores, keyed by datastore name.
    // Stores Arc<DataStore> so lookup_datastore can hand out cheap clones
    // while keeping the cached instance alive.
    static ref datastore_map: Mutex<HashMap<String, Arc<DataStore>>> = Mutex::new(HashMap::new());
}
44 pub fn lookup_datastore(name
: &str) -> Result
<Arc
<DataStore
>, Error
> {
46 let config
= datastore
::config()?
;
47 let (_
, store_config
) = config
.sections
.get(name
)
48 .ok_or(format_err
!("no such datastore '{}'", name
))?
;
50 let path
= store_config
["path"].as_str().unwrap();
52 let mut map
= datastore_map
.lock().unwrap();
54 if let Some(datastore
) = map
.get(name
) {
55 // Compare Config - if changed, create new Datastore object!
56 if datastore
.chunk_store
.base
== PathBuf
::from(path
) {
57 return Ok(datastore
.clone());
61 if let Ok(datastore
) = DataStore
::open(name
) {
62 let datastore
= Arc
::new(datastore
);
63 map
.insert(name
.to_string(), datastore
.clone());
67 bail
!("store not found");
70 pub fn open(store_name
: &str) -> Result
<Self, Error
> {
72 let config
= datastore
::config()?
;
73 let (_
, store_config
) = config
.sections
.get(store_name
)
74 .ok_or(format_err
!("no such datastore '{}'", store_name
))?
;
76 let path
= store_config
["path"].as_str().unwrap();
78 let chunk_store
= ChunkStore
::open(store_name
, path
)?
;
81 chunk_store
: Arc
::new(chunk_store
),
82 gc_mutex
: Mutex
::new(false),
86 pub fn create_fixed_writer
<P
: AsRef
<Path
>>(&self, filename
: P
, size
: usize, chunk_size
: usize) -> Result
<FixedIndexWriter
, Error
> {
88 let index
= FixedIndexWriter
::create(self.chunk_store
.clone(), filename
.as_ref(), size
, chunk_size
)?
;
93 pub fn open_fixed_reader
<P
: AsRef
<Path
>>(&self, filename
: P
) -> Result
<FixedIndexReader
, Error
> {
95 let index
= FixedIndexReader
::open(self.chunk_store
.clone(), filename
.as_ref())?
;
100 pub fn create_dynamic_writer
<P
: AsRef
<Path
>>(
103 ) -> Result
<DynamicIndexWriter
, Error
> {
105 let index
= DynamicIndexWriter
::create(
106 self.chunk_store
.clone(), filename
.as_ref(), chunk_size
)?
;
111 pub fn open_dynamic_reader
<P
: AsRef
<Path
>>(&self, filename
: P
) -> Result
<DynamicIndexReader
, Error
> {
113 let index
= DynamicIndexReader
::open(self.chunk_store
.clone(), filename
.as_ref())?
;
/// Root directory of this datastore (delegates to the chunk store's base path).
pub fn base_path(&self) -> PathBuf {
    self.chunk_store.base_path()
}
122 pub fn get_backup_dir(
126 backup_time
: DateTime
<Utc
>,
129 let mut relative_path
= PathBuf
::new();
131 relative_path
.push(backup_type
);
133 relative_path
.push(backup_id
);
135 let date_str
= backup_time
.format("%Y-%m-%dT%H:%M:%S").to_string();
137 relative_path
.push(&date_str
);
142 pub fn create_backup_dir(
147 ) -> Result
<PathBuf
, Error
> {
148 let mut relative_path
= PathBuf
::new();
150 relative_path
.push(backup_type
);
152 relative_path
.push(backup_id
);
154 let dt
= Utc
.timestamp(backup_time
, 0);
155 let date_str
= dt
.format("%Y-%m-%dT%H:%M:%S").to_string();
157 println
!("date: {}", date_str
);
159 relative_path
.push(&date_str
);
162 let mut full_path
= self.base_path();
163 full_path
.push(&relative_path
);
165 std
::fs
::create_dir_all(&full_path
)?
;
/// List all backups found below `base_path()`.
///
/// Scans the three-level directory layout `<type>/<id>/<timestamp>`
/// (the layout written by create_backup_dir) and collects one
/// `BackupInfo` per timestamp directory. Entries whose names do not
/// match the expected patterns, or that are not directories, are
/// silently skipped.
pub fn list_backups(&self) -> Result<Vec<BackupInfo>, Error> {
    let path = self.base_path();

    let mut list = vec![];

    lazy_static! {
        // Level 0: backup type.
        static ref BACKUP_TYPE_REGEX: regex::Regex = regex::Regex::new(r"^(host|vm|ct)$").unwrap();
        // Level 1: backup id — starts with a letter, at least two chars.
        static ref BACKUP_ID_REGEX: regex::Regex = regex::Regex::new(r"^[A-Za-z][A-Za-z0-9_-]+$").unwrap();
        // Level 2: timestamp, same format create_backup_dir writes.
        static ref BACKUP_DATE_REGEX: regex::Regex = regex::Regex::new(
            r"^[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}$").unwrap();
    }

    // NOTE(review): tools::scandir presumably invokes the callback for each
    // entry whose name matches the regex, passing a dir fd for recursion —
    // confirm against the tools module.
    tools::scandir(libc::AT_FDCWD, &path, &BACKUP_TYPE_REGEX, |l0_fd, backup_type, file_type| {
        if file_type != nix::dir::Type::Directory { return Ok(()); }
        tools::scandir(l0_fd, backup_type, &BACKUP_ID_REGEX, |l1_fd, backup_id, file_type| {
            if file_type != nix::dir::Type::Directory { return Ok(()); }
            tools::scandir(l1_fd, backup_id, &BACKUP_DATE_REGEX, |_, backup_time, file_type| {
                if file_type != nix::dir::Type::Directory { return Ok(()); }

                // Parse the directory name back into a UTC timestamp;
                // a malformed (but regex-matching) date aborts the scan via `?`.
                let dt = Utc.datetime_from_str(backup_time, "%Y-%m-%dT%H:%M:%S")?;

                list.push(BackupInfo {
                    backup_type: backup_type.to_owned(),
                    backup_id: backup_id.to_owned(),
                    backup_time: dt,
                });

                Ok(())
            })
        })
    })?;

    Ok(list)
}
/// Collect the paths of all index files (`*.fidx`, `*.didx`) below `base_path()`.
pub fn list_images(&self) -> Result<Vec<PathBuf>, Error> {
    let base = self.base_path();

    let mut list = vec![];

    use walkdir::WalkDir;

    // Stay on one filesystem so the walk cannot wander into other mounts.
    let walker = WalkDir::new(&base).same_file_system(true).into_iter();

    // make sure we skip .chunks (and other hidden files to keep it simple)
    fn is_hidden(entry: &walkdir::DirEntry) -> bool {
        entry.file_name()
            .to_str()
            .map(|s| s.starts_with("."))
            .unwrap_or(false) // non-UTF-8 names are not treated as hidden
    }

    for entry in walker.filter_entry(|e| !is_hidden(e)) {
        // Propagate walk errors (permission problems etc.) to the caller.
        let path = entry?.into_path();
        if let Some(ext) = path.extension() {
            if ext == "fidx" {
                list.push(path);
            } else if ext == "didx" {
                list.push(path);
            }
        }
    }

    Ok(list)
}
/// GC phase 1: open every index file and mark all referenced chunks in `status`.
///
/// Delegates the actual marking to the index readers' `mark_used_chunks`;
/// files with other extensions are ignored.
fn mark_used_chunks(&self, status: &mut GarbageCollectionStatus) -> Result<(), Error> {

    let image_list = self.list_images()?;

    for path in image_list {
        if let Some(ext) = path.extension() {
            if ext == "fidx" {
                let index = self.open_fixed_reader(&path)?;
                index.mark_used_chunks(status)?;
            } else if ext == "didx" {
                let index = self.open_dynamic_reader(&path)?;
                index.mark_used_chunks(status)?;
            }
        }
    }

    Ok(())
}
/// Run a full garbage collection: mark used chunks, then sweep unused ones.
///
/// `try_lock` on `gc_mutex` ensures at most one GC runs at a time; when a
/// GC is already in progress this only prints a message and returns.
pub fn garbage_collection(&self) -> Result<(), Error> {

    if let Ok(ref mut _mutex) = self.gc_mutex.try_lock() {

        let mut gc_status = GarbageCollectionStatus::default();
        // NOTE(review): redundant if Default already zeroes used_bytes — confirm.
        gc_status.used_bytes = 0;

        println!("Start GC phase1 (mark chunks)");

        self.mark_used_chunks(&mut gc_status)?;

        println!("Start GC phase2 (sweep unused chunks)");
        self.chunk_store.sweep_unused_chunks(&mut gc_status)?;

        // Report the statistics accumulated by the two phases.
        println!("Used bytes: {}", gc_status.used_bytes);
        println!("Used chunks: {}", gc_status.used_chunks);
        println!("Disk bytes: {}", gc_status.disk_bytes);
        println!("Disk chunks: {}", gc_status.disk_chunks);

    } else {
        println!("Start GC failed - (already running/locked)");
    }

    Ok(())
}