3 use chrono
::prelude
::*;
5 use std
::path
::{PathBuf, Path}
;
6 use std
::collections
::HashMap
;
7 use lazy_static
::lazy_static
;
8 use std
::sync
::{Mutex, Arc}
;
10 use std
::os
::unix
::io
::AsRawFd
;
13 use crate::config
::datastore
;
14 use super::chunk_store
::*;
15 use super::image_index
::*;
16 use super::archive_index
::*;
18 use chrono
::{Utc, TimeZone}
;
/// Handle to one configured datastore: a shared chunk store plus the
/// image/archive index files stored below its base directory.
///
/// Instances are shared as `Arc<DataStore>` through the global
/// `datastore_map` cache (see `lookup_datastore`), so all users of a
/// store see the same chunk store and the same GC lock.
pub struct DataStore {
    // `Arc` because index readers/writers receive a clone of the handle.
    chunk_store: Arc<ChunkStore>,
    // Guards garbage collection; `garbage_collection()` uses `try_lock`
    // so only one GC can run per store at a time.
    gc_mutex: Mutex<bool>,
}
/// Metadata for a single backup snapshot found on disk.
///
/// Produced by `list_backups`, which derives all three fields from the
/// directory layout `<backup_type>/<backup_id>/<timestamp>`.
pub struct BackupInfo {
    // Backup category; directory names are matched against `^(host|vm|ct)$`.
    pub backup_type: String,
    // Backup identifier (second directory level).
    pub backup_id: String,
    // Snapshot time, parsed from the `%Y-%m-%dT%H:%M:%S` directory name.
    pub backup_time: DateTime<Utc>,
}
// Process-wide cache of opened datastores, keyed by store name.
// `lookup_datastore` reuses an entry as long as its configured path is
// unchanged, so all callers share one ChunkStore and one GC mutex.
// (This is the interior of a `lazy_static!` block.)
static ref datastore_map: Mutex<HashMap<String, Arc<DataStore>>> = Mutex::new(HashMap::new());
38 pub fn lookup_datastore(name
: &str) -> Result
<Arc
<DataStore
>, Error
> {
40 let config
= datastore
::config()?
;
41 let (_
, store_config
) = config
.sections
.get(name
)
42 .ok_or(format_err
!("no such datastore '{}'", name
))?
;
44 let path
= store_config
["path"].as_str().unwrap();
46 let mut map
= datastore_map
.lock().unwrap();
48 if let Some(datastore
) = map
.get(name
) {
49 // Compare Config - if changed, create new Datastore object!
50 if datastore
.chunk_store
.base
== PathBuf
::from(path
) {
51 return Ok(datastore
.clone());
55 if let Ok(datastore
) = DataStore
::open(name
) {
56 let datastore
= Arc
::new(datastore
);
57 map
.insert(name
.to_string(), datastore
.clone());
61 bail
!("store not found");
64 pub fn open(store_name
: &str) -> Result
<Self, Error
> {
66 let config
= datastore
::config()?
;
67 let (_
, store_config
) = config
.sections
.get(store_name
)
68 .ok_or(format_err
!("no such datastore '{}'", store_name
))?
;
70 let path
= store_config
["path"].as_str().unwrap();
72 let chunk_store
= ChunkStore
::open(store_name
, path
)?
;
75 chunk_store
: Arc
::new(chunk_store
),
76 gc_mutex
: Mutex
::new(false),
80 pub fn create_image_writer
<P
: AsRef
<Path
>>(&self, filename
: P
, size
: usize, chunk_size
: usize) -> Result
<ImageIndexWriter
, Error
> {
82 let index
= ImageIndexWriter
::create(self.chunk_store
.clone(), filename
.as_ref(), size
, chunk_size
)?
;
87 pub fn open_image_reader
<P
: AsRef
<Path
>>(&self, filename
: P
) -> Result
<ImageIndexReader
, Error
> {
89 let index
= ImageIndexReader
::open(self.chunk_store
.clone(), filename
.as_ref())?
;
94 pub fn create_archive_writer
<P
: AsRef
<Path
>>(
97 ) -> Result
<ArchiveIndexWriter
, Error
> {
99 let index
= ArchiveIndexWriter
::create(
100 self.chunk_store
.clone(), filename
.as_ref(), chunk_size
)?
;
105 pub fn open_archive_reader
<P
: AsRef
<Path
>>(&self, filename
: P
) -> Result
<ArchiveIndexReader
, Error
> {
107 let index
= ArchiveIndexReader
::open(self.chunk_store
.clone(), filename
.as_ref())?
;
/// Absolute filesystem root of this datastore (delegates to the chunk
/// store, which owns the configured base directory).
pub fn base_path(&self) -> PathBuf {
    self.chunk_store.base_path()
}
116 pub fn get_backup_dir(
120 backup_time
: DateTime
<Utc
>,
123 let mut relative_path
= PathBuf
::new();
125 relative_path
.push(backup_type
);
127 relative_path
.push(backup_id
);
129 let date_str
= backup_time
.format("%Y-%m-%dT%H:%M:%S").to_string();
131 relative_path
.push(&date_str
);
136 pub fn create_backup_dir(
141 ) -> Result
<PathBuf
, Error
> {
142 let mut relative_path
= PathBuf
::new();
144 relative_path
.push(backup_type
);
146 relative_path
.push(backup_id
);
148 let dt
= Utc
.timestamp(backup_time
, 0);
149 let date_str
= dt
.format("%Y-%m-%dT%H:%M:%S").to_string();
151 println
!("date: {}", date_str
);
153 relative_path
.push(&date_str
);
156 let mut full_path
= self.base_path();
157 full_path
.push(&relative_path
);
159 std
::fs
::create_dir_all(&full_path
)?
;
/// Scan the datastore directory tree and return metadata for every
/// backup snapshot found.
///
/// Walks exactly three directory levels below `base_path()` -
/// `<backup_type>/<backup_id>/<timestamp>` - keeping only directory
/// entries whose names match the regexes below; anything else is
/// silently skipped. (Trailing closure/brace lines were lost in
/// extraction and are reconstructed - verify against the original.)
pub fn list_backups(&self) -> Result<Vec<BackupInfo>, Error> {
    let path = self.base_path();

    let mut list = vec![];

    lazy_static! {
        // Level 0: backup category
        static ref BACKUP_TYPE_REGEX: regex::Regex = regex::Regex::new(r"^(host|vm|ct)$").unwrap();
        // Level 1: backup id - must start with a letter
        static ref BACKUP_ID_REGEX: regex::Regex = regex::Regex::new(r"^[A-Za-z][A-Za-z0-9_-]+$").unwrap();
        // Level 2: snapshot timestamp; must match the format written by
        // create_backup_dir.
        static ref BACKUP_DATE_REGEX: regex::Regex = regex::Regex::new(
            r"^[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}$").unwrap();
    }

    // Nested scandir: each level filters by regex and recurses using the
    // parent's directory fd (openat-style), not a rebuilt path string.
    tools::scandir(libc::AT_FDCWD, &path, &BACKUP_TYPE_REGEX, |l0_fd, backup_type, file_type| {
        if file_type != nix::dir::Type::Directory { return Ok(()); }
        tools::scandir(l0_fd, backup_type, &BACKUP_ID_REGEX, |l1_fd, backup_id, file_type| {
            if file_type != nix::dir::Type::Directory { return Ok(()); }
            tools::scandir(l1_fd, backup_id, &BACKUP_DATE_REGEX, |_, backup_time, file_type| {
                if file_type != nix::dir::Type::Directory { return Ok(()); }

                // A name that matched the regex can still fail to parse
                // (e.g. month 13); that surfaces as an error here.
                let dt = Utc.datetime_from_str(backup_time, "%Y-%m-%dT%H:%M:%S")?;

                list.push(BackupInfo {
                    backup_type: backup_type.to_owned(),
                    backup_id: backup_id.to_owned(),
                    backup_time: dt,
                });

                Ok(())
            })
        })
    })?;

    Ok(list)
}
199 pub fn list_images(&self) -> Result
<Vec
<PathBuf
>, Error
> {
200 let base
= self.base_path();
202 let mut list
= vec
![];
204 use walkdir
::WalkDir
;
206 let walker
= WalkDir
::new(&base
).same_file_system(true).into_iter();
208 // make sure we skip .chunks (and other hidden files to keep it simple)
209 fn is_hidden(entry
: &walkdir
::DirEntry
) -> bool
{
212 .map(|s
| s
.starts_with("."))
216 for entry
in walker
.filter_entry(|e
| !is_hidden(e
)) {
217 let path
= entry?
.into_path();
218 if let Some(ext
) = path
.extension() {
221 } else if ext
== "aidx" {
/// GC phase 1: open every index file in the store and record each
/// referenced chunk in `status`, so the sweep phase knows which chunks
/// are still alive.
fn mark_used_chunks(&self, status: &mut GarbageCollectionStatus) -> Result<(), Error> {

    let image_list = self.list_images()?;

    for path in image_list {
        if let Some(ext) = path.extension() {
            // Dispatch on extension: .iidx = image index, .aidx = archive
            // index; both mark their chunks into the shared status.
            if ext == "iidx" {
                let index = self.open_image_reader(&path)?;
                index.mark_used_chunks(status)?;
            } else if ext == "aidx" {
                let index = self.open_archive_reader(&path)?;
                index.mark_used_chunks(status)?;
            }
        }
    }

    Ok(())
}
/// Run garbage collection: mark all chunks referenced by index files,
/// then sweep unreferenced chunks from the chunk store.
///
/// Concurrent runs are prevented via `gc_mutex.try_lock()`; if a GC is
/// already running this prints a notice and returns Ok(()) instead of
/// blocking.
pub fn garbage_collection(&self) -> Result<(), Error> {

    if let Ok(ref mut _mutex) = self.gc_mutex.try_lock() {

        let mut gc_status = GarbageCollectionStatus::default();
        // NOTE(review): looks redundant if Default already yields 0 -
        // verify GarbageCollectionStatus's Default impl before removing.
        gc_status.used_bytes = 0;

        println!("Start GC phase1 (mark chunks)");

        self.mark_used_chunks(&mut gc_status)?;

        println!("Start GC phase2 (sweep unused chunks)");
        self.chunk_store.sweep_unused_chunks(&mut gc_status)?;

        println!("Used bytes: {}", gc_status.used_bytes);
        println!("Used chunks: {}", gc_status.used_chunks);
        println!("Disk bytes: {}", gc_status.disk_bytes);
        println!("Disk chunks: {}", gc_status.disk_chunks);

    } else {
        println!("Start GC failed - (already running/locked)");
    }

    Ok(())
}