// proxmox-backup.git: src/api2/admin/datastore.rs (blob 9befcaae13a0c89fee9359da309d96c1ce7735b6)
use failure::*;
use futures::*;

use crate::tools;
use crate::api2::types::*;
use crate::api_schema::*;
use crate::api_schema::router::*;
//use crate::server::rest::*;
use serde_json::{json, Value};
use std::collections::{HashSet, HashMap};
use chrono::{DateTime, Datelike, TimeZone, Local};
use std::path::PathBuf;
use std::sync::Arc;

use crate::config::datastore;

use crate::backup::*;
use crate::server::WorkerTask;

use hyper::{header, Body, Response, StatusCode};
use hyper::http::request::Parts;

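/// Group a flat list of backups by their group path
/// (`<backup-type>/<backup-id>`), mapping each group id to the backups
/// that belong to it.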
fn group_backups(backup_list: Vec<BackupInfo>) -> HashMap<String, Vec<BackupInfo>> {

    let mut group_hash = HashMap::new();

    for info in backup_list {
        let group_id = info.backup_dir.group().group_path().to_str().unwrap().to_owned();
        let time_list = group_hash.entry(group_id).or_insert(vec![]);
        time_list.push(info);
    }

    group_hash
}

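/// Mark up to `keep` backups for retention. `select_id` derives a
/// selection key from a backup's local timestamp (e.g. "year/month/day"
/// for daily retention); only the first backup per distinct key is
/// marked, so the list is expected to be sorted newest-first by the
/// caller (see `prune`).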
fn mark_selections<F: Fn(DateTime<Local>, &BackupInfo) -> String> (
    mark: &mut HashSet<PathBuf>,
    list: &[BackupInfo],
    keep: usize,
    select_id: F,
) {
    let mut hash = HashSet::new();
    for info in list {
        let local_time = info.backup_dir.backup_time().with_timezone(&Local);
        if hash.len() >= keep { break; }
        let backup_id = info.backup_dir.relative_path();
        let sel_id: String = select_id(local_time, info);
        if !hash.contains(&sel_id) {
            hash.insert(sel_id);
            //println!(" KEEP ID {} {}", backup_id, local_time.format("%c"));
            mark.insert(backup_id);
        }
    }
}

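/// List all backup groups in a datastore. For each group this reports
/// the latest backup time, the number of backups and the file list of
/// the most recent snapshot.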
fn list_groups(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let store = param["store"].as_str().unwrap();

    let datastore = DataStore::lookup_datastore(store)?;

    let backup_list = BackupInfo::list_backups(&datastore.base_path())?;

    let group_hash = group_backups(backup_list);

    let mut groups = vec![];

    for (_group_id, mut list) in group_hash {

        BackupInfo::sort_list(&mut list, false);

        let info = &list[0];
        let group = info.backup_dir.group();

        groups.push(json!({
            "backup-type": group.backup_type(),
            "backup-id": group.backup_id(),
            "last-backup": info.backup_dir.backup_time().timestamp(),
            "backup-count": list.len() as u64,
            "files": info.files,
        }));
    }

    Ok(json!(groups))
}

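/// List the files of a single backup snapshot.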
fn list_snapshot_files(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let store = tools::required_string_param(&param, "store")?;
    let backup_type = tools::required_string_param(&param, "backup-type")?;
    let backup_id = tools::required_string_param(&param, "backup-id")?;
    let backup_time = tools::required_integer_param(&param, "backup-time")?;

    let snapshot = BackupDir::new(backup_type, backup_id, backup_time);

    let datastore = DataStore::lookup_datastore(store)?;

    let path = datastore.base_path();
    let files = BackupInfo::list_files(&path, &snapshot)?;

    Ok(json!(files))
}

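/// Delete a single backup snapshot from the datastore.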
fn delete_snapshots(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let store = tools::required_string_param(&param, "store")?;
    let backup_type = tools::required_string_param(&param, "backup-type")?;
    let backup_id = tools::required_string_param(&param, "backup-id")?;
    let backup_time = tools::required_integer_param(&param, "backup-time")?;

    let snapshot = BackupDir::new(backup_type, backup_id, backup_time);

    let datastore = DataStore::lookup_datastore(store)?;

    datastore.remove_backup_dir(&snapshot)?;

    Ok(Value::Null)
}

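/// List backup snapshots in a datastore, optionally filtered by backup
/// type and/or backup id.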
fn list_snapshots(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let store = tools::required_string_param(&param, "store")?;
    let backup_type = param["backup-type"].as_str();
    let backup_id = param["backup-id"].as_str();

    let datastore = DataStore::lookup_datastore(store)?;

    let base_path = datastore.base_path();

    let backup_list = BackupInfo::list_backups(&base_path)?;

    let mut snapshots = vec![];

    for info in backup_list {
        let group = info.backup_dir.group();
        if let Some(backup_type) = backup_type {
            if backup_type != group.backup_type() { continue; }
        }
        if let Some(backup_id) = backup_id {
            if backup_id != group.backup_id() { continue; }
        }
        snapshots.push(json!({
            "backup-type": group.backup_type(),
            "backup-id": group.backup_id(),
            "backup-time": info.backup_dir.backup_time().timestamp(),
            "files": info.files,
        }));
    }

    Ok(json!(snapshots))
}

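/// Get datastore usage via `statfs64()` on the datastore base path,
/// returning total, used and available bytes.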
fn status(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let store = param["store"].as_str().unwrap();

    let datastore = DataStore::lookup_datastore(store)?;

    let base_path = datastore.base_path();

    let mut stat: libc::statfs64 = unsafe { std::mem::zeroed() };

    use nix::NixPath;

    let res = base_path.with_nix_path(|cstr| unsafe { libc::statfs64(cstr.as_ptr(), &mut stat) })?;
    nix::errno::Errno::result(res)?;

    let bsize = stat.f_bsize as u64;
    Ok(json!({
        "total": stat.f_blocks*bsize,
        "used": (stat.f_blocks-stat.f_bfree)*bsize,
        "avail": stat.f_bavail*bsize,
    }))
}

fn api_method_status() -> ApiMethod {
    ApiMethod::new(
        status,
        ObjectSchema::new("Get datastore status.")
            .required(
                "store",
                StringSchema::new("Datastore name.")
            )
    )
}

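/// Prune a backup group according to the `keep-*` options, removing
/// every snapshot that is not marked for retention.
///
/// `keep-last` keeps the N most recent backups; the other options key
/// each backup by its local time: `keep-daily` on year/month/day,
/// `keep-weekly` on ISO week, `keep-monthly` on year/month and
/// `keep-yearly` on the year. For example, two snapshots taken on
/// 2019-03-05 both map to the daily key "2019/3/5", so only the more
/// recent one is kept. Without any `keep-*` option, all backups are
/// kept.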
fn prune(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let store = param["store"].as_str().unwrap();

    let backup_type = tools::required_string_param(&param, "backup-type")?;
    let backup_id = tools::required_string_param(&param, "backup-id")?;

    let group = BackupGroup::new(backup_type, backup_id);

    let datastore = DataStore::lookup_datastore(store)?;

    let mut keep_all = true;

    for opt in &["keep-last", "keep-daily", "keep-weekly", "keep-monthly", "keep-yearly"] {
        if !param[opt].is_null() {
            keep_all = false;
            break;
        }
    }

    let worker = WorkerTask::new("prune", Some(store.to_owned()), "root@pam", true)?;
    let result = try_block! {
        if keep_all {
            worker.log("No prune selection - keeping all files.");
            return Ok(());
        } else {
            worker.log(format!("Starting prune on store {}", store));
        }

        let mut list = group.list_backups(&datastore.base_path())?;

        let mut mark = HashSet::new();

        BackupInfo::sort_list(&mut list, false);

        if let Some(keep_last) = param["keep-last"].as_u64() {
            list.iter().take(keep_last as usize).for_each(|info| {
                mark.insert(info.backup_dir.relative_path());
            });
        }

        if let Some(keep_daily) = param["keep-daily"].as_u64() {
            mark_selections(&mut mark, &list, keep_daily as usize, |local_time, _info| {
                format!("{}/{}/{}", local_time.year(), local_time.month(), local_time.day())
            });
        }

        if let Some(keep_weekly) = param["keep-weekly"].as_u64() {
            mark_selections(&mut mark, &list, keep_weekly as usize, |local_time, _info| {
                // use the ISO week-based year, so the week around New Year groups correctly
                format!("{}/{}", local_time.iso_week().year(), local_time.iso_week().week())
            });
        }

        if let Some(keep_monthly) = param["keep-monthly"].as_u64() {
            mark_selections(&mut mark, &list, keep_monthly as usize, |local_time, _info| {
                format!("{}/{}", local_time.year(), local_time.month())
            });
        }

        if let Some(keep_yearly) = param["keep-yearly"].as_u64() {
            mark_selections(&mut mark, &list, keep_yearly as usize, |local_time, _info| {
                format!("{}", local_time.year())
            });
        }

        let mut remove_list: Vec<BackupInfo> = list.into_iter()
            .filter(|info| !mark.contains(&info.backup_dir.relative_path())).collect();

        BackupInfo::sort_list(&mut remove_list, true);

        for info in remove_list {
            worker.log(format!("remove {:?}", info.backup_dir));
            datastore.remove_backup_dir(&info.backup_dir)?;
        }

        Ok(())
    };

    worker.log_result(&result);

    if let Err(err) = result {
        bail!("prune failed - {}", err);
    }

    Ok(json!(null))
}

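/// Add the optional `keep-last`, `keep-daily`, `keep-weekly`,
/// `keep-monthly` and `keep-yearly` parameters (integers >= 1) to an
/// API schema; used by `api_method_prune()`.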
pub fn add_common_prune_parameters(schema: ObjectSchema) -> ObjectSchema {

    schema
        .optional(
            "keep-last",
            IntegerSchema::new("Number of backups to keep.")
                .minimum(1)
        )
        .optional(
            "keep-daily",
            IntegerSchema::new("Number of daily backups to keep.")
                .minimum(1)
        )
        .optional(
            "keep-weekly",
            IntegerSchema::new("Number of weekly backups to keep.")
                .minimum(1)
        )
        .optional(
            "keep-monthly",
            IntegerSchema::new("Number of monthly backups to keep.")
                .minimum(1)
        )
        .optional(
            "keep-yearly",
            IntegerSchema::new("Number of yearly backups to keep.")
                .minimum(1)
        )
}

fn api_method_prune() -> ApiMethod {
    ApiMethod::new(
        prune,
        add_common_prune_parameters(
            ObjectSchema::new("Prune the datastore.")
                .required(
                    "store",
                    StringSchema::new("Datastore name.")
                )
                .required("backup-type", BACKUP_TYPE_SCHEMA.clone())
                .required("backup-id", BACKUP_ID_SCHEMA.clone())
        )
    )
}

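/// Start garbage collection on a datastore inside a background worker
/// thread and return the worker task's UPID.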
fn start_garbage_collection(
    param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let store = param["store"].as_str().unwrap().to_string();

    let datastore = DataStore::lookup_datastore(&store)?;

    println!("Starting garbage collection on store {}", store);

    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

    let upid_str = WorkerTask::new_thread(
        "garbage_collection", Some(store.clone()), "root@pam", to_stdout, move |worker|
    {
        worker.log(format!("starting garbage collection on store {}", store));
        datastore.garbage_collection(worker)
    })?;

    Ok(json!(upid_str))
}

pub fn api_method_start_garbage_collection() -> ApiMethod {
    ApiMethod::new(
        start_garbage_collection,
        ObjectSchema::new("Start garbage collection.")
            .required("store", StringSchema::new("Datastore name."))
    )
}

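/// Return the status of the last garbage collection run on a
/// datastore.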
fn garbage_collection_status(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let store = param["store"].as_str().unwrap();

    let datastore = DataStore::lookup_datastore(store)?;

    println!("Garbage collection status on store {}", store);

    let status = datastore.last_gc_status();

    Ok(serde_json::to_value(&status)?)
}

pub fn api_method_garbage_collection_status() -> ApiMethod {
    ApiMethod::new(
        garbage_collection_status,
        ObjectSchema::new("Garbage collection status.")
            .required("store", StringSchema::new("Datastore name."))
    )
}

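/// List all configured datastores (the directory index for this API).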
fn get_datastore_list(
    _param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let config = datastore::config()?;

    Ok(config.convert_to_array("store"))
}

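/// Download a single raw file from a backup snapshot, streamed as an
/// `application/octet-stream` response body.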
fn download_file(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiAsyncMethod,
    _rpcenv: Box<dyn RpcEnvironment>,
) -> Result<BoxFut, Error> {

    let store = tools::required_string_param(&param, "store")?;

    let datastore = DataStore::lookup_datastore(store)?;

    let file_name = tools::required_string_param(&param, "file-name")?.to_owned();

    let backup_type = tools::required_string_param(&param, "backup-type")?;
    let backup_id = tools::required_string_param(&param, "backup-id")?;
    let backup_time = tools::required_integer_param(&param, "backup-time")?;

    println!("Download {} from {} ({}/{}/{}/{})", file_name, store,
             backup_type, backup_id, Local.timestamp(backup_time, 0), file_name);

    let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);

    let mut path = datastore.base_path();
    path.push(backup_dir.relative_path());
    path.push(&file_name);

    let response_future = tokio::fs::File::open(path)
        .map_err(|err| http_err!(BAD_REQUEST, format!("File open failed: {}", err)))
        .and_then(move |file| {
            let payload = tokio::codec::FramedRead::new(file, tokio::codec::BytesCodec::new())
                .map(|bytes| hyper::Chunk::from(bytes.freeze()));
            let body = Body::wrap_stream(payload);

            // fixme: set other headers ?
            Ok(Response::builder()
                .status(StatusCode::OK)
                .header(header::CONTENT_TYPE, "application/octet-stream")
                .body(body)
                .unwrap())
        });

    Ok(Box::new(response_future))
}

pub fn api_method_download_file() -> ApiAsyncMethod {
    ApiAsyncMethod::new(
        download_file,
        ObjectSchema::new("Download single raw file from backup snapshot.")
            .required("store", StringSchema::new("Datastore name."))
            .required("backup-type", BACKUP_TYPE_SCHEMA.clone())
            .required("backup-id", BACKUP_ID_SCHEMA.clone())
            .required("backup-time", BACKUP_TIME_SCHEMA.clone())
            .required("file-name", StringSchema::new("Raw file name.").format(FILENAME_FORMAT.clone()))
    )
}

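/// Upload the client log file as `client.log.blob` into an existing
/// backup snapshot. Fails if the snapshot already contains a log; the
/// blob's CRC is always recomputed on the server side before the file
/// is written.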
fn upload_backup_log(
    _parts: Parts,
    req_body: Body,
    param: Value,
    _info: &ApiAsyncMethod,
    _rpcenv: Box<dyn RpcEnvironment>,
) -> Result<BoxFut, Error> {

    let store = tools::required_string_param(&param, "store")?;

    let datastore = DataStore::lookup_datastore(store)?;

    let file_name = "client.log.blob";

    let backup_type = tools::required_string_param(&param, "backup-type")?;
    let backup_id = tools::required_string_param(&param, "backup-id")?;
    let backup_time = tools::required_integer_param(&param, "backup-time")?;

    let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);

    let mut path = datastore.base_path();
    path.push(backup_dir.relative_path());
    path.push(file_name);

    if path.exists() {
        bail!("backup already contains a log.");
    }

    println!("Upload backup log to {}/{}/{}/{}/{}", store,
             backup_type, backup_id, BackupDir::backup_time_to_string(backup_dir.backup_time()), file_name);

    let resp = req_body
        .map_err(Error::from)
        .fold(Vec::new(), |mut acc, chunk| {
            acc.extend_from_slice(&*chunk);
            Ok::<_, Error>(acc)
        })
        .and_then(move |data| {
            let mut blob = DataBlob::from_raw(data)?;
            // always compute the CRC on the server side
            blob.set_crc(blob.compute_crc());
            let raw_data = blob.raw_data();
            crate::tools::file_set_contents(&path, raw_data, None)?;
            Ok(())
        })
        .and_then(move |_| {
            Ok(crate::server::formatter::json_response(Ok(Value::Null)))
        });

    Ok(Box::new(resp))
}

pub fn api_method_upload_backup_log() -> ApiAsyncMethod {
    ApiAsyncMethod::new(
        upload_backup_log,
        ObjectSchema::new("Upload the client backup log file into a backup snapshot.")
            .required("store", StringSchema::new("Datastore name."))
            .required("backup-type", BACKUP_TYPE_SCHEMA.clone())
            .required("backup-id", BACKUP_ID_SCHEMA.clone())
            .required("backup-time", BACKUP_TIME_SCHEMA.clone())
    )
}

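/// Build the datastore API router: the top level lists the configured
/// datastores, and `match_all` routes `<store>/...` to the
/// per-datastore subdirectories (`download`, `upload-backup-log`, `gc`,
/// `files`, `groups`, `snapshots`, `prune`, `status`).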
pub fn router() -> Router {

    let store_schema: Arc<Schema> = Arc::new(
        StringSchema::new("Datastore name.").into()
    );

    let datastore_info = Router::new()
        .subdir(
            "download",
            Router::new()
                .download(api_method_download_file())
        )
        .subdir(
            "upload-backup-log",
            Router::new()
                .upload(api_method_upload_backup_log())
        )
        .subdir(
            "gc",
            Router::new()
                .get(api_method_garbage_collection_status())
                .post(api_method_start_garbage_collection()))
        .subdir(
            "files",
            Router::new()
                .get(
                    ApiMethod::new(
                        list_snapshot_files,
                        ObjectSchema::new("List snapshot files.")
                            .required("store", store_schema.clone())
                            .required("backup-type", BACKUP_TYPE_SCHEMA.clone())
                            .required("backup-id", BACKUP_ID_SCHEMA.clone())
                            .required("backup-time", BACKUP_TIME_SCHEMA.clone())
                    )
                )
        )
        .subdir(
            "groups",
            Router::new()
                .get(ApiMethod::new(
                    list_groups,
                    ObjectSchema::new("List backup groups.")
                        .required("store", store_schema.clone()))))
        .subdir(
            "snapshots",
            Router::new()
                .get(
                    ApiMethod::new(
                        list_snapshots,
                        ObjectSchema::new("List backup snapshots.")
                            .required("store", store_schema.clone())
                            .optional("backup-type", BACKUP_TYPE_SCHEMA.clone())
                            .optional("backup-id", BACKUP_ID_SCHEMA.clone())
                    )
                )
                .delete(
                    ApiMethod::new(
                        delete_snapshots,
                        ObjectSchema::new("Delete backup snapshot.")
                            .required("store", store_schema.clone())
                            .required("backup-type", BACKUP_TYPE_SCHEMA.clone())
                            .required("backup-id", BACKUP_ID_SCHEMA.clone())
                            .required("backup-time", BACKUP_TIME_SCHEMA.clone())
                    )
                )
        )
        .subdir(
            "prune",
            Router::new()
                .post(api_method_prune())
        )
        .subdir(
            "status",
            Router::new()
                .get(api_method_status())
        )
        .list_subdirs();

    Router::new()
        .get(ApiMethod::new(
            get_datastore_list,
            ObjectSchema::new("Directory index.")))
        .match_all("store", datastore_info)
}