use failure::*;
use futures::*;

use crate::tools;
use crate::api2::types::*;
use crate::api_schema::*;
use crate::api_schema::router::*;
//use crate::server::rest::*;
use serde_json::{json, Value};
use std::collections::{HashSet, HashMap};
use chrono::{DateTime, Datelike, TimeZone, Local};
use std::path::PathBuf;

use proxmox::tools::{try_block, fs::file_get_contents, fs::file_set_contents};

use crate::config::datastore;

use crate::backup::*;
use crate::server::WorkerTask;

use hyper::{header, Body, Response, StatusCode};
use hyper::http::request::Parts;

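/// Read the `index.json.blob` of a backup snapshot and return its `files` array.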
fn read_backup_index(store: &DataStore, backup_dir: &BackupDir) -> Result<Value, Error> {

    let mut path = store.base_path();
    path.push(backup_dir.relative_path());
    path.push("index.json.blob");

    let raw_data = file_get_contents(&path)?;
    let data = DataBlob::from_raw(raw_data)?.decode(None)?;
    let mut result: Value = serde_json::from_reader(&mut &data[..])?;

    let result = result["files"].take();

    if result == Value::Null {
        bail!("missing 'files' property in backup index {:?}", path);
    }

    Ok(result)
}

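/// Group a flat list of backups by their backup group (type/id).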
fn group_backups(backup_list: Vec<BackupInfo>) -> HashMap<String, Vec<BackupInfo>> {

    let mut group_hash = HashMap::new();

    for info in backup_list {
        let group_id = info.backup_dir.group().group_path().to_str().unwrap().to_owned();
        let time_list = group_hash.entry(group_id).or_insert(vec![]);
        time_list.push(info);
    }

    group_hash
}

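/// Mark up to `keep` backups for retention. `select_id` maps a backup to a
/// bucket key (e.g. "year/month/day"); at most one backup per bucket is marked.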
fn mark_selections<F: Fn(DateTime<Local>, &BackupInfo) -> String> (
    mark: &mut HashSet<PathBuf>,
    list: &[BackupInfo],
    keep: usize,
    select_id: F,
){
    let mut hash = HashSet::new();
    for info in list {
        let local_time = info.backup_dir.backup_time().with_timezone(&Local);
        if hash.len() >= keep { break; }
        let backup_id = info.backup_dir.relative_path();
        let sel_id: String = select_id(local_time, &info);
        if !hash.contains(&sel_id) {
            hash.insert(sel_id);
            //println!(" KEEP ID {} {}", backup_id, local_time.format("%c"));
            mark.insert(backup_id);
        }
    }
}

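/// List backup groups in a datastore, with last backup time, snapshot count
/// and the file list of the most recent snapshot.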
fn list_groups(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let store = param["store"].as_str().unwrap();

    let datastore = DataStore::lookup_datastore(store)?;

    let backup_list = BackupInfo::list_backups(&datastore.base_path())?;

    let group_hash = group_backups(backup_list);

    let mut groups = vec![];

    for (_group_id, mut list) in group_hash {

        BackupInfo::sort_list(&mut list, false);

        let info = &list[0];
        let group = info.backup_dir.group();

        groups.push(json!({
            "backup-type": group.backup_type(),
            "backup-id": group.backup_id(),
            "last-backup": info.backup_dir.backup_time().timestamp(),
            "backup-count": list.len() as u64,
            "files": info.files,
        }));
    }

    Ok(json!(groups))
}

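/// List the files of a single snapshot, merging the entries from the backup
/// index with any additional files present on disk (e.g. an uploaded log).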
fn list_snapshot_files (
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let store = tools::required_string_param(&param, "store")?;
    let backup_type = tools::required_string_param(&param, "backup-type")?;
    let backup_id = tools::required_string_param(&param, "backup-id")?;
    let backup_time = tools::required_integer_param(&param, "backup-time")?;

    let datastore = DataStore::lookup_datastore(store)?;
    let snapshot = BackupDir::new(backup_type, backup_id, backup_time);

    let mut files = read_backup_index(&datastore, &snapshot)?;

    let info = BackupInfo::new(&datastore.base_path(), snapshot)?;

    let file_set = files.as_array().unwrap().iter().fold(HashSet::new(), |mut acc, item| {
        acc.insert(item["filename"].as_str().unwrap().to_owned());
        acc
    });

    for file in info.files {
        if file_set.contains(&file) { continue; }
        files.as_array_mut().unwrap().push(json!({ "filename": file }));
    }

    Ok(files)
}

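/// Delete a single backup snapshot (its directory and all contained files).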
fn delete_snapshots (
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let store = tools::required_string_param(&param, "store")?;
    let backup_type = tools::required_string_param(&param, "backup-type")?;
    let backup_id = tools::required_string_param(&param, "backup-id")?;
    let backup_time = tools::required_integer_param(&param, "backup-time")?;

    let snapshot = BackupDir::new(backup_type, backup_id, backup_time);

    let datastore = DataStore::lookup_datastore(store)?;

    datastore.remove_backup_dir(&snapshot)?;

    Ok(Value::Null)
}

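/// List snapshots in a datastore, optionally filtered by backup type and id.
/// Each entry carries the total backup size when the index is readable.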
fn list_snapshots (
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let store = tools::required_string_param(&param, "store")?;
    let backup_type = param["backup-type"].as_str();
    let backup_id = param["backup-id"].as_str();

    let datastore = DataStore::lookup_datastore(store)?;

    let base_path = datastore.base_path();

    let backup_list = BackupInfo::list_backups(&base_path)?;

    let mut snapshots = vec![];

    for info in backup_list {
        let group = info.backup_dir.group();
        if let Some(backup_type) = backup_type {
            if backup_type != group.backup_type() { continue; }
        }
        if let Some(backup_id) = backup_id {
            if backup_id != group.backup_id() { continue; }
        }

        let mut result_item = json!({
            "backup-type": group.backup_type(),
            "backup-id": group.backup_id(),
            "backup-time": info.backup_dir.backup_time().timestamp(),
            "files": info.files,
        });

        if let Ok(index) = read_backup_index(&datastore, &info.backup_dir) {
            let mut backup_size = 0;
            for item in index.as_array().unwrap().iter() {
                if let Some(item_size) = item["size"].as_u64() {
                    backup_size += item_size;
                }
            }
            result_item["size"] = backup_size.into();
        }

        snapshots.push(result_item);
    }

    Ok(json!(snapshots))
}

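/// Report filesystem usage (total/used/available bytes) for the datastore
/// base path via statfs64().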
fn status(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let store = param["store"].as_str().unwrap();

    let datastore = DataStore::lookup_datastore(store)?;

    let base_path = datastore.base_path();

    let mut stat: libc::statfs64 = unsafe { std::mem::zeroed() };

    use nix::NixPath;

    let res = base_path.with_nix_path(|cstr| unsafe { libc::statfs64(cstr.as_ptr(), &mut stat) })?;
    nix::errno::Errno::result(res)?;

    let bsize = stat.f_bsize as u64;
    Ok(json!({
        "total": stat.f_blocks*bsize,
        "used": (stat.f_blocks-stat.f_bfree)*bsize,
        "avail": stat.f_bavail*bsize,
    }))
}

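/// Expand to a schema property list containing the common `keep-*` prune
/// parameters followed by the given extra entries.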
#[macro_export]
macro_rules! add_common_prune_parameters {
    ($( $list:tt )*) => {
        [
            (
                "keep-last",
                true,
                &IntegerSchema::new("Number of backups to keep.")
                    .minimum(1)
                    .schema()
            ),
            (
                "keep-daily",
                true,
                &IntegerSchema::new("Number of daily backups to keep.")
                    .minimum(1)
                    .schema()
            ),
            (
                "keep-weekly",
                true,
                &IntegerSchema::new("Number of weekly backups to keep.")
                    .minimum(1)
                    .schema()
            ),
            (
                "keep-monthly",
                true,
                &IntegerSchema::new("Number of monthly backups to keep.")
                    .minimum(1)
                    .schema()
            ),
            (
                "keep-yearly",
                true,
                &IntegerSchema::new("Number of yearly backups to keep.")
                    .minimum(1)
                    .schema()
            ),
            $( $list )*
        ]
    }
}

const API_METHOD_STATUS: ApiMethod = ApiMethod::new(
    &ApiHandler::Sync(&status),
    &ObjectSchema::new(
        "Get datastore status.",
        &[ ("store", false, &StringSchema::new("Datastore name.").schema()) ],
    )
);


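/// Prune a backup group: mark the snapshots selected by the `keep-*`
/// parameters and remove all remaining snapshots, oldest first.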
fn prune(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let store = param["store"].as_str().unwrap();

    let backup_type = tools::required_string_param(&param, "backup-type")?;
    let backup_id = tools::required_string_param(&param, "backup-id")?;

    let group = BackupGroup::new(backup_type, backup_id);

    let datastore = DataStore::lookup_datastore(store)?;

    let mut keep_all = true;

    for opt in &["keep-last", "keep-daily", "keep-weekly", "keep-monthly", "keep-yearly"] {
        if !param[opt].is_null() {
            keep_all = false;
            break;
        }
    }

    let worker = WorkerTask::new("prune", Some(store.to_owned()), "root@pam", true)?;
    let result = try_block! {
        if keep_all {
            worker.log("No prune selection - keeping all files.");
            return Ok(());
        } else {
            worker.log(format!("Starting prune on store {}", store));
        }

        let mut list = group.list_backups(&datastore.base_path())?;

        let mut mark = HashSet::new();

        BackupInfo::sort_list(&mut list, false);

        if let Some(keep_last) = param["keep-last"].as_u64() {
            list.iter().take(keep_last as usize).for_each(|info| {
                mark.insert(info.backup_dir.relative_path());
            });
        }

        if let Some(keep_daily) = param["keep-daily"].as_u64() {
            mark_selections(&mut mark, &list, keep_daily as usize, |local_time, _info| {
                format!("{}/{}/{}", local_time.year(), local_time.month(), local_time.day())
            });
        }

        if let Some(keep_weekly) = param["keep-weekly"].as_u64() {
            mark_selections(&mut mark, &list, keep_weekly as usize, |local_time, _info| {
                format!("{}/{}", local_time.iso_week().year(), local_time.iso_week().week())
            });
        }

        if let Some(keep_monthly) = param["keep-monthly"].as_u64() {
            mark_selections(&mut mark, &list, keep_monthly as usize, |local_time, _info| {
                format!("{}/{}", local_time.year(), local_time.month())
            });
        }

        if let Some(keep_yearly) = param["keep-yearly"].as_u64() {
            mark_selections(&mut mark, &list, keep_yearly as usize, |local_time, _info| {
                format!("{}", local_time.year())
            });
        }

        let mut remove_list: Vec<BackupInfo> = list.into_iter()
            .filter(|info| !mark.contains(&info.backup_dir.relative_path())).collect();

        BackupInfo::sort_list(&mut remove_list, true);

        for info in remove_list {
            worker.log(format!("remove {:?}", info.backup_dir));
            datastore.remove_backup_dir(&info.backup_dir)?;
        }

        Ok(())
    };

    worker.log_result(&result);

    if let Err(err) = result {
        bail!("prune failed - {}", err);
    }

    Ok(json!(null))
}

const API_METHOD_PRUNE: ApiMethod = ApiMethod::new(
    &ApiHandler::Sync(&prune),
    &ObjectSchema::new(
        "Prune the datastore.",
        &add_common_prune_parameters!(
            ("store", false, &StringSchema::new("Datastore name.").schema()),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
        )
    )
);

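/// Start garbage collection on a datastore in a background worker and
/// return the worker UPID.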
fn start_garbage_collection(
    param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let store = param["store"].as_str().unwrap().to_string();

    let datastore = DataStore::lookup_datastore(&store)?;

    println!("Starting garbage collection on store {}", store);

    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

    let upid_str = WorkerTask::new_thread(
        "garbage_collection", Some(store.clone()), "root@pam", to_stdout, move |worker|
        {
            worker.log(format!("starting garbage collection on store {}", store));
            datastore.garbage_collection(worker)
        })?;

    Ok(json!(upid_str))
}

pub const API_METHOD_START_GARBAGE_COLLECTION: ApiMethod = ApiMethod::new(
    &ApiHandler::Sync(&start_garbage_collection),
    &ObjectSchema::new(
        "Start garbage collection.",
        &[ ("store", false, &StringSchema::new("Datastore name.").schema()) ]
    )
);

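/// Return the status of the last garbage collection run on a datastore.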
fn garbage_collection_status(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let store = param["store"].as_str().unwrap();

    let datastore = DataStore::lookup_datastore(store)?;

    println!("Garbage collection status on store {}", store);

    let status = datastore.last_gc_status();

    Ok(serde_json::to_value(&status)?)
}

pub const API_METHOD_GARBAGE_COLLECTION_STATUS: ApiMethod = ApiMethod::new(
    &ApiHandler::Sync(&garbage_collection_status),
    &ObjectSchema::new(
        "Garbage collection status.",
        &[ ("store", false, &StringSchema::new("Datastore name.").schema()) ]
    )
);

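/// Return the list of configured datastores (directory index).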
fn get_datastore_list(
    _param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let config = datastore::config()?;

    Ok(config.convert_to_array("store"))
}


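/// Stream a single raw file from a backup snapshot as an
/// `application/octet-stream` HTTP response.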
fn download_file(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    _rpcenv: Box<dyn RpcEnvironment>,
) -> Result<BoxFut, Error> {

    let store = tools::required_string_param(&param, "store")?;

    let datastore = DataStore::lookup_datastore(store)?;

    let file_name = tools::required_string_param(&param, "file-name")?.to_owned();

    let backup_type = tools::required_string_param(&param, "backup-type")?;
    let backup_id = tools::required_string_param(&param, "backup-id")?;
    let backup_time = tools::required_integer_param(&param, "backup-time")?;

    println!("Download {} from {} ({}/{}/{}/{})", file_name, store,
             backup_type, backup_id, Local.timestamp(backup_time, 0), file_name);

    let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);

    let mut path = datastore.base_path();
    path.push(backup_dir.relative_path());
    path.push(&file_name);

    let response_future = tokio::fs::File::open(path)
        .map_err(|err| http_err!(BAD_REQUEST, format!("File open failed: {}", err)))
        .and_then(move |file| {
            let payload = tokio::codec::FramedRead::new(file, tokio::codec::BytesCodec::new())
                .map_ok(|bytes| hyper::Chunk::from(bytes.freeze()));
            let body = Body::wrap_stream(payload);

            // fixme: set other headers ?
            futures::future::ok(Response::builder()
                .status(StatusCode::OK)
                .header(header::CONTENT_TYPE, "application/octet-stream")
                .body(body)
                .unwrap())
        });

    Ok(Box::new(response_future))
}

pub const API_METHOD_DOWNLOAD_FILE: ApiMethod = ApiMethod::new(
    &ApiHandler::Async(&download_file),
    &ObjectSchema::new(
        "Download single raw file from backup snapshot.",
        &[
            ("store", false, &StringSchema::new("Datastore name.").schema()),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("file-name", false, &StringSchema::new("Raw file name.")
                .format(&FILENAME_FORMAT)
                .schema()
            ),
        ],
    )
);

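/// Upload the client log file (`client.log.blob`) for an existing backup
/// snapshot. Fails if the snapshot already contains a log.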
fn upload_backup_log(
    _parts: Parts,
    req_body: Body,
    param: Value,
    _info: &ApiMethod,
    _rpcenv: Box<dyn RpcEnvironment>,
) -> Result<BoxFut, Error> {

    let store = tools::required_string_param(&param, "store")?;

    let datastore = DataStore::lookup_datastore(store)?;

    let file_name = "client.log.blob";

    let backup_type = tools::required_string_param(&param, "backup-type")?;
    let backup_id = tools::required_string_param(&param, "backup-id")?;
    let backup_time = tools::required_integer_param(&param, "backup-time")?;

    let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);

    let mut path = datastore.base_path();
    path.push(backup_dir.relative_path());
    path.push(&file_name);

    if path.exists() {
        bail!("backup already contains a log.");
    }

    println!("Upload backup log to {}/{}/{}/{}/{}", store,
             backup_type, backup_id, BackupDir::backup_time_to_string(backup_dir.backup_time()), file_name);

    let resp = req_body
        .map_err(Error::from)
        .try_fold(Vec::new(), |mut acc, chunk| {
            acc.extend_from_slice(&*chunk);
            future::ok::<_, Error>(acc)
        })
        .and_then(move |data| async move {
            let blob = DataBlob::from_raw(data)?;
            // always verify CRC at server side
            blob.verify_crc()?;
            let raw_data = blob.raw_data();
            file_set_contents(&path, raw_data, None)?;
            Ok(())
        })
        .and_then(move |_| {
            future::ok(crate::server::formatter::json_response(Ok(Value::Null)))
        });

    Ok(Box::new(resp))
}

pub const API_METHOD_UPLOAD_BACKUP_LOG: ApiMethod = ApiMethod::new(
    &ApiHandler::Async(&upload_backup_log),
    &ObjectSchema::new(
        "Upload the client backup log file to a backup snapshot.",
        &[
            ("store", false, &StringSchema::new("Datastore name.").schema()),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
        ],
    )
);

const STORE_SCHEMA: Schema = StringSchema::new("Datastore name.").schema();

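// Sub-directory routes available for a single datastore (matched below via
// `match_all("store", ...)`).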
const DATASTORE_INFO_SUBDIRS: SubdirMap = &[
    (
        "download",
        &Router::new()
            .download(&API_METHOD_DOWNLOAD_FILE)
    ),
    (
        "files",
        &Router::new()
            .get(
                &ApiMethod::new(
                    &ApiHandler::Sync(&list_snapshot_files),
                    &ObjectSchema::new(
                        "List snapshot files.",
                        &[
                            ("store", false, &STORE_SCHEMA),
                            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
                            ("backup-id", false, &BACKUP_ID_SCHEMA),
                            ("backup-time", false, &BACKUP_TIME_SCHEMA),
                        ],
                    )
                )
            )
    ),
    (
        "gc",
        &Router::new()
            .get(&API_METHOD_GARBAGE_COLLECTION_STATUS)
            .post(&API_METHOD_START_GARBAGE_COLLECTION)
    ),
    (
        "groups",
        &Router::new()
            .get(
                &ApiMethod::new(
                    &ApiHandler::Sync(&list_groups),
                    &ObjectSchema::new(
                        "List backup groups.",
                        &[ ("store", false, &STORE_SCHEMA) ],
                    )
                )
            )
    ),
    (
        "prune",
        &Router::new()
            .post(&API_METHOD_PRUNE)
    ),
    (
        "snapshots",
        &Router::new()
            .get(
                &ApiMethod::new(
                    &ApiHandler::Sync(&list_snapshots),
                    &ObjectSchema::new(
                        "List backup snapshots.",
                        &[
                            ("store", false, &STORE_SCHEMA),
                            ("backup-type", true, &BACKUP_TYPE_SCHEMA),
                            ("backup-id", true, &BACKUP_ID_SCHEMA),
                        ],
                    )
                )
            )
            .delete(
                &ApiMethod::new(
                    &ApiHandler::Sync(&delete_snapshots),
                    &ObjectSchema::new(
                        "Delete backup snapshot.",
                        &[
                            ("store", false, &STORE_SCHEMA),
                            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
                            ("backup-id", false, &BACKUP_ID_SCHEMA),
                            ("backup-time", false, &BACKUP_TIME_SCHEMA),
                        ],
                    )
                )
            )
    ),
    (
        "status",
        &Router::new()
            .get(&API_METHOD_STATUS)
    ),
    (
        "upload-backup-log",
        &Router::new()
            .upload(&API_METHOD_UPLOAD_BACKUP_LOG)
    ),
];

const DATASTORE_INFO_ROUTER: Router = Router::new()
    .get(&list_subdirs_api_method!(DATASTORE_INFO_SUBDIRS))
    .subdirs(DATASTORE_INFO_SUBDIRS);


pub const ROUTER: Router = Router::new()
    .get(
        &ApiMethod::new(
            &ApiHandler::Sync(&get_datastore_list),
            &ObjectSchema::new("Directory index.", &[])
        )
    )
    .match_all("store", &DATASTORE_INFO_ROUTER);