// proxmox-backup.git: src/api2/admin/datastore.rs
use failure::*;
use futures::*;

use crate::tools;
use crate::api2::types::*;
use crate::api_schema::*;
use crate::api_schema::router::*;
//use crate::server::rest::*;
use serde_json::{json, Value};
use std::collections::{HashSet, HashMap};
use chrono::{DateTime, Datelike, TimeZone, Local};
use std::path::PathBuf;

use proxmox::{sortable, identity};
use proxmox::tools::{try_block, fs::file_get_contents, fs::file_set_contents};

use crate::config::datastore;

use crate::backup::*;
use crate::server::WorkerTask;

use hyper::{header, Body, Response, StatusCode};
use hyper::http::request::Parts;

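/// Read and decode a snapshot's `index.json.blob` and return its "files" array.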
fn read_backup_index(store: &DataStore, backup_dir: &BackupDir) -> Result<Value, Error> {

    let mut path = store.base_path();
    path.push(backup_dir.relative_path());
    path.push("index.json.blob");

    let raw_data = file_get_contents(&path)?;
    let data = DataBlob::from_raw(raw_data)?.decode(None)?;
    let mut result: Value = serde_json::from_reader(&mut &data[..])?;

    let result = result["files"].take();

    if result == Value::Null {
        bail!("missing 'files' property in backup index {:?}", path);
    }

    Ok(result)
}

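/// Sort a flat backup list into a map of backup-group path => backups in that group.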
fn group_backups(backup_list: Vec<BackupInfo>) -> HashMap<String, Vec<BackupInfo>> {

    let mut group_hash = HashMap::new();

    for info in backup_list {
        let group_id = info.backup_dir.group().group_path().to_str().unwrap().to_owned();
        let time_list = group_hash.entry(group_id).or_insert_with(Vec::new);
        time_list.push(info);
    }

    group_hash
}

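/// Mark up to `keep` backups for retention, keeping at most one backup per
/// distinct id produced by `select_id` (e.g. one per day, week, month or year).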
fn mark_selections<F: Fn(DateTime<Local>, &BackupInfo) -> String>(
    mark: &mut HashSet<PathBuf>,
    list: &[BackupInfo],
    keep: usize,
    select_id: F,
) {
    let mut hash = HashSet::new();
    for info in list {
        let local_time = info.backup_dir.backup_time().with_timezone(&Local);
        if hash.len() >= keep { break; }
        let backup_id = info.backup_dir.relative_path();
        let sel_id: String = select_id(local_time, info);
        if !hash.contains(&sel_id) {
            hash.insert(sel_id);
            //println!(" KEEP ID {} {}", backup_id, local_time.format("%c"));
            mark.insert(backup_id);
        }
    }
}

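/// List backup groups with their last backup time, backup count and file list.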
fn list_groups(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let store = param["store"].as_str().unwrap();

    let datastore = DataStore::lookup_datastore(store)?;

    let backup_list = BackupInfo::list_backups(&datastore.base_path())?;

    let group_hash = group_backups(backup_list);

    let mut groups = vec![];

    for (_group_id, mut list) in group_hash {

        BackupInfo::sort_list(&mut list, false);

        let info = &list[0];
        let group = info.backup_dir.group();

        groups.push(json!({
            "backup-type": group.backup_type(),
            "backup-id": group.backup_id(),
            "last-backup": info.backup_dir.backup_time().timestamp(),
            "backup-count": list.len() as u64,
            "files": info.files,
        }));
    }

    Ok(json!(groups))
}

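/// List the files of a single snapshot, merging the index contents with any
/// files found on disk that are not referenced by the index.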
fn list_snapshot_files(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let store = tools::required_string_param(&param, "store")?;
    let backup_type = tools::required_string_param(&param, "backup-type")?;
    let backup_id = tools::required_string_param(&param, "backup-id")?;
    let backup_time = tools::required_integer_param(&param, "backup-time")?;

    let datastore = DataStore::lookup_datastore(store)?;
    let snapshot = BackupDir::new(backup_type, backup_id, backup_time);

    let mut files = read_backup_index(&datastore, &snapshot)?;

    let info = BackupInfo::new(&datastore.base_path(), snapshot)?;

    let file_set = files.as_array().unwrap().iter().fold(HashSet::new(), |mut acc, item| {
        acc.insert(item["filename"].as_str().unwrap().to_owned());
        acc
    });

    for file in info.files {
        if file_set.contains(&file) { continue; }
        files.as_array_mut().unwrap().push(json!({ "filename": file }));
    }

    Ok(files)
}

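/// Remove a single backup snapshot (directory) from the datastore.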
fn delete_snapshots(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let store = tools::required_string_param(&param, "store")?;
    let backup_type = tools::required_string_param(&param, "backup-type")?;
    let backup_id = tools::required_string_param(&param, "backup-id")?;
    let backup_time = tools::required_integer_param(&param, "backup-time")?;

    let snapshot = BackupDir::new(backup_type, backup_id, backup_time);

    let datastore = DataStore::lookup_datastore(store)?;

    datastore.remove_backup_dir(&snapshot)?;

    Ok(Value::Null)
}

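/// List snapshots, optionally filtered by backup type and/or id; each entry
/// includes the total size if the snapshot index is readable.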
fn list_snapshots(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let store = tools::required_string_param(&param, "store")?;
    let backup_type = param["backup-type"].as_str();
    let backup_id = param["backup-id"].as_str();

    let datastore = DataStore::lookup_datastore(store)?;

    let base_path = datastore.base_path();

    let backup_list = BackupInfo::list_backups(&base_path)?;

    let mut snapshots = vec![];

    for info in backup_list {
        let group = info.backup_dir.group();
        if let Some(backup_type) = backup_type {
            if backup_type != group.backup_type() { continue; }
        }
        if let Some(backup_id) = backup_id {
            if backup_id != group.backup_id() { continue; }
        }

        let mut result_item = json!({
            "backup-type": group.backup_type(),
            "backup-id": group.backup_id(),
            "backup-time": info.backup_dir.backup_time().timestamp(),
            "files": info.files,
        });

        if let Ok(index) = read_backup_index(&datastore, &info.backup_dir) {
            let mut backup_size = 0;
            for item in index.as_array().unwrap().iter() {
                if let Some(item_size) = item["size"].as_u64() {
                    backup_size += item_size;
                }
            }
            result_item["size"] = backup_size.into();
        }

        snapshots.push(result_item);
    }

    Ok(json!(snapshots))
}

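/// Return total/used/available bytes of the filesystem backing the datastore
/// (via statfs64 on the datastore base path).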
fn status(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let store = param["store"].as_str().unwrap();

    let datastore = DataStore::lookup_datastore(store)?;

    let base_path = datastore.base_path();

    let mut stat: libc::statfs64 = unsafe { std::mem::zeroed() };

    use nix::NixPath;

    let res = base_path.with_nix_path(|cstr| unsafe { libc::statfs64(cstr.as_ptr(), &mut stat) })?;
    nix::errno::Errno::result(res)?;

    let bsize = stat.f_bsize as u64;
    Ok(json!({
        "total": stat.f_blocks * bsize,
        "used": (stat.f_blocks - stat.f_bfree) * bsize,
        "avail": stat.f_bavail * bsize,
    }))
}

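/// Splice the common "keep-*" prune options between two property lists; the
/// entries are written in sorted order, as the schema lookup tables require.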
#[macro_export]
macro_rules! add_common_prune_parameters {
    ( [ $( $list1:tt )* ] ) => {
        add_common_prune_parameters!([$( $list1 )* ] , [])
    };
    ( [ $( $list1:tt )* ] , [ $( $list2:tt )* ] ) => {
        [
            $( $list1 )*
            (
                "keep-daily",
                true,
                &IntegerSchema::new("Number of daily backups to keep.")
                    .minimum(1)
                    .schema()
            ),
            (
                "keep-last",
                true,
                &IntegerSchema::new("Number of backups to keep.")
                    .minimum(1)
                    .schema()
            ),
            (
                "keep-monthly",
                true,
                &IntegerSchema::new("Number of monthly backups to keep.")
                    .minimum(1)
                    .schema()
            ),
            (
                "keep-weekly",
                true,
                &IntegerSchema::new("Number of weekly backups to keep.")
                    .minimum(1)
                    .schema()
            ),
            (
                "keep-yearly",
                true,
                &IntegerSchema::new("Number of yearly backups to keep.")
                    .minimum(1)
                    .schema()
            ),
            $( $list2 )*
        ]
    }
}


const API_METHOD_STATUS: ApiMethod = ApiMethod::new(
    &ApiHandler::Sync(&status),
    &ObjectSchema::new(
        "Get datastore status.",
        &add_common_prune_parameters!([], [
            ("store", false, &StringSchema::new("Datastore name.").schema()),
        ]),
    )
);

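/// Prune a backup group: keep the snapshots selected by the "keep-*" options
/// and remove the rest; with no options given, everything is kept.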
fn prune(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let store = param["store"].as_str().unwrap();

    let backup_type = tools::required_string_param(&param, "backup-type")?;
    let backup_id = tools::required_string_param(&param, "backup-id")?;

    let group = BackupGroup::new(backup_type, backup_id);

    let datastore = DataStore::lookup_datastore(store)?;

    let mut keep_all = true;

    for opt in &["keep-last", "keep-daily", "keep-weekly", "keep-monthly", "keep-yearly"] {
        if !param[opt].is_null() {
            keep_all = false;
            break;
        }
    }

    let worker = WorkerTask::new("prune", Some(store.to_owned()), "root@pam", true)?;
    let result = try_block! {
        if keep_all {
            worker.log("No prune selection - keeping all files.");
            return Ok(());
        } else {
            worker.log(format!("Starting prune on store {}", store));
        }

        let mut list = group.list_backups(&datastore.base_path())?;

        let mut mark = HashSet::new();

        BackupInfo::sort_list(&mut list, false);

        if let Some(keep_last) = param["keep-last"].as_u64() {
            list.iter().take(keep_last as usize).for_each(|info| {
                mark.insert(info.backup_dir.relative_path());
            });
        }

        if let Some(keep_daily) = param["keep-daily"].as_u64() {
            mark_selections(&mut mark, &list, keep_daily as usize, |local_time, _info| {
                format!("{}/{}/{}", local_time.year(), local_time.month(), local_time.day())
            });
        }

        if let Some(keep_weekly) = param["keep-weekly"].as_u64() {
            mark_selections(&mut mark, &list, keep_weekly as usize, |local_time, _info| {
                format!("{}/{}", local_time.year(), local_time.iso_week().week())
            });
        }

        if let Some(keep_monthly) = param["keep-monthly"].as_u64() {
            mark_selections(&mut mark, &list, keep_monthly as usize, |local_time, _info| {
                format!("{}/{}", local_time.year(), local_time.month())
            });
        }

        if let Some(keep_yearly) = param["keep-yearly"].as_u64() {
            mark_selections(&mut mark, &list, keep_yearly as usize, |local_time, _info| {
                format!("{}", local_time.year())
            });
        }

        let mut remove_list: Vec<BackupInfo> = list.into_iter()
            .filter(|info| !mark.contains(&info.backup_dir.relative_path())).collect();

        BackupInfo::sort_list(&mut remove_list, true);

        for info in remove_list {
            worker.log(format!("remove {:?}", info.backup_dir));
            datastore.remove_backup_dir(&info.backup_dir)?;
        }

        Ok(())
    };

    worker.log_result(&result);

    if let Err(err) = result {
        bail!("prune failed - {}", err);
    }

    Ok(json!(null))
}

const API_METHOD_PRUNE: ApiMethod = ApiMethod::new(
    &ApiHandler::Sync(&prune),
    &ObjectSchema::new(
        "Prune the datastore.",
        &add_common_prune_parameters!([
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
        ], [
            ("store", false, &StringSchema::new("Datastore name.").schema()),
        ])
    )
);

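/// Spawn a garbage collection worker task on the datastore and return the
/// UPID of the new task.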
fn start_garbage_collection(
    param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let store = param["store"].as_str().unwrap().to_string();

    let datastore = DataStore::lookup_datastore(&store)?;

    println!("Starting garbage collection on store {}", store);

    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

    let upid_str = WorkerTask::new_thread(
        "garbage_collection", Some(store.clone()), "root@pam", to_stdout, move |worker|
        {
            worker.log(format!("starting garbage collection on store {}", store));
            datastore.garbage_collection(worker)
        })?;

    Ok(json!(upid_str))
}

#[sortable]
pub const API_METHOD_START_GARBAGE_COLLECTION: ApiMethod = ApiMethod::new(
    &ApiHandler::Sync(&start_garbage_collection),
    &ObjectSchema::new(
        "Start garbage collection.",
        &sorted!([
            ("store", false, &StringSchema::new("Datastore name.").schema()),
        ])
    )
);

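/// Return the status of the last garbage collection run on the datastore.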
fn garbage_collection_status(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let store = param["store"].as_str().unwrap();

    let datastore = DataStore::lookup_datastore(store)?;

    println!("Garbage collection status on store {}", store);

    let status = datastore.last_gc_status();

    Ok(serde_json::to_value(&status)?)
}

#[sortable]
pub const API_METHOD_GARBAGE_COLLECTION_STATUS: ApiMethod = ApiMethod::new(
    &ApiHandler::Sync(&garbage_collection_status),
    &ObjectSchema::new(
        "Garbage collection status.",
        &sorted!([
            ("store", false, &StringSchema::new("Datastore name.").schema()),
        ])
    )
);

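/// List all configured datastores.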
fn get_datastore_list(
    _param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let config = datastore::config()?;

    Ok(config.convert_to_array("store"))
}

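/// Stream a single raw file of a backup snapshot as an
/// "application/octet-stream" HTTP response.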
fn download_file(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    _rpcenv: Box<dyn RpcEnvironment>,
) -> Result<BoxFut, Error> {

    let store = tools::required_string_param(&param, "store")?;

    let datastore = DataStore::lookup_datastore(store)?;

    let file_name = tools::required_string_param(&param, "file-name")?.to_owned();

    let backup_type = tools::required_string_param(&param, "backup-type")?;
    let backup_id = tools::required_string_param(&param, "backup-id")?;
    let backup_time = tools::required_integer_param(&param, "backup-time")?;

    println!("Download {} from {} ({}/{}/{}/{})", file_name, store,
             backup_type, backup_id, Local.timestamp(backup_time, 0), file_name);

    let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);

    let mut path = datastore.base_path();
    path.push(backup_dir.relative_path());
    path.push(&file_name);

    let response_future = tokio::fs::File::open(path)
        .map_err(|err| http_err!(BAD_REQUEST, format!("File open failed: {}", err)))
        .and_then(move |file| {
            let payload = tokio::codec::FramedRead::new(file, tokio::codec::BytesCodec::new())
                .map_ok(|bytes| hyper::Chunk::from(bytes.freeze()));
            let body = Body::wrap_stream(payload);

            // fixme: set other headers ?
            futures::future::ok(Response::builder()
                .status(StatusCode::OK)
                .header(header::CONTENT_TYPE, "application/octet-stream")
                .body(body)
                .unwrap())
        });

    Ok(Box::new(response_future))
}

#[sortable]
pub const API_METHOD_DOWNLOAD_FILE: ApiMethod = ApiMethod::new(
    &ApiHandler::Async(&download_file),
    &ObjectSchema::new(
        "Download single raw file from backup snapshot.",
        &sorted!([
            ("store", false, &StringSchema::new("Datastore name.").schema()),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("file-name", false, &StringSchema::new("Raw file name.")
                .format(&FILENAME_FORMAT)
                .schema()
            ),
        ]),
    )
);

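/// Upload the client log ("client.log.blob") into an existing snapshot; the
/// CRC of the uploaded blob is always verified before it is stored.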
fn upload_backup_log(
    _parts: Parts,
    req_body: Body,
    param: Value,
    _info: &ApiMethod,
    _rpcenv: Box<dyn RpcEnvironment>,
) -> Result<BoxFut, Error> {

    let store = tools::required_string_param(&param, "store")?;

    let datastore = DataStore::lookup_datastore(store)?;

    let file_name = "client.log.blob";

    let backup_type = tools::required_string_param(&param, "backup-type")?;
    let backup_id = tools::required_string_param(&param, "backup-id")?;
    let backup_time = tools::required_integer_param(&param, "backup-time")?;

    let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);

    let mut path = datastore.base_path();
    path.push(backup_dir.relative_path());
    path.push(&file_name);

    if path.exists() {
        bail!("backup already contains a log.");
    }

    println!("Upload backup log to {}/{}/{}/{}/{}", store,
             backup_type, backup_id, BackupDir::backup_time_to_string(backup_dir.backup_time()), file_name);

    let resp = req_body
        .map_err(Error::from)
        .try_fold(Vec::new(), |mut acc, chunk| {
            acc.extend_from_slice(&*chunk);
            future::ok::<_, Error>(acc)
        })
        .and_then(move |data| async move {
            let blob = DataBlob::from_raw(data)?;
            // always verify CRC at server side
            blob.verify_crc()?;
            let raw_data = blob.raw_data();
            file_set_contents(&path, raw_data, None)?;
            Ok(())
        })
        .and_then(move |_| {
            future::ok(crate::server::formatter::json_response(Ok(Value::Null)))
        });

    Ok(Box::new(resp))
}

#[sortable]
pub const API_METHOD_UPLOAD_BACKUP_LOG: ApiMethod = ApiMethod::new(
    &ApiHandler::Async(&upload_backup_log),
    &ObjectSchema::new(
        "Upload the client backup log file into a backup snapshot.",
        &sorted!([
            ("store", false, &StringSchema::new("Datastore name.").schema()),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
        ]),
    )
);

const STORE_SCHEMA: Schema = StringSchema::new("Datastore name.").schema();

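// Note: the subdir names below are kept in sorted order, matching the sorted
// property lookup tables above.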
#[sortable]
const DATASTORE_INFO_SUBDIRS: SubdirMap = &[
    (
        "download",
        &Router::new()
            .download(&API_METHOD_DOWNLOAD_FILE)
    ),
    (
        "files",
        &Router::new()
            .get(
                &ApiMethod::new(
                    &ApiHandler::Sync(&list_snapshot_files),
                    &ObjectSchema::new(
                        "List snapshot files.",
                        &sorted!([
                            ("store", false, &STORE_SCHEMA),
                            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
                            ("backup-id", false, &BACKUP_ID_SCHEMA),
                            ("backup-time", false, &BACKUP_TIME_SCHEMA),
                        ]),
                    )
                )
            )
    ),
    (
        "gc",
        &Router::new()
            .get(&API_METHOD_GARBAGE_COLLECTION_STATUS)
            .post(&API_METHOD_START_GARBAGE_COLLECTION)
    ),
    (
        "groups",
        &Router::new()
            .get(
                &ApiMethod::new(
                    &ApiHandler::Sync(&list_groups),
                    &ObjectSchema::new(
                        "List backup groups.",
                        &sorted!([ ("store", false, &STORE_SCHEMA) ]),
                    )
                )
            )
    ),
    (
        "prune",
        &Router::new()
            .post(&API_METHOD_PRUNE)
    ),
    (
        "snapshots",
        &Router::new()
            .get(
                &ApiMethod::new(
                    &ApiHandler::Sync(&list_snapshots),
                    &ObjectSchema::new(
                        "List backup snapshots.",
                        &sorted!([
                            ("store", false, &STORE_SCHEMA),
                            ("backup-type", true, &BACKUP_TYPE_SCHEMA),
                            ("backup-id", true, &BACKUP_ID_SCHEMA),
                        ]),
                    )
                )
            )
            .delete(
                &ApiMethod::new(
                    &ApiHandler::Sync(&delete_snapshots),
                    &ObjectSchema::new(
                        "Delete backup snapshot.",
                        &sorted!([
                            ("store", false, &STORE_SCHEMA),
                            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
                            ("backup-id", false, &BACKUP_ID_SCHEMA),
                            ("backup-time", false, &BACKUP_TIME_SCHEMA),
                        ]),
                    )
                )
            )
    ),
    (
        "status",
        &Router::new()
            .get(&API_METHOD_STATUS)
    ),
    (
        "upload-backup-log",
        &Router::new()
            .upload(&API_METHOD_UPLOAD_BACKUP_LOG)
    ),
];

const DATASTORE_INFO_ROUTER: Router = Router::new()
    .get(&list_subdirs_api_method!(DATASTORE_INFO_SUBDIRS))
    .subdirs(DATASTORE_INFO_SUBDIRS);

pub const ROUTER: Router = Router::new()
    .get(
        &ApiMethod::new(
            &ApiHandler::Sync(&get_datastore_list),
            &ObjectSchema::new("Directory index.", &[])
        )
    )
    .match_all("store", &DATASTORE_INFO_ROUTER);