]> git.proxmox.com Git - proxmox-backup.git/blob - src/api2/admin/datastore.rs
src/api2/types.rs: define and use struct StorageStatus
[proxmox-backup.git] / src / api2 / admin / datastore.rs
1 use std::collections::{HashSet, HashMap};
2 use std::convert::TryFrom;
3
4 use chrono::{TimeZone, Local};
5 use failure::*;
6 use futures::*;
7 use hyper::http::request::Parts;
8 use hyper::{header, Body, Response, StatusCode};
9 use serde_json::{json, Value};
10
11 use proxmox::api::api;
12 use proxmox::api::{ApiResponseFuture, ApiHandler, ApiMethod, Router, RpcEnvironment, RpcEnvironmentType};
13 use proxmox::api::router::SubdirMap;
14 use proxmox::api::schema::*;
15 use proxmox::tools::fs::{file_get_contents, replace_file, CreateOptions};
16 use proxmox::try_block;
17 use proxmox::{http_err, identity, list_subdirs_api_method, sortable};
18
19 use crate::api2::types::*;
20 use crate::backup::*;
21 use crate::config::datastore;
22 use crate::server::WorkerTask;
23 use crate::tools;
24
25 fn read_backup_index(store: &DataStore, backup_dir: &BackupDir) -> Result<Vec<BackupContent>, Error> {
26
27 let mut path = store.base_path();
28 path.push(backup_dir.relative_path());
29 path.push("index.json.blob");
30
31 let raw_data = file_get_contents(&path)?;
32 let index_size = raw_data.len() as u64;
33 let blob = DataBlob::from_raw(raw_data)?;
34
35 let manifest = BackupManifest::try_from(blob)?;
36
37 let mut result = Vec::new();
38 for item in manifest.files() {
39 result.push(BackupContent {
40 filename: item.filename.clone(),
41 size: Some(item.size),
42 });
43 }
44
45 result.push(BackupContent {
46 filename: "index.json.blob".to_string(),
47 size: Some(index_size),
48 });
49
50 Ok(result)
51 }
52
53 fn group_backups(backup_list: Vec<BackupInfo>) -> HashMap<String, Vec<BackupInfo>> {
54
55 let mut group_hash = HashMap::new();
56
57 for info in backup_list {
58 let group_id = info.backup_dir.group().group_path().to_str().unwrap().to_owned();
59 let time_list = group_hash.entry(group_id).or_insert(vec![]);
60 time_list.push(info);
61 }
62
63 group_hash
64 }
65
66 #[api(
67 input: {
68 properties: {
69 store: {
70 schema: DATASTORE_SCHEMA,
71 },
72 },
73 },
74 returns: {
75 type: Array,
76 description: "Returns the list of backup groups.",
77 items: {
78 type: GroupListItem,
79 }
80 },
81 )]
82 /// List backup groups.
83 fn list_groups(
84 store: String,
85 ) -> Result<Vec<GroupListItem>, Error> {
86
87 let datastore = DataStore::lookup_datastore(&store)?;
88
89 let backup_list = BackupInfo::list_backups(&datastore.base_path())?;
90
91 let group_hash = group_backups(backup_list);
92
93 let mut groups = Vec::new();
94
95 for (_group_id, mut list) in group_hash {
96
97 BackupInfo::sort_list(&mut list, false);
98
99 let info = &list[0];
100 let group = info.backup_dir.group();
101
102 let result_item = GroupListItem {
103 backup_type: group.backup_type().to_string(),
104 backup_id: group.backup_id().to_string(),
105 last_backup: info.backup_dir.backup_time().timestamp(),
106 backup_count: list.len() as u64,
107 files: info.files.clone(),
108 };
109 groups.push(result_item);
110 }
111
112 Ok(groups)
113 }
114
115 #[api(
116 input: {
117 properties: {
118 store: {
119 schema: DATASTORE_SCHEMA,
120 },
121 "backup-type": {
122 schema: BACKUP_TYPE_SCHEMA,
123 },
124 "backup-id": {
125 schema: BACKUP_ID_SCHEMA,
126 },
127 "backup-time": {
128 schema: BACKUP_TIME_SCHEMA,
129 },
130 },
131 },
132 returns: {
133 type: Array,
134 description: "Returns the list of archive files inside a backup snapshots.",
135 items: {
136 type: BackupContent,
137 }
138 },
139 )]
140 /// List snapshot files.
141 fn list_snapshot_files(
142 store: String,
143 backup_type: String,
144 backup_id: String,
145 backup_time: i64,
146 _info: &ApiMethod,
147 _rpcenv: &mut dyn RpcEnvironment,
148 ) -> Result<Vec<BackupContent>, Error> {
149
150 let datastore = DataStore::lookup_datastore(&store)?;
151 let snapshot = BackupDir::new(backup_type, backup_id, backup_time);
152
153 let mut files = read_backup_index(&datastore, &snapshot)?;
154
155 let info = BackupInfo::new(&datastore.base_path(), snapshot)?;
156
157 let file_set = files.iter().fold(HashSet::new(), |mut acc, item| {
158 acc.insert(item.filename.clone());
159 acc
160 });
161
162 for file in info.files {
163 if file_set.contains(&file) { continue; }
164 files.push(BackupContent { filename: file, size: None });
165 }
166
167 Ok(files)
168 }
169
170 #[api(
171 input: {
172 properties: {
173 store: {
174 schema: DATASTORE_SCHEMA,
175 },
176 "backup-type": {
177 schema: BACKUP_TYPE_SCHEMA,
178 },
179 "backup-id": {
180 schema: BACKUP_ID_SCHEMA,
181 },
182 "backup-time": {
183 schema: BACKUP_TIME_SCHEMA,
184 },
185 },
186 },
187 )]
188 /// Delete backup snapshot.
189 fn delete_snapshot(
190 store: String,
191 backup_type: String,
192 backup_id: String,
193 backup_time: i64,
194 _info: &ApiMethod,
195 _rpcenv: &mut dyn RpcEnvironment,
196 ) -> Result<Value, Error> {
197
198 let snapshot = BackupDir::new(backup_type, backup_id, backup_time);
199
200 let datastore = DataStore::lookup_datastore(&store)?;
201
202 datastore.remove_backup_dir(&snapshot)?;
203
204 Ok(Value::Null)
205 }
206
207 #[api(
208 input: {
209 properties: {
210 store: {
211 schema: DATASTORE_SCHEMA,
212 },
213 "backup-type": {
214 optional: true,
215 schema: BACKUP_TYPE_SCHEMA,
216 },
217 "backup-id": {
218 optional: true,
219 schema: BACKUP_ID_SCHEMA,
220 },
221 },
222 },
223 returns: {
224 type: Array,
225 description: "Returns the list of snapshots.",
226 items: {
227 type: SnapshotListItem,
228 }
229 },
230 )]
231 /// List backup snapshots.
232 fn list_snapshots (
233 param: Value,
234 _info: &ApiMethod,
235 _rpcenv: &mut dyn RpcEnvironment,
236 ) -> Result<Vec<SnapshotListItem>, Error> {
237
238 let store = tools::required_string_param(&param, "store")?;
239 let backup_type = param["backup-type"].as_str();
240 let backup_id = param["backup-id"].as_str();
241
242 let datastore = DataStore::lookup_datastore(store)?;
243
244 let base_path = datastore.base_path();
245
246 let backup_list = BackupInfo::list_backups(&base_path)?;
247
248 let mut snapshots = vec![];
249
250 for info in backup_list {
251 let group = info.backup_dir.group();
252 if let Some(backup_type) = backup_type {
253 if backup_type != group.backup_type() { continue; }
254 }
255 if let Some(backup_id) = backup_id {
256 if backup_id != group.backup_id() { continue; }
257 }
258
259 let mut result_item = SnapshotListItem {
260 backup_type: group.backup_type().to_string(),
261 backup_id: group.backup_id().to_string(),
262 backup_time: info.backup_dir.backup_time().timestamp(),
263 files: info.files,
264 size: None,
265 };
266
267 if let Ok(index) = read_backup_index(&datastore, &info.backup_dir) {
268 let mut backup_size = 0;
269 for item in index.iter() {
270 if let Some(item_size) = item.size {
271 backup_size += item_size;
272 }
273 }
274 result_item.size = Some(backup_size);
275 }
276
277 snapshots.push(result_item);
278 }
279
280 Ok(snapshots)
281 }
282
#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
        },
    },
    returns: {
        type: StorageStatus,
    },
)]
/// Get datastore status.
fn status(
    store: String,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<StorageStatus, Error> {

    let datastore = DataStore::lookup_datastore(&store)?;

    let base_path = datastore.base_path();

    // SAFETY: libc::statfs64 is plain-old-data; an all-zero value is a valid
    // "empty" struct for the kernel to fill in.
    let mut stat: libc::statfs64 = unsafe { std::mem::zeroed() };

    use nix::NixPath;

    // with_nix_path hands the base path to the raw statfs64(2) call as a
    // NUL-terminated C string; `res` is the raw syscall return value.
    let res = base_path.with_nix_path(|cstr| unsafe { libc::statfs64(cstr.as_ptr(), &mut stat) })?;
    // Translate a -1/errno result into a Rust error.
    nix::errno::Errno::result(res)?;

    let bsize = stat.f_bsize as u64;

    // f_blocks / f_bfree / f_bavail are counted in f_bsize units, so
    // multiply by the block size to report bytes.
    Ok(StorageStatus {
        total: stat.f_blocks*bsize,
        used: (stat.f_blocks-stat.f_bfree)*bsize,
        avail: stat.f_bavail*bsize,
    })
}
321
/// Splice the six common `keep-*` prune parameters into a schema property
/// list.
///
/// Accepts one or two bracketed lists of `(name, optional, schema)` tuples
/// and expands to an array containing `$list1`, then the shared `keep-*`
/// integer parameters, then `$list2`. Entries sorting before "keep-daily"
/// apparently belong in `$list1` and entries sorting after "keep-yearly" in
/// `$list2` (see API_METHOD_PRUNE) — presumably to keep the final property
/// array alphabetically ordered; verify against the ObjectSchema contract.
///
/// NOTE(review): "prameters" is a typo for "parameters", but the macro is
/// `#[macro_export]`ed, so renaming it would break external callers.
#[macro_export]
macro_rules! add_common_prune_prameters {
    // Single-list form: delegate to the two-list form with an empty tail.
    ( [ $( $list1:tt )* ] ) => {
        add_common_prune_prameters!([$( $list1 )* ] , [])
    };
    ( [ $( $list1:tt )* ] , [ $( $list2:tt )* ] ) => {
        [
            $( $list1 )*
            (
                "keep-daily",
                true,
                &IntegerSchema::new("Number of daily backups to keep.")
                    .minimum(1)
                    .schema()
            ),
            (
                "keep-hourly",
                true,
                &IntegerSchema::new("Number of hourly backups to keep.")
                    .minimum(1)
                    .schema()
            ),
            (
                "keep-last",
                true,
                &IntegerSchema::new("Number of backups to keep.")
                    .minimum(1)
                    .schema()
            ),
            (
                "keep-monthly",
                true,
                &IntegerSchema::new("Number of monthly backups to keep.")
                    .minimum(1)
                    .schema()
            ),
            (
                "keep-weekly",
                true,
                &IntegerSchema::new("Number of weekly backups to keep.")
                    .minimum(1)
                    .schema()
            ),
            (
                "keep-yearly",
                true,
                &IntegerSchema::new("Number of yearly backups to keep.")
                    .minimum(1)
                    .schema()
            ),
            $( $list2 )*
        ]
    }
}
376
// Hand-built ApiMethod (instead of the #[api] macro) because the parameter
// list is assembled with add_common_prune_prameters!. The two bracketed
// lists are spliced around the common keep-* options; entries are arranged
// so the resulting property array stays alphabetically ordered.
const API_METHOD_PRUNE: ApiMethod = ApiMethod::new(
    &ApiHandler::Sync(&prune),
    &ObjectSchema::new(
        "Prune the datastore.",
        &add_common_prune_prameters!([
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("dry-run", true, &BooleanSchema::new(
                "Just show what prune would do, but do not delete anything.")
                .schema()
            ),
        ],[
            ("store", false, &DATASTORE_SCHEMA),
        ])
    )
);
393
394 fn prune(
395 param: Value,
396 _info: &ApiMethod,
397 _rpcenv: &mut dyn RpcEnvironment,
398 ) -> Result<Value, Error> {
399
400 let store = param["store"].as_str().unwrap();
401
402 let backup_type = tools::required_string_param(&param, "backup-type")?;
403 let backup_id = tools::required_string_param(&param, "backup-id")?;
404
405 let dry_run = param["dry-run"].as_bool().unwrap_or(false);
406
407 let group = BackupGroup::new(backup_type, backup_id);
408
409 let datastore = DataStore::lookup_datastore(store)?;
410
411 let prune_options = PruneOptions {
412 keep_last: param["keep-last"].as_u64(),
413 keep_hourly: param["keep-hourly"].as_u64(),
414 keep_daily: param["keep-daily"].as_u64(),
415 keep_weekly: param["keep-weekly"].as_u64(),
416 keep_monthly: param["keep-monthly"].as_u64(),
417 keep_yearly: param["keep-yearly"].as_u64(),
418 };
419
420 let worker_id = format!("{}_{}_{}", store, backup_type, backup_id);
421
422 // We use a WorkerTask just to have a task log, but run synchrounously
423 let worker = WorkerTask::new("prune", Some(worker_id), "root@pam", true)?;
424 let result = try_block! {
425 if !prune_options.keeps_something() {
426 worker.log("No prune selection - keeping all files.");
427 return Ok(());
428 } else {
429 worker.log(format!("retention options: {}", prune_options.cli_options_string()));
430 if dry_run {
431 worker.log(format!("Testing prune on store \"{}\" group \"{}/{}\"",
432 store, backup_type, backup_id));
433 } else {
434 worker.log(format!("Starting prune on store \"{}\" group \"{}/{}\"",
435 store, backup_type, backup_id));
436 }
437 }
438
439 let list = group.list_backups(&datastore.base_path())?;
440
441 let mut prune_info = compute_prune_info(list, &prune_options)?;
442
443 prune_info.reverse(); // delete older snapshots first
444
445 for (info, keep) in prune_info {
446 let backup_time = info.backup_dir.backup_time();
447 let timestamp = BackupDir::backup_time_to_string(backup_time);
448 let group = info.backup_dir.group();
449
450 let msg = format!(
451 "{}/{}/{} {}",
452 group.backup_type(),
453 group.backup_id(),
454 timestamp,
455 if keep { "keep" } else { "remove" },
456 );
457
458 worker.log(msg);
459
460 if !(dry_run || keep) {
461 datastore.remove_backup_dir(&info.backup_dir)?;
462 }
463 }
464
465 Ok(())
466 };
467
468 worker.log_result(&result);
469
470 if let Err(err) = result {
471 bail!("prune failed - {}", err);
472 }
473
474 Ok(json!(worker.to_string())) // return the UPID
475 }
476
// Hand-built ApiMethod: POST handler that kicks off a garbage collection
// worker task on the given datastore.
#[sortable]
pub const API_METHOD_START_GARBAGE_COLLECTION: ApiMethod = ApiMethod::new(
    &ApiHandler::Sync(&start_garbage_collection),
    &ObjectSchema::new(
        "Start garbage collection.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
        ])
    )
);
487
488 fn start_garbage_collection(
489 param: Value,
490 _info: &ApiMethod,
491 rpcenv: &mut dyn RpcEnvironment,
492 ) -> Result<Value, Error> {
493
494 let store = param["store"].as_str().unwrap().to_string();
495
496 let datastore = DataStore::lookup_datastore(&store)?;
497
498 println!("Starting garbage collection on store {}", store);
499
500 let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
501
502 let upid_str = WorkerTask::new_thread(
503 "garbage_collection", Some(store.clone()), "root@pam", to_stdout, move |worker|
504 {
505 worker.log(format!("starting garbage collection on store {}", store));
506 datastore.garbage_collection(worker)
507 })?;
508
509 Ok(json!(upid_str))
510 }
511
// Hand-built ApiMethod: GET handler returning the status of the last
// garbage collection run on the given datastore.
#[sortable]
pub const API_METHOD_GARBAGE_COLLECTION_STATUS: ApiMethod = ApiMethod::new(
    &ApiHandler::Sync(&garbage_collection_status),
    &ObjectSchema::new(
        "Garbage collection status.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
        ])
    )
);
522
523 fn garbage_collection_status(
524 param: Value,
525 _info: &ApiMethod,
526 _rpcenv: &mut dyn RpcEnvironment,
527 ) -> Result<Value, Error> {
528
529 let store = param["store"].as_str().unwrap();
530
531 let datastore = DataStore::lookup_datastore(&store)?;
532
533 println!("Garbage collection status on store {}", store);
534
535 let status = datastore.last_gc_status();
536
537 Ok(serde_json::to_value(&status)?)
538 }
539
540
541 fn get_datastore_list(
542 _param: Value,
543 _info: &ApiMethod,
544 _rpcenv: &mut dyn RpcEnvironment,
545 ) -> Result<Value, Error> {
546
547 let (config, _digest) = datastore::config()?;
548
549 Ok(config.convert_to_array("store", None))
550 }
551
// Hand-built ApiMethod: async HTTP handler streaming a single archive file
// out of a backup snapshot.
#[sortable]
pub const API_METHOD_DOWNLOAD_FILE: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&download_file),
    &ObjectSchema::new(
        "Download single raw file from backup snapshot.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("file-name", false, &StringSchema::new("Raw file name.")
                .format(&FILENAME_FORMAT)
                .schema()
            ),
        ]),
    )
);
569
570 fn download_file(
571 _parts: Parts,
572 _req_body: Body,
573 param: Value,
574 _info: &ApiMethod,
575 _rpcenv: Box<dyn RpcEnvironment>,
576 ) -> ApiResponseFuture {
577
578 async move {
579 let store = tools::required_string_param(&param, "store")?;
580
581 let datastore = DataStore::lookup_datastore(store)?;
582
583 let file_name = tools::required_string_param(&param, "file-name")?.to_owned();
584
585 let backup_type = tools::required_string_param(&param, "backup-type")?;
586 let backup_id = tools::required_string_param(&param, "backup-id")?;
587 let backup_time = tools::required_integer_param(&param, "backup-time")?;
588
589 println!("Download {} from {} ({}/{}/{}/{})", file_name, store,
590 backup_type, backup_id, Local.timestamp(backup_time, 0), file_name);
591
592 let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);
593
594 let mut path = datastore.base_path();
595 path.push(backup_dir.relative_path());
596 path.push(&file_name);
597
598 let file = tokio::fs::File::open(path)
599 .map_err(|err| http_err!(BAD_REQUEST, format!("File open failed: {}", err)))
600 .await?;
601
602 let payload = tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
603 .map_ok(|bytes| hyper::body::Bytes::from(bytes.freeze()));
604 let body = Body::wrap_stream(payload);
605
606 // fixme: set other headers ?
607 Ok(Response::builder()
608 .status(StatusCode::OK)
609 .header(header::CONTENT_TYPE, "application/octet-stream")
610 .body(body)
611 .unwrap())
612 }.boxed()
613 }
614
615 #[sortable]
616 pub const API_METHOD_UPLOAD_BACKUP_LOG: ApiMethod = ApiMethod::new(
617 &ApiHandler::AsyncHttp(&upload_backup_log),
618 &ObjectSchema::new(
619 "Download single raw file from backup snapshot.",
620 &sorted!([
621 ("store", false, &DATASTORE_SCHEMA),
622 ("backup-type", false, &BACKUP_TYPE_SCHEMA),
623 ("backup-id", false, &BACKUP_ID_SCHEMA),
624 ("backup-time", false, &BACKUP_TIME_SCHEMA),
625 ]),
626 )
627 );
628
629 fn upload_backup_log(
630 _parts: Parts,
631 req_body: Body,
632 param: Value,
633 _info: &ApiMethod,
634 _rpcenv: Box<dyn RpcEnvironment>,
635 ) -> ApiResponseFuture {
636
637 async move {
638 let store = tools::required_string_param(&param, "store")?;
639
640 let datastore = DataStore::lookup_datastore(store)?;
641
642 let file_name = "client.log.blob";
643
644 let backup_type = tools::required_string_param(&param, "backup-type")?;
645 let backup_id = tools::required_string_param(&param, "backup-id")?;
646 let backup_time = tools::required_integer_param(&param, "backup-time")?;
647
648 let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);
649
650 let mut path = datastore.base_path();
651 path.push(backup_dir.relative_path());
652 path.push(&file_name);
653
654 if path.exists() {
655 bail!("backup already contains a log.");
656 }
657
658 println!("Upload backup log to {}/{}/{}/{}/{}", store,
659 backup_type, backup_id, BackupDir::backup_time_to_string(backup_dir.backup_time()), file_name);
660
661 let data = req_body
662 .map_err(Error::from)
663 .try_fold(Vec::new(), |mut acc, chunk| {
664 acc.extend_from_slice(&*chunk);
665 future::ok::<_, Error>(acc)
666 })
667 .await?;
668
669 let blob = DataBlob::from_raw(data)?;
670 // always verify CRC at server side
671 blob.verify_crc()?;
672 let raw_data = blob.raw_data();
673 replace_file(&path, raw_data, CreateOptions::new())?;
674
675 // fixme: use correct formatter
676 Ok(crate::server::formatter::json_response(Ok(Value::Null)))
677 }.boxed()
678 }
679
// Sub-directory routes below /admin/datastore/{store}.
// NOTE(review): the entries appear to be kept in alphabetical order —
// preserve that ordering when adding new routes (the directory index is
// generated via list_subdirs_api_method!).
#[sortable]
const DATASTORE_INFO_SUBDIRS: SubdirMap = &[
    (
        "download",
        &Router::new()
            .download(&API_METHOD_DOWNLOAD_FILE)
    ),
    (
        "files",
        &Router::new()
            .get(&API_METHOD_LIST_SNAPSHOT_FILES)
    ),
    (
        "gc",
        &Router::new()
            .get(&API_METHOD_GARBAGE_COLLECTION_STATUS)
            .post(&API_METHOD_START_GARBAGE_COLLECTION)
    ),
    (
        "groups",
        &Router::new()
            .get(&API_METHOD_LIST_GROUPS)
    ),
    (
        "prune",
        &Router::new()
            .post(&API_METHOD_PRUNE)
    ),
    (
        "snapshots",
        &Router::new()
            .get(&API_METHOD_LIST_SNAPSHOTS)
            .delete(&API_METHOD_DELETE_SNAPSHOT)
    ),
    (
        "status",
        &Router::new()
            .get(&API_METHOD_STATUS)
    ),
    (
        "upload-backup-log",
        &Router::new()
            .upload(&API_METHOD_UPLOAD_BACKUP_LOG)
    ),
];
725
// Router for a single datastore: GET returns the directory index, all other
// paths dispatch into DATASTORE_INFO_SUBDIRS.
const DATASTORE_INFO_ROUTER: Router = Router::new()
    .get(&list_subdirs_api_method!(DATASTORE_INFO_SUBDIRS))
    .subdirs(DATASTORE_INFO_SUBDIRS);
729
730
// Top-level datastore admin router:
//   GET  /          -> list of configured datastores (get_datastore_list)
//   /{store}/...    -> per-datastore sub-router (DATASTORE_INFO_ROUTER)
pub const ROUTER: Router = Router::new()
    .get(
        &ApiMethod::new(
            &ApiHandler::Sync(&get_datastore_list),
            &ObjectSchema::new("Directory index.", &[])
        )
    )
    .match_all("store", &DATASTORE_INFO_ROUTER);