]> git.proxmox.com Git - proxmox-backup.git/blob - src/api2/admin/datastore.rs
src/api2/admin/datastore.rs - download_file: use BACKUP_ARCHIVE_NAME_SCHEMA for file...
[proxmox-backup.git] / src / api2 / admin / datastore.rs
1 use std::collections::{HashSet, HashMap};
2 use std::convert::TryFrom;
3
4 use chrono::{TimeZone, Local};
5 use failure::*;
6 use futures::*;
7 use hyper::http::request::Parts;
8 use hyper::{header, Body, Response, StatusCode};
9 use serde_json::{json, Value};
10
11 use proxmox::api::api;
12 use proxmox::api::{ApiResponseFuture, ApiHandler, ApiMethod, Router, RpcEnvironment, RpcEnvironmentType};
13 use proxmox::api::router::SubdirMap;
14 use proxmox::api::schema::*;
15 use proxmox::tools::fs::{file_get_contents, replace_file, CreateOptions};
16 use proxmox::try_block;
17 use proxmox::{http_err, identity, list_subdirs_api_method, sortable};
18
19 use crate::api2::types::*;
20 use crate::backup::*;
21 use crate::config::datastore;
22 use crate::server::WorkerTask;
23 use crate::tools;
24
25 fn read_backup_index(store: &DataStore, backup_dir: &BackupDir) -> Result<Vec<BackupContent>, Error> {
26
27 let mut path = store.base_path();
28 path.push(backup_dir.relative_path());
29 path.push("index.json.blob");
30
31 let raw_data = file_get_contents(&path)?;
32 let index_size = raw_data.len() as u64;
33 let blob = DataBlob::from_raw(raw_data)?;
34
35 let manifest = BackupManifest::try_from(blob)?;
36
37 let mut result = Vec::new();
38 for item in manifest.files() {
39 result.push(BackupContent {
40 filename: item.filename.clone(),
41 size: Some(item.size),
42 });
43 }
44
45 result.push(BackupContent {
46 filename: "index.json.blob".to_string(),
47 size: Some(index_size),
48 });
49
50 Ok(result)
51 }
52
53 fn group_backups(backup_list: Vec<BackupInfo>) -> HashMap<String, Vec<BackupInfo>> {
54
55 let mut group_hash = HashMap::new();
56
57 for info in backup_list {
58 let group_id = info.backup_dir.group().group_path().to_str().unwrap().to_owned();
59 let time_list = group_hash.entry(group_id).or_insert(vec![]);
60 time_list.push(info);
61 }
62
63 group_hash
64 }
65
66 #[api(
67 input: {
68 properties: {
69 store: {
70 schema: DATASTORE_SCHEMA,
71 },
72 },
73 },
74 returns: {
75 type: Array,
76 description: "Returns the list of backup groups.",
77 items: {
78 type: GroupListItem,
79 }
80 },
81 )]
82 /// List backup groups.
83 fn list_groups(
84 store: String,
85 ) -> Result<Vec<GroupListItem>, Error> {
86
87 let datastore = DataStore::lookup_datastore(&store)?;
88
89 let backup_list = BackupInfo::list_backups(&datastore.base_path())?;
90
91 let group_hash = group_backups(backup_list);
92
93 let mut groups = Vec::new();
94
95 for (_group_id, mut list) in group_hash {
96
97 BackupInfo::sort_list(&mut list, false);
98
99 let info = &list[0];
100 let group = info.backup_dir.group();
101
102 let result_item = GroupListItem {
103 backup_type: group.backup_type().to_string(),
104 backup_id: group.backup_id().to_string(),
105 last_backup: info.backup_dir.backup_time().timestamp(),
106 backup_count: list.len() as u64,
107 files: info.files.clone(),
108 };
109 groups.push(result_item);
110 }
111
112 Ok(groups)
113 }
114
115 #[api(
116 input: {
117 properties: {
118 store: {
119 schema: DATASTORE_SCHEMA,
120 },
121 "backup-type": {
122 schema: BACKUP_TYPE_SCHEMA,
123 },
124 "backup-id": {
125 schema: BACKUP_ID_SCHEMA,
126 },
127 "backup-time": {
128 schema: BACKUP_TIME_SCHEMA,
129 },
130 },
131 },
132 returns: {
133 type: Array,
134 description: "Returns the list of archive files inside a backup snapshots.",
135 items: {
136 type: BackupContent,
137 }
138 },
139 )]
140 /// List snapshot files.
141 fn list_snapshot_files(
142 store: String,
143 backup_type: String,
144 backup_id: String,
145 backup_time: i64,
146 _info: &ApiMethod,
147 _rpcenv: &mut dyn RpcEnvironment,
148 ) -> Result<Vec<BackupContent>, Error> {
149
150 let datastore = DataStore::lookup_datastore(&store)?;
151 let snapshot = BackupDir::new(backup_type, backup_id, backup_time);
152
153 let mut files = read_backup_index(&datastore, &snapshot)?;
154
155 let info = BackupInfo::new(&datastore.base_path(), snapshot)?;
156
157 let file_set = files.iter().fold(HashSet::new(), |mut acc, item| {
158 acc.insert(item.filename.clone());
159 acc
160 });
161
162 for file in info.files {
163 if file_set.contains(&file) { continue; }
164 files.push(BackupContent { filename: file, size: None });
165 }
166
167 Ok(files)
168 }
169
170 #[api(
171 input: {
172 properties: {
173 store: {
174 schema: DATASTORE_SCHEMA,
175 },
176 "backup-type": {
177 schema: BACKUP_TYPE_SCHEMA,
178 },
179 "backup-id": {
180 schema: BACKUP_ID_SCHEMA,
181 },
182 "backup-time": {
183 schema: BACKUP_TIME_SCHEMA,
184 },
185 },
186 },
187 )]
188 /// Delete backup snapshot.
189 fn delete_snapshot(
190 store: String,
191 backup_type: String,
192 backup_id: String,
193 backup_time: i64,
194 _info: &ApiMethod,
195 _rpcenv: &mut dyn RpcEnvironment,
196 ) -> Result<Value, Error> {
197
198 let snapshot = BackupDir::new(backup_type, backup_id, backup_time);
199
200 let datastore = DataStore::lookup_datastore(&store)?;
201
202 datastore.remove_backup_dir(&snapshot)?;
203
204 Ok(Value::Null)
205 }
206
207 #[api(
208 input: {
209 properties: {
210 store: {
211 schema: DATASTORE_SCHEMA,
212 },
213 "backup-type": {
214 optional: true,
215 schema: BACKUP_TYPE_SCHEMA,
216 },
217 "backup-id": {
218 optional: true,
219 schema: BACKUP_ID_SCHEMA,
220 },
221 },
222 },
223 returns: {
224 type: Array,
225 description: "Returns the list of snapshots.",
226 items: {
227 type: SnapshotListItem,
228 }
229 },
230 )]
231 /// List backup snapshots.
232 fn list_snapshots (
233 param: Value,
234 _info: &ApiMethod,
235 _rpcenv: &mut dyn RpcEnvironment,
236 ) -> Result<Vec<SnapshotListItem>, Error> {
237
238 let store = tools::required_string_param(&param, "store")?;
239 let backup_type = param["backup-type"].as_str();
240 let backup_id = param["backup-id"].as_str();
241
242 let datastore = DataStore::lookup_datastore(store)?;
243
244 let base_path = datastore.base_path();
245
246 let backup_list = BackupInfo::list_backups(&base_path)?;
247
248 let mut snapshots = vec![];
249
250 for info in backup_list {
251 let group = info.backup_dir.group();
252 if let Some(backup_type) = backup_type {
253 if backup_type != group.backup_type() { continue; }
254 }
255 if let Some(backup_id) = backup_id {
256 if backup_id != group.backup_id() { continue; }
257 }
258
259 let mut result_item = SnapshotListItem {
260 backup_type: group.backup_type().to_string(),
261 backup_id: group.backup_id().to_string(),
262 backup_time: info.backup_dir.backup_time().timestamp(),
263 files: info.files,
264 size: None,
265 };
266
267 if let Ok(index) = read_backup_index(&datastore, &info.backup_dir) {
268 let mut backup_size = 0;
269 for item in index.iter() {
270 if let Some(item_size) = item.size {
271 backup_size += item_size;
272 }
273 }
274 result_item.size = Some(backup_size);
275 }
276
277 snapshots.push(result_item);
278 }
279
280 Ok(snapshots)
281 }
282
#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
        },
    },
    returns: {
        type: StorageStatus,
    },
)]
/// Get datastore status.
///
/// Queries the filesystem backing the datastore's base path via `statfs64`
/// and reports total/used/available space in bytes.
fn status(
    store: String,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<StorageStatus, Error> {

    let datastore = DataStore::lookup_datastore(&store)?;

    let base_path = datastore.base_path();

    // SAFETY: statfs64 is a plain-old-data C struct; an all-zero value is a
    // valid instance, and the statfs64() call below overwrites it on success.
    let mut stat: libc::statfs64 = unsafe { std::mem::zeroed() };

    use nix::NixPath;

    // SAFETY: with_nix_path provides a valid NUL-terminated C string for the
    // duration of the closure, and `stat` outlives the call.
    let res = base_path.with_nix_path(|cstr| unsafe { libc::statfs64(cstr.as_ptr(), &mut stat) })?;
    nix::errno::Errno::result(res)?; // maps a -1 return to the matching errno

    let bsize = stat.f_bsize as u64; // filesystem block size in bytes

    // Convert block counts to bytes. `used` = total minus free (includes
    // root-reserved blocks); `avail` is what unprivileged users can use.
    Ok(StorageStatus {
        total: stat.f_blocks*bsize,
        used: (stat.f_blocks-stat.f_bfree)*bsize,
        avail: stat.f_bavail*bsize,
    })
}
321
/// Splice the common `keep-*` prune retention options into an API parameter
/// list. Takes one or two bracketed lists of `(name, optional, schema)`
/// tuples; the `keep-*` entries are inserted between them (keeping the
/// overall list alphabetically sorted when callers pass sorted lists).
///
/// NOTE(review): "prameters" is a typo for "parameters", but the macro is
/// `#[macro_export]`ed and may be invoked from other modules, so the name is
/// kept for compatibility.
#[macro_export]
macro_rules! add_common_prune_prameters {
    // Single-list form: delegate to the two-list form with an empty tail.
    ( [ $( $list1:tt )* ] ) => {
        add_common_prune_prameters!([$( $list1 )* ] , [])
    };
    ( [ $( $list1:tt )* ] , [ $( $list2:tt )* ] ) => {
        [
            $( $list1 )*
            (
                "keep-daily",
                true,
                &IntegerSchema::new("Number of daily backups to keep.")
                    .minimum(1)
                    .schema()
            ),
            (
                "keep-hourly",
                true,
                &IntegerSchema::new("Number of hourly backups to keep.")
                    .minimum(1)
                    .schema()
            ),
            (
                "keep-last",
                true,
                &IntegerSchema::new("Number of backups to keep.")
                    .minimum(1)
                    .schema()
            ),
            (
                "keep-monthly",
                true,
                &IntegerSchema::new("Number of monthly backups to keep.")
                    .minimum(1)
                    .schema()
            ),
            (
                "keep-weekly",
                true,
                &IntegerSchema::new("Number of weekly backups to keep.")
                    .minimum(1)
                    .schema()
            ),
            (
                "keep-yearly",
                true,
                &IntegerSchema::new("Number of yearly backups to keep.")
                    .minimum(1)
                    .schema()
            ),
            $( $list2 )*
        ]
    }
}
376
// Declared manually (instead of via #[api]) because the parameter list is
// spliced together with the add_common_prune_prameters! macro above.
const API_METHOD_PRUNE: ApiMethod = ApiMethod::new(
    &ApiHandler::Sync(&prune),
    &ObjectSchema::new(
        "Prune the datastore.",
        &add_common_prune_prameters!([
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("dry-run", true, &BooleanSchema::new(
                "Just show what prune would do, but do not delete anything.")
                .schema()
            ),
        ],[
            ("store", false, &DATASTORE_SCHEMA),
        ])
    )
);
393
/// Prune a backup group: apply the requested `keep-*` retention options and
/// delete the snapshots that fall outside of them.
///
/// Runs synchronously, but inside a `WorkerTask` so actions are recorded in a
/// task log. Returns the worker UPID as a JSON string.
fn prune(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    // "store" is declared non-optional in API_METHOD_PRUNE, so it is present.
    let store = param["store"].as_str().unwrap();

    let backup_type = tools::required_string_param(&param, "backup-type")?;
    let backup_id = tools::required_string_param(&param, "backup-id")?;

    // Without an explicit dry-run flag, really delete.
    let dry_run = param["dry-run"].as_bool().unwrap_or(false);

    let group = BackupGroup::new(backup_type, backup_id);

    let datastore = DataStore::lookup_datastore(store)?;

    // Each keep-* option is optional; None means "no limit of that kind".
    let prune_options = PruneOptions {
        keep_last: param["keep-last"].as_u64(),
        keep_hourly: param["keep-hourly"].as_u64(),
        keep_daily: param["keep-daily"].as_u64(),
        keep_weekly: param["keep-weekly"].as_u64(),
        keep_monthly: param["keep-monthly"].as_u64(),
        keep_yearly: param["keep-yearly"].as_u64(),
    };

    let worker_id = format!("{}_{}_{}", store, backup_type, backup_id);

    // We use a WorkerTask just to have a task log, but run synchronously
    let worker = WorkerTask::new("prune", Some(worker_id), "root@pam", true)?;
    let result = try_block! {
        // No retention options at all means: keep everything.
        if !prune_options.keeps_something() {
            worker.log("No prune selection - keeping all files.");
            return Ok(()); // leaves the try_block, not the whole function
        } else {
            worker.log(format!("retention options: {}", prune_options.cli_options_string()));
            if dry_run {
                worker.log(format!("Testing prune on store \"{}\" group \"{}/{}\"",
                    store, backup_type, backup_id));
            } else {
                worker.log(format!("Starting prune on store \"{}\" group \"{}/{}\"",
                    store, backup_type, backup_id));
            }
        }

        let list = group.list_backups(&datastore.base_path())?;

        let mut prune_info = compute_prune_info(list, &prune_options)?;

        prune_info.reverse(); // delete older snapshots first

        for (info, keep) in prune_info {
            let backup_time = info.backup_dir.backup_time();
            let timestamp = BackupDir::backup_time_to_string(backup_time);
            let group = info.backup_dir.group();

            // One log line per snapshot stating the decision.
            let msg = format!(
                "{}/{}/{} {}",
                group.backup_type(),
                group.backup_id(),
                timestamp,
                if keep { "keep" } else { "remove" },
            );

            worker.log(msg);

            // Only delete when this is neither a dry run nor a kept snapshot.
            if !(dry_run || keep) {
                datastore.remove_backup_dir(&info.backup_dir)?;
            }
        }

        Ok(())
    };

    worker.log_result(&result);

    if let Err(err) = result {
        bail!("prune failed - {}", err);
    }

    Ok(json!(worker.to_string())) // return the UPID
}
476
477 #[api(
478 input: {
479 properties: {
480 store: {
481 schema: DATASTORE_SCHEMA,
482 },
483 },
484 },
485 returns: {
486 schema: UPID_SCHEMA,
487 },
488 )]
489 /// Start garbage collection.
490 fn start_garbage_collection(
491 store: String,
492 _info: &ApiMethod,
493 rpcenv: &mut dyn RpcEnvironment,
494 ) -> Result<Value, Error> {
495
496 let datastore = DataStore::lookup_datastore(&store)?;
497
498 println!("Starting garbage collection on store {}", store);
499
500 let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
501
502 let upid_str = WorkerTask::new_thread(
503 "garbage_collection", Some(store.clone()), "root@pam", to_stdout, move |worker|
504 {
505 worker.log(format!("starting garbage collection on store {}", store));
506 datastore.garbage_collection(worker)
507 })?;
508
509 Ok(json!(upid_str))
510 }
511
512 #[api(
513 input: {
514 properties: {
515 store: {
516 schema: DATASTORE_SCHEMA,
517 },
518 },
519 },
520 returns: {
521 type: GarbageCollectionStatus,
522 }
523 )]
524 /// Garbage collection status.
525 fn garbage_collection_status(
526 store: String,
527 _info: &ApiMethod,
528 _rpcenv: &mut dyn RpcEnvironment,
529 ) -> Result<GarbageCollectionStatus, Error> {
530
531 let datastore = DataStore::lookup_datastore(&store)?;
532
533 let status = datastore.last_gc_status();
534
535 Ok(status)
536 }
537
538
/// List all configured datastores (directory index for this API level).
fn get_datastore_list(
    _param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let (config, _digest) = datastore::config()?;

    // Convert the section config into a JSON array keyed by "store".
    Ok(config.convert_to_array("store", None))
}
549
// Declared manually because AsyncHttp handlers (raw request/response access
// for streaming) are not covered by the #[api] macro.
#[sortable]
pub const API_METHOD_DOWNLOAD_FILE: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&download_file),
    &ObjectSchema::new(
        "Download single raw file from backup snapshot.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
        ]),
    )
);
564
/// Stream a single raw file out of a backup snapshot as an
/// `application/octet-stream` HTTP response.
fn download_file(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    _rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {

    async move {
        let store = tools::required_string_param(&param, "store")?;

        let datastore = DataStore::lookup_datastore(store)?;

        let file_name = tools::required_string_param(&param, "file-name")?.to_owned();

        let backup_type = tools::required_string_param(&param, "backup-type")?;
        let backup_id = tools::required_string_param(&param, "backup-id")?;
        let backup_time = tools::required_integer_param(&param, "backup-time")?;

        // NOTE(review): file_name appears twice in this log line (up front
        // and as the last path component) — presumably intentional; confirm.
        println!("Download {} from {} ({}/{}/{}/{})", file_name, store,
                 backup_type, backup_id, Local.timestamp(backup_time, 0), file_name);

        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);

        // <datastore base>/<snapshot relative path>/<file-name>
        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(&file_name);

        // A missing/unreadable file surfaces as HTTP 400 rather than 500.
        let file = tokio::fs::File::open(path)
            .map_err(|err| http_err!(BAD_REQUEST, format!("File open failed: {}", err)))
            .await?;

        // Stream the file chunk-wise instead of buffering it fully in memory.
        let payload = tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
            .map_ok(|bytes| hyper::body::Bytes::from(bytes.freeze()));
        let body = Body::wrap_stream(payload);

        // fixme: set other headers ?
        Ok(Response::builder()
            .status(StatusCode::OK)
            .header(header::CONTENT_TYPE, "application/octet-stream")
            .body(body)
            .unwrap())
    }.boxed()
}
609
610 #[sortable]
611 pub const API_METHOD_UPLOAD_BACKUP_LOG: ApiMethod = ApiMethod::new(
612 &ApiHandler::AsyncHttp(&upload_backup_log),
613 &ObjectSchema::new(
614 "Download single raw file from backup snapshot.",
615 &sorted!([
616 ("store", false, &DATASTORE_SCHEMA),
617 ("backup-type", false, &BACKUP_TYPE_SCHEMA),
618 ("backup-id", false, &BACKUP_ID_SCHEMA),
619 ("backup-time", false, &BACKUP_TIME_SCHEMA),
620 ]),
621 )
622 );
623
/// Accept an uploaded client log blob (`client.log.blob`) for an existing
/// backup snapshot. Fails if the snapshot already contains a log.
fn upload_backup_log(
    _parts: Parts,
    req_body: Body,
    param: Value,
    _info: &ApiMethod,
    _rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {

    async move {
        let store = tools::required_string_param(&param, "store")?;

        let datastore = DataStore::lookup_datastore(store)?;

        // The client log is always stored under this fixed name.
        let file_name = "client.log.blob";

        let backup_type = tools::required_string_param(&param, "backup-type")?;
        let backup_id = tools::required_string_param(&param, "backup-id")?;
        let backup_time = tools::required_integer_param(&param, "backup-time")?;

        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);

        // <datastore base>/<snapshot relative path>/client.log.blob
        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(&file_name);

        // A log may only be uploaded once per snapshot.
        if path.exists() {
            bail!("backup already contains a log.");
        }

        println!("Upload backup log to {}/{}/{}/{}/{}", store,
                 backup_type, backup_id, BackupDir::backup_time_to_string(backup_dir.backup_time()), file_name);

        // Collect the whole request body into one buffer.
        let data = req_body
            .map_err(Error::from)
            .try_fold(Vec::new(), |mut acc, chunk| {
                acc.extend_from_slice(&*chunk);
                future::ok::<_, Error>(acc)
            })
            .await?;

        // The uploaded data must be a well-formed DataBlob;
        // always verify CRC at server side
        let blob = DataBlob::from_raw(data)?;
        blob.verify_crc()?;
        let raw_data = blob.raw_data();
        replace_file(&path, raw_data, CreateOptions::new())?;

        // fixme: use correct formatter
        Ok(crate::server::formatter::json_response(Ok(Value::Null)))
    }.boxed()
}
674
// Sub-routes below a concrete datastore (/admin/datastore/{store}/...).
// NOTE(review): entries are listed alphabetically (cf. #[sortable]) —
// presumably required for subdir lookup; keep the order when adding routes.
#[sortable]
const DATASTORE_INFO_SUBDIRS: SubdirMap = &[
    (
        "download",
        &Router::new()
            .download(&API_METHOD_DOWNLOAD_FILE)
    ),
    (
        "files",
        &Router::new()
            .get(&API_METHOD_LIST_SNAPSHOT_FILES)
    ),
    (
        "gc",
        &Router::new()
            .get(&API_METHOD_GARBAGE_COLLECTION_STATUS)
            .post(&API_METHOD_START_GARBAGE_COLLECTION)
    ),
    (
        "groups",
        &Router::new()
            .get(&API_METHOD_LIST_GROUPS)
    ),
    (
        "prune",
        &Router::new()
            .post(&API_METHOD_PRUNE)
    ),
    (
        "snapshots",
        &Router::new()
            .get(&API_METHOD_LIST_SNAPSHOTS)
            .delete(&API_METHOD_DELETE_SNAPSHOT)
    ),
    (
        "status",
        &Router::new()
            .get(&API_METHOD_STATUS)
    ),
    (
        "upload-backup-log",
        &Router::new()
            .upload(&API_METHOD_UPLOAD_BACKUP_LOG)
    ),
];
720
// Router for a single datastore: GET returns the subdir index, everything
// else dispatches into DATASTORE_INFO_SUBDIRS.
const DATASTORE_INFO_ROUTER: Router = Router::new()
    .get(&list_subdirs_api_method!(DATASTORE_INFO_SUBDIRS))
    .subdirs(DATASTORE_INFO_SUBDIRS);


/// Top-level datastore router: GET lists the configured datastores, and the
/// {store} path segment is matched into the per-datastore router.
pub const ROUTER: Router = Router::new()
    .get(
        &ApiMethod::new(
            &ApiHandler::Sync(&get_datastore_list),
            &ObjectSchema::new("Directory index.", &[])
        )
    )
    .match_all("store", &DATASTORE_INFO_ROUTER);