extern crate proxmox_backup;

use failure::*;
//use std::os::unix::io::AsRawFd;
use chrono::{DateTime, Local, TimeZone};
use std::path::{Path, PathBuf};
use std::collections::HashMap;

use proxmox_backup::tools;
use proxmox_backup::cli::*;
use proxmox_backup::api_schema::*;
use proxmox_backup::api_schema::router::*;
use proxmox_backup::client::*;
use proxmox_backup::backup::*;
//use proxmox_backup::backup::image_index::*;
//use proxmox_backup::config::datastore;
//use proxmox_backup::pxar::encoder::*;
//use proxmox_backup::backup::datastore::*;

use serde_json::{json, Value};
use hyper::Body;
use std::sync::Arc;
use regex::Regex;
use xdg::BaseDirectories;

use lazy_static::lazy_static;

lazy_static! {
    static ref BACKUPSPEC_REGEX: Regex =
        Regex::new(r"^([a-zA-Z0-9_-]+\.(?:pxar|raw)):(.+)$").unwrap();
}
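
// A backupspec pairs an archive name with a source path, e.g. "root.pxar:/"
// or "disk.raw:/dev/sda". Quick sanity checks (illustrative values only):
//
//     assert!(BACKUPSPEC_REGEX.is_match("root.pxar:/"));
//     assert!(BACKUPSPEC_REGEX.is_match("disk.raw:/dev/sda"));
//     assert!(!BACKUPSPEC_REGEX.is_match("root:/")); // missing .pxar/.raw suffix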

fn record_repository(repo: &BackupRepository) {

    let base = match BaseDirectories::with_prefix("proxmox-backup") {
        Ok(v) => v,
        _ => return,
    };

    // usually $HOME/.cache/proxmox-backup/repo-list
    let path = match base.place_cache_file("repo-list") {
        Ok(v) => v,
        _ => return,
    };

    let mut data = tools::file_get_json(&path).unwrap_or(json!({}));

    let repo = repo.to_string();

    data[&repo] = json!{ data[&repo].as_i64().unwrap_or(0) + 1 };

    // rebuild the map sorted by usage count, keeping only the most used entries
    let mut map = serde_json::map::Map::new();

    loop {
        let mut max_used = 0;
        let mut max_repo = None;
        for (repo, count) in data.as_object().unwrap() {
            if map.contains_key(repo) { continue; }
            if let Some(count) = count.as_i64() {
                if count > max_used {
                    max_used = count;
                    max_repo = Some(repo);
                }
            }
        }
        if let Some(repo) = max_repo {
            map.insert(repo.to_owned(), json!(max_used));
        } else {
            break;
        }
        if map.len() >= 10 { // store max. 10 repos
            break;
        }
    }

    let new_data = json!(map);

    let _ = tools::file_set_contents(path, new_data.to_string().as_bytes(), None);
}
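
// The cache file holds a flat JSON object mapping repository URLs to usage
// counts, e.g. (hypothetical contents of ~/.cache/proxmox-backup/repo-list):
//
//     {"user@host:store1": 5, "user@host:store2": 2}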

fn complete_repository(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {

    let mut result = vec![];

    let base = match BaseDirectories::with_prefix("proxmox-backup") {
        Ok(v) => v,
        _ => return result,
    };

    // usually $HOME/.cache/proxmox-backup/repo-list
    let path = match base.place_cache_file("repo-list") {
        Ok(v) => v,
        _ => return result,
    };

    let data = tools::file_get_json(&path).unwrap_or(json!({}));

    if let Some(map) = data.as_object() {
        for (repo, _count) in map {
            result.push(repo.to_owned());
        }
    }

    result
}

fn backup_directory<P: AsRef<Path>>(
    client: &mut HttpClient,
    repo: &BackupRepository,
    dir_path: P,
    archive_name: &str,
    backup_id: &str,
    backup_time: DateTime<Local>,
    chunk_size: Option<u64>,
    all_file_systems: bool,
    verbose: bool,
) -> Result<(), Error> {

    let mut param = json!({
        "archive-name": archive_name,
        "backup-type": "host",
        "backup-id": backup_id,
        "backup-time": backup_time.timestamp(),
    });

    if let Some(size) = chunk_size {
        param["chunk-size"] = size.into();
    }

    let query = tools::json_object_to_query(param)?;

    let path = format!("api2/json/admin/datastore/{}/pxar?{}", repo.store(), query);

    let stream = PxarBackupStream::open(dir_path.as_ref(), all_file_systems, verbose)?;

    let body = Body::wrap_stream(stream);

    client.upload("application/x-proxmox-backup-pxar", body, &path)?;

    Ok(())
}
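
// Usage sketch (hypothetical values; `client` must be logged in first):
//
//     let when = Local.timestamp(Local::now().timestamp(), 0); // whole seconds
//     backup_directory(&mut client, &repo, "/etc", "etc.pxar", "myhost",
//                      when, None, false, false)?;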

/****
fn backup_image(datastore: &DataStore, file: &std::fs::File, size: usize, target: &str, chunk_size: usize) -> Result<(), Error> {

    let mut target = PathBuf::from(target);

    if let Some(ext) = target.extension() {
        if ext != "fidx" {
            bail!("got wrong file extension - expected '.fidx'");
        }
    } else {
        target.set_extension("fidx");
    }

    let mut index = datastore.create_image_writer(&target, size, chunk_size)?;

    tools::file_chunker(file, chunk_size, |pos, chunk| {
        index.add_chunk(pos, chunk)?;
        Ok(true)
    })?;

    index.close()?; // commit changes

    Ok(())
}
*/

fn strip_chunked_file_extensions(list: Vec<String>) -> Vec<String> {

    let mut result = vec![];

    for file in list.into_iter() {
        if file.ends_with(".didx") {
            result.push(file[..file.len()-5].to_owned());
        } else if file.ends_with(".fidx") {
            result.push(file[..file.len()-5].to_owned());
        } else {
            result.push(file); // should not happen
        }
    }

    result
}
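
// Example (illustrative):
//
//     let list = vec!["root.didx".to_string(), "disk.fidx".to_string()];
//     assert_eq!(strip_chunked_file_extensions(list), vec!["root", "disk"]);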

/* not used:
fn list_backups(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut RpcEnvironment,
) -> Result<Value, Error> {

    let repo_url = tools::required_string_param(&param, "repository")?;
    let repo: BackupRepository = repo_url.parse()?;

    let mut client = HttpClient::new(repo.host(), repo.user());

    let path = format!("api2/json/admin/datastore/{}/backups", repo.store());

    let result = client.get(&path)?;

    record_repository(&repo);

    // fixme: implement and use output formatter instead ..
    let list = result["data"].as_array().unwrap();

    for item in list {

        let id = item["backup-id"].as_str().unwrap();
        let btype = item["backup-type"].as_str().unwrap();
        let epoch = item["backup-time"].as_i64().unwrap();

        let backup_dir = BackupDir::new(btype, id, epoch);

        let files = item["files"].as_array().unwrap().iter().map(|v| v.as_str().unwrap().to_owned()).collect();
        let files = strip_chunked_file_extensions(files);

        for filename in files {
            let path = backup_dir.relative_path().to_str().unwrap().to_owned();
            println!("{} | {}/{}", backup_dir.backup_time().format("%c"), path, filename);
        }
    }

    //Ok(result)
    Ok(Value::Null)
}
*/

fn list_backup_groups(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut RpcEnvironment,
) -> Result<Value, Error> {

    let repo_url = tools::required_string_param(&param, "repository")?;
    let repo: BackupRepository = repo_url.parse()?;

    let mut client = HttpClient::new(repo.host(), repo.user());

    let path = format!("api2/json/admin/datastore/{}/groups", repo.store());

    let mut result = client.get(&path)?;

    record_repository(&repo);

    // fixme: implement and use output formatter instead ..
    let list = result["data"].as_array_mut().unwrap();

    // sort by backup type first, then by backup id
    list.sort_unstable_by(|a, b| {
        let a_id = a["backup-id"].as_str().unwrap();
        let a_backup_type = a["backup-type"].as_str().unwrap();
        let b_id = b["backup-id"].as_str().unwrap();
        let b_backup_type = b["backup-type"].as_str().unwrap();

        let type_order = a_backup_type.cmp(b_backup_type);
        if type_order == std::cmp::Ordering::Equal {
            a_id.cmp(b_id)
        } else {
            type_order
        }
    });

    for item in list {

        let id = item["backup-id"].as_str().unwrap();
        let btype = item["backup-type"].as_str().unwrap();
        let epoch = item["last-backup"].as_i64().unwrap();
        let last_backup = Local.timestamp(epoch, 0);
        let backup_count = item["backup-count"].as_u64().unwrap();

        let group = BackupGroup::new(btype, id);

        let path = group.group_path().to_str().unwrap().to_owned();

        let files = item["files"].as_array().unwrap().iter().map(|v| v.as_str().unwrap().to_owned()).collect();
        let files = strip_chunked_file_extensions(files);

        println!("{:20} | {} | {:5} | {}", path, last_backup.format("%c"),
                 backup_count, tools::join(&files, ' '));
    }

    //Ok(result)
    Ok(Value::Null)
}
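
// Example output line (hypothetical data):
//
//     host/myhost          | Mon Jan  7 12:00:00 2019 |     3 | root.pxar etc.pxar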

fn list_snapshots(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut RpcEnvironment,
) -> Result<Value, Error> {

    let repo_url = tools::required_string_param(&param, "repository")?;
    let repo: BackupRepository = repo_url.parse()?;

    let path = tools::required_string_param(&param, "group")?;
    let group = BackupGroup::parse(path)?;

    let query = tools::json_object_to_query(json!({
        "backup-type": group.backup_type(),
        "backup-id": group.backup_id(),
    }))?;

    let mut client = HttpClient::new(repo.host(), repo.user());

    let path = format!("api2/json/admin/datastore/{}/snapshots?{}", repo.store(), query);

    // fixme: params
    let result = client.get(&path)?;

    record_repository(&repo);

    // fixme: implement and use output formatter instead ..
    let list = result["data"].as_array().unwrap();

    for item in list {

        let id = item["backup-id"].as_str().unwrap();
        let btype = item["backup-type"].as_str().unwrap();
        let epoch = item["backup-time"].as_i64().unwrap();

        let snapshot = BackupDir::new(btype, id, epoch);

        let path = snapshot.relative_path().to_str().unwrap().to_owned();

        let files = item["files"].as_array().unwrap().iter().map(|v| v.as_str().unwrap().to_owned()).collect();
        let files = strip_chunked_file_extensions(files);

        println!("{} | {} | {}", path, snapshot.backup_time().format("%c"), tools::join(&files, ' '));
    }

    Ok(Value::Null)
}
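
// Example output line (hypothetical data; the exact snapshot path format
// comes from BackupDir::relative_path()):
//
//     host/myhost/2019-01-07T12:00:00Z | Mon Jan  7 12:00:00 2019 | root.pxar etc.pxar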

fn forget_snapshots(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut RpcEnvironment,
) -> Result<Value, Error> {

    let repo_url = tools::required_string_param(&param, "repository")?;
    let repo: BackupRepository = repo_url.parse()?;

    let path = tools::required_string_param(&param, "snapshot")?;
    let snapshot = BackupDir::parse(path)?;

    let query = tools::json_object_to_query(json!({
        "backup-type": snapshot.group().backup_type(),
        "backup-id": snapshot.group().backup_id(),
        "backup-time": snapshot.backup_time().timestamp(),
    }))?;

    let mut client = HttpClient::new(repo.host(), repo.user());

    let path = format!("api2/json/admin/datastore/{}/snapshots?{}", repo.store(), query);

    let result = client.delete(&path)?;

    record_repository(&repo);

    Ok(result)
}

fn start_garbage_collection(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut RpcEnvironment,
) -> Result<Value, Error> {

    let repo_url = tools::required_string_param(&param, "repository")?;
    let repo: BackupRepository = repo_url.parse()?;

    let mut client = HttpClient::new(repo.host(), repo.user());

    let path = format!("api2/json/admin/datastore/{}/gc", repo.store());

    let result = client.post(&path)?;

    record_repository(&repo);

    Ok(result)
}

fn parse_backupspec(value: &str) -> Result<(&str, &str), Error> {

    if let Some(caps) = BACKUPSPEC_REGEX.captures(value) {
        return Ok((caps.get(1).unwrap().as_str(), caps.get(2).unwrap().as_str()));
    }
    bail!("unable to parse backup source specification '{}'", value);
}
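
// Example: parse_backupspec("root.pxar:/")? yields ("root.pxar", "/").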

fn create_backup(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut RpcEnvironment,
) -> Result<Value, Error> {

    let repo_url = tools::required_string_param(&param, "repository")?;

    let backupspec_list = tools::required_array_param(&param, "backupspec")?;

    let repo: BackupRepository = repo_url.parse()?;

    let all_file_systems = param["all-file-systems"].as_bool().unwrap_or(false);

    let verbose = param["verbose"].as_bool().unwrap_or(false);

    let chunk_size_opt = param["chunk-size"].as_u64().map(|v| v*1024);

    if let Some(size) = chunk_size_opt {
        verify_chunk_size(size)?;
    }

    let backup_id = param["host-id"].as_str().unwrap_or(&tools::nodename());

    let mut upload_list = vec![];

    for backupspec in backupspec_list {
        let (target, filename) = parse_backupspec(backupspec.as_str().unwrap())?;

        let stat = match nix::sys::stat::stat(filename) {
            Ok(s) => s,
            Err(err) => bail!("unable to access '{}' - {}", filename, err),
        };

        // note: file type bits must be compared against the full S_IFMT mask -
        // testing single bits gives false positives (e.g. S_IFBLK contains the
        // S_IFDIR bit)
        let file_type = stat.st_mode & libc::S_IFMT;

        if file_type == libc::S_IFDIR {

            upload_list.push((filename.to_owned(), target.to_owned()));

        } else if file_type == libc::S_IFREG || file_type == libc::S_IFBLK {
            if stat.st_size <= 0 { bail!("got unexpected file size '{}'", stat.st_size); }
            let _size = stat.st_size as usize;

            panic!("implement me");

            //backup_image(&datastore, &file, size, &target, chunk_size)?;

            // let idx = datastore.open_image_reader(target)?;
            // idx.print_info();

        } else {
            bail!("unsupported file type (expected a directory, file or block device)");
        }
    }

    let backup_time = Local.timestamp(Local::now().timestamp(), 0);

    let mut client = HttpClient::new(repo.host(), repo.user());

    client.login()?; // login before starting backup

    record_repository(&repo);

    println!("Starting backup");
    println!("Client name: {}", tools::nodename());
    println!("Start Time: {}", backup_time.to_rfc3339());

    for (filename, target) in upload_list {
        println!("Upload '{}' to '{:?}' as {}", filename, repo, target);
        backup_directory(&mut client, &repo, &filename, &target, backup_id, backup_time,
                         chunk_size_opt, all_file_systems, verbose)?;
    }

    let end_time = Local.timestamp(Local::now().timestamp(), 0);
    let elapsed = end_time.signed_duration_since(backup_time);
    println!("Duration: {}", elapsed);

    println!("End Time: {}", end_time.to_rfc3339());

    Ok(Value::Null)
}
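
// Typical invocation of the 'backup' command (illustrative repository and
// paths; the binary name is assumed):
//
//     proxmox-backup-client backup user@host:store root.pxar:/ etc.pxar:/etc --chunk-size 4096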

fn complete_backup_source(arg: &str, param: &HashMap<String, String>) -> Vec<String> {

    let mut result = vec![];

    let data: Vec<&str> = arg.splitn(2, ':').collect();

    if data.len() != 2 {
        result.push(String::from("root.pxar:/"));
        result.push(String::from("etc.pxar:/etc"));
        return result;
    }

    let files = tools::complete_file_name(data[1], param);

    for file in files {
        result.push(format!("{}:{}", data[0], file));
    }

    result
}
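
// Example: completing "etc.pxar:/et" keeps the "etc.pxar" label and completes
// only the path part, yielding e.g. "etc.pxar:/etc" (result depends on the
// local file system).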

fn restore(
    param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut RpcEnvironment,
) -> Result<Value, Error> {

    let repo_url = tools::required_string_param(&param, "repository")?;
    let repo: BackupRepository = repo_url.parse()?;

    let archive_name = tools::required_string_param(&param, "archive-name")?;

    let mut client = HttpClient::new(repo.host(), repo.user());

    client.login()?; // login before starting

    record_repository(&repo);

    let path = tools::required_string_param(&param, "snapshot")?;

    let query;

    if path.matches('/').count() == 1 {
        // a group path ('<type>/<id>') - look up its snapshots and use the
        // first one returned
        let group = BackupGroup::parse(path)?;

        let subquery = tools::json_object_to_query(json!({
            "backup-type": group.backup_type(),
            "backup-id": group.backup_id(),
        }))?;

        let list_path = format!("api2/json/admin/datastore/{}/snapshots?{}", repo.store(), subquery);
        let result = client.get(&list_path)?;

        let list = result["data"].as_array().unwrap();
        if list.is_empty() {
            bail!("backup group '{}' does not contain any snapshots", path);
        }

        query = tools::json_object_to_query(json!({
            "backup-type": group.backup_type(),
            "backup-id": group.backup_id(),
            "backup-time": list[0]["backup-time"].as_i64().unwrap(),
            "archive-name": archive_name,
        }))?;
    } else {
        let snapshot = BackupDir::parse(path)?;

        query = tools::json_object_to_query(json!({
            "backup-type": snapshot.group().backup_type(),
            "backup-id": snapshot.group().backup_id(),
            "backup-time": snapshot.backup_time().timestamp(),
            "archive-name": archive_name,
        }))?;
    }

    let target = tools::required_string_param(&param, "target")?;

    if archive_name.ends_with(".pxar") {
        let path = format!("api2/json/admin/datastore/{}/pxar?{}", repo.store(), query);

        println!("DOWNLOAD FILE {} to {}", path, target);

        let target = PathBuf::from(target);
        let writer = PxarDecodeWriter::new(&target, true)?;
        client.download(&path, Box::new(writer))?;
    } else {
        bail!("unknown file extension - unable to download '{}'", archive_name);
    }

    Ok(Value::Null)
}
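
// Typical invocation (illustrative values; the binary name is assumed):
//
//     proxmox-backup-client restore user@host:store host/myhost root.pxar /tmp/restore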

fn prune(
    mut param: Value,
    _info: &ApiMethod,
    _rpcenv: &mut RpcEnvironment,
) -> Result<Value, Error> {

    let repo_url = tools::required_string_param(&param, "repository")?;
    let repo: BackupRepository = repo_url.parse()?;

    let mut client = HttpClient::new(repo.host(), repo.user());

    let path = format!("api2/json/admin/datastore/{}/prune", repo.store());

    param.as_object_mut().unwrap().remove("repository");

    let result = client.post_json(&path, param)?;

    record_repository(&repo);

    Ok(result)
}

fn try_get(repo: &BackupRepository, url: &str) -> Value {

    let mut client = HttpClient::new(repo.host(), repo.user());

    let mut resp = match client.try_get(url) {
        Ok(v) => v,
        _ => return Value::Null,
    };

    if let Some(map) = resp.as_object_mut() {
        if let Some(data) = map.remove("data") {
            return data;
        }
    }
    Value::Null
}

fn extract_repo(param: &HashMap<String, String>) -> Option<BackupRepository> {

    let repo_url = match param.get("repository") {
        Some(v) => v,
        _ => return None,
    };

    let repo: BackupRepository = match repo_url.parse() {
        Ok(v) => v,
        _ => return None,
    };

    Some(repo)
}

fn complete_backup_group(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {

    let mut result = vec![];

    let repo = match extract_repo(param) {
        Some(v) => v,
        _ => return result,
    };

    let path = format!("api2/json/admin/datastore/{}/groups", repo.store());

    let data = try_get(&repo, &path);

    if let Some(list) = data.as_array() {
        for item in list {
            if let (Some(backup_id), Some(backup_type)) =
                (item["backup-id"].as_str(), item["backup-type"].as_str())
            {
                result.push(format!("{}/{}", backup_type, backup_id));
            }
        }
    }

    result
}

fn complete_group_or_snapshot(arg: &str, param: &HashMap<String, String>) -> Vec<String> {

    let mut result = vec![];

    let repo = match extract_repo(param) {
        Some(v) => v,
        _ => return result,
    };

    if arg.matches('/').count() < 2 {
        let groups = complete_backup_group(arg, param);
        for group in groups {
            result.push(group.to_string());
            result.push(format!("{}/", group));
        }
        return result;
    }

    let mut parts = arg.split('/');
    let query = tools::json_object_to_query(json!({
        "backup-type": parts.next().unwrap(),
        "backup-id": parts.next().unwrap(),
    })).unwrap();

    let path = format!("api2/json/admin/datastore/{}/snapshots?{}", repo.store(), query);

    let data = try_get(&repo, &path);

    if let Some(list) = data.as_array() {
        for item in list {
            if let (Some(backup_id), Some(backup_type), Some(backup_time)) =
                (item["backup-id"].as_str(), item["backup-type"].as_str(), item["backup-time"].as_i64())
            {
                let snapshot = BackupDir::new(backup_type, backup_id, backup_time);
                result.push(snapshot.relative_path().to_str().unwrap().to_owned());
            }
        }
    }

    result
}

fn complete_archive_name(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {

    let mut result = vec![];

    let repo = match extract_repo(param) {
        Some(v) => v,
        _ => return result,
    };

    let snapshot = match param.get("snapshot") {
        Some(path) => {
            match BackupDir::parse(path) {
                Ok(v) => v,
                _ => return result,
            }
        }
        _ => return result,
    };

    let query = tools::json_object_to_query(json!({
        "backup-type": snapshot.group().backup_type(),
        "backup-id": snapshot.group().backup_id(),
        "backup-time": snapshot.backup_time().timestamp(),
    })).unwrap();

    let path = format!("api2/json/admin/datastore/{}/files?{}", repo.store(), query);

    let data = try_get(&repo, &path);

    if let Some(list) = data.as_array() {
        for item in list {
            if let Some(filename) = item.as_str() {
                result.push(filename.to_owned());
            }
        }
    }

    strip_chunked_file_extensions(result)
}

fn complete_chunk_size(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {

    let mut result = vec![];

    let mut size = 64;
    loop {
        result.push(size.to_string());
        size *= 2;
        if size > 4096 { break; }
    }

    result
}
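
// Candidates offered for chunk-size completion (illustrative):
//
//     assert_eq!(complete_chunk_size("", &HashMap::new()),
//                vec!["64", "128", "256", "512", "1024", "2048", "4096"]);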

fn main() {

    let repo_url_schema: Arc<Schema> = Arc::new(
        StringSchema::new("Repository URL.")
            .format(BACKUP_REPO_URL.clone())
            .max_length(256)
            .into()
    );

    let backup_source_schema: Arc<Schema> = Arc::new(
        StringSchema::new("Backup source specification ([<label>.<ext>:<path>]).")
            .format(Arc::new(ApiStringFormat::Pattern(&BACKUPSPEC_REGEX)))
            .into()
    );

    let backup_cmd_def = CliCommand::new(
        ApiMethod::new(
            create_backup,
            ObjectSchema::new("Create (host) backup.")
                .required("repository", repo_url_schema.clone())
                .required(
                    "backupspec",
                    ArraySchema::new(
                        "List of backup source specifications ([<label>.<ext>:<path>] ...)",
                        backup_source_schema,
                    ).min_length(1)
                )
                .optional(
                    "verbose",
                    BooleanSchema::new("Verbose output.").default(false))
                .optional(
                    "host-id",
                    StringSchema::new("Use specified ID for the backup group name ('host/<id>'). The default is the system hostname."))
                .optional(
                    "chunk-size",
                    IntegerSchema::new("Chunk size in KB. Must be a power of 2.")
                        .minimum(64)
                        .maximum(4096)
                        .default(4096)
                )
        ))
        .arg_param(vec!["repository", "backupspec"])
        .completion_cb("repository", complete_repository)
        .completion_cb("backupspec", complete_backup_source)
        .completion_cb("chunk-size", complete_chunk_size);

    let list_cmd_def = CliCommand::new(
        ApiMethod::new(
            list_backup_groups,
            ObjectSchema::new("List backup groups.")
                .required("repository", repo_url_schema.clone())
        ))
        .arg_param(vec!["repository"])
        .completion_cb("repository", complete_repository);

    let snapshots_cmd_def = CliCommand::new(
        ApiMethod::new(
            list_snapshots,
            ObjectSchema::new("List backup snapshots.")
                .required("repository", repo_url_schema.clone())
                .required("group", StringSchema::new("Backup group."))
        ))
        .arg_param(vec!["repository", "group"])
        .completion_cb("group", complete_backup_group)
        .completion_cb("repository", complete_repository);

    let forget_cmd_def = CliCommand::new(
        ApiMethod::new(
            forget_snapshots,
            ObjectSchema::new("Forget (remove) backup snapshots.")
                .required("repository", repo_url_schema.clone())
                .required("snapshot", StringSchema::new("Snapshot path."))
        ))
        .arg_param(vec!["repository", "snapshot"])
        .completion_cb("repository", complete_repository)
        .completion_cb("snapshot", complete_group_or_snapshot);

    let garbage_collect_cmd_def = CliCommand::new(
        ApiMethod::new(
            start_garbage_collection,
            ObjectSchema::new("Start garbage collection for a specific repository.")
                .required("repository", repo_url_schema.clone())
        ))
        .arg_param(vec!["repository"])
        .completion_cb("repository", complete_repository);

    let restore_cmd_def = CliCommand::new(
        ApiMethod::new(
            restore,
            ObjectSchema::new("Restore backup repository.")
                .required("repository", repo_url_schema.clone())
                .required("snapshot", StringSchema::new("Group/Snapshot path."))
                .required("archive-name", StringSchema::new("Backup archive name."))
                .required("target", StringSchema::new("Target directory path."))
        ))
        .arg_param(vec!["repository", "snapshot", "archive-name", "target"])
        .completion_cb("repository", complete_repository)
        .completion_cb("snapshot", complete_group_or_snapshot)
        .completion_cb("archive-name", complete_archive_name)
        .completion_cb("target", tools::complete_file_name);

    let prune_cmd_def = CliCommand::new(
        ApiMethod::new(
            prune,
            // note: helper name below is kept verbatim as exported by proxmox_backup
            proxmox_backup::api2::admin::datastore::add_common_prune_prameters(
                ObjectSchema::new("Prune backup repository.")
                    .required("repository", repo_url_schema.clone())
            )
        ))
        .arg_param(vec!["repository"])
        .completion_cb("repository", complete_repository);

    let cmd_def = CliCommandMap::new()
        .insert("backup".to_owned(), backup_cmd_def.into())
        .insert("forget".to_owned(), forget_cmd_def.into())
        .insert("garbage-collect".to_owned(), garbage_collect_cmd_def.into())
        .insert("list".to_owned(), list_cmd_def.into())
        .insert("prune".to_owned(), prune_cmd_def.into())
        .insert("restore".to_owned(), restore_cmd_def.into())
        .insert("snapshots".to_owned(), snapshots_cmd_def.into());

    run_cli_command(cmd_def.into());
}