// src/server/prune_job.rs (proxmox-backup)
// note: "use new proxmox-sys crate" — task_log!/task_warn! now come from proxmox_sys
1 use std::sync::Arc;
2
3 use anyhow::Error;
4
5 use proxmox_sys::{task_log, task_warn};
6
7 use pbs_datastore::backup_info::BackupInfo;
8 use pbs_datastore::prune::compute_prune_info;
9 use pbs_datastore::DataStore;
10 use pbs_api_types::{Authid, PRIV_DATASTORE_MODIFY, PruneOptions};
11 use pbs_config::CachedUserInfo;
12 use proxmox_rest_server::WorkerTask;
13
14 use crate::server::jobstate::Job;
15
16 pub fn prune_datastore(
17 worker: Arc<WorkerTask>,
18 auth_id: Authid,
19 prune_options: PruneOptions,
20 store: &str,
21 datastore: Arc<DataStore>,
22 dry_run: bool,
23 ) -> Result<(), Error> {
24 task_log!(worker, "Starting datastore prune on store \"{}\"", store);
25
26 if dry_run {
27 task_log!(worker, "(dry test run)");
28 }
29
30 let keep_all = !pbs_datastore::prune::keeps_something(&prune_options);
31
32 if keep_all {
33 task_log!(worker, "No prune selection - keeping all files.");
34 } else {
35 task_log!(
36 worker,
37 "retention options: {}",
38 pbs_datastore::prune::cli_options_string(&prune_options)
39 );
40 }
41
42 let user_info = CachedUserInfo::new()?;
43 let privs = user_info.lookup_privs(&auth_id, &["datastore", store]);
44 let has_privs = privs & PRIV_DATASTORE_MODIFY != 0;
45
46 let base_path = datastore.base_path();
47
48 let groups = BackupInfo::list_backup_groups(&base_path)?;
49 for group in groups {
50 let list = group.list_backups(&base_path)?;
51
52 if !has_privs && !datastore.owns_backup(&group, &auth_id)? {
53 continue;
54 }
55
56 let mut prune_info = compute_prune_info(list, &prune_options)?;
57 prune_info.reverse(); // delete older snapshots first
58
59 task_log!(
60 worker,
61 "Starting prune on store \"{}\" group \"{}/{}\"",
62 store,
63 group.backup_type(),
64 group.backup_id()
65 );
66
67 for (info, mark) in prune_info {
68 let keep = keep_all || mark.keep();
69 task_log!(
70 worker,
71 "{} {}/{}/{}",
72 mark,
73 group.backup_type(),
74 group.backup_id(),
75 info.backup_dir.backup_time_string()
76 );
77 if !keep && !dry_run {
78 if let Err(err) = datastore.remove_backup_dir(&info.backup_dir, false) {
79 task_warn!(
80 worker,
81 "failed to remove dir {:?}: {}",
82 info.backup_dir.relative_path(),
83 err,
84 );
85 }
86 }
87 }
88 }
89
90 Ok(())
91 }
92
93 pub fn do_prune_job(
94 mut job: Job,
95 prune_options: PruneOptions,
96 store: String,
97 auth_id: &Authid,
98 schedule: Option<String>,
99 ) -> Result<String, Error> {
100 let datastore = DataStore::lookup_datastore(&store)?;
101
102 let worker_type = job.jobtype().to_string();
103 let auth_id = auth_id.clone();
104 let upid_str = WorkerTask::new_thread(
105 &worker_type,
106 Some(job.jobname().to_string()),
107 auth_id.to_string(),
108 false,
109 move |worker| {
110 job.start(&worker.upid().to_string())?;
111
112 if let Some(event_str) = schedule {
113 task_log!(worker, "task triggered by schedule '{}'", event_str);
114 }
115
116 let result = prune_datastore(worker.clone(), auth_id, prune_options, &store, datastore, false);
117
118 let status = worker.create_state(&result);
119
120 if let Err(err) = job.finish(status) {
121 eprintln!(
122 "could not finish job state for {}: {}",
123 job.jobtype().to_string(),
124 err
125 );
126 }
127
128 result
129 },
130 )?;
131 Ok(upid_str)
132 }