datastore::save_config(&config)?;
crate::config::jobstate::create_state_file("prune", &datastore.name)?;
+ crate::config::jobstate::create_state_file("garbage_collection", &datastore.name)?;
+ crate::config::jobstate::create_state_file("verify", &datastore.name)?;
Ok(())
}
}
}
- if gc_schedule.is_some() { data.gc_schedule = gc_schedule; }
+ let mut gc_schedule_changed = false;
+ if gc_schedule.is_some() {
+ gc_schedule_changed = data.gc_schedule != gc_schedule;
+ data.gc_schedule = gc_schedule;
+ }
+
let mut prune_schedule_changed = false;
if prune_schedule.is_some() {
- prune_schedule_changed = true;
+ prune_schedule_changed = data.prune_schedule != prune_schedule;
data.prune_schedule = prune_schedule;
}
- if verify_schedule.is_some() { data.verify_schedule = verify_schedule; }
+
+ let mut verify_schedule_changed = false;
+ if verify_schedule.is_some() {
+ verify_schedule_changed = data.verify_schedule != verify_schedule;
+ data.verify_schedule = verify_schedule;
+ }
if keep_last.is_some() { data.keep_last = keep_last; }
if keep_hourly.is_some() { data.keep_hourly = keep_hourly; }
datastore::save_config(&config)?;
- // we want to reset the statefile, to avoid an immediate sync in some cases
+    // we want to reset the state files, to avoid an immediate action in some cases
// (e.g. going from monthly to weekly in the second week of the month)
+ if gc_schedule_changed {
+ crate::config::jobstate::create_state_file("garbage_collection", &name)?;
+ }
+
if prune_schedule_changed {
crate::config::jobstate::create_state_file("prune", &name)?;
}
+ if verify_schedule_changed {
+ crate::config::jobstate::create_state_file("verify", &name)?;
+ }
+
Ok(())
}
datastore::save_config(&config)?;
- crate::config::jobstate::remove_state_file("prune", &name)?;
+    // best-effort cleanup: ignore errors, a missing/unremovable state file is not fatal here
+ let _ = crate::config::jobstate::remove_state_file("prune", &name);
+ let _ = crate::config::jobstate::remove_state_file("garbage_collection", &name);
+ let _ = crate::config::jobstate::remove_state_file("verify", &name);
Ok(())
}
Ok(())
}
-fn lookup_last_worker(worker_type: &str, worker_id: &str) -> Result<Option<server::UPID>, Error> {
-
- let list = proxmox_backup::server::read_task_list()?;
-
- let mut last: Option<&server::UPID> = None;
-
- for entry in list.iter() {
- if entry.upid.worker_type == worker_type {
- if let Some(ref id) = entry.upid.worker_id {
- if id == worker_id {
- match last {
- Some(ref upid) => {
- if upid.starttime < entry.upid.starttime {
- last = Some(&entry.upid)
- }
- }
- None => {
- last = Some(&entry.upid)
- }
- }
- }
- }
- }
- }
-
- Ok(last.cloned())
-}
-
-
async fn schedule_datastore_garbage_collection() {
use proxmox_backup::backup::DataStore;
use proxmox_backup::server::{UPID, WorkerTask};
- use proxmox_backup::config::datastore::{self, DataStoreConfig};
+ use proxmox_backup::config::{
+ jobstate::{self, Job},
+ datastore::{self, DataStoreConfig}
+ };
use proxmox_backup::tools::systemd::time::{
parse_calendar_event, compute_next_event};
}
}
} else {
- match lookup_last_worker(worker_type, &store) {
- Ok(Some(upid)) => upid.starttime,
- Ok(None) => 0,
+ match jobstate::last_run_time(worker_type, &store) {
+ Ok(time) => time,
Err(err) => {
- eprintln!("lookup_last_job_start failed: {}", err);
+ eprintln!("could not get last run time of {} {}: {}", worker_type, store, err);
continue;
}
}
if next > now { continue; }
+ let mut job = match Job::new(worker_type, &store) {
+ Ok(job) => job,
+        Err(_) => continue, // could not get the job lock (a job is likely already running) - skip this round
+ };
+
let store2 = store.clone();
if let Err(err) = WorkerTask::new_thread(
Userid::backup_userid().clone(),
false,
move |worker| {
+ job.start(&worker.upid().to_string())?;
+
worker.log(format!("starting garbage collection on store {}", store));
worker.log(format!("task triggered by schedule '{}'", event_str));
- datastore.garbage_collection(&worker)
+
+ let result = datastore.garbage_collection(&worker);
+
+ let status = worker.create_state(&result);
+
+ if let Err(err) = job.finish(status) {
+ eprintln!("could not finish job state for {}: {}", worker_type, err);
+ }
+
+ result
}
) {
eprintln!("unable to start garbage collection on store {} - {}", store2, err);
async fn schedule_datastore_verification() {
use proxmox_backup::backup::{DataStore, verify_all_backups};
use proxmox_backup::server::{WorkerTask};
- use proxmox_backup::config::datastore::{self, DataStoreConfig};
+ use proxmox_backup::config::{
+ jobstate::{self, Job},
+ datastore::{self, DataStoreConfig}
+ };
use proxmox_backup::tools::systemd::time::{
parse_calendar_event, compute_next_event};
let worker_type = "verify";
- let last = match lookup_last_worker(worker_type, &store) {
- Ok(Some(upid)) => {
- if proxmox_backup::server::worker_is_active_local(&upid) {
- continue;
- }
- upid.starttime
- }
- Ok(None) => 0,
+ let last = match jobstate::last_run_time(worker_type, &store) {
+ Ok(time) => time,
Err(err) => {
- eprintln!("lookup_last_job_start failed: {}", err);
+ eprintln!("could not get last run time of {} {}: {}", worker_type, store, err);
continue;
}
};
if next > now { continue; }
+ let mut job = match Job::new(worker_type, &store) {
+ Ok(job) => job,
+        Err(_) => continue, // could not get the job lock (a job is likely already running) - skip this round
+ };
+
let worker_id = store.clone();
let store2 = store.clone();
if let Err(err) = WorkerTask::new_thread(
Userid::backup_userid().clone(),
false,
move |worker| {
+ job.start(&worker.upid().to_string())?;
worker.log(format!("starting verification on store {}", store2));
worker.log(format!("task triggered by schedule '{}'", event_str));
- if let Ok(failed_dirs) = verify_all_backups(datastore, worker.clone()) {
+ let result = try_block!({
+ let failed_dirs = verify_all_backups(datastore, worker.clone())?;
if failed_dirs.len() > 0 {
worker.log("Failed to verify following snapshots:");
for dir in failed_dirs {
worker.log(format!("\t{}", dir));
}
- bail!("verification failed - please check the log for details");
+ Err(format_err!("verification failed - please check the log for details"))
+ } else {
+ Ok(())
}
+ });
+
+ let status = worker.create_state(&result);
+
+ if let Err(err) = job.finish(status) {
+ eprintln!("could not finish job state for {}: {}", worker_type, err);
}
- Ok(())
+
+ result
},
) {
eprintln!("unable to start verification on store {} - {}", store, err);