use crate::api2::pull::do_sync_job;
use crate::config::sync::{self, SyncJobStatus, SyncJobConfig};
use crate::server::UPID;
-use crate::config::jobstate::{Job, JobState};
+use crate::server::jobstate::{Job, JobState};
use crate::tools::systemd::time::{
parse_calendar_event, compute_next_event};
use proxmox::api::{api, ApiMethod, Router, RpcEnvironment};
use crate::api2::types::*;
-use crate::backup::do_verification_job;
-use crate::config::jobstate::{Job, JobState};
+use crate::server::do_verification_job;
+use crate::server::jobstate::{Job, JobState};
use crate::config::verify;
use crate::config::verify::{VerificationJobConfig, VerificationJobStatus};
use serde_json::Value;
use crate::config::cached_user_info::CachedUserInfo;
use crate::config::datastore::{self, DataStoreConfig, DIR_NAME_SCHEMA};
use crate::config::acl::{PRIV_DATASTORE_ALLOCATE, PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_MODIFY};
+use crate::server::jobstate;
#[api(
input: {
datastore::save_config(&config)?;
- crate::config::jobstate::create_state_file("prune", &datastore.name)?;
- crate::config::jobstate::create_state_file("garbage_collection", &datastore.name)?;
+ jobstate::create_state_file("prune", &datastore.name)?;
+ jobstate::create_state_file("garbage_collection", &datastore.name)?;
Ok(())
}
// we want to reset the statefiles, to avoid an immediate action in some cases
// (e.g. going from monthly to weekly in the second week of the month)
if gc_schedule_changed {
- crate::config::jobstate::create_state_file("garbage_collection", &name)?;
+ jobstate::create_state_file("garbage_collection", &name)?;
}
if prune_schedule_changed {
- crate::config::jobstate::create_state_file("prune", &name)?;
+ jobstate::create_state_file("prune", &name)?;
}
Ok(())
datastore::save_config(&config)?;
// ignore errors
- let _ = crate::config::jobstate::remove_state_file("prune", &name);
- let _ = crate::config::jobstate::remove_state_file("garbage_collection", &name);
+ let _ = jobstate::remove_state_file("prune", &name);
+ let _ = jobstate::remove_state_file("garbage_collection", &name);
Ok(())
}
sync::save_config(&config)?;
- crate::config::jobstate::create_state_file("syncjob", &sync_job.id)?;
+ crate::server::jobstate::create_state_file("syncjob", &sync_job.id)?;
Ok(())
}
sync::save_config(&config)?;
- crate::config::jobstate::remove_state_file("syncjob", &id)?;
+ crate::server::jobstate::remove_state_file("syncjob", &id)?;
Ok(())
}
verify::save_config(&config)?;
- crate::config::jobstate::create_state_file("verificationjob", &verification_job.id)?;
+ crate::server::jobstate::create_state_file("verificationjob", &verification_job.id)?;
Ok(())
}
verify::save_config(&config)?;
- crate::config::jobstate::remove_state_file("verificationjob", &id)?;
+ crate::server::jobstate::remove_state_file("verificationjob", &id)?;
Ok(())
}
pub const ROUTER: Router = Router::new()
.get(&API_METHOD_LIST_VERIFICATION_JOBS)
.post(&API_METHOD_CREATE_VERIFICATION_JOB)
- .match_all("id", &ITEM_ROUTER);
\ No newline at end of file
+ .match_all("id", &ITEM_ROUTER);
use proxmox::api::api;
use proxmox::api::{ApiMethod, Router, RpcEnvironment, Permission};
-use crate::server::{WorkerTask};
+use crate::server::{WorkerTask, jobstate::Job};
use crate::backup::DataStore;
use crate::client::{HttpClient, HttpClientOptions, BackupRepository, pull::pull_store};
use crate::api2::types::*;
use crate::config::{
remote,
sync::SyncJobConfig,
- jobstate::Job,
acl::{PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_PRUNE, PRIV_REMOTE_READ},
cached_user_info::CachedUserInfo,
};
use anyhow::{bail, format_err, Error};
use crate::{
- server::WorkerTask,
api2::types::*,
- config::jobstate::Job,
- config::verify::VerificationJobConfig,
backup::{
DataStore,
DataBlob,
Ok(errors)
}
-
-/// Runs a verification job.
-pub fn do_verification_job(
- mut job: Job,
- verification_job: VerificationJobConfig,
- userid: &Userid,
- schedule: Option<String>,
-) -> Result<String, Error> {
- let datastore = DataStore::lookup_datastore(&verification_job.store)?;
-
- let mut backups_to_verify = BackupInfo::list_backups(&datastore.base_path())?;
- if verification_job.ignore_verified.unwrap_or(true) {
- backups_to_verify.retain(|backup_info| {
- let manifest = match datastore.load_manifest(&backup_info.backup_dir) {
- Ok((manifest, _)) => manifest,
- Err(_) => return false,
- };
-
- let raw_verify_state = manifest.unprotected["verify_state"].clone();
- let last_state = match serde_json::from_value::<SnapshotVerifyState>(raw_verify_state) {
- Ok(last_state) => last_state,
- Err(_) => return true,
- };
-
- let now = proxmox::tools::time::epoch_i64();
- let days_since_last_verify = (now - last_state.upid.starttime) / 86400;
- verification_job.outdated_after.is_some()
- && days_since_last_verify > verification_job.outdated_after.unwrap()
- })
- }
-
- let job_id = job.jobname().to_string();
- let worker_type = job.jobtype().to_string();
- let upid_str = WorkerTask::new_thread(
- &worker_type,
- Some(job.jobname().to_string()),
- userid.clone(),
- false,
- move |worker| {
- job.start(&worker.upid().to_string())?;
-
- task_log!(worker,"Starting datastore verify job '{}'", job_id);
- task_log!(worker,"verifying {} backups", backups_to_verify.len());
- if let Some(event_str) = schedule {
- task_log!(worker,"task triggered by schedule '{}'", event_str);
- }
-
- let verified_chunks = Arc::new(Mutex::new(HashSet::with_capacity(1024 * 16)));
- let corrupt_chunks = Arc::new(Mutex::new(HashSet::with_capacity(64)));
- let result = proxmox::try_block!({
- let mut failed_dirs: Vec<String> = Vec::new();
-
- for backup_info in backups_to_verify {
- let verification_result = verify_backup_dir(
- datastore.clone(),
- &backup_info.backup_dir,
- verified_chunks.clone(),
- corrupt_chunks.clone(),
- worker.clone(),
- worker.upid().clone()
- );
-
- if let Ok(false) = verification_result {
- failed_dirs.push(backup_info.backup_dir.to_string());
- } // otherwise successful or aborted
- }
-
- if !failed_dirs.is_empty() {
- task_log!(worker,"Failed to verify following snapshots:",);
- for dir in failed_dirs {
- task_log!(worker, "\t{}", dir)
- }
- bail!("verification failed - please check the log for details");
- }
- Ok(())
- });
-
- let status = worker.create_state(&result);
-
- match job.finish(status) {
- Err(err) => eprintln!(
- "could not finish job state for {}: {}",
- job.jobtype().to_string(),
- err
- ),
- Ok(_) => (),
- }
-
- result
- },
- )?;
- Ok(upid_str)
-}
config::update_self_signed_cert(false)?;
proxmox_backup::rrd::create_rrdb_dir()?;
- proxmox_backup::config::jobstate::create_jobstate_dir()?;
+ proxmox_backup::server::jobstate::create_jobstate_dir()?;
if let Err(err) = generate_auth_key() {
bail!("unable to generate auth key - {}", err);
use proxmox::try_block;
use proxmox::api::RpcEnvironmentType;
+use proxmox_backup::{
+ backup::DataStore,
+ server::{
+ UPID,
+ WorkerTask,
+ ApiConfig,
+ rest::*,
+ jobstate::{
+ self,
+ Job,
+ },
+ rotate_task_log_archive,
+ },
+ tools::systemd::time::{
+ parse_calendar_event,
+ compute_next_event,
+ },
+};
+
+
use proxmox_backup::api2::types::Userid;
use proxmox_backup::configdir;
use proxmox_backup::buildcfg;
use proxmox_backup::server;
-use proxmox_backup::server::{ApiConfig, rest::*};
use proxmox_backup::auth_helpers::*;
use proxmox_backup::tools::{
daemon,
};
use proxmox_backup::api2::pull::do_sync_job;
-use proxmox_backup::backup::do_verification_job;
+use proxmox_backup::server::do_verification_job;
fn main() -> Result<(), Error> {
proxmox_backup::tools::setup_safe_path_env();
async fn schedule_datastore_garbage_collection() {
- use proxmox_backup::backup::DataStore;
- use proxmox_backup::server::{UPID, WorkerTask};
- use proxmox_backup::config::{
- jobstate::{self, Job},
- datastore::{self, DataStoreConfig}
+ use proxmox_backup::config::datastore::{
+ self,
+ DataStoreConfig,
};
- use proxmox_backup::tools::systemd::time::{
- parse_calendar_event, compute_next_event};
let config = match datastore::config() {
Err(err) => {
async fn schedule_datastore_prune() {
- use proxmox_backup::backup::{
- PruneOptions, DataStore, BackupGroup, compute_prune_info};
- use proxmox_backup::server::{WorkerTask};
- use proxmox_backup::config::{
- jobstate::{self, Job},
- datastore::{self, DataStoreConfig}
+ use proxmox_backup::{
+ backup::{
+ PruneOptions,
+ BackupGroup,
+ compute_prune_info,
+ },
+ config::datastore::{
+ self,
+ DataStoreConfig,
+ },
};
- use proxmox_backup::tools::systemd::time::{
- parse_calendar_event, compute_next_event};
let config = match datastore::config() {
Err(err) => {
async fn schedule_datastore_sync_jobs() {
- use proxmox_backup::{
- config::{ sync::{self, SyncJobConfig}, jobstate::{self, Job} },
- tools::systemd::time::{ parse_calendar_event, compute_next_event },
+ use proxmox_backup::config::sync::{
+ self,
+ SyncJobConfig,
};
let config = match sync::config() {
}
async fn schedule_datastore_verify_jobs() {
- use proxmox_backup::{
- config::{verify::{self, VerificationJobConfig}, jobstate::{self, Job}},
- tools::systemd::time::{parse_calendar_event, compute_next_event},
+
+ use proxmox_backup::config::verify::{
+ self,
+ VerificationJobConfig,
};
+
let config = match verify::config() {
Err(err) => {
eprintln!("unable to read verification job config - {}", err);
}
async fn schedule_task_log_rotate() {
- use proxmox_backup::{
- config::jobstate::{self, Job},
- server::rotate_task_log_archive,
- };
- use proxmox_backup::server::WorkerTask;
- use proxmox_backup::tools::systemd::time::{
- parse_calendar_event, compute_next_event};
let worker_type = "logrotate";
let job_id = "task_archive";
pub mod acl;
pub mod cached_user_info;
pub mod datastore;
-pub mod jobstate;
pub mod network;
pub mod remote;
pub mod sync;
+++ /dev/null
-//! Generic JobState handling
-//!
-//! A 'Job' can have 3 states
-//! - Created, when a schedule was created but never executed
-//! - Started, when a job is running right now
-//! - Finished, when a job was running in the past
-//!
-//! and is identified by 2 values: jobtype and jobname (e.g. 'syncjob' and 'myfirstsyncjob')
-//!
-//! This module Provides 2 helper structs to handle those coniditons
-//! 'Job' which handles locking and writing to a file
-//! 'JobState' which is the actual state
-//!
-//! an example usage would be
-//! ```no_run
-//! # use anyhow::{bail, Error};
-//! # use proxmox_backup::server::TaskState;
-//! # use proxmox_backup::config::jobstate::*;
-//! # fn some_code() -> TaskState { TaskState::OK { endtime: 0 } }
-//! # fn code() -> Result<(), Error> {
-//! // locks the correct file under /var/lib
-//! // or fails if someone else holds the lock
-//! let mut job = match Job::new("jobtype", "jobname") {
-//! Ok(job) => job,
-//! Err(err) => bail!("could not lock jobstate"),
-//! };
-//!
-//! // job holds the lock, we can start it
-//! job.start("someupid")?;
-//! // do something
-//! let task_state = some_code();
-//! job.finish(task_state)?;
-//!
-//! // release the lock
-//! drop(job);
-//! # Ok(())
-//! # }
-//!
-//! ```
-use std::fs::File;
-use std::path::{Path, PathBuf};
-use std::time::Duration;
-
-use anyhow::{bail, format_err, Error};
-use proxmox::tools::fs::{
- create_path, file_read_optional_string, open_file_locked, replace_file, CreateOptions,
-};
-use serde::{Deserialize, Serialize};
-
-use crate::server::{upid_read_status, worker_is_active_local, TaskState, UPID};
-
-#[serde(rename_all = "kebab-case")]
-#[derive(Serialize, Deserialize)]
-/// Represents the State of a specific Job
-pub enum JobState {
- /// A job was created at 'time', but never started/finished
- Created { time: i64 },
- /// The Job was last started in 'upid',
- Started { upid: String },
- /// The Job was last started in 'upid', which finished with 'state'
- Finished { upid: String, state: TaskState },
-}
-
-/// Represents a Job and holds the correct lock
-pub struct Job {
- jobtype: String,
- jobname: String,
- /// The State of the job
- pub state: JobState,
- _lock: File,
-}
-
-const JOB_STATE_BASEDIR: &str = "/var/lib/proxmox-backup/jobstates";
-
-/// Create jobstate stat dir with correct permission
-pub fn create_jobstate_dir() -> Result<(), Error> {
- let backup_user = crate::backup::backup_user()?;
- let opts = CreateOptions::new()
- .owner(backup_user.uid)
- .group(backup_user.gid);
-
- create_path(JOB_STATE_BASEDIR, None, Some(opts))
- .map_err(|err: Error| format_err!("unable to create rrdb stat dir - {}", err))?;
-
- Ok(())
-}
-
-fn get_path(jobtype: &str, jobname: &str) -> PathBuf {
- let mut path = PathBuf::from(JOB_STATE_BASEDIR);
- path.push(format!("{}-{}.json", jobtype, jobname));
- path
-}
-
-fn get_lock<P>(path: P) -> Result<File, Error>
-where
- P: AsRef<Path>,
-{
- let mut path = path.as_ref().to_path_buf();
- path.set_extension("lck");
- let lock = open_file_locked(&path, Duration::new(10, 0), true)?;
- let backup_user = crate::backup::backup_user()?;
- nix::unistd::chown(&path, Some(backup_user.uid), Some(backup_user.gid))?;
- Ok(lock)
-}
-
-/// Removes the statefile of a job, this is useful if we delete a job
-pub fn remove_state_file(jobtype: &str, jobname: &str) -> Result<(), Error> {
- let mut path = get_path(jobtype, jobname);
- let _lock = get_lock(&path)?;
- std::fs::remove_file(&path).map_err(|err| {
- format_err!(
- "cannot remove statefile for {} - {}: {}",
- jobtype,
- jobname,
- err
- )
- })?;
- path.set_extension("lck");
- // ignore errors
- let _ = std::fs::remove_file(&path).map_err(|err| {
- format_err!(
- "cannot remove lockfile for {} - {}: {}",
- jobtype,
- jobname,
- err
- )
- });
- Ok(())
-}
-
-/// Creates the statefile with the state 'Created'
-/// overwrites if it exists already
-pub fn create_state_file(jobtype: &str, jobname: &str) -> Result<(), Error> {
- let mut job = Job::new(jobtype, jobname)?;
- job.write_state()
-}
-
-/// Returns the last run time of a job by reading the statefile
-/// Note that this is not locked
-pub fn last_run_time(jobtype: &str, jobname: &str) -> Result<i64, Error> {
- match JobState::load(jobtype, jobname)? {
- JobState::Created { time } => Ok(time),
- JobState::Started { upid } | JobState::Finished { upid, .. } => {
- let upid: UPID = upid
- .parse()
- .map_err(|err| format_err!("could not parse upid from state: {}", err))?;
- Ok(upid.starttime)
- }
- }
-}
-
-impl JobState {
- /// Loads and deserializes the jobstate from type and name.
- /// When the loaded state indicates a started UPID,
- /// we go and check if it has already stopped, and
- /// returning the correct state.
- ///
- /// This does not update the state in the file.
- pub fn load(jobtype: &str, jobname: &str) -> Result<Self, Error> {
- if let Some(state) = file_read_optional_string(get_path(jobtype, jobname))? {
- match serde_json::from_str(&state)? {
- JobState::Started { upid } => {
- let parsed: UPID = upid
- .parse()
- .map_err(|err| format_err!("error parsing upid: {}", err))?;
-
- if !worker_is_active_local(&parsed) {
- let state = upid_read_status(&parsed)
- .map_err(|err| format_err!("error reading upid log status: {}", err))?;
-
- Ok(JobState::Finished { upid, state })
- } else {
- Ok(JobState::Started { upid })
- }
- }
- other => Ok(other),
- }
- } else {
- Ok(JobState::Created {
- time: proxmox::tools::time::epoch_i64() - 30,
- })
- }
- }
-}
-
-impl Job {
- /// Creates a new instance of a job with the correct lock held
- /// (will be hold until the job is dropped again).
- ///
- /// This does not load the state from the file, to do that,
- /// 'load' must be called
- pub fn new(jobtype: &str, jobname: &str) -> Result<Self, Error> {
- let path = get_path(jobtype, jobname);
-
- let _lock = get_lock(&path)?;
-
- Ok(Self {
- jobtype: jobtype.to_string(),
- jobname: jobname.to_string(),
- state: JobState::Created {
- time: proxmox::tools::time::epoch_i64(),
- },
- _lock,
- })
- }
-
- /// Start the job and update the statefile accordingly
- /// Fails if the job was already started
- pub fn start(&mut self, upid: &str) -> Result<(), Error> {
- match self.state {
- JobState::Started { .. } => {
- bail!("cannot start job that is started!");
- }
- _ => {}
- }
-
- self.state = JobState::Started {
- upid: upid.to_string(),
- };
-
- self.write_state()
- }
-
- /// Finish the job and update the statefile accordingly with the given taskstate
- /// Fails if the job was not yet started
- pub fn finish(&mut self, state: TaskState) -> Result<(), Error> {
- let upid = match &self.state {
- JobState::Created { .. } => bail!("cannot finish when not started"),
- JobState::Started { upid } => upid,
- JobState::Finished { upid, .. } => upid,
- }
- .to_string();
-
- self.state = JobState::Finished { upid, state };
-
- self.write_state()
- }
-
- pub fn jobtype(&self) -> &str {
- &self.jobtype
- }
-
- pub fn jobname(&self) -> &str {
- &self.jobname
- }
-
- fn write_state(&mut self) -> Result<(), Error> {
- let serialized = serde_json::to_string(&self.state)?;
- let path = get_path(&self.jobtype, &self.jobname);
-
- let backup_user = crate::backup::backup_user()?;
- let mode = nix::sys::stat::Mode::from_bits_truncate(0o0644);
- // set the correct owner/group/permissions while saving file
- // owner(rw) = backup, group(r)= backup
- let options = CreateOptions::new()
- .perm(mode)
- .owner(backup_user.uid)
- .group(backup_user.gid);
-
- replace_file(path, serialized.as_bytes(), options)
- }
-}
#[macro_use]
pub mod rest;
+pub mod jobstate;
+
+mod verify_job;
+pub use verify_job::*;
--- /dev/null
+//! Generic JobState handling
+//!
+//! A 'Job' can have 3 states
+//! - Created, when a schedule was created but never executed
+//! - Started, when a job is running right now
+//! - Finished, when a job was running in the past
+//!
+//! and is identified by 2 values: jobtype and jobname (e.g. 'syncjob' and 'myfirstsyncjob')
+//!
+//! This module provides 2 helper structs to handle those conditions
+//! 'Job' which handles locking and writing to a file
+//! 'JobState' which is the actual state
+//!
+//! an example usage would be
+//! ```no_run
+//! # use anyhow::{bail, Error};
+//! # use proxmox_backup::server::TaskState;
+//! # use proxmox_backup::server::jobstate::*;
+//! # fn some_code() -> TaskState { TaskState::OK { endtime: 0 } }
+//! # fn code() -> Result<(), Error> {
+//! // locks the correct file under /var/lib
+//! // or fails if someone else holds the lock
+//! let mut job = match Job::new("jobtype", "jobname") {
+//! Ok(job) => job,
+//! Err(err) => bail!("could not lock jobstate"),
+//! };
+//!
+//! // job holds the lock, we can start it
+//! job.start("someupid")?;
+//! // do something
+//! let task_state = some_code();
+//! job.finish(task_state)?;
+//!
+//! // release the lock
+//! drop(job);
+//! # Ok(())
+//! # }
+//!
+//! ```
+use std::fs::File;
+use std::path::{Path, PathBuf};
+use std::time::Duration;
+
+use anyhow::{bail, format_err, Error};
+use proxmox::tools::fs::{
+ create_path, file_read_optional_string, open_file_locked, replace_file, CreateOptions,
+};
+use serde::{Deserialize, Serialize};
+
+use crate::server::{upid_read_status, worker_is_active_local, TaskState, UPID};
+
+#[serde(rename_all = "kebab-case")]
+#[derive(Serialize, Deserialize)]
+/// Represents the State of a specific Job
+pub enum JobState {
+ /// A job was created at 'time', but never started/finished
+ Created { time: i64 },
+ /// The Job was last started in 'upid',
+ Started { upid: String },
+ /// The Job was last started in 'upid', which finished with 'state'
+ Finished { upid: String, state: TaskState },
+}
+
+/// Represents a Job and holds the correct lock
+pub struct Job {
+ jobtype: String,
+ jobname: String,
+ /// The State of the job
+ pub state: JobState,
+ _lock: File,
+}
+
+const JOB_STATE_BASEDIR: &str = "/var/lib/proxmox-backup/jobstates";
+
+/// Create jobstate stat dir with correct permission
+pub fn create_jobstate_dir() -> Result<(), Error> {
+ let backup_user = crate::backup::backup_user()?;
+ let opts = CreateOptions::new()
+ .owner(backup_user.uid)
+ .group(backup_user.gid);
+
+ create_path(JOB_STATE_BASEDIR, None, Some(opts))
+ .map_err(|err: Error| format_err!("unable to create rrdb stat dir - {}", err))?;
+
+ Ok(())
+}
+
+fn get_path(jobtype: &str, jobname: &str) -> PathBuf {
+ let mut path = PathBuf::from(JOB_STATE_BASEDIR);
+ path.push(format!("{}-{}.json", jobtype, jobname));
+ path
+}
+
+fn get_lock<P>(path: P) -> Result<File, Error>
+where
+ P: AsRef<Path>,
+{
+ let mut path = path.as_ref().to_path_buf();
+ path.set_extension("lck");
+ let lock = open_file_locked(&path, Duration::new(10, 0), true)?;
+ let backup_user = crate::backup::backup_user()?;
+ nix::unistd::chown(&path, Some(backup_user.uid), Some(backup_user.gid))?;
+ Ok(lock)
+}
+
+/// Removes the statefile of a job, this is useful if we delete a job
+pub fn remove_state_file(jobtype: &str, jobname: &str) -> Result<(), Error> {
+ let mut path = get_path(jobtype, jobname);
+ let _lock = get_lock(&path)?;
+ std::fs::remove_file(&path).map_err(|err| {
+ format_err!(
+ "cannot remove statefile for {} - {}: {}",
+ jobtype,
+ jobname,
+ err
+ )
+ })?;
+ path.set_extension("lck");
+ // ignore errors
+ let _ = std::fs::remove_file(&path).map_err(|err| {
+ format_err!(
+ "cannot remove lockfile for {} - {}: {}",
+ jobtype,
+ jobname,
+ err
+ )
+ });
+ Ok(())
+}
+
+/// Creates the statefile with the state 'Created'
+/// overwrites if it exists already
+pub fn create_state_file(jobtype: &str, jobname: &str) -> Result<(), Error> {
+ let mut job = Job::new(jobtype, jobname)?;
+ job.write_state()
+}
+
+/// Returns the last run time of a job by reading the statefile
+/// Note that this is not locked
+pub fn last_run_time(jobtype: &str, jobname: &str) -> Result<i64, Error> {
+ match JobState::load(jobtype, jobname)? {
+ JobState::Created { time } => Ok(time),
+ JobState::Started { upid } | JobState::Finished { upid, .. } => {
+ let upid: UPID = upid
+ .parse()
+ .map_err(|err| format_err!("could not parse upid from state: {}", err))?;
+ Ok(upid.starttime)
+ }
+ }
+}
+
+impl JobState {
+ /// Loads and deserializes the jobstate from type and name.
+ /// When the loaded state indicates a started UPID,
+ /// we go and check if it has already stopped, and
+ /// returning the correct state.
+ ///
+ /// This does not update the state in the file.
+ pub fn load(jobtype: &str, jobname: &str) -> Result<Self, Error> {
+ if let Some(state) = file_read_optional_string(get_path(jobtype, jobname))? {
+ match serde_json::from_str(&state)? {
+ JobState::Started { upid } => {
+ let parsed: UPID = upid
+ .parse()
+ .map_err(|err| format_err!("error parsing upid: {}", err))?;
+
+ if !worker_is_active_local(&parsed) {
+ let state = upid_read_status(&parsed)
+ .map_err(|err| format_err!("error reading upid log status: {}", err))?;
+
+ Ok(JobState::Finished { upid, state })
+ } else {
+ Ok(JobState::Started { upid })
+ }
+ }
+ other => Ok(other),
+ }
+ } else {
+ Ok(JobState::Created {
+ time: proxmox::tools::time::epoch_i64() - 30,
+ })
+ }
+ }
+}
+
+impl Job {
+ /// Creates a new instance of a job with the correct lock held
+ /// (will be hold until the job is dropped again).
+ ///
+ /// This does not load the state from the file, to do that,
+ /// 'load' must be called
+ pub fn new(jobtype: &str, jobname: &str) -> Result<Self, Error> {
+ let path = get_path(jobtype, jobname);
+
+ let _lock = get_lock(&path)?;
+
+ Ok(Self {
+ jobtype: jobtype.to_string(),
+ jobname: jobname.to_string(),
+ state: JobState::Created {
+ time: proxmox::tools::time::epoch_i64(),
+ },
+ _lock,
+ })
+ }
+
+ /// Start the job and update the statefile accordingly
+ /// Fails if the job was already started
+ pub fn start(&mut self, upid: &str) -> Result<(), Error> {
+ match self.state {
+ JobState::Started { .. } => {
+ bail!("cannot start job that is started!");
+ }
+ _ => {}
+ }
+
+ self.state = JobState::Started {
+ upid: upid.to_string(),
+ };
+
+ self.write_state()
+ }
+
+ /// Finish the job and update the statefile accordingly with the given taskstate
+ /// Fails if the job was not yet started
+ pub fn finish(&mut self, state: TaskState) -> Result<(), Error> {
+ let upid = match &self.state {
+ JobState::Created { .. } => bail!("cannot finish when not started"),
+ JobState::Started { upid } => upid,
+ JobState::Finished { upid, .. } => upid,
+ }
+ .to_string();
+
+ self.state = JobState::Finished { upid, state };
+
+ self.write_state()
+ }
+
+ pub fn jobtype(&self) -> &str {
+ &self.jobtype
+ }
+
+ pub fn jobname(&self) -> &str {
+ &self.jobname
+ }
+
+ fn write_state(&mut self) -> Result<(), Error> {
+ let serialized = serde_json::to_string(&self.state)?;
+ let path = get_path(&self.jobtype, &self.jobname);
+
+ let backup_user = crate::backup::backup_user()?;
+ let mode = nix::sys::stat::Mode::from_bits_truncate(0o0644);
+ // set the correct owner/group/permissions while saving file
+ // owner(rw) = backup, group(r)= backup
+ let options = CreateOptions::new()
+ .perm(mode)
+ .owner(backup_user.uid)
+ .group(backup_user.gid);
+
+ replace_file(path, serialized.as_bytes(), options)
+ }
+}