use serde_json::Value;
use ::serde::{Deserialize, Serialize};
-use proxmox::api::{api, Router, RpcEnvironment};
+use proxmox::api::{api, Permission, Router, RpcEnvironment};
use proxmox::tools::fs::open_file_locked;
use crate::api2::types::*;
+
+use crate::config::acl::{
+ PRIV_DATASTORE_AUDIT,
+ PRIV_DATASTORE_BACKUP,
+ PRIV_DATASTORE_MODIFY,
+ PRIV_DATASTORE_PRUNE,
+ PRIV_REMOTE_AUDIT,
+ PRIV_REMOTE_READ,
+};
+
+use crate::config::cached_user_info::CachedUserInfo;
use crate::config::sync::{self, SyncJobConfig};
-// fixme: add access permissions
+/// Check whether `auth_id` may read (list/view) the given sync job.
+///
+/// Requires `Datastore.Audit` on the target datastore *and*
+/// `Remote.Audit` on the source remote. Note the remote check is done
+/// on the remote node path only ("remote/<name>"), not on the specific
+/// remote store — intentionally coarser than the modify check.
+pub fn check_sync_job_read_access(
+ user_info: &CachedUserInfo,
+ auth_id: &Authid,
+ job: &SyncJobConfig,
+) -> bool {
+ // target datastore: Datastore.Audit is mandatory
+ let datastore_privs = user_info.lookup_privs(&auth_id, &["datastore", &job.store]);
+ if datastore_privs & PRIV_DATASTORE_AUDIT == 0 {
+ return false;
+ }
+
+ // source remote: Remote.Audit is mandatory
+ let remote_privs = user_info.lookup_privs(&auth_id, &["remote", &job.remote]);
+ remote_privs & PRIV_REMOTE_AUDIT != 0
+}
+
+// user can run the corresponding pull job
+/// Check whether `auth_id` may create/modify/delete (and thereby run)
+/// the given sync job.
+///
+/// Requires `Datastore.Backup` on the target datastore and
+/// `Remote.Read` on the source remote store. Additionally:
+/// - `remove_vanished == Some(true)` also requires `Datastore.Prune`
+/// - an owner other than the requesting user also requires
+///   `Datastore.Modify` (same privilege as changing backup ownership)
+pub fn check_sync_job_modify_access(
+ user_info: &CachedUserInfo,
+ auth_id: &Authid,
+ job: &SyncJobConfig,
+) -> bool {
+ // target datastore: Datastore.Backup is mandatory
+ let datastore_privs = user_info.lookup_privs(&auth_id, &["datastore", &job.store]);
+ if datastore_privs & PRIV_DATASTORE_BACKUP == 0 {
+ return false;
+ }
+
+ // deleting vanished snapshots on the target needs Datastore.Prune
+ if let Some(true) = job.remove_vanished {
+ if datastore_privs & PRIV_DATASTORE_PRUNE == 0 {
+ return false;
+ }
+ }
+
+ // the job owner counts as "the user themselves" if it is either the
+ // auth id itself, or a token belonging to the (non-token) user
+ let correct_owner = match job.owner {
+ Some(ref owner) => {
+ owner == auth_id
+ || (owner.is_token()
+ && !auth_id.is_token()
+ && owner.user() == auth_id.user())
+ },
+ // default sync owner
+ None => auth_id == Authid::root_auth_id(),
+ };
+
+ // same permission as changing ownership after syncing
+ if !correct_owner && datastore_privs & PRIV_DATASTORE_MODIFY == 0 {
+ return false;
+ }
+
+ // source remote store: Remote.Read is mandatory (checked on the full
+ // "remote/<name>/<store>" path, unlike the read-access check)
+ let remote_privs = user_info.lookup_privs(&auth_id, &["remote", &job.remote, &job.remote_store]);
+ remote_privs & PRIV_REMOTE_READ != 0
+}
#[api(
input: {
type: Array,
items: { type: sync::SyncJobConfig },
},
+ access: {
+ description: "Limited to sync job entries where user has Datastore.Audit on target datastore, and Remote.Audit on source remote.",
+ permission: &Permission::Anybody,
+ },
)]
/// List all sync jobs
pub fn list_sync_jobs(
_param: Value,
mut rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<SyncJobConfig>, Error> {
+ let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
+ let user_info = CachedUserInfo::new()?;
let (config, digest) = sync::config()?;
rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
- Ok(list)
+ let list = list
+ .into_iter()
+ .filter(|sync_job| check_sync_job_read_access(&user_info, &auth_id, &sync_job))
+ .collect();
+ Ok(list)
}
#[api(
store: {
schema: DATASTORE_SCHEMA,
},
+ owner: {
+ type: Authid,
+ optional: true,
+ },
remote: {
schema: REMOTE_ID_SCHEMA,
},
},
},
},
+ access: {
+ description: "User needs Datastore.Backup on target datastore, and Remote.Read on source remote. Additionally, remove_vanished requires Datastore.Prune, and any owner other than the user themselves requires Datastore.Modify",
+ permission: &Permission::Anybody,
+ },
)]
/// Create a new sync job.
-pub fn create_sync_job(param: Value) -> Result<(), Error> {
+pub fn create_sync_job(
+ param: Value,
+ rpcenv: &mut dyn RpcEnvironment,
+) -> Result<(), Error> {
+ let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
+ let user_info = CachedUserInfo::new()?;
- let _lock = open_file_locked(sync::SYNC_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;
+ let _lock = open_file_locked(sync::SYNC_CFG_LOCKFILE, std::time::Duration::new(10, 0), true)?;
- let sync_job: sync::SyncJobConfig = serde_json::from_value(param.clone())?;
+ let sync_job: sync::SyncJobConfig = serde_json::from_value(param)?;
+ if !check_sync_job_modify_access(&user_info, &auth_id, &sync_job) {
+ bail!("permission check failed");
+ }
let (mut config, _digest) = sync::config()?;
- if let Some(_) = config.sections.get(&sync_job.id) {
+ if config.sections.get(&sync_job.id).is_some() {
bail!("job '{}' already exists.", sync_job.id);
}
sync::save_config(&config)?;
+ crate::server::jobstate::create_state_file("syncjob", &sync_job.id)?;
+
Ok(())
}
},
},
},
- returns: {
- description: "The sync job configuration.",
- type: sync::SyncJobConfig,
+ returns: { type: sync::SyncJobConfig },
+ access: {
+ description: "Limited to sync job entries where user has Datastore.Audit on target datastore, and Remote.Audit on source remote.",
+ permission: &Permission::Anybody,
},
)]
/// Read a sync job configuration.
id: String,
mut rpcenv: &mut dyn RpcEnvironment,
) -> Result<SyncJobConfig, Error> {
+ let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
+ let user_info = CachedUserInfo::new()?;
+
let (config, digest) = sync::config()?;
let sync_job = config.lookup("sync", &id)?;
+ if !check_sync_job_read_access(&user_info, &auth_id, &sync_job) {
+ bail!("permission check failed");
+ }
+
rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
Ok(sync_job)
#[allow(non_camel_case_types)]
/// Deletable property name
pub enum DeletableProperty {
+ /// Delete the owner property.
+ owner,
/// Delete the comment property.
comment,
/// Delete the job schedule.
schema: DATASTORE_SCHEMA,
optional: true,
},
+ owner: {
+ type: Authid,
+ optional: true,
+ },
remote: {
schema: REMOTE_ID_SCHEMA,
optional: true,
},
},
},
+ access: {
+ permission: &Permission::Anybody,
+ description: "User needs Datastore.Backup on target datastore, and Remote.Read on source remote. Additionally, remove_vanished requires Datastore.Prune, and any owner other than the user themselves requires Datastore.Modify",
+ },
)]
/// Update sync job config.
pub fn update_sync_job(
id: String,
store: Option<String>,
+ owner: Option<Authid>,
remote: Option<String>,
remote_store: Option<String>,
remove_vanished: Option<bool>,
schedule: Option<String>,
delete: Option<Vec<DeletableProperty>>,
digest: Option<String>,
+ rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {
+ let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
+ let user_info = CachedUserInfo::new()?;
- let _lock = open_file_locked(sync::SYNC_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;
+ let _lock = open_file_locked(sync::SYNC_CFG_LOCKFILE, std::time::Duration::new(10, 0), true)?;
// pass/compare digest
let (mut config, expected_digest) = sync::config()?;
if let Some(delete) = delete {
for delete_prop in delete {
match delete_prop {
+ DeletableProperty::owner => { data.owner = None; },
DeletableProperty::comment => { data.comment = None; },
DeletableProperty::schedule => { data.schedule = None; },
DeletableProperty::remove_vanished => { data.remove_vanished = None; },
if let Some(store) = store { data.store = store; }
if let Some(remote) = remote { data.remote = remote; }
if let Some(remote_store) = remote_store { data.remote_store = remote_store; }
-
-
+ if let Some(owner) = owner { data.owner = Some(owner); }
+
if schedule.is_some() { data.schedule = schedule; }
if remove_vanished.is_some() { data.remove_vanished = remove_vanished; }
+ if !check_sync_job_modify_access(&user_info, &auth_id, &data) {
+ bail!("permission check failed");
+ }
+
config.set_data(&id, "sync", &data)?;
sync::save_config(&config)?;
},
},
},
+ access: {
+ permission: &Permission::Anybody,
+ description: "User needs Datastore.Backup on target datastore, and Remote.Read on source remote. Additionally, remove_vanished requires Datastore.Prune, and any owner other than the user themselves requires Datastore.Modify",
+ },
)]
/// Remove a sync job configuration
-pub fn delete_sync_job(id: String, digest: Option<String>) -> Result<(), Error> {
+pub fn delete_sync_job(
+ id: String,
+ digest: Option<String>,
+ rpcenv: &mut dyn RpcEnvironment,
+) -> Result<(), Error> {
+ let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
+ let user_info = CachedUserInfo::new()?;
- let _lock = open_file_locked(sync::SYNC_CFG_LOCKFILE, std::time::Duration::new(10, 0))?;
+ let _lock = open_file_locked(sync::SYNC_CFG_LOCKFILE, std::time::Duration::new(10, 0), true)?;
let (mut config, expected_digest) = sync::config()?;
crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
}
- match config.sections.get(&id) {
- Some(_) => { config.sections.remove(&id); },
- None => bail!("job '{}' does not exist.", id),
- }
+ match config.lookup("sync", &id) {
+ Ok(job) => {
+ if !check_sync_job_modify_access(&user_info, &auth_id, &job) {
+ bail!("permission check failed");
+ }
+ config.sections.remove(&id);
+ },
+ Err(_) => { bail!("job '{}' does not exist.", id) },
+ };
sync::save_config(&config)?;
+ crate::server::jobstate::remove_state_file("syncjob", &id)?;
+
Ok(())
}
.get(&API_METHOD_LIST_SYNC_JOBS)
.post(&API_METHOD_CREATE_SYNC_JOB)
.match_all("id", &ITEM_ROUTER);
+
+
+// Exercises check_sync_job_read_access/check_sync_job_modify_access
+// against a synthetic user.cfg/acl.cfg: root bypasses ACLs; read@pbs
+// holds audit-only privileges; write@pbs holds backup/poweruser/admin
+// privileges on different stores so each privilege gate is hit once.
+#[test]
+fn sync_job_access_test() -> Result<(), Error> {
+ let (user_cfg, _) = crate::config::user::test_cfg_from_str(r###"
+user: noperm@pbs
+
+user: read@pbs
+
+user: write@pbs
+
+"###).expect("test user.cfg is not parsable");
+ let acl_tree = crate::config::acl::AclTree::from_raw(r###"
+acl:1:/datastore/localstore1:read@pbs,write@pbs:DatastoreAudit
+acl:1:/datastore/localstore1:write@pbs:DatastoreBackup
+acl:1:/datastore/localstore2:write@pbs:DatastorePowerUser
+acl:1:/datastore/localstore3:write@pbs:DatastoreAdmin
+acl:1:/remote/remote1:read@pbs,write@pbs:RemoteAudit
+acl:1:/remote/remote1/remotestore1:write@pbs:RemoteSyncOperator
+"###).expect("test acl.cfg is not parsable");
+
+ let user_info = CachedUserInfo::test_new(user_cfg, acl_tree);
+
+ let root_auth_id = Authid::root_auth_id();
+
+ let no_perm_auth_id: Authid = "noperm@pbs".parse()?;
+ let read_auth_id: Authid = "read@pbs".parse()?;
+ let write_auth_id: Authid = "write@pbs".parse()?;
+
+ // "remote0"/"localstore0" have no ACL entries at all
+ let mut job = SyncJobConfig {
+ id: "regular".to_string(),
+ remote: "remote0".to_string(),
+ remote_store: "remotestore1".to_string(),
+ store: "localstore0".to_string(),
+ owner: Some(write_auth_id.clone()),
+ comment: None,
+ remove_vanished: None,
+ schedule: None,
+ };
+
+ // should work without ACLs
+ assert_eq!(check_sync_job_read_access(&user_info, &root_auth_id, &job), true);
+ assert_eq!(check_sync_job_modify_access(&user_info, &root_auth_id, &job), true);
+
+ // user without permissions must fail
+ assert_eq!(check_sync_job_read_access(&user_info, &no_perm_auth_id, &job), false);
+ assert_eq!(check_sync_job_modify_access(&user_info, &no_perm_auth_id, &job), false);
+
+ // reading without proper read permissions on either remote or local must fail
+ assert_eq!(check_sync_job_read_access(&user_info, &read_auth_id, &job), false);
+
+ // reading without proper read permissions on local end must fail
+ job.remote = "remote1".to_string();
+ assert_eq!(check_sync_job_read_access(&user_info, &read_auth_id, &job), false);
+
+ // reading without proper read permissions on remote end must fail
+ job.remote = "remote0".to_string();
+ job.store = "localstore1".to_string();
+ assert_eq!(check_sync_job_read_access(&user_info, &read_auth_id, &job), false);
+
+ // writing without proper write permissions on either end must fail
+ job.store = "localstore0".to_string();
+ assert_eq!(check_sync_job_modify_access(&user_info, &write_auth_id, &job), false);
+
+ // writing without proper write permissions on local end must fail
+ job.remote = "remote1".to_string();
+ assert_eq!(check_sync_job_modify_access(&user_info, &write_auth_id, &job), false);
+
+ // writing without proper write permissions on remote end must fail
+ job.remote = "remote0".to_string();
+ job.store = "localstore1".to_string();
+ assert_eq!(check_sync_job_modify_access(&user_info, &write_auth_id, &job), false);
+
+ // reset remote to one where users have access
+ job.remote = "remote1".to_string();
+
+ // user with read permission can only read, but not modify/run
+ assert_eq!(check_sync_job_read_access(&user_info, &read_auth_id, &job), true);
+ job.owner = Some(read_auth_id.clone());
+ assert_eq!(check_sync_job_modify_access(&user_info, &read_auth_id, &job), false);
+ job.owner = None;
+ assert_eq!(check_sync_job_modify_access(&user_info, &read_auth_id, &job), false);
+ job.owner = Some(write_auth_id.clone());
+ assert_eq!(check_sync_job_modify_access(&user_info, &read_auth_id, &job), false);
+
+ // user with simple write permission can modify/run
+ assert_eq!(check_sync_job_read_access(&user_info, &write_auth_id, &job), true);
+ assert_eq!(check_sync_job_modify_access(&user_info, &write_auth_id, &job), true);
+
+ // but can't modify/run with deletion
+ job.remove_vanished = Some(true);
+ assert_eq!(check_sync_job_modify_access(&user_info, &write_auth_id, &job), false);
+
+ // unless they have Datastore.Prune as well
+ job.store = "localstore2".to_string();
+ assert_eq!(check_sync_job_modify_access(&user_info, &write_auth_id, &job), true);
+
+ // changing owner is not possible
+ job.owner = Some(read_auth_id.clone());
+ assert_eq!(check_sync_job_modify_access(&user_info, &write_auth_id, &job), false);
+
+ // also not to the default 'root@pam'
+ job.owner = None;
+ assert_eq!(check_sync_job_modify_access(&user_info, &write_auth_id, &job), false);
+
+ // unless they have Datastore.Modify as well
+ job.store = "localstore3".to_string();
+ job.owner = Some(read_auth_id);
+ assert_eq!(check_sync_job_modify_access(&user_info, &write_auth_id, &job), true);
+ job.owner = None;
+ assert_eq!(check_sync_job_modify_access(&user_info, &write_auth_id, &job), true);
+
+ Ok(())
+}