) -> Result<bool, Error> {
let user_info = CachedUserInfo::new()?;
- if !user_info.is_active_user(&userid) {
+ let auth_id = Authid::from(userid.clone());
+ if !user_info.is_active_auth_id(&auth_id) {
bail!("user account disabled or expired.");
}
path_vec.push(part);
}
}
-
- user_info.check_privs(userid, &path_vec, *privilege, false)?;
+ user_info.check_privs(&auth_id, &path_vec, *privilege, false)?;
return Ok(false);
}
}
) -> Result<Value, Error> {
let current_user: Userid = rpcenv
- .get_user()
+ .get_auth_id()
.ok_or_else(|| format_err!("unknown user"))?
.parse()?;
+ let current_auth = Authid::from(current_user.clone());
let mut allowed = userid == current_user;
if !allowed {
let user_info = CachedUserInfo::new()?;
- let privs = user_info.lookup_privs(¤t_user, &[]);
+ let privs = user_info.lookup_privs(¤t_auth, &[]);
if (privs & PRIV_PERMISSIONS_MODIFY) != 0 { allowed = true; }
}
optional: true,
schema: ACL_PROPAGATE_SCHEMA,
},
- userid: {
+ auth_id: {
optional: true,
- type: Userid,
+ type: Authid,
},
group: {
optional: true,
path: String,
role: String,
propagate: Option<bool>,
- userid: Option<Userid>,
+ auth_id: Option<Authid>,
group: Option<String>,
delete: Option<bool>,
digest: Option<String>,
if let Some(ref _group) = group {
bail!("parameter 'group' - groups are currently not supported.");
- } else if let Some(ref userid) = userid {
+ } else if let Some(ref auth_id) = auth_id {
if !delete { // Note: we allow to delete non-existent users
let user_cfg = crate::config::user::cached_config()?;
- if user_cfg.sections.get(&userid.to_string()).is_none() {
- bail!("no such user.");
+ if user_cfg.sections.get(&auth_id.to_string()).is_none() {
+ bail!(format!("no such {}.",
+ if auth_id.is_token() { "API token" } else { "user" }));
}
}
} else {
acl::check_acl_path(&path)?;
}
- if let Some(userid) = userid {
+ if let Some(auth_id) = auth_id {
if delete {
- tree.delete_user_role(&path, &userid, &role);
+ tree.delete_user_role(&path, &auth_id, &role);
} else {
- tree.insert_user_role(&path, &userid, &role, propagate);
+ tree.insert_user_role(&path, &auth_id, &role, propagate);
}
} else if let Some(group) = group {
if delete {
let (config, digest) = user::config()?;
- let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+ // intentionally user only for now
+ let userid: Userid = rpcenv.get_auth_id().unwrap().parse()?;
+ let auth_id = Authid::from(userid.clone());
+
let user_info = CachedUserInfo::new()?;
- let top_level_privs = user_info.lookup_privs(&userid, &["access", "users"]);
+ let top_level_privs = user_info.lookup_privs(&auth_id, &["access", "users"]);
let top_level_allowed = (top_level_privs & PRIV_SYS_AUDIT) != 0;
let filter_by_privs = |user: &user::User| {
fn check_backup_owner(
store: &DataStore,
group: &BackupGroup,
- userid: &Userid,
+ auth_id: &Authid,
) -> Result<(), Error> {
let owner = store.get_owner(group)?;
- if &owner != userid {
- bail!("backup owner check failed ({} != {})", userid, owner);
+ if &owner != auth_id {
+ bail!("backup owner check failed ({} != {})", auth_id, owner);
}
Ok(())
}
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<GroupListItem>, Error> {
- let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+ let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
- let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
+ let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
let datastore = DataStore::lookup_datastore(&store)?;
let list_all = (user_privs & PRIV_DATASTORE_AUDIT) != 0;
let owner = datastore.get_owner(group)?;
- if !list_all && owner != userid {
+ if !list_all && owner != auth_id {
continue;
}
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<BackupContent>, Error> {
- let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+ let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
- let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
+ let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
let datastore = DataStore::lookup_datastore(&store)?;
let snapshot = BackupDir::new(backup_type, backup_id, backup_time)?;
let allowed = (user_privs & (PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ)) != 0;
- if !allowed { check_backup_owner(&datastore, snapshot.group(), &userid)?; }
+ if !allowed { check_backup_owner(&datastore, snapshot.group(), &auth_id)?; }
let info = BackupInfo::new(&datastore.base_path(), snapshot)?;
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
- let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+ let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
- let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
+ let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
let snapshot = BackupDir::new(backup_type, backup_id, backup_time)?;
let datastore = DataStore::lookup_datastore(&store)?;
let allowed = (user_privs & PRIV_DATASTORE_MODIFY) != 0;
- if !allowed { check_backup_owner(&datastore, snapshot.group(), &userid)?; }
+ if !allowed { check_backup_owner(&datastore, snapshot.group(), &auth_id)?; }
datastore.remove_backup_dir(&snapshot, false)?;
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<SnapshotListItem>, Error> {
- let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+ let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
- let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
+ let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
let datastore = DataStore::lookup_datastore(&store)?;
let list_all = (user_privs & PRIV_DATASTORE_AUDIT) != 0;
let owner = datastore.get_owner(group)?;
- if !list_all && owner != userid {
+ if !list_all && owner != auth_id {
continue;
}
_ => bail!("parameters do not specify a backup group or snapshot"),
}
- let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+ let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
let upid_str = WorkerTask::new_thread(
worker_type,
Some(worker_id.clone()),
- userid,
+ auth_id,
to_stdout,
move |worker| {
let verified_chunks = Arc::new(Mutex::new(HashSet::with_capacity(1024*16)));
let backup_type = tools::required_string_param(¶m, "backup-type")?;
let backup_id = tools::required_string_param(¶m, "backup-id")?;
- let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+ let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
- let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
+ let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
let dry_run = param["dry-run"].as_bool().unwrap_or(false);
let datastore = DataStore::lookup_datastore(&store)?;
let allowed = (user_privs & PRIV_DATASTORE_MODIFY) != 0;
- if !allowed { check_backup_owner(&datastore, &group, &userid)?; }
+ if !allowed { check_backup_owner(&datastore, &group, &auth_id)?; }
let prune_options = PruneOptions {
keep_last: param["keep-last"].as_u64(),
// We use a WorkerTask just to have a task log, but run synchrounously
- let worker = WorkerTask::new("prune", Some(worker_id), Userid::root_userid().clone(), true)?;
+ let worker = WorkerTask::new("prune", Some(worker_id), auth_id.clone(), true)?;
if keep_all {
worker.log("No prune selection - keeping all files.");
) -> Result<Value, Error> {
let datastore = DataStore::lookup_datastore(&store)?;
+ let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
println!("Starting garbage collection on store {}", store);
let upid_str = WorkerTask::new_thread(
"garbage_collection",
Some(store.clone()),
- Userid::root_userid().clone(),
+ auth_id.clone(),
to_stdout,
move |worker| {
worker.log(format!("starting garbage collection on store {}", store));
let (config, _digest) = datastore::config()?;
- let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+ let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
let mut list = Vec::new();
for (store, (_, data)) in &config.sections {
- let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
+ let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
let allowed = (user_privs & (PRIV_DATASTORE_AUDIT| PRIV_DATASTORE_BACKUP)) != 0;
if allowed {
let mut entry = json!({ "store": store });
let store = tools::required_string_param(¶m, "store")?;
let datastore = DataStore::lookup_datastore(store)?;
- let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+ let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
- let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
+ let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
let file_name = tools::required_string_param(¶m, "file-name")?.to_owned();
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
- if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }
+ if !allowed { check_backup_owner(&datastore, backup_dir.group(), &auth_id)?; }
println!("Download {} from {} ({}/{})", file_name, store, backup_dir, file_name);
let store = tools::required_string_param(¶m, "store")?;
let datastore = DataStore::lookup_datastore(store)?;
- let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+ let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
- let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
+ let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
let file_name = tools::required_string_param(¶m, "file-name")?.to_owned();
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
- if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }
+ if !allowed { check_backup_owner(&datastore, backup_dir.group(), &auth_id)?; }
let (manifest, files) = read_backup_index(&datastore, &backup_dir)?;
for file in files {
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
- let userid: Userid = rpcenv.get_user().unwrap().parse()?;
- check_backup_owner(&datastore, backup_dir.group(), &userid)?;
+ let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
+ check_backup_owner(&datastore, backup_dir.group(), &auth_id)?;
let mut path = datastore.base_path();
path.push(backup_dir.relative_path());
) -> Result<Value, Error> {
let datastore = DataStore::lookup_datastore(&store)?;
- let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+ let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
- let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
+ let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
- if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }
+ if !allowed { check_backup_owner(&datastore, backup_dir.group(), &auth_id)?; }
let file_name = CATALOG_NAME;
let store = tools::required_string_param(¶m, "store")?;
let datastore = DataStore::lookup_datastore(&store)?;
- let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+ let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
- let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
+ let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
let filepath = tools::required_string_param(¶m, "filepath")?.to_owned();
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
- if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }
+ if !allowed { check_backup_owner(&datastore, backup_dir.group(), &auth_id)?; }
let mut components = base64::decode(&filepath)?;
if components.len() > 0 && components[0] == '/' as u8 {
) -> Result<String, Error> {
let datastore = DataStore::lookup_datastore(&store)?;
- let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+ let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
- let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
+ let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
- if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }
+ if !allowed { check_backup_owner(&datastore, backup_dir.group(), &auth_id)?; }
let (manifest, _) = datastore.load_manifest(&backup_dir)?;
) -> Result<(), Error> {
let datastore = DataStore::lookup_datastore(&store)?;
- let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+ let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
- let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
+ let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
- if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }
+ if !allowed { check_backup_owner(&datastore, backup_dir.group(), &auth_id)?; }
datastore.update_manifest(&backup_dir,|manifest| {
manifest.unprotected["notes"] = notes.into();
schema: BACKUP_ID_SCHEMA,
},
"new-owner": {
- type: Userid,
+ type: Authid,
},
},
},
store: String,
backup_type: String,
backup_id: String,
- new_owner: Userid,
+ new_owner: Authid,
_rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {
let user_info = CachedUserInfo::new()?;
- if !user_info.is_active_user(&new_owner) {
- bail!("user '{}' is inactive or non-existent", new_owner);
+ if !user_info.is_active_auth_id(&new_owner) {
+ bail!("{} '{}' is inactive or non-existent",
+ if new_owner.is_token() {
+ "API token".to_string()
+ } else {
+ "user".to_string()
+ },
+ new_owner);
}
datastore.set_owner(&backup_group, &new_owner, true)?;
let (config, _digest) = sync::config()?;
let sync_job: SyncJobConfig = config.lookup("sync", &id)?;
- let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+ let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let job = Job::new("syncjob", &id)?;
- let upid_str = do_sync_job(job, sync_job, &userid, None)?;
+ let upid_str = do_sync_job(job, sync_job, &auth_id, None)?;
Ok(upid_str)
}
let (config, _digest) = verify::config()?;
let verification_job: VerificationJobConfig = config.lookup("verification", &id)?;
- let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+ let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let job = Job::new("verificationjob", &id)?;
- let upid_str = do_verification_job(job, verification_job, &userid, None)?;
+ let upid_str = do_verification_job(job, verification_job, &auth_id, None)?;
Ok(upid_str)
}
let debug = param["debug"].as_bool().unwrap_or(false);
let benchmark = param["benchmark"].as_bool().unwrap_or(false);
- let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+ let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let store = tools::required_string_param(¶m, "store")?.to_owned();
let user_info = CachedUserInfo::new()?;
- user_info.check_privs(&userid, &["datastore", &store], PRIV_DATASTORE_BACKUP, false)?;
+ user_info.check_privs(&auth_id, &["datastore", &store], PRIV_DATASTORE_BACKUP, false)?;
let datastore = DataStore::lookup_datastore(&store)?;
};
// lock backup group to only allow one backup per group at a time
- let (owner, _group_guard) = datastore.create_locked_backup_group(&backup_group, &userid)?;
+ let (owner, _group_guard) = datastore.create_locked_backup_group(&backup_group, &auth_id)?;
// permission check
- if owner != userid && worker_type != "benchmark" {
+ if owner != auth_id && worker_type != "benchmark" {
// only the owner is allowed to create additional snapshots
- bail!("backup owner check failed ({} != {})", userid, owner);
+ bail!("backup owner check failed ({} != {})", auth_id, owner);
}
let last_backup = {
if !is_new { bail!("backup directory already exists."); }
- WorkerTask::spawn(worker_type, Some(worker_id), userid.clone(), true, move |worker| {
+ WorkerTask::spawn(worker_type, Some(worker_id), auth_id.clone(), true, move |worker| {
let mut env = BackupEnvironment::new(
- env_type, userid, worker.clone(), datastore, backup_dir);
+ env_type, auth_id, worker.clone(), datastore, backup_dir);
env.debug = debug;
env.last_backup = last_backup;
use proxmox::tools::fs::{replace_file, CreateOptions};
use proxmox::api::{RpcEnvironment, RpcEnvironmentType};
-use crate::api2::types::Userid;
+use crate::api2::types::Authid;
use crate::backup::*;
use crate::server::WorkerTask;
use crate::server::formatter::*;
pub struct BackupEnvironment {
env_type: RpcEnvironmentType,
result_attributes: Value,
- user: Userid,
+ auth_id: Authid,
pub debug: bool,
pub formatter: &'static OutputFormatter,
pub worker: Arc<WorkerTask>,
impl BackupEnvironment {
pub fn new(
env_type: RpcEnvironmentType,
- user: Userid,
+ auth_id: Authid,
worker: Arc<WorkerTask>,
datastore: Arc<DataStore>,
backup_dir: BackupDir,
Self {
result_attributes: json!({}),
env_type,
- user,
+ auth_id,
worker,
datastore,
debug: false,
WorkerTask::new_thread(
"verify",
Some(worker_id),
- self.user.clone(),
+ self.auth_id.clone(),
false,
move |worker| {
worker.log("Automatically verifying newly added snapshot");
self.env_type
}
- fn set_user(&mut self, _user: Option<String>) {
- panic!("unable to change user");
+ fn set_auth_id(&mut self, _auth_id: Option<String>) {
+ panic!("unable to change auth_id");
}
- fn get_user(&self) -> Option<String> {
- Some(self.user.to_string())
+ fn get_auth_id(&self) -> Option<String> {
+ Some(self.auth_id.to_string())
}
}
let (config, digest) = datastore::config()?;
- let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+ let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
let list:Vec<DataStoreConfig> = config.convert_to_typed_array("datastore")?;
let filter_by_privs = |store: &DataStoreConfig| {
- let user_privs = user_info.lookup_privs(&userid, &["datastore", &store.name]);
+ let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store.name]);
(user_privs & PRIV_DATASTORE_AUDIT) != 0
};
default: 8007,
},
userid: {
- type: Userid,
+ type: Authid,
},
password: {
schema: remote::REMOTE_PASSWORD_SCHEMA,
},
userid: {
optional: true,
- type: Userid,
+ type: Authid,
},
password: {
optional: true,
cmd: Option<String>,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
+ // intentionally user only for now
let userid: Userid = rpcenv
- .get_user()
+ .get_auth_id()
.ok_or_else(|| format_err!("unknown user"))?
.parse()?;
+ let auth_id = Authid::from(userid.clone());
if userid.realm() != "pam" {
bail!("only pam users can use the console");
let upid = WorkerTask::spawn(
"termproxy",
None,
- userid,
+ auth_id,
false,
move |worker| async move {
// move inside the worker so that it survives and does not close the port
rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {
async move {
- let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+ // intentionally user only for now
+ let userid: Userid = rpcenv.get_auth_id().unwrap().parse()?;
let ticket = tools::required_string_param(¶m, "vncticket")?;
let port: u16 = tools::required_integer_param(¶m, "port")? as u16;
use crate::tools::http;
use crate::config::acl::{PRIV_SYS_AUDIT, PRIV_SYS_MODIFY};
-use crate::api2::types::{APTUpdateInfo, NODE_SCHEMA, Userid, UPID_SCHEMA};
+use crate::api2::types::{Authid, APTUpdateInfo, NODE_SCHEMA, UPID_SCHEMA};
const_regex! {
VERSION_EPOCH_REGEX = r"^\d+:";
rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> {
- let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+ let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
let quiet = quiet.unwrap_or(API_METHOD_APT_UPDATE_DATABASE_PARAM_DEFAULT_QUIET);
- let upid_str = WorkerTask::new_thread("aptupdate", None, userid, to_stdout, move |worker| {
+ let upid_str = WorkerTask::new_thread("aptupdate", None, auth_id, to_stdout, move |worker| {
if !quiet { worker.log("starting apt-get update") }
// TODO: set proxy /etc/apt/apt.conf.d/76pbsproxy like PVE
};
use crate::server::WorkerTask;
-use crate::api2::types::{Userid, UPID_SCHEMA, NODE_SCHEMA, BLOCKDEVICE_NAME_SCHEMA};
+use crate::api2::types::{Authid, UPID_SCHEMA, NODE_SCHEMA, BLOCKDEVICE_NAME_SCHEMA};
pub mod directory;
pub mod zfs;
let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
- let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+ let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let info = get_disk_usage_info(&disk, true)?;
}
let upid_str = WorkerTask::new_thread(
- "diskinit", Some(disk.clone()), userid, to_stdout, move |worker|
+ "diskinit", Some(disk.clone()), auth_id, to_stdout, move |worker|
{
worker.log(format!("initialize disk {}", disk));
let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
- let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+ let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let info = get_disk_usage_info(&disk, true)?;
}
let upid_str = WorkerTask::new_thread(
- "dircreate", Some(name.clone()), userid, to_stdout, move |worker|
+ "dircreate", Some(name.clone()), auth_id, to_stdout, move |worker|
{
worker.log(format!("create datastore '{}' on disk {}", name, disk));
let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
- let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+ let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let add_datastore = add_datastore.unwrap_or(false);
}
let upid_str = WorkerTask::new_thread(
- "zfscreate", Some(name.clone()), userid, to_stdout, move |worker|
+ "zfscreate", Some(name.clone()), auth_id, to_stdout, move |worker|
{
worker.log(format!("create {:?} zpool '{}' on devices '{}'", raidlevel, name, devices_text));
network::assert_ifupdown2_installed()?;
- let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+ let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
- let upid_str = WorkerTask::spawn("srvreload", Some(String::from("networking")), userid, true, |_worker| async {
+ let upid_str = WorkerTask::spawn("srvreload", Some(String::from("networking")), auth_id, true, |_worker| async {
let _ = std::fs::rename(network::NETWORK_INTERFACES_NEW_FILENAME, network::NETWORK_INTERFACES_FILENAME);
Ok(json_service_state(&service, status))
}
-fn run_service_command(service: &str, cmd: &str, userid: Userid) -> Result<Value, Error> {
+fn run_service_command(service: &str, cmd: &str, auth_id: Authid) -> Result<Value, Error> {
let workerid = format!("srv{}", &cmd);
let upid = WorkerTask::new_thread(
&workerid,
Some(service.clone()),
- userid,
+ auth_id,
false,
move |_worker| {
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
- let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+ let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
log::info!("starting service {}", service);
- run_service_command(&service, "start", userid)
+ run_service_command(&service, "start", auth_id)
}
#[api(
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
- let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+ let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
log::info!("stopping service {}", service);
- run_service_command(&service, "stop", userid)
+ run_service_command(&service, "stop", auth_id)
}
#[api(
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
- let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+ let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
log::info!("re-starting service {}", service);
if &service == "proxmox-backup-proxy" {
// special case, avoid aborting running tasks
- run_service_command(&service, "reload", userid)
+ run_service_command(&service, "reload", auth_id)
} else {
- run_service_command(&service, "restart", userid)
+ run_service_command(&service, "restart", auth_id)
}
}
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
- let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+ let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
log::info!("reloading service {}", service);
- run_service_command(&service, "reload", userid)
+ run_service_command(&service, "reload", auth_id)
}
use crate::tools::subscription::{self, SubscriptionStatus, SubscriptionInfo};
use crate::config::acl::{PRIV_SYS_AUDIT,PRIV_SYS_MODIFY};
use crate::config::cached_user_info::CachedUserInfo;
-use crate::api2::types::{NODE_SCHEMA, Userid};
+use crate::api2::types::{NODE_SCHEMA, Authid};
#[api(
input: {
},
};
- let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+ let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
- let user_privs = user_info.lookup_privs(&userid, &[]);
+ let user_privs = user_info.lookup_privs(&auth_id, &[]);
if (user_privs & PRIV_SYS_AUDIT) == 0 {
// not enough privileges for full state
let upid = extract_upid(¶m)?;
- let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+ let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
- if userid != upid.userid {
+ if auth_id != upid.auth_id {
let user_info = CachedUserInfo::new()?;
- user_info.check_privs(&userid, &["system", "tasks"], PRIV_SYS_AUDIT, false)?;
+ user_info.check_privs(&auth_id, &["system", "tasks"], PRIV_SYS_AUDIT, false)?;
}
let mut result = json!({
"starttime": upid.starttime,
"type": upid.worker_type,
"id": upid.worker_id,
- "user": upid.userid,
+ "user": upid.auth_id,
});
if crate::server::worker_is_active(&upid).await? {
let upid = extract_upid(¶m)?;
- let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+ let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
- if userid != upid.userid {
+ if auth_id != upid.auth_id {
let user_info = CachedUserInfo::new()?;
- user_info.check_privs(&userid, &["system", "tasks"], PRIV_SYS_AUDIT, false)?;
+ user_info.check_privs(&auth_id, &["system", "tasks"], PRIV_SYS_AUDIT, false)?;
}
let test_status = param["test-status"].as_bool().unwrap_or(false);
let upid = extract_upid(¶m)?;
- let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+ let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
- if userid != upid.userid {
+ if auth_id != upid.auth_id {
let user_info = CachedUserInfo::new()?;
- user_info.check_privs(&userid, &["system", "tasks"], PRIV_SYS_MODIFY, false)?;
+ user_info.check_privs(&auth_id, &["system", "tasks"], PRIV_SYS_MODIFY, false)?;
}
server::abort_worker_async(upid);
mut rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<TaskListItem>, Error> {
- let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+ let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
- let user_privs = user_info.lookup_privs(&userid, &["system", "tasks"]);
+ let user_privs = user_info.lookup_privs(&auth_id, &["system", "tasks"]);
let list_all = (user_privs & PRIV_SYS_AUDIT) != 0;
Err(_) => return None,
};
- if !list_all && info.upid.userid != userid { return None; }
+ if !list_all && info.upid.auth_id != auth_id { return None; }
- if let Some(userid) = &userfilter {
- if !info.upid.userid.as_str().contains(userid) { return None; }
+ if let Some(needle) = &userfilter {
+ if !info.upid.auth_id.to_string().contains(needle) { return None; }
}
if let Some(store) = store {
pub fn check_pull_privs(
- userid: &Userid,
+ auth_id: &Authid,
store: &str,
remote: &str,
remote_store: &str,
let user_info = CachedUserInfo::new()?;
- user_info.check_privs(userid, &["datastore", store], PRIV_DATASTORE_BACKUP, false)?;
- user_info.check_privs(userid, &["remote", remote, remote_store], PRIV_REMOTE_READ, false)?;
+ user_info.check_privs(auth_id, &["datastore", store], PRIV_DATASTORE_BACKUP, false)?;
+ user_info.check_privs(auth_id, &["remote", remote, remote_store], PRIV_REMOTE_READ, false)?;
if delete {
- user_info.check_privs(userid, &["datastore", store], PRIV_DATASTORE_PRUNE, false)?;
+ user_info.check_privs(auth_id, &["datastore", store], PRIV_DATASTORE_PRUNE, false)?;
}
Ok(())
pub fn do_sync_job(
mut job: Job,
sync_job: SyncJobConfig,
- userid: &Userid,
+ auth_id: &Authid,
schedule: Option<String>,
) -> Result<String, Error> {
let job_id = job.jobname().to_string();
let worker_type = job.jobtype().to_string();
- let email = crate::server::lookup_user_email(userid);
+ let email = crate::server::lookup_user_email(auth_id.user());
let upid_str = WorkerTask::spawn(
&worker_type,
Some(job.jobname().to_string()),
- userid.clone(),
+ auth_id.clone(),
false,
move |worker| async move {
worker.log(format!("Sync datastore '{}' from '{}/{}'",
sync_job.store, sync_job.remote, sync_job.remote_store));
- crate::client::pull::pull_store(&worker, &client, &src_repo, tgt_store.clone(), delete, Userid::backup_userid().clone()).await?;
+ let backup_auth_id = Authid::backup_auth_id();
+
+ crate::client::pull::pull_store(&worker, &client, &src_repo, tgt_store.clone(), delete, backup_auth_id.clone()).await?;
worker.log(format!("sync job '{}' end", &job_id));
rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> {
- let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+ let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let delete = remove_vanished.unwrap_or(true);
- check_pull_privs(&userid, &store, &remote, &remote_store, delete)?;
+ check_pull_privs(&auth_id, &store, &remote, &remote_store, delete)?;
let (client, src_repo, tgt_store) = get_pull_parameters(&store, &remote, &remote_store).await?;
// fixme: set to_stdout to false?
- let upid_str = WorkerTask::spawn("sync", Some(store.clone()), userid.clone(), true, move |worker| async move {
+ let upid_str = WorkerTask::spawn("sync", Some(store.clone()), auth_id.clone(), true, move |worker| async move {
worker.log(format!("sync datastore '{}' start", store));
- let pull_future = pull_store(&worker, &client, &src_repo, tgt_store.clone(), delete, userid);
+ let pull_future = pull_store(&worker, &client, &src_repo, tgt_store.clone(), delete, auth_id);
let future = select!{
success = pull_future.fuse() => success,
abort = worker.abort_future().map(|_| Err(format_err!("pull aborted"))) => abort,
async move {
let debug = param["debug"].as_bool().unwrap_or(false);
- let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+ let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let store = tools::required_string_param(¶m, "store")?.to_owned();
let user_info = CachedUserInfo::new()?;
- let privs = user_info.lookup_privs(&userid, &["datastore", &store]);
+ let privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
let priv_read = privs & PRIV_DATASTORE_READ != 0;
let priv_backup = privs & PRIV_DATASTORE_BACKUP != 0;
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
if !priv_read {
let owner = datastore.get_owner(backup_dir.group())?;
- if owner != userid {
+ if owner != auth_id {
bail!("backup owner check failed!");
}
}
let worker_id = format!("{}:{}/{}/{:08X}", store, backup_type, backup_id, backup_dir.backup_time());
- WorkerTask::spawn("reader", Some(worker_id), userid.clone(), true, move |worker| {
+ WorkerTask::spawn("reader", Some(worker_id), auth_id.clone(), true, move |worker| {
let mut env = ReaderEnvironment::new(
env_type,
- userid,
+ auth_id,
worker.clone(),
datastore,
backup_dir,
use proxmox::api::{RpcEnvironment, RpcEnvironmentType};
-use crate::api2::types::Userid;
+use crate::api2::types::Authid;
use crate::backup::*;
use crate::server::formatter::*;
use crate::server::WorkerTask;
pub struct ReaderEnvironment {
env_type: RpcEnvironmentType,
result_attributes: Value,
- user: Userid,
+ auth_id: Authid,
pub debug: bool,
pub formatter: &'static OutputFormatter,
pub worker: Arc<WorkerTask>,
impl ReaderEnvironment {
pub fn new(
env_type: RpcEnvironmentType,
- user: Userid,
+ auth_id: Authid,
worker: Arc<WorkerTask>,
datastore: Arc<DataStore>,
backup_dir: BackupDir,
Self {
result_attributes: json!({}),
env_type,
- user,
+ auth_id,
worker,
datastore,
debug: false,
self.env_type
}
- fn set_user(&mut self, _user: Option<String>) {
- panic!("unable to change user");
+ fn set_auth_id(&mut self, _auth_id: Option<String>) {
+ panic!("unable to change auth_id");
}
- fn get_user(&self) -> Option<String> {
- Some(self.user.to_string())
+ fn get_auth_id(&self) -> Option<String> {
+ Some(self.auth_id.to_string())
}
}
DATASTORE_SCHEMA,
RRDMode,
RRDTimeFrameResolution,
+ Authid,
TaskListItem,
TaskStateType,
- Userid,
};
use crate::server;
let (config, _digest) = datastore::config()?;
- let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+ let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
let mut list = Vec::new();
for (store, (_, _)) in &config.sections {
- let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
+ let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
let allowed = (user_privs & (PRIV_DATASTORE_AUDIT| PRIV_DATASTORE_BACKUP)) != 0;
if !allowed {
continue;
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<TaskListItem>, Error> {
- let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+ let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
- let user_privs = user_info.lookup_privs(&userid, &["system", "tasks"]);
+ let user_privs = user_info.lookup_privs(&auth_id, &["system", "tasks"]);
let list_all = (user_privs & PRIV_SYS_AUDIT) != 0;
let since = since.unwrap_or_else(|| 0);
.filter_map(|info| {
match info {
Ok(info) => {
- if list_all || info.upid.userid == userid {
+ if list_all || info.upid.auth_id == auth_id {
if let Some(filter) = &typefilter {
if !info.upid.worker_type.contains(filter) {
return None;
},
},
owner: {
- type: Userid,
+ type: Authid,
optional: true,
},
},
pub files: Vec<String>,
/// The owner of group
#[serde(skip_serializing_if="Option::is_none")]
- pub owner: Option<Userid>,
+ pub owner: Option<Authid>,
}
#[api()]
},
},
owner: {
- type: Userid,
+ type: Authid,
optional: true,
},
},
pub size: Option<u64>,
/// The owner of the snapshots group
#[serde(skip_serializing_if="Option::is_none")]
- pub owner: Option<Userid>,
+ pub owner: Option<Authid>,
}
#[api(
#[api(
properties: {
upid: { schema: UPID_SCHEMA },
- user: { type: Userid },
+ user: { type: Authid },
},
)]
#[derive(Serialize, Deserialize)]
pub worker_type: String,
/// Worker ID (arbitrary ASCII string)
pub worker_id: Option<String>,
- /// The user who started the task
- pub user: Userid,
+ /// The authenticated entity who started the task
+ pub user: Authid,
/// The task end time (Epoch)
#[serde(skip_serializing_if="Option::is_none")]
pub endtime: Option<i64>,
starttime: info.upid.starttime,
worker_type: info.upid.worker_type,
worker_id: info.upid.worker_id,
- user: info.upid.userid,
+ user: info.upid.auth_id,
endtime,
status,
}
use crate::tools;
use crate::tools::format::HumanByte;
use crate::tools::fs::{lock_dir_noblock, DirLockGuard};
-use crate::api2::types::{GarbageCollectionStatus, Userid};
+use crate::api2::types::{Authid, GarbageCollectionStatus};
use crate::server::UPID;
lazy_static! {
/// Returns the backup owner.
///
- /// The backup owner is the user who first created the backup group.
- pub fn get_owner(&self, backup_group: &BackupGroup) -> Result<Userid, Error> {
+ /// The backup owner is the entity who first created the backup group.
+ pub fn get_owner(&self, backup_group: &BackupGroup) -> Result<Authid, Error> {
let mut full_path = self.base_path();
full_path.push(backup_group.group_path());
full_path.push("owner");
pub fn set_owner(
&self,
backup_group: &BackupGroup,
- userid: &Userid,
+ auth_id: &Authid,
force: bool,
) -> Result<(), Error> {
let mut path = self.base_path();
let mut file = open_options.open(&path)
.map_err(|err| format_err!("unable to create owner file {:?} - {}", path, err))?;
- writeln!(file, "{}", userid)
+ writeln!(file, "{}", auth_id)
.map_err(|err| format_err!("unable to write owner file {:?} - {}", path, err))?;
Ok(())
pub fn create_locked_backup_group(
&self,
backup_group: &BackupGroup,
- userid: &Userid,
- ) -> Result<(Userid, DirLockGuard), Error> {
+ auth_id: &Authid,
+ ) -> Result<(Authid, DirLockGuard), Error> {
// create intermediate path first:
let base_path = self.base_path();
match std::fs::create_dir(&full_path) {
Ok(_) => {
let guard = lock_dir_noblock(&full_path, "backup group", "another backup is already running")?;
- self.set_owner(backup_group, userid, false)?;
+ self.set_owner(backup_group, auth_id, false)?;
let owner = self.get_owner(backup_group)?; // just to be sure
Ok((owner, guard))
}
use proxmox_backup::api2::version;
use proxmox_backup::client::*;
use proxmox_backup::pxar::catalog::*;
-use proxmox_backup::config::user::complete_user_name;
+use proxmox_backup::config::user::complete_userid;
use proxmox_backup::backup::{
archive_type,
decrypt_key,
description: "Backup group.",
},
"new-owner": {
- type: Userid,
+ type: Authid,
},
}
}
let change_owner_cmd_def = CliCommand::new(&API_METHOD_CHANGE_BACKUP_OWNER)
.arg_param(&["group", "new-owner"])
.completion_cb("group", complete_backup_group)
- .completion_cb("new-owner", complete_user_name)
+ .completion_cb("new-owner", complete_userid)
.completion_cb("repository", complete_repository);
let cmd_def = CliCommandMap::new()
let mut rpcenv = CliEnvironment::new();
- rpcenv.set_user(Some(String::from("root@pam")));
+ rpcenv.set_auth_id(Some(String::from("root@pam")));
proxmox_backup::tools::runtime::main(run_async_cli_command(cmd_def, rpcenv));
}
};
-use proxmox_backup::api2::types::Userid;
+use proxmox_backup::api2::types::{Authid, Userid};
use proxmox_backup::configdir;
use proxmox_backup::buildcfg;
use proxmox_backup::server;
if let Err(err) = WorkerTask::new_thread(
worker_type,
Some(store.clone()),
- Userid::backup_userid().clone(),
+ Authid::backup_auth_id().clone(),
false,
move |worker| {
job.start(&worker.upid().to_string())?;
if let Err(err) = WorkerTask::new_thread(
worker_type,
Some(store.clone()),
- Userid::backup_userid().clone(),
+ Authid::backup_auth_id().clone(),
false,
move |worker| {
Err(_) => continue, // could not get lock
};
- let userid = Userid::backup_userid();
+ let auth_id = Authid::backup_auth_id();
- if let Err(err) = do_sync_job(job, job_config, userid, Some(event_str)) {
+ if let Err(err) = do_sync_job(job, job_config, &auth_id, Some(event_str)) {
eprintln!("unable to start datastore sync job {} - {}", &job_id, err);
}
}
Ok(job) => job,
Err(_) => continue, // could not get lock
};
- let userid = Userid::backup_userid().clone();
- if let Err(err) = do_verification_job(job, job_config, &userid, Some(event_str)) {
+ let auth_id = Authid::backup_auth_id();
+ if let Err(err) = do_verification_job(job, job_config, &auth_id, Some(event_str)) {
eprintln!("unable to start datastore verification job {} - {}", &job_id, err);
}
}
if let Err(err) = WorkerTask::new_thread(
worker_type,
Some(job_id.to_string()),
- Userid::backup_userid().clone(),
+ Authid::backup_auth_id().clone(),
false,
move |worker| {
job.start(&worker.upid().to_string())?;
"update",
CliCommand::new(&api2::access::acl::API_METHOD_UPDATE_ACL)
.arg_param(&["path", "role"])
- .completion_cb("userid", config::user::complete_user_name)
+ .completion_cb("userid", config::user::complete_userid)
.completion_cb("path", config::datastore::complete_acl_path)
);
"update",
CliCommand::new(&api2::access::user::API_METHOD_UPDATE_USER)
.arg_param(&["userid"])
- .completion_cb("userid", config::user::complete_user_name)
+ .completion_cb("userid", config::user::complete_userid)
)
.insert(
"remove",
CliCommand::new(&api2::access::user::API_METHOD_DELETE_USER)
.arg_param(&["userid"])
- .completion_cb("userid", config::user::complete_user_name)
+ .completion_cb("userid", config::user::complete_userid)
);
cmd_def.into()
src_repo: &BackupRepository,
tgt_store: Arc<DataStore>,
delete: bool,
- userid: Userid,
+ auth_id: Authid,
) -> Result<(), Error> {
// explicit create shared lock to prevent GC on newly created chunks
for (groups_done, item) in list.into_iter().enumerate() {
let group = BackupGroup::new(&item.backup_type, &item.backup_id);
- let (owner, _lock_guard) = tgt_store.create_locked_backup_group(&group, &userid)?;
+ let (owner, _lock_guard) = tgt_store.create_locked_backup_group(&group, &auth_id)?;
// permission check
- if userid != owner { // only the owner is allowed to create additional snapshots
+ if auth_id != owner { // only the owner is allowed to create additional snapshots
worker.log(format!("sync group {}/{} failed - owner check failed ({} != {})",
- item.backup_type, item.backup_id, userid, owner));
+ item.backup_type, item.backup_id, auth_id, owner));
errors = true; // do not stop here, instead continue
} else if let Err(err) = pull_group(
use proxmox::constnamedbitmap;
use proxmox::api::{api, schema::*};
-use crate::api2::types::Userid;
+use crate::api2::types::{Authid, Userid};
// define Privilege bitfield
}
pub struct AclTreeNode {
- pub users: HashMap<Userid, HashMap<String, bool>>,
+ pub users: HashMap<Authid, HashMap<String, bool>>,
pub groups: HashMap<String, HashMap<String, bool>>,
pub children: BTreeMap<String, AclTreeNode>,
}
}
}
- pub fn extract_roles(&self, user: &Userid, all: bool) -> HashSet<String> {
- let user_roles = self.extract_user_roles(user, all);
+ pub fn extract_roles(&self, auth_id: &Authid, all: bool) -> HashSet<String> {
+ let user_roles = self.extract_user_roles(auth_id, all);
if !user_roles.is_empty() {
// user privs always override group privs
return user_roles
};
- self.extract_group_roles(user, all)
+ self.extract_group_roles(auth_id.user(), all)
}
- pub fn extract_user_roles(&self, user: &Userid, all: bool) -> HashSet<String> {
+ pub fn extract_user_roles(&self, auth_id: &Authid, all: bool) -> HashSet<String> {
let mut set = HashSet::new();
- let roles = match self.users.get(user) {
+ let roles = match self.users.get(auth_id) {
Some(m) => m,
None => return set,
};
roles.remove(role);
}
- pub fn delete_user_role(&mut self, userid: &Userid, role: &str) {
- let roles = match self.users.get_mut(userid) {
+ pub fn delete_user_role(&mut self, auth_id: &Authid, role: &str) {
+ let roles = match self.users.get_mut(auth_id) {
Some(r) => r,
None => return,
};
}
}
- pub fn insert_user_role(&mut self, user: Userid, role: String, propagate: bool) {
- let map = self.users.entry(user).or_insert_with(|| HashMap::new());
+ pub fn insert_user_role(&mut self, auth_id: Authid, role: String, propagate: bool) {
+ let map = self.users.entry(auth_id).or_insert_with(|| HashMap::new());
if role == ROLE_NAME_NO_ACCESS {
map.clear();
map.insert(role, propagate);
node.delete_group_role(group, role);
}
- pub fn delete_user_role(&mut self, path: &str, userid: &Userid, role: &str) {
+ pub fn delete_user_role(&mut self, path: &str, auth_id: &Authid, role: &str) {
let path = split_acl_path(path);
let node = match self.get_node(&path) {
Some(n) => n,
None => return,
};
- node.delete_user_role(userid, role);
+ node.delete_user_role(auth_id, role);
}
pub fn insert_group_role(&mut self, path: &str, group: &str, role: &str, propagate: bool) {
node.insert_group_role(group.to_string(), role.to_string(), propagate);
}
- pub fn insert_user_role(&mut self, path: &str, user: &Userid, role: &str, propagate: bool) {
+ pub fn insert_user_role(&mut self, path: &str, auth_id: &Authid, role: &str, propagate: bool) {
let path = split_acl_path(path);
let node = self.get_or_insert_node(&path);
- node.insert_user_role(user.to_owned(), role.to_string(), propagate);
+ node.insert_user_role(auth_id.to_owned(), role.to_string(), propagate);
}
fn write_node_config(
let mut role_ug_map0 = HashMap::new();
let mut role_ug_map1 = HashMap::new();
- for (user, roles) in &node.users {
+ for (auth_id, roles) in &node.users {
// no need to save, because root is always 'Administrator'
- if user == "root@pam" { continue; }
+ if !auth_id.is_token() && auth_id.user() == "root@pam" { continue; }
for (role, propagate) in roles {
let role = role.as_str();
- let user = user.to_string();
+ let auth_id = auth_id.to_string();
if *propagate {
role_ug_map1.entry(role).or_insert_with(|| BTreeSet::new())
- .insert(user);
+ .insert(auth_id);
} else {
role_ug_map0.entry(role).or_insert_with(|| BTreeSet::new())
- .insert(user);
+ .insert(auth_id);
}
}
}
Ok(tree)
}
- pub fn roles(&self, userid: &Userid, path: &[&str]) -> HashSet<String> {
+ pub fn roles(&self, auth_id: &Authid, path: &[&str]) -> HashSet<String> {
let mut node = &self.root;
- let mut role_set = node.extract_roles(userid, path.is_empty());
+ let mut role_set = node.extract_roles(auth_id, path.is_empty());
for (pos, comp) in path.iter().enumerate() {
let last_comp = (pos + 1) == path.len();
Some(n) => n,
None => return role_set, // path not found
};
- let new_set = node.extract_roles(userid, last_comp);
+ let new_set = node.extract_roles(auth_id, last_comp);
if !new_set.is_empty() {
// overwrite previous settings
role_set = new_set;
use anyhow::{Error};
use super::AclTree;
- use crate::api2::types::Userid;
+ use crate::api2::types::Authid;
fn check_roles(
tree: &AclTree,
- user: &Userid,
+ auth_id: &Authid,
path: &str,
expected_roles: &str,
) {
let path_vec = super::split_acl_path(path);
- let mut roles = tree.roles(user, &path_vec)
+ let mut roles = tree.roles(auth_id, &path_vec)
.iter().map(|v| v.clone()).collect::<Vec<String>>();
roles.sort();
let roles = roles.join(",");
- assert_eq!(roles, expected_roles, "\nat check_roles for '{}' on '{}'", user, path);
+ assert_eq!(roles, expected_roles, "\nat check_roles for '{}' on '{}'", auth_id, path);
}
#[test]
acl:1:/storage/store1:user1@pbs:DatastoreBackup
acl:1:/storage/store2:user2@pbs:DatastoreBackup
"###)?;
- let user1: Userid = "user1@pbs".parse()?;
+ let user1: Authid = "user1@pbs".parse()?;
check_roles(&tree, &user1, "/", "");
check_roles(&tree, &user1, "/storage", "Admin");
check_roles(&tree, &user1, "/storage/store1", "DatastoreBackup");
check_roles(&tree, &user1, "/storage/store2", "Admin");
- let user2: Userid = "user2@pbs".parse()?;
+ let user2: Authid = "user2@pbs".parse()?;
check_roles(&tree, &user2, "/", "");
check_roles(&tree, &user2, "/storage", "");
check_roles(&tree, &user2, "/storage/store1", "");
acl:1:/storage:user1@pbs:NoAccess
acl:1:/storage/store1:user1@pbs:DatastoreBackup
"###)?;
- let user1: Userid = "user1@pbs".parse()?;
+ let user1: Authid = "user1@pbs".parse()?;
check_roles(&tree, &user1, "/", "Admin");
check_roles(&tree, &user1, "/storage", "NoAccess");
check_roles(&tree, &user1, "/storage/store1", "DatastoreBackup");
let mut tree = AclTree::new();
- let user1: Userid = "user1@pbs".parse()?;
+ let user1: Authid = "user1@pbs".parse()?;
tree.insert_user_role("/", &user1, "Admin", true);
tree.insert_user_role("/", &user1, "Audit", true);
let mut tree = AclTree::new();
- let user1: Userid = "user1@pbs".parse()?;
+ let user1: Authid = "user1@pbs".parse()?;
tree.insert_user_role("/storage", &user1, "NoAccess", true);
use proxmox::api::UserInformation;
use super::acl::{AclTree, ROLE_NAMES, ROLE_ADMIN};
-use super::user::User;
-use crate::api2::types::Userid;
+use super::user::{ApiToken, User};
+use crate::api2::types::{Authid, Userid};
-/// Cache User/Group/Acl configuration data for fast permission tests
+/// Cache User/Group/Token/Acl configuration data for fast permission tests
pub struct CachedUserInfo {
user_cfg: Arc<SectionConfigData>,
acl_tree: Arc<AclTree>,
Ok(config)
}
- /// Test if a user account is enabled and not expired
- pub fn is_active_user(&self, userid: &Userid) -> bool {
+ /// Test if an authentication id is enabled and not expired
+ pub fn is_active_auth_id(&self, auth_id: &Authid) -> bool {
+ let userid = auth_id.user();
+
if let Ok(info) = self.user_cfg.lookup::<User>("user", userid.as_str()) {
if !info.enable.unwrap_or(true) {
return false;
return false;
}
}
- return true;
} else {
return false;
}
+
+ if auth_id.is_token() {
+ if let Ok(info) = self.user_cfg.lookup::<ApiToken>("token", &auth_id.to_string()) {
+ if !info.enable.unwrap_or(true) {
+ return false;
+ }
+ if let Some(expire) = info.expire {
+ if expire > 0 && expire <= now() {
+ return false;
+ }
+ }
+ return true;
+ } else {
+ return false;
+ }
+ }
+
+ return true;
}
pub fn check_privs(
&self,
- userid: &Userid,
+ auth_id: &Authid,
path: &[&str],
required_privs: u64,
partial: bool,
) -> Result<(), Error> {
- let user_privs = self.lookup_privs(&userid, path);
+ let privs = self.lookup_privs(&auth_id, path);
let allowed = if partial {
- (user_privs & required_privs) != 0
+ (privs & required_privs) != 0
} else {
- (user_privs & required_privs) == required_privs
+ (privs & required_privs) == required_privs
};
if !allowed {
// printing the path doesn't leaks any information as long as we
Ok(())
}
- pub fn is_superuser(&self, userid: &Userid) -> bool {
- userid == "root@pam"
+ pub fn is_superuser(&self, auth_id: &Authid) -> bool {
+ !auth_id.is_token() && auth_id.user() == "root@pam"
}
pub fn is_group_member(&self, _userid: &Userid, _group: &str) -> bool {
false
}
- pub fn lookup_privs(&self, userid: &Userid, path: &[&str]) -> u64 {
-
- if self.is_superuser(userid) {
+ pub fn lookup_privs(&self, auth_id: &Authid, path: &[&str]) -> u64 {
+ if self.is_superuser(auth_id) {
return ROLE_ADMIN;
}
- let roles = self.acl_tree.roles(userid, path);
+ let roles = self.acl_tree.roles(auth_id, path);
let mut privs: u64 = 0;
for role in roles {
if let Some((role_privs, _)) = ROLE_NAMES.get(role.as_str()) {
privs |= role_privs;
}
}
+
+ if auth_id.is_token() {
+ // limit privs to that of owning user
+ let user_auth_id = Authid::from(auth_id.user().clone());
+ privs &= self.lookup_privs(&user_auth_id, path);
+ }
+
privs
}
}
false
}
- fn lookup_privs(&self, userid: &str, path: &[&str]) -> u64 {
- match userid.parse::<Userid>() {
- Ok(userid) => Self::lookup_privs(self, &userid, path),
+ fn lookup_privs(&self, auth_id: &str, path: &[&str]) -> u64 {
+ match auth_id.parse::<Authid>() {
+ Ok(auth_id) => Self::lookup_privs(self, &auth_id, path),
Err(_) => 0,
}
}
type: u16,
},
userid: {
- type: Userid,
+ type: Authid,
},
password: {
schema: REMOTE_PASSWORD_SCHEMA,
.max_length(64)
.schema();
+#[api(
+ properties: {
+ tokenid: {
+ schema: PROXMOX_TOKEN_ID_SCHEMA,
+ },
+ comment: {
+ optional: true,
+ schema: SINGLE_LINE_COMMENT_SCHEMA,
+ },
+ enable: {
+ optional: true,
+ schema: ENABLE_USER_SCHEMA,
+ },
+ expire: {
+ optional: true,
+ schema: EXPIRE_USER_SCHEMA,
+ },
+ }
+)]
+#[derive(Serialize,Deserialize)]
+/// ApiToken properties.
+pub struct ApiToken {
+ pub tokenid: Authid,
+ #[serde(skip_serializing_if="Option::is_none")]
+ pub comment: Option<String>,
+ #[serde(skip_serializing_if="Option::is_none")]
+ pub enable: Option<bool>,
+ #[serde(skip_serializing_if="Option::is_none")]
+ pub expire: Option<i64>,
+}
#[api(
properties: {
}
fn init() -> SectionConfig {
- let obj_schema = match User::API_SCHEMA {
- Schema::Object(ref obj_schema) => obj_schema,
+ let mut config = SectionConfig::new(&Authid::API_SCHEMA);
+
+ let user_schema = match User::API_SCHEMA {
+ Schema::Object(ref user_schema) => user_schema,
_ => unreachable!(),
};
+ let user_plugin = SectionConfigPlugin::new("user".to_string(), Some("userid".to_string()), user_schema);
+ config.register_plugin(user_plugin);
- let plugin = SectionConfigPlugin::new("user".to_string(), Some("userid".to_string()), obj_schema);
- let mut config = SectionConfig::new(&Userid::API_SCHEMA);
-
- config.register_plugin(plugin);
+ let token_schema = match ApiToken::API_SCHEMA {
+ Schema::Object(ref token_schema) => token_schema,
+ _ => unreachable!(),
+ };
+ let token_plugin = SectionConfigPlugin::new("token".to_string(), Some("tokenid".to_string()), token_schema);
+ config.register_plugin(token_plugin);
config
}
}
// shell completion helper
-pub fn complete_user_name(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {
+pub fn complete_userid(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {
match config() {
- Ok((data, _digest)) => data.sections.iter().map(|(id, _)| id.to_string()).collect(),
+ Ok((data, _digest)) => {
+ data.sections.iter()
+ .filter_map(|(id, (section_type, _))| {
+ if section_type == "user" {
+ Some(id.to_string())
+ } else {
+ None
+ }
+ }).collect()
+ },
Err(_) => return vec![],
}
}
+
+// shell completion helper
+pub fn complete_authid(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {
+ match config() {
+ Ok((data, _digest)) => data.sections.iter().map(|(id, _)| id.to_string()).collect(),
+ Err(_) => vec![],
+ }
+}
pub struct RestEnvironment {
env_type: RpcEnvironmentType,
result_attributes: Value,
- user: Option<String>,
+ auth_id: Option<String>,
client_ip: Option<std::net::SocketAddr>,
}
pub fn new(env_type: RpcEnvironmentType) -> Self {
Self {
result_attributes: json!({}),
- user: None,
+ auth_id: None,
client_ip: None,
env_type,
}
self.env_type
}
- fn set_user(&mut self, user: Option<String>) {
- self.user = user;
+ fn set_auth_id(&mut self, auth_id: Option<String>) {
+ self.auth_id = auth_id;
}
- fn get_user(&self) -> Option<String> {
- self.user.clone()
+ fn get_auth_id(&self) -> Option<String> {
+ self.auth_id.clone()
}
fn set_client_ip(&mut self, client_ip: Option<std::net::SocketAddr>) {
use super::ApiConfig;
use crate::auth_helpers::*;
-use crate::api2::types::Userid;
+use crate::api2::types::{Authid, Userid};
use crate::tools;
use crate::tools::FileLogger;
use crate::tools::ticket::Ticket;
log::error!("{} {}: {} {}: [client {}] {}", method.as_str(), path, status.as_str(), reason, peer, message);
}
if let Some(logfile) = logfile {
- let user = match resp.extensions().get::<Userid>() {
- Some(userid) => userid.as_str(),
- None => "-",
+ let auth_id = match resp.extensions().get::<Authid>() {
+ Some(auth_id) => auth_id.to_string(),
+ None => "-".to_string(),
};
let now = proxmox::tools::time::epoch_i64();
// time format which apache/nginx use (by default), copied from pve-http-server
.log(format!(
"{} - {} [{}] \"{} {}\" {} {} {}",
peer.ip(),
- user,
+ auth_id,
datetime,
method.as_str(),
path,
.unwrap();
if let Some(userid) = userid {
- resp.extensions_mut().insert(userid);
+ resp.extensions_mut().insert(Authid::from((userid, None)));
}
resp
ticket: &Option<String>,
csrf_token: &Option<String>,
user_info: &CachedUserInfo,
-) -> Result<Userid, Error> {
+) -> Result<Authid, Error> {
let ticket_lifetime = tools::ticket::TICKET_LIFETIME;
let ticket = ticket.as_ref().map(String::as_str);
let userid: Userid = Ticket::parse(&ticket.ok_or_else(|| format_err!("missing ticket"))?)?
.verify_with_time_frame(public_auth_key(), "PBS", None, -300..ticket_lifetime)?;
- if !user_info.is_active_user(&userid) {
+ let auth_id = Authid::from(userid.clone());
+ if !user_info.is_active_auth_id(&auth_id) {
bail!("user account disabled or expired.");
}
}
}
- Ok(userid)
+ Ok(Authid::from(userid))
}
async fn handle_request(
if auth_required {
let (ticket, csrf_token, _) = extract_auth_data(&parts.headers);
match check_auth(&method, &ticket, &csrf_token, &user_info) {
- Ok(userid) => rpcenv.set_user(Some(userid.to_string())),
+ Ok(authid) => rpcenv.set_auth_id(Some(authid.to_string())),
Err(err) => {
// always delay unauthorized calls by 3 seconds (from start of request)
let err = http_err!(UNAUTHORIZED, "authentication failed - {}", err);
return Ok((formatter.format_error)(err));
}
Some(api_method) => {
- let user = rpcenv.get_user();
- if !check_api_permission(api_method.access.permission, user.as_deref(), &uri_param, user_info.as_ref()) {
+ let auth_id = rpcenv.get_auth_id();
+ if !check_api_permission(api_method.access.permission, auth_id.as_deref(), &uri_param, user_info.as_ref()) {
let err = http_err!(FORBIDDEN, "permission check failed");
tokio::time::delay_until(Instant::from_std(access_forbidden_time)).await;
return Ok((formatter.format_error)(err));
Err(err) => (formatter.format_error)(err),
};
- if let Some(user) = user {
- let userid: Userid = user.parse()?;
- response.extensions_mut().insert(userid);
+ if let Some(auth_id) = auth_id {
+ let auth_id: Authid = auth_id.parse()?;
+ response.extensions_mut().insert(auth_id);
}
return Ok(response);
let (ticket, csrf_token, language) = extract_auth_data(&parts.headers);
if ticket != None {
match check_auth(&method, &ticket, &csrf_token, &user_info) {
- Ok(userid) => {
- let new_csrf_token = assemble_csrf_prevention_token(csrf_secret(), &userid);
- return Ok(get_index(Some(userid), Some(new_csrf_token), language, &api, parts));
+ Ok(auth_id) => {
+ let userid = auth_id.user();
+ let new_csrf_token = assemble_csrf_prevention_token(csrf_secret(), userid);
+ return Ok(get_index(Some(userid.clone()), Some(new_csrf_token), language, &api, parts));
}
_ => {
tokio::time::delay_until(Instant::from_std(delay_unauth_time)).await;
use proxmox::const_regex;
use proxmox::sys::linux::procfs;
-use crate::api2::types::Userid;
+use crate::api2::types::Authid;
/// Unique Process/Task Identifier
///
pub worker_type: String,
/// Worker ID (arbitrary ASCII string)
pub worker_id: Option<String>,
- /// The user who started the task
- pub userid: Userid,
+ /// The authenticated entity who started the task
+ pub auth_id: Authid,
/// The node name.
pub node: String,
}
pub PROXMOX_UPID_REGEX = concat!(
r"^UPID:(?P<node>[a-zA-Z0-9]([a-zA-Z0-9\-]*[a-zA-Z0-9])?):(?P<pid>[0-9A-Fa-f]{8}):",
r"(?P<pstart>[0-9A-Fa-f]{8,9}):(?P<task_id>[0-9A-Fa-f]{8,16}):(?P<starttime>[0-9A-Fa-f]{8}):",
- r"(?P<wtype>[^:\s]+):(?P<wid>[^:\s]*):(?P<userid>[^:\s]+):$"
+ r"(?P<wtype>[^:\s]+):(?P<wid>[^:\s]*):(?P<authid>[^:\s]+):$"
);
}
pub fn new(
worker_type: &str,
worker_id: Option<String>,
- userid: Userid,
+ auth_id: Authid,
) -> Result<Self, Error> {
let pid = unsafe { libc::getpid() };
task_id,
worker_type: worker_type.to_owned(),
worker_id,
- userid,
+ auth_id,
node: proxmox::tools::nodename().to_owned(),
})
}
task_id: usize::from_str_radix(&cap["task_id"], 16).unwrap(),
worker_type: cap["wtype"].to_string(),
worker_id,
- userid: cap["userid"].parse()?,
+ auth_id: cap["authid"].parse()?,
node: cap["node"].to_string(),
})
} else {
// more that 8 characters for pstart
write!(f, "UPID:{}:{:08X}:{:08X}:{:08X}:{:08X}:{}:{}:{}:",
- self.node, self.pid, self.pstart, self.task_id, self.starttime, self.worker_type, wid, self.userid)
+ self.node, self.pid, self.pstart, self.task_id, self.starttime, self.worker_type, wid, self.auth_id)
}
}
pub fn do_verification_job(
mut job: Job,
verification_job: VerificationJobConfig,
- userid: &Userid,
+ auth_id: &Authid,
schedule: Option<String>,
) -> Result<String, Error> {
}
};
- let email = crate::server::lookup_user_email(userid);
+ let email = crate::server::lookup_user_email(auth_id.user());
let job_id = job.jobname().to_string();
let worker_type = job.jobtype().to_string();
let upid_str = WorkerTask::new_thread(
&worker_type,
Some(job.jobname().to_string()),
- userid.clone(),
+ auth_id.clone(),
false,
move |worker| {
job.start(&worker.upid().to_string())?;
use crate::tools::logrotate::{LogRotate, LogRotateFiles};
use crate::tools::{FileLogger, FileLogOptions};
-use crate::api2::types::Userid;
+use crate::api2::types::Authid;
macro_rules! PROXMOX_BACKUP_VAR_RUN_DIR_M { () => ("/run/proxmox-backup") }
macro_rules! PROXMOX_BACKUP_LOG_DIR_M { () => ("/var/log/proxmox-backup") }
impl WorkerTask {
- pub fn new(worker_type: &str, worker_id: Option<String>, userid: Userid, to_stdout: bool) -> Result<Arc<Self>, Error> {
+ pub fn new(worker_type: &str, worker_id: Option<String>, auth_id: Authid, to_stdout: bool) -> Result<Arc<Self>, Error> {
println!("register worker");
- let upid = UPID::new(worker_type, worker_id, userid)?;
+ let upid = UPID::new(worker_type, worker_id, auth_id)?;
let task_id = upid.task_id;
let mut path = std::path::PathBuf::from(PROXMOX_BACKUP_TASK_DIR);
pub fn spawn<F, T>(
worker_type: &str,
worker_id: Option<String>,
- userid: Userid,
+ auth_id: Authid,
to_stdout: bool,
f: F,
) -> Result<String, Error>
where F: Send + 'static + FnOnce(Arc<WorkerTask>) -> T,
T: Send + 'static + Future<Output = Result<(), Error>>,
{
- let worker = WorkerTask::new(worker_type, worker_id, userid, to_stdout)?;
+ let worker = WorkerTask::new(worker_type, worker_id, auth_id, to_stdout)?;
let upid_str = worker.upid.to_string();
let f = f(worker.clone());
tokio::spawn(async move {
pub fn new_thread<F>(
worker_type: &str,
worker_id: Option<String>,
- userid: Userid,
+ auth_id: Authid,
to_stdout: bool,
f: F,
) -> Result<String, Error>
{
println!("register worker thread");
- let worker = WorkerTask::new(worker_type, worker_id, userid, to_stdout)?;
+ let worker = WorkerTask::new(worker_type, worker_id, auth_id, to_stdout)?;
let upid_str = worker.upid.to_string();
let _child = std::thread::Builder::new().name(upid_str.clone()).spawn(move || {
let res = server::WorkerTask::new_thread(
"garbage_collection",
None,
- proxmox_backup::api2::types::Userid::root_userid().clone(),
+ proxmox_backup::api2::types::Authid::root_auth_id().clone(),
true,
move |worker| {
println!("WORKER {}", worker);
'delete': 1,
path: rec.data.path,
role: rec.data.roleid,
- userid: rec.data.ugid,
+ auth_id: rec.data.ugid,
},
callback: function() {
me.reload();
{
xtype: 'pbsUserSelector',
fieldLabel: gettext('User'),
- name: 'userid',
+ name: 'auth_id',
allowBlank: false,
},
{