use ::serde::{Deserialize, Serialize};
use anyhow::{bail, Error};
use hex::FromHex;
use serde_json::Value;

use proxmox_router::{http_bail, Permission, Router, RpcEnvironment};
use proxmox_schema::{api, param_bail};

use pbs_api_types::{
    Authid, SyncJobConfig, SyncJobConfigUpdater, JOB_ID_SCHEMA, PRIV_DATASTORE_AUDIT,
    PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_PRUNE, PRIV_REMOTE_AUDIT,
    PRIV_REMOTE_READ, PROXMOX_CONFIG_DIGEST_SCHEMA,
};
use pbs_config::sync;

use pbs_config::CachedUserInfo;

pub fn check_sync_job_read_access(
    user_info: &CachedUserInfo,
    auth_id: &Authid,
    job: &SyncJobConfig,
) -> bool {
    let ns_anchor_privs = user_info.lookup_privs(auth_id, &job.acl_path());
    if ns_anchor_privs & PRIV_DATASTORE_AUDIT == 0 {
        return false;
    }

    let remote_privs = user_info.lookup_privs(auth_id, &["remote", &job.remote]);
    remote_privs & PRIV_REMOTE_AUDIT != 0
}

/// Checks whether the user can run the corresponding pull job.
///
/// Namespace creation/deletion ACL and backup group ownership checks happen in the pull code directly.
/// The remote side checks/filters remote datastore/namespace/group access.
pub fn check_sync_job_modify_access(
    user_info: &CachedUserInfo,
    auth_id: &Authid,
    job: &SyncJobConfig,
) -> bool {
    let ns_anchor_privs = user_info.lookup_privs(auth_id, &job.acl_path());
    if ns_anchor_privs & PRIV_DATASTORE_BACKUP == 0 {
        return false;
    }

    if let Some(true) = job.remove_vanished {
        if ns_anchor_privs & PRIV_DATASTORE_PRUNE == 0 {
            return false;
        }
    }

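    // the acting user also counts as the correct owner when the configured owner is
    // one of their own API tokens (but not the other way around)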
    let correct_owner = match job.owner {
        Some(ref owner) => {
            owner == auth_id
                || (owner.is_token() && !auth_id.is_token() && owner.user() == auth_id.user())
        }
        // default sync owner
        None => auth_id == Authid::root_auth_id(),
    };

    // same permission as changing ownership after syncing
    if !correct_owner && ns_anchor_privs & PRIV_DATASTORE_MODIFY == 0 {
        return false;
    }

    let remote_privs = user_info.lookup_privs(auth_id, &["remote", &job.remote, &job.remote_store]);
    remote_privs & PRIV_REMOTE_READ != 0
}

#[api(
    input: {
        properties: {},
    },
    returns: {
        description: "List configured jobs.",
        type: Array,
        items: { type: SyncJobConfig },
    },
    access: {
        description: "Limited to sync job entries where user has Datastore.Audit on target datastore, and Remote.Audit on source remote.",
        permission: &Permission::Anybody,
    },
)]
/// List all sync jobs.
pub fn list_sync_jobs(
    _param: Value,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<SyncJobConfig>, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;

    let (config, digest) = sync::config()?;

    let list = config.convert_to_typed_array("sync")?;

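    // expose the current config digest so clients can pass it back to guard
    // later updates against concurrent modifications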
    rpcenv["digest"] = hex::encode(digest).into();

    let list = list
        .into_iter()
        .filter(|sync_job| check_sync_job_read_access(&user_info, &auth_id, sync_job))
        .collect();
    Ok(list)
}

#[api(
    protected: true,
    input: {
        properties: {
            config: {
                type: SyncJobConfig,
                flatten: true,
            },
        },
    },
    access: {
        description: "User needs Datastore.Backup on target datastore, and Remote.Read on source remote. Additionally, remove_vanished requires Datastore.Prune, and any owner other than the user themselves requires Datastore.Modify",
        permission: &Permission::Anybody,
    },
)]
/// Create a new sync job.
pub fn create_sync_job(
    config: SyncJobConfig,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;

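    // hold the sync config lock for the whole read-modify-write cycle below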
    let _lock = sync::lock_config()?;

    if !check_sync_job_modify_access(&user_info, &auth_id, &config) {
        bail!("permission check failed");
    }

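    // a configured max-depth must stay within the allowed namespace depth when
    // combined with the local and remote namespace anchors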
    if let Some(max_depth) = config.max_depth {
        if let Some(ref ns) = config.ns {
            ns.check_max_depth(max_depth)?;
        }
        if let Some(ref ns) = config.remote_ns {
            ns.check_max_depth(max_depth)?;
        }
    }

    let (mut section_config, _digest) = sync::config()?;

    if section_config.sections.get(&config.id).is_some() {
        param_bail!("id", "job '{}' already exists.", config.id);
    }

    section_config.set_data(&config.id, "sync", &config)?;

    sync::save_config(&section_config)?;

    crate::server::jobstate::create_state_file("syncjob", &config.id)?;

    Ok(())
}

#[api(
    input: {
        properties: {
            id: {
                schema: JOB_ID_SCHEMA,
            },
        },
    },
    returns: { type: SyncJobConfig },
    access: {
        description: "Limited to sync job entries where user has Datastore.Audit on target datastore, and Remote.Audit on source remote.",
        permission: &Permission::Anybody,
    },
)]
/// Read a sync job configuration.
pub fn read_sync_job(id: String, rpcenv: &mut dyn RpcEnvironment) -> Result<SyncJobConfig, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;

    let (config, digest) = sync::config()?;

    let sync_job = config.lookup("sync", &id)?;
    if !check_sync_job_read_access(&user_info, &auth_id, &sync_job) {
        bail!("permission check failed");
    }

    rpcenv["digest"] = hex::encode(digest).into();

    Ok(sync_job)
}

#[api()]
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
#[allow(non_camel_case_types)]
/// Deletable property name.
pub enum DeletableProperty {
    /// Delete the owner property.
    owner,
    /// Delete the comment property.
    comment,
    /// Delete the job schedule.
    schedule,
    /// Delete the remove-vanished flag.
    remove_vanished,
    /// Delete the group_filter property.
    group_filter,
    /// Delete the rate_in property.
    rate_in,
    /// Delete the burst_in property.
    burst_in,
    /// Delete the rate_out property.
    rate_out,
    /// Delete the burst_out property.
    burst_out,
    /// Delete the ns property.
    ns,
    /// Delete the remote_ns property.
    remote_ns,
    /// Delete the max_depth property.
    max_depth,
}

#[api(
    protected: true,
    input: {
        properties: {
            id: {
                schema: JOB_ID_SCHEMA,
            },
            update: {
                type: SyncJobConfigUpdater,
                flatten: true,
            },
            delete: {
                description: "List of properties to delete.",
                type: Array,
                optional: true,
                items: {
                    type: DeletableProperty,
                }
            },
            digest: {
                optional: true,
                schema: PROXMOX_CONFIG_DIGEST_SCHEMA,
            },
        },
    },
    access: {
        permission: &Permission::Anybody,
        description: "User needs Datastore.Backup on target datastore, and Remote.Read on source remote. Additionally, remove_vanished requires Datastore.Prune, and any owner other than the user themselves requires Datastore.Modify",
    },
)]
/// Update sync job config.
#[allow(clippy::too_many_arguments)]
pub fn update_sync_job(
    id: String,
    update: SyncJobConfigUpdater,
    delete: Option<Vec<DeletableProperty>>,
    digest: Option<String>,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;

    let _lock = sync::lock_config()?;

    let (mut config, expected_digest) = sync::config()?;

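    // an optional digest from the client guards against concurrent modifications:
    // if it no longer matches the on-disk configuration, the update is rejected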
    if let Some(ref digest) = digest {
        let digest = <[u8; 32]>::from_hex(digest)?;
        crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
    }

    let mut data: SyncJobConfig = config.lookup("sync", &id)?;

    if let Some(delete) = delete {
        for delete_prop in delete {
            match delete_prop {
                DeletableProperty::owner => {
                    data.owner = None;
                }
                DeletableProperty::comment => {
                    data.comment = None;
                }
                DeletableProperty::schedule => {
                    data.schedule = None;
                }
                DeletableProperty::remove_vanished => {
                    data.remove_vanished = None;
                }
                DeletableProperty::group_filter => {
                    data.group_filter = None;
                }
                DeletableProperty::rate_in => {
                    data.limit.rate_in = None;
                }
                DeletableProperty::rate_out => {
                    data.limit.rate_out = None;
                }
                DeletableProperty::burst_in => {
                    data.limit.burst_in = None;
                }
                DeletableProperty::burst_out => {
                    data.limit.burst_out = None;
                }
                DeletableProperty::ns => {
                    data.ns = None;
                }
                DeletableProperty::remote_ns => {
                    data.remote_ns = None;
                }
                DeletableProperty::max_depth => {
                    data.max_depth = None;
                }
            }
        }
    }

    if let Some(comment) = update.comment {
        let comment = comment.trim().to_string();
        if comment.is_empty() {
            data.comment = None;
        } else {
            data.comment = Some(comment);
        }
    }

    if let Some(store) = update.store {
        data.store = store;
    }
    if let Some(ns) = update.ns {
        data.ns = Some(ns);
    }
    if let Some(remote) = update.remote {
        data.remote = remote;
    }
    if let Some(remote_store) = update.remote_store {
        data.remote_store = remote_store;
    }
    if let Some(remote_ns) = update.remote_ns {
        data.remote_ns = Some(remote_ns);
    }
    if let Some(owner) = update.owner {
        data.owner = Some(owner);
    }
    if let Some(group_filter) = update.group_filter {
        data.group_filter = Some(group_filter);
    }

    if update.limit.rate_in.is_some() {
        data.limit.rate_in = update.limit.rate_in;
    }

    if update.limit.rate_out.is_some() {
        data.limit.rate_out = update.limit.rate_out;
    }

    if update.limit.burst_in.is_some() {
        data.limit.burst_in = update.limit.burst_in;
    }

    if update.limit.burst_out.is_some() {
        data.limit.burst_out = update.limit.burst_out;
    }

    let schedule_changed = data.schedule != update.schedule;
    if update.schedule.is_some() {
        data.schedule = update.schedule;
    }
    if update.remove_vanished.is_some() {
        data.remove_vanished = update.remove_vanished;
    }
    if let Some(max_depth) = update.max_depth {
        data.max_depth = Some(max_depth);
    }

    if let Some(max_depth) = data.max_depth {
        if let Some(ref ns) = data.ns {
            ns.check_max_depth(max_depth)?;
        }
        if let Some(ref ns) = data.remote_ns {
            ns.check_max_depth(max_depth)?;
        }
    }

    if !check_sync_job_modify_access(&user_info, &auth_id, &data) {
        bail!("permission check failed");
    }

    config.set_data(&id, "sync", &data)?;

    sync::save_config(&config)?;

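    // a schedule change also updates the stored last-run time, so the new
    // schedule is evaluated from now on rather than from the previous run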
    if schedule_changed {
        crate::server::jobstate::update_job_last_run_time("syncjob", &id)?;
    }

    Ok(())
}

#[api(
    protected: true,
    input: {
        properties: {
            id: {
                schema: JOB_ID_SCHEMA,
            },
            digest: {
                optional: true,
                schema: PROXMOX_CONFIG_DIGEST_SCHEMA,
            },
        },
    },
    access: {
        permission: &Permission::Anybody,
        description: "User needs Datastore.Backup on target datastore, and Remote.Read on source remote. Additionally, remove_vanished requires Datastore.Prune, and any owner other than the user themselves requires Datastore.Modify",
    },
)]
/// Remove a sync job configuration.
pub fn delete_sync_job(
    id: String,
    digest: Option<String>,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;

    let _lock = sync::lock_config()?;

    let (mut config, expected_digest) = sync::config()?;

    if let Some(ref digest) = digest {
        let digest = <[u8; 32]>::from_hex(digest)?;
        crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
    }

    match config.lookup("sync", &id) {
        Ok(job) => {
            if !check_sync_job_modify_access(&user_info, &auth_id, &job) {
                bail!("permission check failed");
            }
            config.sections.remove(&id);
        }
        Err(_) => {
            http_bail!(NOT_FOUND, "job '{}' does not exist.", id)
        }
    };

    sync::save_config(&config)?;

    crate::server::jobstate::remove_state_file("syncjob", &id)?;

    Ok(())
}

const ITEM_ROUTER: Router = Router::new()
    .get(&API_METHOD_READ_SYNC_JOB)
    .put(&API_METHOD_UPDATE_SYNC_JOB)
    .delete(&API_METHOD_DELETE_SYNC_JOB);

pub const ROUTER: Router = Router::new()
    .get(&API_METHOD_LIST_SYNC_JOBS)
    .post(&API_METHOD_CREATE_SYNC_JOB)
    .match_all("id", &ITEM_ROUTER);

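// access check unit test: builds an in-memory user.cfg and acl.cfg and verifies
// the read/modify helpers above against a sample sync job configuration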
#[test]
fn sync_job_access_test() -> Result<(), Error> {
    let (user_cfg, _) = pbs_config::user::test_cfg_from_str(
        r###"
user: noperm@pbs

user: read@pbs

user: write@pbs

"###,
    )
    .expect("test user.cfg is not parsable");
    let acl_tree = pbs_config::acl::AclTree::from_raw(
        r###"
acl:1:/datastore/localstore1:read@pbs,write@pbs:DatastoreAudit
acl:1:/datastore/localstore1:write@pbs:DatastoreBackup
acl:1:/datastore/localstore2:write@pbs:DatastorePowerUser
acl:1:/datastore/localstore3:write@pbs:DatastoreAdmin
acl:1:/remote/remote1:read@pbs,write@pbs:RemoteAudit
acl:1:/remote/remote1/remotestore1:write@pbs:RemoteSyncOperator
"###,
    )
    .expect("test acl.cfg is not parsable");

    let user_info = CachedUserInfo::test_new(user_cfg, acl_tree);

    let root_auth_id = Authid::root_auth_id();

    let no_perm_auth_id: Authid = "noperm@pbs".parse()?;
    let read_auth_id: Authid = "read@pbs".parse()?;
    let write_auth_id: Authid = "write@pbs".parse()?;

    let mut job = SyncJobConfig {
        id: "regular".to_string(),
        remote: "remote0".to_string(),
        remote_store: "remotestore1".to_string(),
        remote_ns: None,
        store: "localstore0".to_string(),
        ns: None,
        owner: Some(write_auth_id.clone()),
        comment: None,
        remove_vanished: None,
        max_depth: None,
        group_filter: None,
        schedule: None,
        limit: pbs_api_types::RateLimitConfig::default(), // no limit
    };

    // should work without ACLs
    assert!(check_sync_job_read_access(&user_info, root_auth_id, &job));
    assert!(check_sync_job_modify_access(&user_info, root_auth_id, &job));

    // user without permissions must fail
    assert!(!check_sync_job_read_access(
        &user_info,
        &no_perm_auth_id,
        &job
    ));
    assert!(!check_sync_job_modify_access(
        &user_info,
        &no_perm_auth_id,
        &job
    ));

    // reading without proper read permissions on either remote or local must fail
    assert!(!check_sync_job_read_access(&user_info, &read_auth_id, &job));

    // reading without proper read permissions on local end must fail
    job.remote = "remote1".to_string();
    assert!(!check_sync_job_read_access(&user_info, &read_auth_id, &job));

    // reading without proper read permissions on remote end must fail
    job.remote = "remote0".to_string();
    job.store = "localstore1".to_string();
    assert!(!check_sync_job_read_access(&user_info, &read_auth_id, &job));

    // writing without proper write permissions on either end must fail
    job.store = "localstore0".to_string();
    assert!(!check_sync_job_modify_access(
        &user_info,
        &write_auth_id,
        &job
    ));

    // writing without proper write permissions on local end must fail
    job.remote = "remote1".to_string();
    assert!(!check_sync_job_modify_access(
        &user_info,
        &write_auth_id,
        &job
    ));

    // writing without proper write permissions on remote end must fail
    job.remote = "remote0".to_string();
    job.store = "localstore1".to_string();
    assert!(!check_sync_job_modify_access(
        &user_info,
        &write_auth_id,
        &job
    ));

    // reset remote to one where users have access
    job.remote = "remote1".to_string();

    // user with read permission can only read, but not modify/run
    assert!(check_sync_job_read_access(&user_info, &read_auth_id, &job));
    job.owner = Some(read_auth_id.clone());
    assert!(!check_sync_job_modify_access(
        &user_info,
        &read_auth_id,
        &job
    ));
    job.owner = None;
    assert!(!check_sync_job_modify_access(
        &user_info,
        &read_auth_id,
        &job
    ));
    job.owner = Some(write_auth_id.clone());
    assert!(!check_sync_job_modify_access(
        &user_info,
        &read_auth_id,
        &job
    ));

    // user with simple write permission can modify/run
    assert!(check_sync_job_read_access(&user_info, &write_auth_id, &job));
    assert!(check_sync_job_modify_access(
        &user_info,
        &write_auth_id,
        &job
    ));

    // but can't modify/run with deletion
    job.remove_vanished = Some(true);
    assert!(!check_sync_job_modify_access(
        &user_info,
        &write_auth_id,
        &job
    ));

    // unless they have Datastore.Prune as well
    job.store = "localstore2".to_string();
    assert!(check_sync_job_modify_access(
        &user_info,
        &write_auth_id,
        &job
    ));

    // changing owner is not possible
    job.owner = Some(read_auth_id.clone());
    assert!(!check_sync_job_modify_access(
        &user_info,
        &write_auth_id,
        &job
    ));

    // also not to the default 'root@pam'
    job.owner = None;
    assert!(!check_sync_job_modify_access(
        &user_info,
        &write_auth_id,
        &job
    ));

    // unless they have Datastore.Modify as well
    job.store = "localstore3".to_string();
    job.owner = Some(read_auth_id);
    assert!(check_sync_job_modify_access(
        &user_info,
        &write_auth_id,
        &job
    ));
    job.owner = None;
    assert!(check_sync_job_modify_access(
        &user_info,
        &write_auth_id,
        &job
    ));

    Ok(())
}