]> git.proxmox.com Git - proxmox-backup.git/blob - src/api2/config/sync.rs
fix non-camel-case enums
[proxmox-backup.git] / src / api2 / config / sync.rs
1 use ::serde::{Deserialize, Serialize};
2 use anyhow::{bail, Error};
3 use hex::FromHex;
4 use serde_json::Value;
5
6 use proxmox_router::{http_bail, Permission, Router, RpcEnvironment};
7 use proxmox_schema::{api, param_bail};
8
9 use pbs_api_types::{
10 Authid, SyncJobConfig, SyncJobConfigUpdater, JOB_ID_SCHEMA, PRIV_DATASTORE_AUDIT,
11 PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_PRUNE, PRIV_REMOTE_AUDIT,
12 PRIV_REMOTE_READ, PROXMOX_CONFIG_DIGEST_SCHEMA,
13 };
14 use pbs_config::sync;
15
16 use pbs_config::CachedUserInfo;
17
18 pub fn check_sync_job_read_access(
19 user_info: &CachedUserInfo,
20 auth_id: &Authid,
21 job: &SyncJobConfig,
22 ) -> bool {
23 let ns_anchor_privs = user_info.lookup_privs(auth_id, &job.acl_path());
24 if ns_anchor_privs & PRIV_DATASTORE_AUDIT == 0 {
25 return false;
26 }
27
28 let remote_privs = user_info.lookup_privs(auth_id, &["remote", &job.remote]);
29 remote_privs & PRIV_REMOTE_AUDIT != 0
30 }
31
32 /// checks whether user can run the corresponding pull job
33 ///
34 /// namespace creation/deletion ACL and backup group ownership checks happen in the pull code directly.
35 /// remote side checks/filters remote datastore/namespace/group access.
36 pub fn check_sync_job_modify_access(
37 user_info: &CachedUserInfo,
38 auth_id: &Authid,
39 job: &SyncJobConfig,
40 ) -> bool {
41 let ns_anchor_privs = user_info.lookup_privs(auth_id, &job.acl_path());
42 if ns_anchor_privs & PRIV_DATASTORE_BACKUP == 0 {
43 return false;
44 }
45
46 if let Some(true) = job.remove_vanished {
47 if ns_anchor_privs & PRIV_DATASTORE_PRUNE == 0 {
48 return false;
49 }
50 }
51
52 let correct_owner = match job.owner {
53 Some(ref owner) => {
54 owner == auth_id
55 || (owner.is_token() && !auth_id.is_token() && owner.user() == auth_id.user())
56 }
57 // default sync owner
58 None => auth_id == Authid::root_auth_id(),
59 };
60
61 // same permission as changing ownership after syncing
62 if !correct_owner && ns_anchor_privs & PRIV_DATASTORE_MODIFY == 0 {
63 return false;
64 }
65
66 let remote_privs = user_info.lookup_privs(auth_id, &["remote", &job.remote, &job.remote_store]);
67 remote_privs & PRIV_REMOTE_READ != 0
68 }
69
70 #[api(
71 input: {
72 properties: {},
73 },
74 returns: {
75 description: "List configured jobs.",
76 type: Array,
77 items: { type: SyncJobConfig },
78 },
79 access: {
80 description: "Limited to sync job entries where user has Datastore.Audit on target datastore, and Remote.Audit on source remote.",
81 permission: &Permission::Anybody,
82 },
83 )]
84 /// List all sync jobs
85 pub fn list_sync_jobs(
86 _param: Value,
87 rpcenv: &mut dyn RpcEnvironment,
88 ) -> Result<Vec<SyncJobConfig>, Error> {
89 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
90 let user_info = CachedUserInfo::new()?;
91
92 let (config, digest) = sync::config()?;
93
94 let list = config.convert_to_typed_array("sync")?;
95
96 rpcenv["digest"] = hex::encode(digest).into();
97
98 let list = list
99 .into_iter()
100 .filter(|sync_job| check_sync_job_read_access(&user_info, &auth_id, sync_job))
101 .collect();
102 Ok(list)
103 }
104
105 #[api(
106 protected: true,
107 input: {
108 properties: {
109 config: {
110 type: SyncJobConfig,
111 flatten: true,
112 },
113 },
114 },
115 access: {
116 description: "User needs Datastore.Backup on target datastore, and Remote.Read on source remote. Additionally, remove_vanished requires Datastore.Prune, and any owner other than the user themselves requires Datastore.Modify",
117 permission: &Permission::Anybody,
118 },
119 )]
120 /// Create a new sync job.
121 pub fn create_sync_job(
122 config: SyncJobConfig,
123 rpcenv: &mut dyn RpcEnvironment,
124 ) -> Result<(), Error> {
125 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
126 let user_info = CachedUserInfo::new()?;
127
128 let _lock = sync::lock_config()?;
129
130 if !check_sync_job_modify_access(&user_info, &auth_id, &config) {
131 bail!("permission check failed");
132 }
133
134 if let Some(max_depth) = config.max_depth {
135 if let Some(ref ns) = config.ns {
136 ns.check_max_depth(max_depth)?;
137 }
138 if let Some(ref ns) = config.remote_ns {
139 ns.check_max_depth(max_depth)?;
140 }
141 }
142
143 let (mut section_config, _digest) = sync::config()?;
144
145 if section_config.sections.get(&config.id).is_some() {
146 param_bail!("id", "job '{}' already exists.", config.id);
147 }
148
149 section_config.set_data(&config.id, "sync", &config)?;
150
151 sync::save_config(&section_config)?;
152
153 crate::server::jobstate::create_state_file("syncjob", &config.id)?;
154
155 Ok(())
156 }
157
158 #[api(
159 input: {
160 properties: {
161 id: {
162 schema: JOB_ID_SCHEMA,
163 },
164 },
165 },
166 returns: { type: SyncJobConfig },
167 access: {
168 description: "Limited to sync job entries where user has Datastore.Audit on target datastore, and Remote.Audit on source remote.",
169 permission: &Permission::Anybody,
170 },
171 )]
172 /// Read a sync job configuration.
173 pub fn read_sync_job(id: String, rpcenv: &mut dyn RpcEnvironment) -> Result<SyncJobConfig, Error> {
174 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
175 let user_info = CachedUserInfo::new()?;
176
177 let (config, digest) = sync::config()?;
178
179 let sync_job = config.lookup("sync", &id)?;
180 if !check_sync_job_read_access(&user_info, &auth_id, &sync_job) {
181 bail!("permission check failed");
182 }
183
184 rpcenv["digest"] = hex::encode(digest).into();
185
186 Ok(sync_job)
187 }
188
#[api()]
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Deletable property name
pub enum DeletableProperty {
    /// Delete the owner property.
    Owner,
    /// Delete the comment property.
    Comment,
    /// Delete the job schedule.
    Schedule,
    /// Delete the remove-vanished flag.
    RemoveVanished,
    /// Delete the group_filter property.
    GroupFilter,
    /// Delete the rate_in property.
    RateIn,
    /// Delete the burst_in property.
    BurstIn,
    /// Delete the rate_out property.
    RateOut,
    /// Delete the burst_out property.
    BurstOut,
    /// Delete the ns property.
    Ns,
    /// Delete the remote_ns property.
    RemoteNs,
    /// Delete the max_depth property.
    MaxDepth,
}
219
#[api(
    protected: true,
    input: {
        properties: {
            id: {
                schema: JOB_ID_SCHEMA,
            },
            update: {
                type: SyncJobConfigUpdater,
                flatten: true,
            },
            delete: {
                description: "List of properties to delete.",
                type: Array,
                optional: true,
                items: {
                    type: DeletableProperty,
                }
            },
            digest: {
                optional: true,
                schema: PROXMOX_CONFIG_DIGEST_SCHEMA,
            },
        },
    },
    access: {
        permission: &Permission::Anybody,
        description: "User needs Datastore.Backup on target datastore, and Remote.Read on source remote. Additionally, remove_vanished requires Datastore.Prune, and any owner other than the user themselves requires Datastore.Modify",
    },
)]
/// Update sync job config.
#[allow(clippy::too_many_arguments)]
pub fn update_sync_job(
    id: String,
    update: SyncJobConfigUpdater,
    delete: Option<Vec<DeletableProperty>>,
    digest: Option<String>,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;

    // hold the config lock for the whole read-modify-write cycle
    let _lock = sync::lock_config()?;

    let (mut config, expected_digest) = sync::config()?;

    // optimistic locking: reject the update if the on-disk config changed
    // since the client read it
    if let Some(ref digest) = digest {
        let digest = <[u8; 32]>::from_hex(digest)?;
        crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
    }

    let mut data: SyncJobConfig = config.lookup("sync", &id)?;

    // apply deletions first, so a property can be cleared and re-set in the
    // same request
    if let Some(delete) = delete {
        for delete_prop in delete {
            match delete_prop {
                DeletableProperty::Owner => {
                    data.owner = None;
                }
                DeletableProperty::Comment => {
                    data.comment = None;
                }
                DeletableProperty::Schedule => {
                    data.schedule = None;
                }
                DeletableProperty::RemoveVanished => {
                    data.remove_vanished = None;
                }
                DeletableProperty::GroupFilter => {
                    data.group_filter = None;
                }
                DeletableProperty::RateIn => {
                    data.limit.rate_in = None;
                }
                DeletableProperty::RateOut => {
                    data.limit.rate_out = None;
                }
                DeletableProperty::BurstIn => {
                    data.limit.burst_in = None;
                }
                DeletableProperty::BurstOut => {
                    data.limit.burst_out = None;
                }
                DeletableProperty::Ns => {
                    data.ns = None;
                }
                DeletableProperty::RemoteNs => {
                    data.remote_ns = None;
                }
                DeletableProperty::MaxDepth => {
                    data.max_depth = None;
                }
            }
        }
    }

    // an empty (after trimming) comment clears the property instead of
    // storing an empty string
    if let Some(comment) = update.comment {
        let comment = comment.trim().to_string();
        if comment.is_empty() {
            data.comment = None;
        } else {
            data.comment = Some(comment);
        }
    }

    // apply all plainly-overwriting updater fields
    if let Some(store) = update.store {
        data.store = store;
    }
    if let Some(ns) = update.ns {
        data.ns = Some(ns);
    }
    if let Some(remote) = update.remote {
        data.remote = remote;
    }
    if let Some(remote_store) = update.remote_store {
        data.remote_store = remote_store;
    }
    if let Some(remote_ns) = update.remote_ns {
        data.remote_ns = Some(remote_ns);
    }
    if let Some(owner) = update.owner {
        data.owner = Some(owner);
    }
    if let Some(group_filter) = update.group_filter {
        data.group_filter = Some(group_filter);
    }

    if update.limit.rate_in.is_some() {
        data.limit.rate_in = update.limit.rate_in;
    }

    if update.limit.rate_out.is_some() {
        data.limit.rate_out = update.limit.rate_out;
    }

    if update.limit.burst_in.is_some() {
        data.limit.burst_in = update.limit.burst_in;
    }

    if update.limit.burst_out.is_some() {
        data.limit.burst_out = update.limit.burst_out;
    }

    // NOTE(review): this compares the *old* schedule against the updater
    // value, so it also evaluates to true when `update.schedule` is None but
    // a schedule is configured (in which case the schedule is not modified
    // below) — confirm whether resetting the last-run time is intended then.
    let schedule_changed = data.schedule != update.schedule;
    if update.schedule.is_some() {
        data.schedule = update.schedule;
    }
    if update.remove_vanished.is_some() {
        data.remove_vanished = update.remove_vanished;
    }
    if let Some(max_depth) = update.max_depth {
        data.max_depth = Some(max_depth);
    }

    // re-validate max-depth against both (possibly just updated) namespaces
    if let Some(max_depth) = data.max_depth {
        if let Some(ref ns) = data.ns {
            ns.check_max_depth(max_depth)?;
        }
        if let Some(ref ns) = data.remote_ns {
            ns.check_max_depth(max_depth)?;
        }
    }

    // check permissions against the *resulting* job config, so an update
    // cannot be used to escalate beyond what the caller may configure
    if !check_sync_job_modify_access(&user_info, &auth_id, &data) {
        bail!("permission check failed");
    }

    config.set_data(&id, "sync", &data)?;

    sync::save_config(&config)?;

    // reset the last-run time so the new schedule takes effect cleanly
    if schedule_changed {
        crate::server::jobstate::update_job_last_run_time("syncjob", &id)?;
    }

    Ok(())
}
397
398 #[api(
399 protected: true,
400 input: {
401 properties: {
402 id: {
403 schema: JOB_ID_SCHEMA,
404 },
405 digest: {
406 optional: true,
407 schema: PROXMOX_CONFIG_DIGEST_SCHEMA,
408 },
409 },
410 },
411 access: {
412 permission: &Permission::Anybody,
413 description: "User needs Datastore.Backup on target datastore, and Remote.Read on source remote. Additionally, remove_vanished requires Datastore.Prune, and any owner other than the user themselves requires Datastore.Modify",
414 },
415 )]
416 /// Remove a sync job configuration
417 pub fn delete_sync_job(
418 id: String,
419 digest: Option<String>,
420 rpcenv: &mut dyn RpcEnvironment,
421 ) -> Result<(), Error> {
422 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
423 let user_info = CachedUserInfo::new()?;
424
425 let _lock = sync::lock_config()?;
426
427 let (mut config, expected_digest) = sync::config()?;
428
429 if let Some(ref digest) = digest {
430 let digest = <[u8; 32]>::from_hex(digest)?;
431 crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
432 }
433
434 match config.lookup("sync", &id) {
435 Ok(job) => {
436 if !check_sync_job_modify_access(&user_info, &auth_id, &job) {
437 bail!("permission check failed");
438 }
439 config.sections.remove(&id);
440 }
441 Err(_) => {
442 http_bail!(NOT_FOUND, "job '{}' does not exist.", id)
443 }
444 };
445
446 sync::save_config(&config)?;
447
448 crate::server::jobstate::remove_state_file("syncjob", &id)?;
449
450 Ok(())
451 }
452
/// Sub-router for a single sync job entry (read/update/delete by id).
const ITEM_ROUTER: Router = Router::new()
    .get(&API_METHOD_READ_SYNC_JOB)
    .put(&API_METHOD_UPDATE_SYNC_JOB)
    .delete(&API_METHOD_DELETE_SYNC_JOB);

/// Sync job config API router: list/create at the top level, per-id
/// operations via [`ITEM_ROUTER`].
pub const ROUTER: Router = Router::new()
    .get(&API_METHOD_LIST_SYNC_JOBS)
    .post(&API_METHOD_CREATE_SYNC_JOB)
    .match_all("id", &ITEM_ROUTER);
462
#[test]
/// Exercises `check_sync_job_read_access`/`check_sync_job_modify_access`
/// against an in-memory user config and ACL tree covering the documented
/// permission combinations.
fn sync_job_access_test() -> Result<(), Error> {
    let (user_cfg, _) = pbs_config::user::test_cfg_from_str(
        r###"
user: noperm@pbs

user: read@pbs

user: write@pbs

"###,
    )
    .expect("test user.cfg is not parsable");
    let acl_tree = pbs_config::acl::AclTree::from_raw(
        r###"
acl:1:/datastore/localstore1:read@pbs,write@pbs:DatastoreAudit
acl:1:/datastore/localstore1:write@pbs:DatastoreBackup
acl:1:/datastore/localstore2:write@pbs:DatastorePowerUser
acl:1:/datastore/localstore3:write@pbs:DatastoreAdmin
acl:1:/remote/remote1:read@pbs,write@pbs:RemoteAudit
acl:1:/remote/remote1/remotestore1:write@pbs:RemoteSyncOperator
"###,
    )
    .expect("test acl.cfg is not parsable");

    let user_info = CachedUserInfo::test_new(user_cfg, acl_tree);

    let root_auth_id = Authid::root_auth_id();

    let no_perm_auth_id: Authid = "noperm@pbs".parse()?;
    let read_auth_id: Authid = "read@pbs".parse()?;
    let write_auth_id: Authid = "write@pbs".parse()?;

    // note: 'remote0' and 'localstore0' have no ACL entries at all
    let mut job = SyncJobConfig {
        id: "regular".to_string(),
        remote: "remote0".to_string(),
        remote_store: "remotestore1".to_string(),
        remote_ns: None,
        store: "localstore0".to_string(),
        ns: None,
        owner: Some(write_auth_id.clone()),
        comment: None,
        remove_vanished: None,
        max_depth: None,
        group_filter: None,
        schedule: None,
        limit: pbs_api_types::RateLimitConfig::default(), // no limit
    };

    // should work without ACLs
    assert!(check_sync_job_read_access(&user_info, root_auth_id, &job));
    assert!(check_sync_job_modify_access(&user_info, root_auth_id, &job));

    // user without permissions must fail
    assert!(!check_sync_job_read_access(
        &user_info,
        &no_perm_auth_id,
        &job
    ));
    assert!(!check_sync_job_modify_access(
        &user_info,
        &no_perm_auth_id,
        &job
    ));

    // reading without proper read permissions on either remote or local must fail
    assert!(!check_sync_job_read_access(&user_info, &read_auth_id, &job));

    // reading without proper read permissions on local end must fail
    job.remote = "remote1".to_string();
    assert!(!check_sync_job_read_access(&user_info, &read_auth_id, &job));

    // reading without proper read permissions on remote end must fail
    job.remote = "remote0".to_string();
    job.store = "localstore1".to_string();
    assert!(!check_sync_job_read_access(&user_info, &read_auth_id, &job));

    // writing without proper write permissions on either end must fail
    job.store = "localstore0".to_string();
    assert!(!check_sync_job_modify_access(
        &user_info,
        &write_auth_id,
        &job
    ));

    // writing without proper write permissions on local end must fail
    job.remote = "remote1".to_string();
    // fix: this case previously set up the job but never asserted anything
    assert!(!check_sync_job_modify_access(
        &user_info,
        &write_auth_id,
        &job
    ));

    // writing without proper write permissions on remote end must fail
    job.remote = "remote0".to_string();
    job.store = "localstore1".to_string();
    assert!(!check_sync_job_modify_access(
        &user_info,
        &write_auth_id,
        &job
    ));

    // reset remote to one where users have access
    job.remote = "remote1".to_string();

    // user with read permission can only read, but not modify/run
    assert!(check_sync_job_read_access(&user_info, &read_auth_id, &job));
    job.owner = Some(read_auth_id.clone());
    assert!(!check_sync_job_modify_access(
        &user_info,
        &read_auth_id,
        &job
    ));
    job.owner = None;
    assert!(!check_sync_job_modify_access(
        &user_info,
        &read_auth_id,
        &job
    ));
    job.owner = Some(write_auth_id.clone());
    assert!(!check_sync_job_modify_access(
        &user_info,
        &read_auth_id,
        &job
    ));

    // user with simple write permission can modify/run
    assert!(check_sync_job_read_access(&user_info, &write_auth_id, &job));
    assert!(check_sync_job_modify_access(
        &user_info,
        &write_auth_id,
        &job
    ));

    // but can't modify/run with deletion
    job.remove_vanished = Some(true);
    assert!(!check_sync_job_modify_access(
        &user_info,
        &write_auth_id,
        &job
    ));

    // unless they have Datastore.Prune as well
    job.store = "localstore2".to_string();
    assert!(check_sync_job_modify_access(
        &user_info,
        &write_auth_id,
        &job
    ));

    // changing owner is not possible
    job.owner = Some(read_auth_id.clone());
    assert!(!check_sync_job_modify_access(
        &user_info,
        &write_auth_id,
        &job
    ));

    // also not to the default 'root@pam'
    job.owner = None;
    assert!(!check_sync_job_modify_access(
        &user_info,
        &write_auth_id,
        &job
    ));

    // unless they have Datastore.Modify as well
    job.store = "localstore3".to_string();
    job.owner = Some(read_auth_id);
    assert!(check_sync_job_modify_access(
        &user_info,
        &write_auth_id,
        &job
    ));
    job.owner = None;
    assert!(check_sync_job_modify_access(
        &user_info,
        &write_auth_id,
        &job
    ));

    Ok(())
}