// Source: git.proxmox.com - proxmox-backup.git - pbs-api-types/src/jobs.rs
// (snapshot at commit "cleanup schema function calls")
1 use anyhow::format_err;
2 use std::str::FromStr;
3
4 use regex::Regex;
5 use serde::{Deserialize, Serialize};
6
7 use proxmox_schema::*;
8
9 use crate::{
10 Userid, Authid, RateLimitConfig,
11 REMOTE_ID_SCHEMA, DRIVE_NAME_SCHEMA, MEDIA_POOL_NAME_SCHEMA,
12 SINGLE_LINE_COMMENT_SCHEMA, PROXMOX_SAFE_ID_FORMAT, DATASTORE_SCHEMA,
13 BACKUP_GROUP_SCHEMA, BACKUP_TYPE_SCHEMA,
14 };
15
// Worker-task IDs encode the affected objects as colon-separated,
// safe-id prefixes in front of the actual job ID.
const_regex!{

/// Regex for verification jobs 'DATASTORE:ACTUAL_JOB_ID'
pub VERIFICATION_JOB_WORKER_ID_REGEX = concat!(r"^(", PROXMOX_SAFE_ID_REGEX_STR!(), r"):");
/// Regex for sync jobs 'REMOTE:REMOTE_DATASTORE:LOCAL_DATASTORE:ACTUAL_JOB_ID'
pub SYNC_JOB_WORKER_ID_REGEX = concat!(r"^(", PROXMOX_SAFE_ID_REGEX_STR!(), r"):(", PROXMOX_SAFE_ID_REGEX_STR!(), r"):(", PROXMOX_SAFE_ID_REGEX_STR!(), r"):");
}
23
/// Schema for job identifiers: a safe-id string, 3 to 32 characters long.
pub const JOB_ID_SCHEMA: Schema = StringSchema::new("Job ID.")
    .format(&PROXMOX_SAFE_ID_FORMAT)
    .min_length(3)
    .max_length(32)
    .schema();
29
// The four schedule schemas below all use systemd-style calendar-event
// syntax, validated by `proxmox_time::verify_calendar_event`; they only
// differ in the job type named in the description.

/// Schema for sync job schedules (calendar-event syntax).
pub const SYNC_SCHEDULE_SCHEMA: Schema = StringSchema::new(
    "Run sync job at specified schedule.")
    .format(&ApiStringFormat::VerifyFn(proxmox_time::verify_calendar_event))
    .type_text("<calendar-event>")
    .schema();

/// Schema for garbage-collection job schedules (calendar-event syntax).
pub const GC_SCHEDULE_SCHEMA: Schema = StringSchema::new(
    "Run garbage collection job at specified schedule.")
    .format(&ApiStringFormat::VerifyFn(proxmox_time::verify_calendar_event))
    .type_text("<calendar-event>")
    .schema();

/// Schema for prune job schedules (calendar-event syntax).
pub const PRUNE_SCHEDULE_SCHEMA: Schema = StringSchema::new(
    "Run prune job at specified schedule.")
    .format(&ApiStringFormat::VerifyFn(proxmox_time::verify_calendar_event))
    .type_text("<calendar-event>")
    .schema();

/// Schema for verification job schedules (calendar-event syntax).
pub const VERIFICATION_SCHEDULE_SCHEMA: Schema = StringSchema::new(
    "Run verify job at specified schedule.")
    .format(&ApiStringFormat::VerifyFn(proxmox_time::verify_calendar_event))
    .type_text("<calendar-event>")
    .schema();
53
54 pub const REMOVE_VANISHED_BACKUPS_SCHEMA: Schema = BooleanSchema::new(
55 "Delete vanished backups. This remove the local copy if the remote backup was deleted.")
56 .default(false)
57 .schema();
58
#[api(
    properties: {
        "next-run": {
            description: "Estimated time of the next run (UNIX epoch).",
            optional: true,
            type: Integer,
        },
        "last-run-state": {
            description: "Result of the last run.",
            optional: true,
            type: String,
        },
        "last-run-upid": {
            description: "Task UPID of the last run.",
            optional: true,
            type: String,
        },
        "last-run-endtime": {
            description: "Endtime of the last run.",
            optional: true,
            type: Integer,
        },
    }
)]
#[derive(Serialize,Deserialize,Default)]
#[serde(rename_all="kebab-case")]
/// Job Scheduling Status
// Common schedule/last-run state shared by the *JobStatus types in this
// file (flattened into them via `#[serde(flatten)]`).  `None` fields are
// omitted from the serialized form.
pub struct JobScheduleStatus {
    // Estimated time of the next run (UNIX epoch).
    #[serde(skip_serializing_if="Option::is_none")]
    pub next_run: Option<i64>,
    // Result of the last run.
    #[serde(skip_serializing_if="Option::is_none")]
    pub last_run_state: Option<String>,
    // Task UPID of the last run.
    #[serde(skip_serializing_if="Option::is_none")]
    pub last_run_upid: Option<String>,
    // Endtime of the last run.
    #[serde(skip_serializing_if="Option::is_none")]
    pub last_run_endtime: Option<i64>,
}
96
#[api()]
#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
/// When do we send notifications
// Serialized as "never" / "always" / "error" (lowercase rename).
pub enum Notify {
    /// Never send notification
    Never,
    /// Send notifications for failed and successful jobs
    Always,
    /// Send notifications for failed jobs only
    Error,
}
109
#[api(
    properties: {
        gc: {
            type: Notify,
            optional: true,
        },
        verify: {
            type: Notify,
            optional: true,
        },
        sync: {
            type: Notify,
            optional: true,
        },
    },
)]
#[derive(Debug, Serialize, Deserialize)]
/// Datastore notify settings
// Per-job-type notification settings; used as a property string via
// `DATASTORE_NOTIFY_STRING_SCHEMA` below.  Each field is optional.
pub struct DatastoreNotify {
    /// Garbage collection settings
    pub gc: Option<Notify>,
    /// Verify job setting
    pub verify: Option<Notify>,
    /// Sync job setting
    pub sync: Option<Notify>,
}
136
/// Schema for the datastore notification setting, encoded as a property
/// string of [`DatastoreNotify`].
pub const DATASTORE_NOTIFY_STRING_SCHEMA: Schema = StringSchema::new(
    "Datastore notification setting")
    .format(&ApiStringFormat::PropertyString(&DatastoreNotify::API_SCHEMA))
    .schema();
141
/// Schema for the `ignore-verified` option of verification jobs
/// (default: true).
pub const IGNORE_VERIFIED_BACKUPS_SCHEMA: Schema = BooleanSchema::new(
    "Do not verify backups that are already verified if their verification is not outdated.")
    .default(true)
    .schema();
146
147 pub const VERIFICATION_OUTDATED_AFTER_SCHEMA: Schema = IntegerSchema::new(
148 "Days after that a verification becomes outdated")
149 .minimum(1)
150 .schema();
151
#[api(
    properties: {
        id: {
            schema: JOB_ID_SCHEMA,
        },
        store: {
            schema: DATASTORE_SCHEMA,
        },
        "ignore-verified": {
            optional: true,
            schema: IGNORE_VERIFIED_BACKUPS_SCHEMA,
        },
        "outdated-after": {
            optional: true,
            schema: VERIFICATION_OUTDATED_AFTER_SCHEMA,
        },
        comment: {
            optional: true,
            schema: SINGLE_LINE_COMMENT_SCHEMA,
        },
        schedule: {
            optional: true,
            schema: VERIFICATION_SCHEDULE_SCHEMA,
        },
    }
)]
#[derive(Serialize,Deserialize,Updater)]
#[serde(rename_all="kebab-case")]
/// Verification Job
pub struct VerificationJobConfig {
    /// unique ID to address this job
    #[updater(skip)]
    pub id: String,
    /// the datastore ID this verification job affects
    pub store: String,
    #[serde(skip_serializing_if="Option::is_none")]
    /// if not set to false, check the age of the last snapshot verification to filter
    /// out recent ones, depending on 'outdated_after' configuration.
    pub ignore_verified: Option<bool>,
    #[serde(skip_serializing_if="Option::is_none")]
    /// Reverify snapshots after X days, never if 0. Ignored if 'ignore_verified' is false.
    pub outdated_after: Option<i64>,
    // optional single-line comment
    #[serde(skip_serializing_if="Option::is_none")]
    pub comment: Option<String>,
    #[serde(skip_serializing_if="Option::is_none")]
    /// when to schedule this job in calendar event notation
    pub schedule: Option<String>,
}
200
#[api(
    properties: {
        config: {
            type: VerificationJobConfig,
        },
        status: {
            type: JobScheduleStatus,
        },
    },
)]
#[derive(Serialize,Deserialize)]
#[serde(rename_all="kebab-case")]
/// Status of Verification Job
// Job configuration plus its schedule state, both flattened into one
// top-level object on the wire.
pub struct VerificationJobStatus {
    #[serde(flatten)]
    pub config: VerificationJobConfig,
    #[serde(flatten)]
    pub status: JobScheduleStatus,
}
220
#[api(
    properties: {
        store: {
            schema: DATASTORE_SCHEMA,
        },
        pool: {
            schema: MEDIA_POOL_NAME_SCHEMA,
        },
        drive: {
            schema: DRIVE_NAME_SCHEMA,
        },
        "eject-media": {
            description: "Eject media upon job completion.",
            type: bool,
            optional: true,
        },
        "export-media-set": {
            description: "Export media set upon job completion.",
            type: bool,
            optional: true,
        },
        "latest-only": {
            description: "Backup latest snapshots only.",
            type: bool,
            optional: true,
        },
        "notify-user": {
            optional: true,
            type: Userid,
        },
        "group-filter": {
            schema: GROUP_FILTER_LIST_SCHEMA,
            optional: true,
        },
    }
)]
#[derive(Serialize,Deserialize,Clone,Updater)]
#[serde(rename_all="kebab-case")]
/// Tape Backup Job Setup
pub struct TapeBackupJobSetup {
    // datastore to back up (DATASTORE_SCHEMA)
    pub store: String,
    // media pool to use (MEDIA_POOL_NAME_SCHEMA)
    pub pool: String,
    // tape drive to use (DRIVE_NAME_SCHEMA)
    pub drive: String,
    // eject media upon job completion
    #[serde(skip_serializing_if="Option::is_none")]
    pub eject_media: Option<bool>,
    // export media set upon job completion
    #[serde(skip_serializing_if="Option::is_none")]
    pub export_media_set: Option<bool>,
    // back up latest snapshots only
    #[serde(skip_serializing_if="Option::is_none")]
    pub latest_only: Option<bool>,
    /// Send job email notification to this user
    #[serde(skip_serializing_if="Option::is_none")]
    pub notify_user: Option<Userid>,
    // optional list of group filters restricting what gets backed up
    #[serde(skip_serializing_if="Option::is_none")]
    pub group_filter: Option<Vec<GroupFilter>>,
}
276
#[api(
    properties: {
        id: {
            schema: JOB_ID_SCHEMA,
        },
        setup: {
            type: TapeBackupJobSetup,
        },
        comment: {
            optional: true,
            schema: SINGLE_LINE_COMMENT_SCHEMA,
        },
        schedule: {
            optional: true,
            schema: SYNC_SCHEDULE_SCHEMA,
        },
    }
)]
#[derive(Serialize,Deserialize,Clone,Updater)]
#[serde(rename_all="kebab-case")]
/// Tape Backup Job
pub struct TapeBackupJobConfig {
    // unique ID to address this job; fixed, hence skipped by the Updater
    #[updater(skip)]
    pub id: String,
    // the actual backup settings, flattened into the top-level object
    #[serde(flatten)]
    pub setup: TapeBackupJobSetup,
    // optional single-line comment
    #[serde(skip_serializing_if="Option::is_none")]
    pub comment: Option<String>,
    // optional schedule in calendar-event notation
    #[serde(skip_serializing_if="Option::is_none")]
    pub schedule: Option<String>,
}
308
#[api(
    properties: {
        config: {
            type: TapeBackupJobConfig,
        },
        status: {
            type: JobScheduleStatus,
        },
    },
)]
#[derive(Serialize,Deserialize)]
#[serde(rename_all="kebab-case")]
/// Status of Tape Backup Job
// Job configuration plus schedule state, both flattened, plus a guess at
// the next tape.
pub struct TapeBackupJobStatus {
    #[serde(flatten)]
    pub config: TapeBackupJobConfig,
    #[serde(flatten)]
    pub status: JobScheduleStatus,
    /// Next tape used (best guess)
    #[serde(skip_serializing_if="Option::is_none")]
    pub next_media_label: Option<String>,
}
331
#[derive(Clone, Debug)]
/// Filter for matching `BackupGroup`s, for use with `BackupGroup::filter`.
///
/// (De)serialized through its `FromStr`/`Display` impls (see the
/// `forward_deserialize_to_from_str!` / `forward_serialize_to_display!`
/// invocations below), i.e. as `type:...`, `group:...` or `regex:...`.
pub enum GroupFilter {
    /// BackupGroup type - either `vm`, `ct`, or `host`.
    BackupType(String),
    /// Full identifier of BackupGroup, including type
    Group(String),
    /// A regular expression matched against the full identifier of the BackupGroup
    Regex(Regex),
}
342
343 impl std::str::FromStr for GroupFilter {
344 type Err = anyhow::Error;
345
346 fn from_str(s: &str) -> Result<Self, Self::Err> {
347 match s.split_once(":") {
348 Some(("group", value)) => BACKUP_GROUP_SCHEMA.parse_simple_value(value).map(|_| GroupFilter::Group(value.to_string())),
349 Some(("type", value)) => BACKUP_TYPE_SCHEMA.parse_simple_value(value).map(|_| GroupFilter::BackupType(value.to_string())),
350 Some(("regex", value)) => Ok(GroupFilter::Regex(Regex::new(value)?)),
351 Some((ty, _value)) => Err(format_err!("expected 'group', 'type' or 'regex' prefix, got '{}'", ty)),
352 None => Err(format_err!("input doesn't match expected format '<group:GROUP||type:<vm|ct|host>|regex:REGEX>'")),
353 }.map_err(|err| format_err!("'{}' - {}", s, err))
354 }
355 }
356
357 // used for serializing below, caution!
358 impl std::fmt::Display for GroupFilter {
359 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
360 match self {
361 GroupFilter::BackupType(backup_type) => write!(f, "type:{}", backup_type),
362 GroupFilter::Group(backup_group) => write!(f, "group:{}", backup_group),
363 GroupFilter::Regex(regex) => write!(f, "regex:{}", regex.as_str()),
364 }
365 }
366 }
367
// GroupFilter crosses the wire in its string form: deserialization goes
// through `FromStr`, serialization through `Display`.
proxmox_serde::forward_deserialize_to_from_str!(GroupFilter);
proxmox_serde::forward_serialize_to_display!(GroupFilter);
370
371 fn verify_group_filter(input: &str) -> Result<(), anyhow::Error> {
372 GroupFilter::from_str(input).map(|_| ())
373 }
374
/// Schema for a single group filter, validated by [`verify_group_filter`]
/// (i.e. by `GroupFilter::from_str`).
pub const GROUP_FILTER_SCHEMA: Schema = StringSchema::new(
    "Group filter based on group identifier ('group:GROUP'), group type ('type:<vm|ct|host>'), or regex ('regex:RE').")
    .format(&ApiStringFormat::VerifyFn(verify_group_filter))
    .type_text("<type:<vm|ct|host>|group:GROUP|regex:RE>")
    .schema();
380
/// Schema for a list of group filters (array of [`GROUP_FILTER_SCHEMA`]).
pub const GROUP_FILTER_LIST_SCHEMA: Schema = ArraySchema::new("List of group filters.", &GROUP_FILTER_SCHEMA).schema();
382
#[api(
    properties: {
        id: {
            schema: JOB_ID_SCHEMA,
        },
        store: {
            schema: DATASTORE_SCHEMA,
        },
        "owner": {
            type: Authid,
            optional: true,
        },
        remote: {
            schema: REMOTE_ID_SCHEMA,
        },
        "remote-store": {
            schema: DATASTORE_SCHEMA,
        },
        "remove-vanished": {
            schema: REMOVE_VANISHED_BACKUPS_SCHEMA,
            optional: true,
        },
        comment: {
            optional: true,
            schema: SINGLE_LINE_COMMENT_SCHEMA,
        },
        limit: {
            type: RateLimitConfig,
        },
        schedule: {
            optional: true,
            schema: SYNC_SCHEDULE_SCHEMA,
        },
        "group-filter": {
            schema: GROUP_FILTER_LIST_SCHEMA,
            optional: true,
        },
    }
)]
#[derive(Serialize,Deserialize,Clone,Updater)]
#[serde(rename_all="kebab-case")]
/// Sync Job
pub struct SyncJobConfig {
    // unique ID to address this job; fixed, hence skipped by the Updater
    #[updater(skip)]
    pub id: String,
    // local datastore (DATASTORE_SCHEMA)
    pub store: String,
    // optional owner (Authid)
    #[serde(skip_serializing_if="Option::is_none")]
    pub owner: Option<Authid>,
    // remote to sync from (REMOTE_ID_SCHEMA)
    pub remote: String,
    // datastore on the remote side
    pub remote_store: String,
    // delete local copies of backups that vanished on the remote
    // (cf. REMOVE_VANISHED_BACKUPS_SCHEMA)
    #[serde(skip_serializing_if="Option::is_none")]
    pub remove_vanished: Option<bool>,
    // optional single-line comment
    #[serde(skip_serializing_if="Option::is_none")]
    pub comment: Option<String>,
    // optional schedule in calendar-event notation
    #[serde(skip_serializing_if="Option::is_none")]
    pub schedule: Option<String>,
    // optional list of group filters restricting what gets synced
    #[serde(skip_serializing_if="Option::is_none")]
    pub group_filter: Option<Vec<GroupFilter>>,
    // rate limit settings, flattened into the top-level object
    #[serde(flatten)]
    pub limit: RateLimitConfig,
}
444
#[api(
    properties: {
        config: {
            type: SyncJobConfig,
        },
        status: {
            type: JobScheduleStatus,
        },
    },
)]

#[derive(Serialize,Deserialize)]
#[serde(rename_all="kebab-case")]
/// Status of Sync Job
// Job configuration plus its schedule state, both flattened into one
// top-level object on the wire.
pub struct SyncJobStatus {
    #[serde(flatten)]
    pub config: SyncJobConfig,
    #[serde(flatten)]
    pub status: JobScheduleStatus,
}