proxmox-backup.git / proxmox-rest-server / src / worker_task.rs
1 use std::collections::{HashMap, VecDeque};
2 use std::fs::File;
3 use std::path::PathBuf;
4 use std::io::{Read, Write, BufRead, BufReader};
5 use std::panic::UnwindSafe;
6 use std::sync::atomic::{AtomicBool, Ordering};
7 use std::sync::{Arc, Mutex};
8
9 use anyhow::{bail, format_err, Error};
10 use futures::*;
11 use lazy_static::lazy_static;
12 use serde_json::{json, Value};
13 use serde::{Serialize, Deserialize};
14 use tokio::sync::oneshot;
15 use nix::fcntl::OFlag;
16 use once_cell::sync::OnceCell;
17
18 use proxmox::sys::linux::procfs;
19 use proxmox::try_block;
20 use proxmox::tools::fs::{create_path, replace_file, atomic_open_or_create_file, CreateOptions};
21 use proxmox::api::upid::UPID;
22
23 use pbs_tools::task::WorkerTaskContext;
24 use pbs_tools::logrotate::{LogRotate, LogRotateFiles};
25
26 use crate::{CommandoSocket, FileLogger, FileLogOptions};
27
28 struct TaskListLockGuard(File);
29
30 struct WorkerTaskSetup {
31 file_opts: CreateOptions,
32 taskdir: PathBuf,
33 task_lock_fn: PathBuf,
34 active_tasks_fn: PathBuf,
35 task_index_fn: PathBuf,
36 task_archive_fn: PathBuf,
37 }
38
39 static WORKER_TASK_SETUP: OnceCell<WorkerTaskSetup> = OnceCell::new();
40
41 fn worker_task_setup() -> Result<&'static WorkerTaskSetup, Error> {
42 WORKER_TASK_SETUP.get()
43 .ok_or_else(|| format_err!("WorkerTask library is not initialized"))
44 }
45
46 impl WorkerTaskSetup {
47
48 fn new(basedir: PathBuf, file_opts: CreateOptions) -> Self {
49
50 let mut taskdir = basedir.clone();
51 taskdir.push("tasks");
52
53 let mut task_lock_fn = taskdir.clone();
54 task_lock_fn.push(".active.lock");
55
56 let mut active_tasks_fn = taskdir.clone();
57 active_tasks_fn.push("active");
58
59 let mut task_index_fn = taskdir.clone();
60 task_index_fn.push("index");
61
62 let mut task_archive_fn = taskdir.clone();
63 task_archive_fn.push("archive");
64
65 Self {
66 file_opts,
67 taskdir,
68 task_lock_fn,
69 active_tasks_fn,
70 task_index_fn,
71 task_archive_fn,
72 }
73 }
74
75 fn lock_task_list_files(&self, exclusive: bool) -> Result<TaskListLockGuard, Error> {
76 let options = self.file_opts.clone()
77 .perm(nix::sys::stat::Mode::from_bits_truncate(0o660));
78
79 let timeout = std::time::Duration::new(10, 0);
80
81 let file = proxmox::tools::fs::open_file_locked(
82 &self.task_lock_fn,
83 timeout,
84 exclusive,
85 options,
86 )?;
87
88 Ok(TaskListLockGuard(file))
89 }
90
91 fn log_path(&self, upid: &UPID) -> std::path::PathBuf {
92 let mut path = self.taskdir.clone();
93 path.push(format!("{:02X}", upid.pstart % 256));
94 path.push(upid.to_string());
95 path
96 }
97
98 // atomically read/update the task list, update status of finished tasks
99 // new_upid is added to the list when specified.
100 fn update_active_workers(&self, new_upid: Option<&UPID>) -> Result<(), Error> {
101
102 let lock = self.lock_task_list_files(true)?;
103
104 // TODO remove with 1.x
105 let mut finish_list: Vec<TaskListInfo> = read_task_file_from_path(&self.task_index_fn)?;
106 let had_index_file = !finish_list.is_empty();
107
108 // We use filter_map because one negative case wants to *move* the data into `finish_list`,
109 // clippy doesn't quite catch this!
110 #[allow(clippy::unnecessary_filter_map)]
111 let mut active_list: Vec<TaskListInfo> = read_task_file_from_path(&self.active_tasks_fn)?
112 .into_iter()
113 .filter_map(|info| {
114 if info.state.is_some() {
115 // this can happen when the active file still includes finished tasks
116 finish_list.push(info);
117 return None;
118 }
119
120 if !worker_is_active_local(&info.upid) {
121 // println!("Detected stopped task '{}'", &info.upid_str);
122 let now = proxmox::tools::time::epoch_i64();
123 let status = upid_read_status(&info.upid).unwrap_or(TaskState::Unknown { endtime: now });
124 finish_list.push(TaskListInfo {
125 upid: info.upid,
126 upid_str: info.upid_str,
127 state: Some(status)
128 });
129 return None;
130 }
131
132 Some(info)
133 }).collect();
134
135 if let Some(upid) = new_upid {
136 active_list.push(TaskListInfo { upid: upid.clone(), upid_str: upid.to_string(), state: None });
137 }
138
139 let active_raw = render_task_list(&active_list);
140
141 let options = self.file_opts.clone()
142 .perm(nix::sys::stat::Mode::from_bits_truncate(0o660));
143
144 replace_file(
145 &self.active_tasks_fn,
146 active_raw.as_bytes(),
147 options,
148 )?;
149
150 finish_list.sort_unstable_by(|a, b| {
151 match (&a.state, &b.state) {
152 (Some(s1), Some(s2)) => s1.cmp(&s2),
153 (Some(_), None) => std::cmp::Ordering::Less,
154 (None, Some(_)) => std::cmp::Ordering::Greater,
155 _ => a.upid.starttime.cmp(&b.upid.starttime),
156 }
157 });
158
159 if !finish_list.is_empty() {
160 let options = self.file_opts.clone()
161 .perm(nix::sys::stat::Mode::from_bits_truncate(0o660));
162
163 let mut writer = atomic_open_or_create_file(
164 &self.task_archive_fn,
165 OFlag::O_APPEND | OFlag::O_RDWR,
166 &[],
167 options,
168 )?;
169 for info in &finish_list {
170 writer.write_all(render_task_line(&info).as_bytes())?;
171 }
172 }
173
174 // TODO Remove with 1.x
175 // for compatibility: if there was an INDEX file, we no longer need it
176 if had_index_file {
177 let _ = nix::unistd::unlink(&self.task_index_fn);
178 }
179
180 drop(lock);
181
182 Ok(())
183 }
184
185 // Create task log directory with correct permissions
186 fn create_task_log_dirs(&self) -> Result<(), Error> {
187
188 try_block!({
189 let dir_opts = self.file_opts.clone()
190 .perm(nix::sys::stat::Mode::from_bits_truncate(0o755));
191
192 create_path(&self.taskdir, Some(dir_opts.clone()), Some(dir_opts.clone()))?;
193 // fixme:??? create_path(pbs_buildcfg::PROXMOX_BACKUP_RUN_DIR, None, Some(opts))?;
194 Ok(())
195 }).map_err(|err: Error| format_err!("unable to create task log dir - {}", err))
196 }
197 }
198
199 /// Initialize the WorkerTask library
200 pub fn init_worker_tasks(basedir: PathBuf, file_opts: CreateOptions) -> Result<(), Error> {
201 let setup = WorkerTaskSetup::new(basedir, file_opts);
202 setup.create_task_log_dirs()?;
203 WORKER_TASK_SETUP.set(setup)
204 .map_err(|_| format_err!("init_worker_tasks failed - already initialized"))
205 }
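
// Editor's sketch (not part of the original source): how a daemon might call
// init_worker_tasks() once at startup. The base directory is illustrative, and
// using plain CreateOptions::new() is an assumption - a real server would set
// owner, group and permissions explicitly.
#[allow(dead_code)]
fn example_init_worker_tasks() -> Result<(), Error> {
    let file_opts = CreateOptions::new();
    init_worker_tasks(PathBuf::from("/var/log/example-daemon"), file_opts)?;
    Ok(())
}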
206
207 /// Checks if the task archive is bigger than 'size_threshold' bytes, and
208 /// rotates it if it is
209 pub fn rotate_task_log_archive(size_threshold: u64, compress: bool, max_files: Option<usize>) -> Result<bool, Error> {
210
211 let setup = worker_task_setup()?;
212
213 let _lock = setup.lock_task_list_files(true)?;
214
215 let mut logrotate = LogRotate::new(&setup.task_archive_fn, compress)
216 .ok_or_else(|| format_err!("could not get archive file names"))?;
217
218 logrotate.rotate(size_threshold, None, max_files)
219 }
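
// Editor's sketch: a periodic maintenance job could rotate the task archive
// like this; the 500 KiB threshold and the limit of 20 files are illustrative.
#[allow(dead_code)]
fn example_rotate_task_archive() -> Result<(), Error> {
    let rotated = rotate_task_log_archive(500 * 1024, true, Some(20))?;
    if rotated {
        println!("task archive was rotated");
    }
    Ok(())
}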
220
221
222 /// Path to the worker log file
223 pub fn upid_log_path(upid: &UPID) -> Result<std::path::PathBuf, Error> {
224 let setup = worker_task_setup()?;
225 Ok(setup.log_path(upid))
226 }
227
228 /// Read the endtime (time of the last log line) and exit status from a task log file.
229 /// If there is not a single line with a valid datetime, we assume the
230 /// starttime to be the endtime.
231 pub fn upid_read_status(upid: &UPID) -> Result<TaskState, Error> {
232
233 let setup = worker_task_setup()?;
234
235 let mut status = TaskState::Unknown { endtime: upid.starttime };
236
237 let path = setup.log_path(upid);
238
239 let mut file = File::open(path)?;
240
241 // speedup - only read the tail of the log file
242 use std::io::Seek;
243 use std::io::SeekFrom;
244 let _ = file.seek(SeekFrom::End(-8192)); // ignore errors
245
246 let mut data = Vec::with_capacity(8192);
247 file.read_to_end(&mut data)?;
248
249 // strip newlines at the end of the task logs
250 while data.last() == Some(&b'\n') {
251 data.pop();
252 }
253
254 let last_line = match data.iter().rposition(|c| *c == b'\n') {
255 Some(start) if data.len() > (start+1) => &data[start+1..],
256 Some(_) => &data, // should not happen, since we removed all trailing newlines
257 None => &data,
258 };
259
260 let last_line = std::str::from_utf8(last_line)
261 .map_err(|err| format_err!("upid_read_status: utf8 parse failed: {}", err))?;
262
263 let mut iter = last_line.splitn(2, ": ");
264 if let Some(time_str) = iter.next() {
265 if let Ok(endtime) = proxmox::tools::time::parse_rfc3339(time_str) {
266 // set the endtime even if we cannot parse the state
267 status = TaskState::Unknown { endtime };
268 if let Some(rest) = iter.next().and_then(|rest| rest.strip_prefix("TASK ")) {
269 if let Ok(state) = TaskState::from_endtime_and_message(endtime, rest) {
270 status = state;
271 }
272 }
273 }
274 }
275
276 Ok(status)
277 }
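
// Editor's note: the parser above relies on the FileLogger line format
// "<rfc3339 timestamp>: <message>", where the final line carries the result
// rendered by TaskState::result_text() (e.g. "TASK OK" or "TASK ERROR: ...").
// A minimal usage sketch, assuming a valid UPID string is at hand:
#[allow(dead_code)]
fn example_read_status(upid_str: &str) -> Result<(), Error> {
    let upid: UPID = upid_str.parse()?;
    match upid_read_status(&upid)? {
        TaskState::OK { endtime } => println!("task finished successfully at {}", endtime),
        other => println!("task state: {}", other),
    }
    Ok(())
}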
278
279 lazy_static! {
280 static ref WORKER_TASK_LIST: Mutex<HashMap<usize, Arc<WorkerTask>>> = Mutex::new(HashMap::new());
281 }
282
283 /// checks if the task UPID refers to a worker from this process
284 fn is_local_worker(upid: &UPID) -> bool {
285 upid.pid == crate::pid() && upid.pstart == crate::pstart()
286 }
287
288 /// Test if the task is still running
289 pub async fn worker_is_active(upid: &UPID) -> Result<bool, Error> {
290 if is_local_worker(upid) {
291 return Ok(WORKER_TASK_LIST.lock().unwrap().contains_key(&upid.task_id));
292 }
293
294 if procfs::check_process_running_pstart(upid.pid, upid.pstart).is_none() {
295 return Ok(false);
296 }
297
298 let sock = crate::ctrl_sock_from_pid(upid.pid);
299 let cmd = json!({
300 "command": "worker-task-status",
301 "args": {
302 "upid": upid.to_string(),
303 },
304 });
305 let status = crate::send_command(sock, &cmd).await?;
306
307 if let Some(active) = status.as_bool() {
308 Ok(active)
309 } else {
310 bail!("got unexpected result {:?} (expected bool)", status);
311 }
312 }
313
314 /// Test if the task is still running (fast but inaccurate implementation)
315 ///
316 /// If the task was spawned from a different process, we simply return whether
317 /// that process is still running. This information is good enough to detect
318 /// stale tasks...
319 pub fn worker_is_active_local(upid: &UPID) -> bool {
320 if is_local_worker(upid) {
321 WORKER_TASK_LIST.lock().unwrap().contains_key(&upid.task_id)
322 } else {
323 procfs::check_process_running_pstart(upid.pid, upid.pstart).is_some()
324 }
325 }
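
// Editor's sketch: the cheap local heuristic versus the exact check. The exact
// variant may have to ask the owning process over its control socket, hence async.
#[allow(dead_code)]
async fn example_check_task_running(upid: &UPID) -> Result<(), Error> {
    if worker_is_active_local(upid) {
        println!("{} looks active (local heuristic)", upid);
    }
    let active = worker_is_active(upid).await?;
    println!("{} is active: {}", upid, active);
    Ok(())
}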
326
327 pub fn register_task_control_commands(
328 commando_sock: &mut CommandoSocket,
329 ) -> Result<(), Error> {
330 fn get_upid(args: Option<&Value>) -> Result<UPID, Error> {
331 let args = if let Some(args) = args { args } else { bail!("missing args") };
332 let upid = match args.get("upid") {
333 Some(Value::String(upid)) => upid.parse::<UPID>()?,
334 None => bail!("no upid in args"),
335 _ => bail!("unable to parse upid"),
336 };
337 if !is_local_worker(&upid) {
338 bail!("upid does not belong to this process");
339 }
340 Ok(upid)
341 }
342
343 commando_sock.register_command("worker-task-abort".into(), move |args| {
344 let upid = get_upid(args)?;
345
346 abort_local_worker(upid);
347
348 Ok(Value::Null)
349 })?;
350 commando_sock.register_command("worker-task-status".into(), move |args| {
351 let upid = get_upid(args)?;
352
353 let active = WORKER_TASK_LIST.lock().unwrap().contains_key(&upid.task_id);
354
355 Ok(active.into())
356 })?;
357
358 Ok(())
359 }
360
361 pub fn abort_worker_async(upid: UPID) {
362 tokio::spawn(async move {
363 if let Err(err) = abort_worker(upid).await {
364 eprintln!("abort worker failed - {}", err);
365 }
366 });
367 }
368
369 pub async fn abort_worker(upid: UPID) -> Result<(), Error> {
370
371 let sock = crate::ctrl_sock_from_pid(upid.pid);
372 let cmd = json!({
373 "command": "worker-task-abort",
374 "args": {
375 "upid": upid.to_string(),
376 },
377 });
378 crate::send_command(sock, &cmd).map_ok(|_| ()).await
379 }
380
381 fn parse_worker_status_line(line: &str) -> Result<(String, UPID, Option<TaskState>), Error> {
382
383 let data = line.splitn(3, ' ').collect::<Vec<&str>>();
384
385 let len = data.len();
386
387 match len {
388 1 => Ok((data[0].to_owned(), data[0].parse::<UPID>()?, None)),
389 3 => {
390 let endtime = i64::from_str_radix(data[1], 16)?;
391 let state = TaskState::from_endtime_and_message(endtime, data[2])?;
392 Ok((data[0].to_owned(), data[0].parse::<UPID>()?, Some(state)))
393 }
394 _ => bail!("wrong number of components"),
395 }
396 }
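
// Editor's note: the two line layouts accepted above mirror what
// render_task_line() writes further below:
//   "<upid>"                                   - task still running (state == None)
//   "<upid> <endtime as %08X hex> <status>"    - finished task
// A line with exactly two tokens is rejected as malformed.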
397
398 /// Task State
399 #[derive(Debug, PartialEq, Eq, Serialize, Deserialize)]
400 pub enum TaskState {
401 /// The Task ended with an undefined state
402 Unknown { endtime: i64 },
403 /// The Task ended and there were no errors or warnings
404 OK { endtime: i64 },
406 /// The Task finished with 'count' warnings and no errors
406 Warning { count: u64, endtime: i64 },
407 /// The Task ended with the error described in 'message'
408 Error { message: String, endtime: i64 },
409 }
410
411 impl TaskState {
412 pub fn endtime(&self) -> i64 {
413 match *self {
414 TaskState::Unknown { endtime } => endtime,
415 TaskState::OK { endtime } => endtime,
416 TaskState::Warning { endtime, .. } => endtime,
417 TaskState::Error { endtime, .. } => endtime,
418 }
419 }
420
421 fn result_text(&self) -> String {
422 match self {
423 TaskState::Error { message, .. } => format!("TASK ERROR: {}", message),
424 other => format!("TASK {}", other),
425 }
426 }
427
428 fn from_endtime_and_message(endtime: i64, s: &str) -> Result<Self, Error> {
429 if s == "unknown" {
430 Ok(TaskState::Unknown { endtime })
431 } else if s == "OK" {
432 Ok(TaskState::OK { endtime })
433 } else if let Some(warnings) = s.strip_prefix("WARNINGS: ") {
434 let count: u64 = warnings.parse()?;
435 Ok(TaskState::Warning{ count, endtime })
436 } else if !s.is_empty() {
437 let message = if let Some(err) = s.strip_prefix("ERROR: ") { err } else { s }.to_string();
438 Ok(TaskState::Error{ message, endtime })
439 } else {
440 bail!("unable to parse Task Status '{}'", s);
441 }
442 }
443 }
444
445 impl std::cmp::PartialOrd for TaskState {
446 fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
447 Some(self.endtime().cmp(&other.endtime()))
448 }
449 }
450
451 impl std::cmp::Ord for TaskState {
452 fn cmp(&self, other: &Self) -> std::cmp::Ordering {
453 self.endtime().cmp(&other.endtime())
454 }
455 }
456
457 impl std::fmt::Display for TaskState {
458 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
459 match self {
460 TaskState::Unknown { .. } => write!(f, "unknown"),
461 TaskState::OK { .. }=> write!(f, "OK"),
462 TaskState::Warning { count, .. } => write!(f, "WARNINGS: {}", count),
463 TaskState::Error { message, .. } => write!(f, "{}", message),
464 }
465 }
466 }
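
// Editor's sketch: Display and from_endtime_and_message() round-trip, which
// keeps the archive lines written by render_task_line() parseable later on.
#[allow(dead_code)]
fn example_task_state_roundtrip() -> Result<(), Error> {
    let state = TaskState::Warning { count: 3, endtime: 0 };
    let parsed = TaskState::from_endtime_and_message(0, &state.to_string())?;
    assert_eq!(parsed, state);
    Ok(())
}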
467
468 /// Task details including parsed UPID
469 ///
470 /// If there is no `state`, the task is still running.
471 #[derive(Debug)]
472 pub struct TaskListInfo {
473 /// The parsed UPID
474 pub upid: UPID,
475 /// UPID string representation
476 pub upid_str: String,
477 /// Task `(endtime, status)` if already finished
478 pub state: Option<TaskState>, // endtime, status
479 }
480
481 fn render_task_line(info: &TaskListInfo) -> String {
482 let mut raw = String::new();
483 if let Some(status) = &info.state {
484 raw.push_str(&format!("{} {:08X} {}\n", info.upid_str, status.endtime(), status));
485 } else {
486 raw.push_str(&info.upid_str);
487 raw.push('\n');
488 }
489
490 raw
491 }
492
493 fn render_task_list(list: &[TaskListInfo]) -> String {
494 let mut raw = String::new();
495 for info in list {
496 raw.push_str(&render_task_line(&info));
497 }
498 raw
499 }
500
501 // note this is not locked, caller has to make sure it is
502 // this will skip (and log) lines that are not valid status lines
503 fn read_task_file<R: Read>(reader: R) -> Result<Vec<TaskListInfo>, Error>
504 {
505 let reader = BufReader::new(reader);
506 let mut list = Vec::new();
507 for line in reader.lines() {
508 let line = line?;
509 match parse_worker_status_line(&line) {
510 Ok((upid_str, upid, state)) => list.push(TaskListInfo {
511 upid_str,
512 upid,
513 state
514 }),
515 Err(err) => {
516 eprintln!("unable to parse worker status '{}' - {}", line, err);
517 continue;
518 }
519 };
520 }
521
522 Ok(list)
523 }
524
525 // note this is not locked, caller has to make sure it is
526 fn read_task_file_from_path<P>(path: P) -> Result<Vec<TaskListInfo>, Error>
527 where
528 P: AsRef<std::path::Path> + std::fmt::Debug,
529 {
530 let file = match File::open(&path) {
531 Ok(f) => f,
532 Err(err) if err.kind() == std::io::ErrorKind::NotFound => return Ok(Vec::new()),
533 Err(err) => bail!("unable to open task list {:?} - {}", path, err),
534 };
535
536 read_task_file(file)
537 }
538
539 pub struct TaskListInfoIterator {
540 list: VecDeque<TaskListInfo>,
541 end: bool,
542 archive: Option<LogRotateFiles>,
543 lock: Option<TaskListLockGuard>,
544 }
545
546 impl TaskListInfoIterator {
547 pub fn new(active_only: bool) -> Result<Self, Error> {
548
549 let setup = worker_task_setup()?;
550
551 let (read_lock, active_list) = {
552 let lock = setup.lock_task_list_files(false)?;
553 let active_list = read_task_file_from_path(&setup.active_tasks_fn)?;
554
555 let needs_update = active_list
556 .iter()
557 .any(|info| info.state.is_some() || !worker_is_active_local(&info.upid));
558
559 // TODO remove with 1.x
560 let index_exists = setup.task_index_fn.is_file();
561
562 if needs_update || index_exists {
563 drop(lock);
564 setup.update_active_workers(None)?;
565 let lock = setup.lock_task_list_files(false)?;
566 let active_list = read_task_file_from_path(&setup.active_tasks_fn)?;
567 (lock, active_list)
568 } else {
569 (lock, active_list)
570 }
571 };
572
573 let archive = if active_only {
574 None
575 } else {
576 let logrotate = LogRotate::new(&setup.task_archive_fn, true)
577 .ok_or_else(|| format_err!("could not get archive file names"))?;
578 Some(logrotate.files())
579 };
580
581 let lock = if active_only { None } else { Some(read_lock) };
582
583 Ok(Self {
584 list: active_list.into(),
585 end: active_only,
586 archive,
587 lock,
588 })
589 }
590 }
591
592 impl Iterator for TaskListInfoIterator {
593 type Item = Result<TaskListInfo, Error>;
594
595 fn next(&mut self) -> Option<Self::Item> {
596 loop {
597 if let Some(element) = self.list.pop_back() {
598 return Some(Ok(element));
599 } else if self.end {
600 return None;
601 } else {
602 if let Some(mut archive) = self.archive.take() {
603 if let Some(file) = archive.next() {
604 let list = match read_task_file(file) {
605 Ok(list) => list,
606 Err(err) => return Some(Err(err)),
607 };
608 self.list.append(&mut list.into());
609 self.archive = Some(archive);
610 continue;
611 }
612 }
613
614 self.end = true;
615 self.lock.take();
616 }
617 }
618 }
619 }
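
// Editor's sketch: listing every known task - the currently active ones first,
// followed by entries from the (possibly rotated) archive files.
#[allow(dead_code)]
fn example_list_all_tasks() -> Result<(), Error> {
    for item in TaskListInfoIterator::new(false)? {
        let info = item?;
        match &info.state {
            Some(state) => println!("{}: finished - {}", info.upid_str, state),
            None => println!("{}: still running", info.upid_str),
        }
    }
    Ok(())
}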
620
621 /// Launch long-running worker tasks.
622 ///
623 /// A worker task can either be a whole thread, or simply a tokio
624 /// task/future. Each task can `log()` messages, which are stored
625 /// persistently in files. Tasks should poll the `abort_requested`
626 /// flag and stop execution when requested.
627 pub struct WorkerTask {
628 setup: &'static WorkerTaskSetup,
629 upid: UPID,
630 data: Mutex<WorkerTaskData>,
631 abort_requested: AtomicBool,
632 }
633
634 impl std::fmt::Display for WorkerTask {
635
636 fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
637 self.upid.fmt(f)
638 }
639 }
640
641 struct WorkerTaskData {
642 logger: FileLogger,
643 progress: f64, // 0..1
644 warn_count: u64,
645 pub abort_listeners: Vec<oneshot::Sender<()>>,
646 }
647
648 impl WorkerTask {
649
650 pub fn new(
651 worker_type: &str,
652 worker_id: Option<String>,
653 auth_id: String,
654 to_stdout: bool,
655 ) -> Result<Arc<Self>, Error> {
656
657 let setup = worker_task_setup()?;
658
659 let upid = UPID::new(worker_type, worker_id, auth_id)?;
660 let task_id = upid.task_id;
661
662 let mut path = setup.taskdir.clone();
663
664 path.push(format!("{:02X}", upid.pstart & 255));
665
666 let dir_opts = setup.file_opts.clone()
667 .perm(nix::sys::stat::Mode::from_bits_truncate(0o755));
668
669 create_path(&path, None, Some(dir_opts))?;
670
671 path.push(upid.to_string());
672
673 let logger_options = FileLogOptions {
674 to_stdout,
675 exclusive: true,
676 prefix_time: true,
677 read: true,
678 file_opts: setup.file_opts.clone(),
679 ..Default::default()
680 };
681 let logger = FileLogger::new(&path, logger_options)?;
682
683 let worker = Arc::new(Self {
684 setup,
685 upid: upid.clone(),
686 abort_requested: AtomicBool::new(false),
687 data: Mutex::new(WorkerTaskData {
688 logger,
689 progress: 0.0,
690 warn_count: 0,
691 abort_listeners: vec![],
692 }),
693 });
694
695 // scope to drop the lock again after inserting
696 {
697 let mut hash = WORKER_TASK_LIST.lock().unwrap();
698 hash.insert(task_id, worker.clone());
699 crate::set_worker_count(hash.len());
700 }
701
702 setup.update_active_workers(Some(&upid))?;
703
704 Ok(worker)
705 }
706
707 /// Spawn a new tokio task/future.
708 pub fn spawn<F, T>(
709 worker_type: &str,
710 worker_id: Option<String>,
711 auth_id: String,
712 to_stdout: bool,
713 f: F,
714 ) -> Result<String, Error>
715 where F: Send + 'static + FnOnce(Arc<WorkerTask>) -> T,
716 T: Send + 'static + Future<Output = Result<(), Error>>,
717 {
718 let worker = WorkerTask::new(worker_type, worker_id, auth_id, to_stdout)?;
719 let upid_str = worker.upid.to_string();
720 let f = f(worker.clone());
721 tokio::spawn(async move {
722 let result = f.await;
723 worker.log_result(&result);
724 });
725
726 Ok(upid_str)
727 }
728
729 /// Create a new worker thread.
730 pub fn new_thread<F>(
731 worker_type: &str,
732 worker_id: Option<String>,
733 auth_id: String,
734 to_stdout: bool,
735 f: F,
736 ) -> Result<String, Error>
737 where F: Send + UnwindSafe + 'static + FnOnce(Arc<WorkerTask>) -> Result<(), Error>
738 {
739 let worker = WorkerTask::new(worker_type, worker_id, auth_id, to_stdout)?;
740 let upid_str = worker.upid.to_string();
741
742 let _child = std::thread::Builder::new().name(upid_str.clone()).spawn(move || {
743 let worker1 = worker.clone();
744 let result = match std::panic::catch_unwind(move || f(worker1)) {
745 Ok(r) => r,
746 Err(panic) => {
747 match panic.downcast::<&str>() {
748 Ok(panic_msg) => {
749 Err(format_err!("worker panicked: {}", panic_msg))
750 }
751 Err(_) => {
752 Err(format_err!("worker panicked: unknown type."))
753 }
754 }
755 }
756 };
757
758 worker.log_result(&result);
759 });
760
761 Ok(upid_str)
762 }
763
764 /// create state from self and a result
765 pub fn create_state(&self, result: &Result<(), Error>) -> TaskState {
766 let warn_count = self.data.lock().unwrap().warn_count;
767
768 let endtime = proxmox::tools::time::epoch_i64();
769
770 if let Err(err) = result {
771 TaskState::Error { message: err.to_string(), endtime }
772 } else if warn_count > 0 {
773 TaskState::Warning { count: warn_count, endtime }
774 } else {
775 TaskState::OK { endtime }
776 }
777 }
778
779 /// Log task result, remove task from running list
780 pub fn log_result(&self, result: &Result<(), Error>) {
781 let state = self.create_state(result);
782 self.log_message(state.result_text());
783
784 WORKER_TASK_LIST.lock().unwrap().remove(&self.upid.task_id);
785 let _ = self.setup.update_active_workers(None);
786 crate::set_worker_count(WORKER_TASK_LIST.lock().unwrap().len());
787 }
788
789 /// Log a message.
790 pub fn log_message<S: AsRef<str>>(&self, msg: S) {
791 let mut data = self.data.lock().unwrap();
792 data.logger.log(msg);
793 }
794
795 /// Log a message as warning.
796 pub fn log_warning<S: AsRef<str>>(&self, msg: S) {
797 let mut data = self.data.lock().unwrap();
798 data.logger.log(format!("WARN: {}", msg.as_ref()));
799 data.warn_count += 1;
800 }
801
802 /// Set progress indicator
803 pub fn progress(&self, progress: f64) {
804 if progress >= 0.0 && progress <= 1.0 {
805 let mut data = self.data.lock().unwrap();
806 data.progress = progress;
807 } else {
808 // fixme: log!("task '{}': ignoring strange value for progress '{}'", self.upid, progress);
809 }
810 }
811
812 /// Request abort
813 pub fn request_abort(&self) {
814 eprintln!("set abort flag for worker {}", self.upid);
815
816 let prev_abort = self.abort_requested.swap(true, Ordering::SeqCst);
817 if !prev_abort { // log abort one time
818 self.log_message("received abort request ...");
819 }
820 // notify listeners
821 let mut data = self.data.lock().unwrap();
822 loop {
823 match data.abort_listeners.pop() {
824 None => { break; },
825 Some(ch) => {
826 let _ = ch.send(()); // ignore errors here
827 },
828 }
829 }
830 }
831
832 /// Get a future which resolves on task abort
833 pub fn abort_future(&self) -> oneshot::Receiver<()> {
834 let (tx, rx) = oneshot::channel::<()>();
835
836 let mut data = self.data.lock().unwrap();
837 if self.abort_requested() {
838 let _ = tx.send(());
839 } else {
840 data.abort_listeners.push(tx);
841 }
842 rx
843 }
844
845 pub fn upid(&self) -> &UPID {
846 &self.upid
847 }
848 }
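
// Editor's sketch: the blocking, thread-based worker variant. Worker type and
// auth id are illustrative. The abort flag is checked via the WorkerTaskContext
// trait imported above; panics inside the closure are caught by new_thread()
// and turned into a task error.
#[allow(dead_code)]
fn example_blocking_worker() -> Result<String, Error> {
    WorkerTask::new_thread(
        "example-blocking",         // worker_type
        None,                       // worker_id
        "root@pam".to_string(),     // auth_id (illustrative)
        false,                      // to_stdout
        |worker| {
            for step in 0..10 {
                if worker.abort_requested() {
                    bail!("aborted at step {}", step);
                }
                worker.log_message(format!("step {}", step));
            }
            Ok(())
        },
    )
}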
849
850 impl WorkerTaskContext for WorkerTask {
851
852 fn abort_requested(&self) -> bool {
853 self.abort_requested.load(Ordering::SeqCst)
854 }
855
856 fn log(&self, level: log::Level, message: &std::fmt::Arguments) {
857 match level {
858 log::Level::Error => self.log_warning(&message.to_string()),
859 log::Level::Warn => self.log_warning(&message.to_string()),
860 log::Level::Info => self.log_message(&message.to_string()),
861 log::Level::Debug => self.log_message(&format!("DEBUG: {}", message)),
862 log::Level::Trace => self.log_message(&format!("TRACE: {}", message)),
863 }
864 }
865 }
866
867 /// Wait for a locally spawned worker task
868 ///
869 /// Note: local workers should print logs to stdout, so there is no
870 /// need to fetch/display logs. We just wait for the worker to finish.
871 pub async fn wait_for_local_worker(upid_str: &str) -> Result<(), Error> {
872
873 let upid: UPID = upid_str.parse()?;
874
875 let sleep_duration = core::time::Duration::new(0, 100_000_000);
876
877 loop {
878 if worker_is_active_local(&upid) {
879 tokio::time::sleep(sleep_duration).await;
880 } else {
881 break;
882 }
883 }
884 Ok(())
885 }
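
// Editor's sketch: spawning a future-based worker and waiting for it locally.
// Assumes a tokio runtime is running and init_worker_tasks() has been called;
// worker type and auth id are illustrative.
#[allow(dead_code)]
async fn example_spawn_and_wait() -> Result<(), Error> {
    let upid_str = WorkerTask::spawn(
        "example",                  // worker_type
        None,                       // worker_id
        "root@pam".to_string(),     // auth_id (illustrative)
        false,                      // to_stdout
        |worker| async move {
            worker.log_message("doing some asynchronous work ...");
            Ok(())
        },
    )?;
    wait_for_local_worker(&upid_str).await?;
    Ok(())
}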
886
887 /// Request abort of a local worker (if it exists and is still running)
888 pub fn abort_local_worker(upid: UPID) {
889 if let Some(ref worker) = WORKER_TASK_LIST.lock().unwrap().get(&upid.task_id) {
890 worker.request_abort();
891 }
892 }