1 use std
::collections
::{HashMap, VecDeque}
;
4 use std
::io
::{Read, Write, BufRead, BufReader}
;
5 use std
::panic
::UnwindSafe
;
6 use std
::sync
::atomic
::{AtomicBool, Ordering}
;
7 use std
::sync
::{Arc, Mutex}
;
9 use anyhow
::{bail, format_err, Error}
;
11 use lazy_static
::lazy_static
;
13 use serde_json
::{json, Value}
;
14 use serde
::{Serialize, Deserialize}
;
15 use tokio
::sync
::oneshot
;
17 use proxmox
::sys
::linux
::procfs
;
18 use proxmox
::try_block
;
19 use proxmox
::tools
::fs
::{create_path, open_file_locked, replace_file, CreateOptions}
;
23 use crate::tools
::logrotate
::{LogRotate, LogRotateFiles}
;
24 use crate::tools
::FileLogger
;
25 use crate::api2
::types
::Userid
;
// Base-directory path macros. These are macros (rather than `const`s) so the
// values can be spliced into `concat!` when building the constants below.
macro_rules! PROXMOX_BACKUP_VAR_RUN_DIR_M {
    () => ("/run/proxmox-backup")
}

macro_rules! PROXMOX_BACKUP_LOG_DIR_M {
    () => ("/var/log/proxmox-backup")
}

macro_rules! PROXMOX_BACKUP_TASK_DIR_M {
    () => (concat!(PROXMOX_BACKUP_LOG_DIR_M!(), "/tasks"))
}
31 pub const PROXMOX_BACKUP_VAR_RUN_DIR
: &str = PROXMOX_BACKUP_VAR_RUN_DIR_M
!();
32 pub const PROXMOX_BACKUP_LOG_DIR
: &str = PROXMOX_BACKUP_LOG_DIR_M
!();
33 pub const PROXMOX_BACKUP_TASK_DIR
: &str = PROXMOX_BACKUP_TASK_DIR_M
!();
34 pub const PROXMOX_BACKUP_TASK_LOCK_FN
: &str = concat
!(PROXMOX_BACKUP_TASK_DIR_M
!(), "/.active.lock");
35 pub const PROXMOX_BACKUP_ACTIVE_TASK_FN
: &str = concat
!(PROXMOX_BACKUP_TASK_DIR_M
!(), "/active");
36 pub const PROXMOX_BACKUP_INDEX_TASK_FN
: &str = concat
!(PROXMOX_BACKUP_TASK_DIR_M
!(), "/index");
37 pub const PROXMOX_BACKUP_ARCHIVE_TASK_FN
: &str = concat
!(PROXMOX_BACKUP_TASK_DIR_M
!(), "/archive");
39 const MAX_INDEX_TASKS
: usize = 1000;
// NOTE(review): this fragment appears to be the interior of a
// `lazy_static! { ... }` invocation; the surrounding macro call is not
// visible in this chunk — confirm against the full file.
// Registry of all worker tasks started by *this* process, keyed by task id.
42 static ref WORKER_TASK_LIST
: Mutex
<HashMap
<usize, Arc
<WorkerTask
>>> = Mutex
::new(HashMap
::new());
// Cached pid of the current process.
44 static ref MY_PID
: i32 = unsafe { libc::getpid() }
;
// Process start time of this process; together with the pid it uniquely
// identifies this process instance even across pid reuse.
// NOTE(review): the trailing accessor/terminator of this item is not
// visible in this chunk.
45 static ref MY_PID_PSTART
: u64 = procfs
::PidStat
::read_from_pid(Pid
::from_raw(*MY_PID
))
50 /// Test if the task is still running
// Accurate check: for tasks owned by another process this asks that process
// over its task-control socket.
51 pub async
fn worker_is_active(upid
: &UPID
) -> Result
<bool
, Error
> {
// Task started by this process -> the in-memory registry is authoritative.
52 if (upid
.pid
== *MY_PID
) && (upid
.pstart
== *MY_PID_PSTART
) {
53 return Ok(WORKER_TASK_LIST
.lock().unwrap().contains_key(&upid
.task_id
));
// Owning process already gone -> the task cannot be running anymore.
// NOTE(review): the body of this branch is not visible in this chunk.
56 if !procfs
::check_process_running_pstart(upid
.pid
, upid
.pstart
).is_some() {
// Ask the owning process via its abstract-namespace control socket
// (leading NUL byte = Linux abstract socket namespace).
60 let socketname
= format
!(
61 "\0{}/proxmox-task-control-{}.sock", PROXMOX_BACKUP_VAR_RUN_DIR
, upid
.pid
);
// NOTE(review): the `json!` command-object construction around this line is
// only partially visible in this chunk.
65 "upid": upid
.to_string(),
68 let status
= super::send_command(socketname
, cmd
).await?
;
// A boolean reply is the "is the task active" answer.
70 if let Some(active
) = status
.as_bool() {
73 bail
!("got unexpected result {:?} (expected bool)", status
);
77 /// Test if the task is still running (fast but inaccurate implementation)
79 /// If the task is spanned from a different process, we simply return if
80 /// that process is still running. This information is good enough to detect
82 pub fn worker_is_active_local(upid
: &UPID
) -> bool
{
83 if (upid
.pid
== *MY_PID
) && (upid
.pstart
== *MY_PID_PSTART
) {
84 WORKER_TASK_LIST
.lock().unwrap().contains_key(&upid
.task_id
)
86 procfs
::check_process_running_pstart(upid
.pid
, upid
.pstart
).is_some()
// Bind this process' task-control socket and spawn the future serving it.
90 pub fn create_task_control_socket() -> Result
<(), Error
> {
// Leading NUL -> abstract-namespace unix socket (no filesystem entry).
92 let socketname
= format
!(
93 "\0{}/proxmox-task-control-{}.sock", PROXMOX_BACKUP_VAR_RUN_DIR
, *MY_PID
);
// Handler closure: parses a JSON command object and answers it.
95 let control_future
= super::create_control_socket(socketname
, |param
| {
96 let param
= param
.as_object()
97 .ok_or_else(|| format_err
!("unable to parse parameters (expected json object)"))?
;
// Exactly two keys are expected: "command" and "upid".
98 if param
.keys().count() != 2 { bail!("wrong number of parameters"); }
100 let command
= param
["command"].as_str()
101 .ok_or_else(|| format_err
!("unable to parse parameters (missing command)"))?
;
103 // we have only two commands for now
104 if !(command
== "abort-task" || command
== "status") { bail!("got unknown command '{}'
", command); }
106 let upid_str = param["upid
"].as_str()
107 .ok_or_else(|| format_err!("unable to parse
parameters (missing upid
)"))?;
109 let upid = upid_str.parse::<UPID>()?;
// Only answer for tasks owned by this very process instance.
111 if !((upid.pid == *MY_PID) && (upid.pstart == *MY_PID_PSTART)) {
112 bail!("upid does not belong to this process
");
115 let hash = WORKER_TASK_LIST.lock().unwrap();
// "abort-task": flag the worker; a missing entry means it already stopped.
119 if let Some(ref worker) = hash.get(&upid.task_id) {
120 worker.request_abort();
122 // assume task is already stopped
// "status": report whether the task is still registered locally.
127 let active = hash.contains_key(&upid.task_id);
131 bail!("got unknown command '{}'
", command);
// NOTE(review): the command-dispatch structure between the branches above is
// not fully visible in this chunk.
136 tokio::spawn(control_future);
141 pub fn abort_worker_async(upid: UPID) {
142 tokio::spawn(async move {
143 if let Err(err) = abort_worker(upid).await {
144 eprintln!("abort worker failed
- {}
", err);
149 pub async fn abort_worker(upid: UPID) -> Result<(), Error> {
151 let target_pid = upid.pid;
153 let socketname = format!(
154 "\0{}
/proxmox
-task
-control
-{}
.sock
", PROXMOX_BACKUP_VAR_RUN_DIR, target_pid);
157 "command
": "abort
-task
",
158 "upid
": upid.to_string(),
161 super::send_command(socketname, cmd).map_ok(|_| ()).await
164 fn parse_worker_status_line(line: &str) -> Result<(String, UPID, Option<TaskState>), Error> {
166 let data = line.splitn(3, ' ').collect::<Vec<&str>>();
168 let len = data.len();
171 1 => Ok((data[0].to_owned(), data[0].parse::<UPID>()?, None)),
173 let endtime = i64::from_str_radix(data[1], 16)?;
174 let state = TaskState::from_endtime_and_message(endtime, data[2])?;
175 Ok((data[0].to_owned(), data[0].parse::<UPID>()?, Some(state)))
177 _ => bail!("wrong number of components
"),
181 /// Create task log directory with correct permissions
182 pub fn create_task_log_dirs() -> Result<(), Error> {
185 let backup_user = crate::backup::backup_user()?;
186 let opts = CreateOptions::new()
187 .owner(backup_user.uid)
188 .group(backup_user.gid);
190 create_path(PROXMOX_BACKUP_LOG_DIR, None, Some(opts.clone()))?;
191 create_path(PROXMOX_BACKUP_TASK_DIR, None, Some(opts.clone()))?;
192 create_path(PROXMOX_BACKUP_VAR_RUN_DIR, None, Some(opts))?;
194 }).map_err(|err: Error| format_err!("unable to create task log dir
- {}
", err))?;
199 /// Read endtime (time of last log line) and exitstatus from task log file
200 /// If there is not a single line with at valid datetime, we assume the
201 /// starttime to be the endtime
202 pub fn upid_read_status(upid: &UPID) -> Result<TaskState, Error> {
// Fallback when no parsable final line is found.
204 let mut status = TaskState::Unknown { endtime: upid.starttime };
206 let path = upid.log_path();
208 let mut file = File::open(path)?;
210 /// speedup - only read tail
212 use std::io::SeekFrom;
// Seek fails for files shorter than 8 KiB - that is fine, we read from 0.
213 let _ = file.seek(SeekFrom::End(-8192)); // ignore errors
215 let mut data = Vec::with_capacity(8192);
216 file.read_to_end(&mut data)?;
218 // task logs should end with newline, we do not want it here
219 if data.len() > 0 && data[data.len()-1] == b'\n' {
// Scan backwards to locate the start of the last line.
225 for pos in (0..data.len()).rev() {
226 if data[pos] == b'\n' {
227 start = data.len().min(pos + 1);
234 let last_line = std::str::from_utf8(last_line)
235 .map_err(|err| format_err!("upid_read_status
: utf8 parse failed
: {}
", err))?;
// Expected final-line format: "<rfc3339-time>: TASK <status>".
237 let mut iter = last_line.splitn(2, ": ");
238 if let Some(time_str) = iter.next() {
239 if let Ok(endtime) = proxmox::tools::time::parse_rfc3339(time_str) {
240 if let Some(rest) = iter.next().and_then(|rest| rest.strip_prefix("TASK
")) {
241 if let Ok(state) = TaskState::from_endtime_and_message(endtime, rest) {
252 #[derive(Debug, PartialEq, Eq, Serialize, Deserialize)]
254 /// The Task ended with an undefined state
255 Unknown { endtime: i64 },
256 /// The Task ended and there were no errors or warnings
258 /// The Task had 'count' amount of warnings and no errors
259 Warning { count: u64, endtime: i64 },
260 /// The Task ended with the error described in 'message'
261 Error { message: String, endtime: i64 },
265 pub fn endtime(&self) -> i64 {
267 TaskState::Unknown { endtime } => endtime,
268 TaskState::OK { endtime } => endtime,
269 TaskState::Warning { endtime, .. } => endtime,
270 TaskState::Error { endtime, .. } => endtime,
274 fn result_text(&self) -> String {
276 TaskState::Error { message, .. } => format!("TASK ERROR
: {}
", message),
277 other => format!("TASK {}
", other),
281 fn from_endtime_and_message(endtime: i64, s: &str) -> Result<Self, Error> {
283 Ok(TaskState::Unknown { endtime })
284 } else if s == "OK
" {
285 Ok(TaskState::OK { endtime })
286 } else if s.starts_with("WARNINGS
: ") {
287 let count: u64 = s[10..].parse()?;
288 Ok(TaskState::Warning{ count, endtime })
289 } else if s.len() > 0 {
290 let message = if s.starts_with("ERROR
: ") { &s[7..] } else { s }.to_string();
291 Ok(TaskState::Error{ message, endtime })
293 bail!("unable to parse Task Status '{}'
", s);
298 impl std::cmp::PartialOrd for TaskState {
299 fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
300 Some(self.endtime().cmp(&other.endtime()))
304 impl std::cmp::Ord for TaskState {
305 fn cmp(&self, other: &Self) -> std::cmp::Ordering {
306 self.endtime().cmp(&other.endtime())
310 impl std::fmt::Display for TaskState {
311 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
313 TaskState::Unknown { .. } => write!(f, "unknown
"),
314 TaskState::OK { .. }=> write!(f, "OK
"),
315 TaskState::Warning { count, .. } => write!(f, "WARNINGS
: {}
", count),
316 TaskState::Error { message, .. } => write!(f, "{}
", message),
321 /// Task details including parsed UPID
323 /// If there is no `state`, the task is still running.
325 pub struct TaskListInfo {
328 /// UPID string representation
329 pub upid_str: String,
330 /// Task `(endtime, status)` if already finished
331 pub state: Option<TaskState>, // endtime, status
334 fn lock_task_list_files(exclusive: bool) -> Result<std::fs::File, Error> {
335 let backup_user = crate::backup::backup_user()?;
337 let lock = open_file_locked(PROXMOX_BACKUP_TASK_LOCK_FN, std::time::Duration::new(10, 0), exclusive)?;
338 nix::unistd::chown(PROXMOX_BACKUP_TASK_LOCK_FN, Some(backup_user.uid), Some(backup_user.gid))?;
343 /// checks if the Task Archive is bigger that 'size_threshold' bytes, and
344 /// rotates it if it is
345 pub fn rotate_task_log_archive(size_threshold: u64, compress: bool, max_files: Option<usize>) -> Result<bool, Error> {
// Exclusive lock: rotation must not race concurrent list updates.
346 let _lock = lock_task_list_files(true)?;
347 let path = Path::new(PROXMOX_BACKUP_ARCHIVE_TASK_FN);
// A missing archive file simply means there is nothing to rotate.
348 let metadata = match path.metadata() {
349 Ok(metadata) => metadata,
350 Err(err) if err.kind() == std::io::ErrorKind::NotFound => return Ok(false),
351 Err(err) => bail!("unable to open task archive
- {}
", err),
354 if metadata.len() > size_threshold {
355 let mut logrotate = LogRotate::new(PROXMOX_BACKUP_ARCHIVE_TASK_FN, compress).ok_or_else(|| format_err!("could not get archive file names
"))?;
356 let backup_user = crate::backup::backup_user()?;
// NOTE(review): the rotate call consuming `max_files` and the surrounding
// CreateOptions builder are not fully visible in this chunk.
359 .owner(backup_user.uid)
360 .group(backup_user.gid),
369 // atomically read/update the task list, update status of finished tasks
370 // new_upid is added to the list when specified.
371 fn update_active_workers(new_upid: Option<&UPID>) -> Result<(), Error> {
373 let backup_user = crate::backup::backup_user()?;
// Exclusive lock over the active/index/archive task-list files.
375 let lock = lock_task_list_files(true)?;
377 let mut finish_list: Vec<TaskListInfo> = read_task_file_from_path(PROXMOX_BACKUP_INDEX_TASK_FN)?;
378 let mut active_list: Vec<TaskListInfo> = read_task_file_from_path(PROXMOX_BACKUP_ACTIVE_TASK_FN)?
// Move entries that already carry a final state out of the active list.
381 if info.state.is_some() {
382 // this can happen when the active file still includes finished tasks
383 finish_list.push(info);
// A task whose worker is gone counts as finished; read its final state
// from the tail of its log file (best effort).
387 if !worker_is_active_local(&info.upid) {
388 println!("Detected stopped UPID {}
", &info.upid_str);
389 let now = proxmox::tools::time::epoch_i64();
390 let status = upid_read_status(&info.upid)
391 .unwrap_or_else(|_| TaskState::Unknown { endtime: now });
392 finish_list.push(TaskListInfo {
394 upid_str: info.upid_str,
// Register the newly started task, if any.
403 if let Some(upid) = new_upid {
404 active_list.push(TaskListInfo { upid: upid.clone(), upid_str: upid.to_string(), state: None });
// Persist the pruned active list.
407 let active_raw = render_task_list(&active_list);
410 PROXMOX_BACKUP_ACTIVE_TASK_FN,
411 active_raw.as_bytes(),
413 .owner(backup_user.uid)
414 .group(backup_user.gid),
// Sort finished tasks by endtime; entries without a state sort last.
417 finish_list.sort_unstable_by(|a, b| {
418 match (&a.state, &b.state) {
419 (Some(s1), Some(s2)) => s1.cmp(&s2),
420 (Some(_), None) => std::cmp::Ordering::Less,
421 (None, Some(_)) => std::cmp::Ordering::Greater,
422 _ => a.upid.starttime.cmp(&b.upid.starttime),
// Keep only the newest MAX_INDEX_TASKS entries in the index file ...
427 let start = if finish_list.len() > MAX_INDEX_TASKS {
428 finish_list.len() - MAX_INDEX_TASKS
433 let end = (start+MAX_INDEX_TASKS).min(finish_list.len());
435 let index_raw = if end > start {
436 render_task_list(&finish_list[start..end])
442 PROXMOX_BACKUP_INDEX_TASK_FN,
443 index_raw.as_bytes(),
445 .owner(backup_user.uid)
446 .group(backup_user.gid),
// ... and append the overflow (oldest entries) to the archive file.
449 if !finish_list.is_empty() && start > 0 {
450 match std::fs::OpenOptions::new().append(true).create(true).open(PROXMOX_BACKUP_ARCHIVE_TASK_FN) {
452 for info in &finish_list[0..start] {
453 writer.write_all(render_task_line(&info).as_bytes())?;
456 Err(err) => bail!("could not write task archive
- {}
", err),
459 nix::unistd::chown(PROXMOX_BACKUP_ARCHIVE_TASK_FN, Some(backup_user.uid), Some(backup_user.gid))?;
467 fn render_task_line(info: &TaskListInfo) -> String {
468 let mut raw = String::new();
469 if let Some(status) = &info.state {
470 raw.push_str(&format!("{} {:08X} {}
\n", info.upid_str, status.endtime(), status));
472 raw.push_str(&info.upid_str);
479 fn render_task_list(list: &[TaskListInfo]) -> String {
480 let mut raw = String::new();
482 raw.push_str(&render_task_line(&info));
487 // note this is not locked, caller has to make sure it is
488 // this will skip (and log) lines that are not valid status lines
489 fn read_task_file<R: Read>(reader: R) -> Result<Vec<TaskListInfo>, Error>
491 let reader = BufReader::new(reader);
492 let mut list = Vec::new();
493 for line in reader.lines() {
495 match parse_worker_status_line(&line) {
496 Ok((upid_str, upid, state)) => list.push(TaskListInfo {
// Malformed lines are logged and skipped, not treated as fatal.
502 eprintln!("unable to parse worker status '{}'
- {}
", line, err);
511 // note this is not locked, caller has to make sure it is
512 fn read_task_file_from_path<P>(path: P) -> Result<Vec<TaskListInfo>, Error>
514 P: AsRef<std::path::Path> + std::fmt::Debug,
516 let file = match File::open(&path) {
518 Err(err) if err.kind() == std::io::ErrorKind::NotFound => return Ok(Vec::new()),
519 Err(err) => bail!("unable to open task list {:?}
- {}
", path, err),
// Iterator over tasks: buffered active entries first, then the index file,
// then the rotated archive files.
532 pub struct TaskListInfoIterator {
533 list: VecDeque<TaskListInfo>,
// NOTE(review): further fields (the current-file marker and the read lock)
// are used by the impl below but their declarations are not visible in
// this chunk.
535 archive: Option<LogRotateFiles>,
539 impl TaskListInfoIterator {
540 pub fn new(active_only: bool) -> Result<Self, Error> {
// Read the active list under a shared lock.
541 let (read_lock, active_list) = {
542 let lock = lock_task_list_files(false)?;
543 let active_list = read_task_file_from_path(PROXMOX_BACKUP_ACTIVE_TASK_FN)?;
// Stale entries present? Refresh the on-disk lists and re-read.
545 let needs_update = active_list
547 .any(|info| info.state.is_some() || !worker_is_active_local(&info.upid));
551 update_active_workers(None)?;
552 let lock = lock_task_list_files(false)?;
553 let active_list = read_task_file_from_path(PROXMOX_BACKUP_ACTIVE_TASK_FN)?;
// Archive files are only needed when iterating finished tasks too.
560 let archive = if active_only {
563 let logrotate = LogRotate::new(PROXMOX_BACKUP_ARCHIVE_TASK_FN, true).ok_or_else(|| format_err!("could not get archive file names
"))?;
564 Some(logrotate.files())
567 let file = if active_only { TaskFile::End } else { TaskFile::Active };
568 let lock = if active_only { None } else { Some(read_lock) };
571 list: active_list.into(),
579 impl Iterator for TaskListInfoIterator {
580 type Item = Result<TaskListInfo, Error>;
582 fn next(&mut self) -> Option<Self::Item> {
// Drain buffered entries first, then advance to the next backing file.
584 if let Some(element) = self.list.pop_back() {
585 return Some(Ok(element));
588 TaskFile::Active => {
// After the active list comes the index file.
589 let index = match read_task_file_from_path(PROXMOX_BACKUP_INDEX_TASK_FN) {
591 Err(err) => return Some(Err(err)),
593 self.list.append(&mut index.into());
594 self.file = TaskFile::Index;
596 TaskFile::Index | TaskFile::Archive => {
// Then one rotated archive file per step, until exhausted.
597 if let Some(mut archive) = self.archive.take() {
598 if let Some(file) = archive.next() {
599 let list = match read_task_file(file) {
601 Err(err) => return Some(Err(err)),
603 self.list.append(&mut list.into());
604 self.archive = Some(archive);
605 self.file = TaskFile::Archive;
609 self.file = TaskFile::End;
613 TaskFile::End => return None,
620 /// Launch long running worker tasks.
622 /// A worker task can either be a whole thread, or a simply tokio
623 /// task/future. Each task can `log()` messages, which are stored
624 /// persistently to files. Task should poll the `abort_requested`
625 /// flag, and stop execution when requested.
627 pub struct WorkerTask {
// NOTE(review): the `upid` field used throughout the impl below is not
// visible in this chunk.
// Mutable per-task state (logger, warn count, progress, abort listeners).
629 data: Mutex<WorkerTaskData>,
// Set once by request_abort(); workers poll it cooperatively.
630 abort_requested: AtomicBool,
633 impl std::fmt::Display for WorkerTask {
// NOTE(review): the fmt body is not visible in this chunk.
635 fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
// Mutable per-task state, guarded by the WorkerTask::data mutex.
641 struct WorkerTaskData {
643 progress: f64, // 0..1
// Channels fired (once each) when an abort is requested.
645 pub abort_listeners: Vec<oneshot::Sender<()>>,
648 impl Drop for WorkerTask {
// NOTE(review): only this debug print of the drop handler is visible in
// this chunk.
651 println!("unregister worker
");
// Create and register a new worker: allocates a UPID, creates the log file
// (owned by the backup user) and inserts the worker into the global registry
// and the on-disk active list.
657 pub fn new(worker_type: &str, worker_id: Option<String>, userid: Userid, to_stdout: bool) -> Result<Arc<Self>, Error> {
658 println!("register worker
");
660 let upid = UPID::new(worker_type, worker_id, userid)?;
661 let task_id = upid.task_id;
// Task logs are sharded into 256 subdirectories keyed by pstart.
663 let mut path = std::path::PathBuf::from(PROXMOX_BACKUP_TASK_DIR);
665 path.push(format!("{:02X}
", upid.pstart % 256));
667 let backup_user = crate::backup::backup_user()?;
669 create_path(&path, None, Some(CreateOptions::new().owner(backup_user.uid).group(backup_user.gid)))?;
671 path.push(upid.to_string());
673 println!("FILE
: {:?}
", path);
// The log file itself must be owned by the backup user as well.
675 let logger = FileLogger::new(&path, to_stdout)?;
676 nix::unistd::chown(&path, Some(backup_user.uid), Some(backup_user.gid))?;
678 let worker = Arc::new(Self {
680 abort_requested: AtomicBool::new(false),
681 data: Mutex::new(WorkerTaskData {
685 abort_listeners: vec![],
689 // scope to drop the lock again after inserting
691 let mut hash = WORKER_TASK_LIST.lock().unwrap();
692 hash.insert(task_id, worker.clone());
693 super::set_worker_count(hash.len());
// Persist the new task into the on-disk active list.
696 update_active_workers(Some(&upid))?;
701 /// Spawn a new tokio task/future.
// NOTE(review): parts of the generic signature (fn name line, remaining
// parameters) are not visible in this chunk.
704 worker_id: Option<String>,
708 ) -> Result<String, Error>
709 where F: Send + 'static + FnOnce(Arc<WorkerTask>) -> T,
710 T: Send + 'static + Future<Output = Result<(), Error>>,
712 let worker = WorkerTask::new(worker_type, worker_id, userid, to_stdout)?;
713 let upid_str = worker.upid.to_string();
// Run the user-supplied future and record its result on completion.
714 let f = f(worker.clone());
715 tokio::spawn(async move {
716 let result = f.await;
717 worker.log_result(&result);
723 /// Create a new worker thread.
724 pub fn new_thread<F>(
726 worker_id: Option<String>,
730 ) -> Result<String, Error>
731 where F: Send + UnwindSafe + 'static + FnOnce(Arc<WorkerTask>) -> Result<(), Error>
733 println!("register worker thread
");
735 let worker = WorkerTask::new(worker_type, worker_id, userid, to_stdout)?;
736 let upid_str = worker.upid.to_string();
// The thread is named after the UPID; panics in the closure are caught
// and converted into task errors instead of tearing down the process.
738 let _child = std::thread::Builder::new().name(upid_str.clone()).spawn(move || {
739 let worker1 = worker.clone();
740 let result = match std::panic::catch_unwind(move || f(worker1)) {
743 match panic.downcast::<&str>() {
745 Err(format_err!("worker panicked
: {}
", panic_msg))
748 Err(format_err!("worker panicked
: unknown
type."))
754 worker.log_result(&result);
760 /// create state from self and a result
761 pub fn create_state(&self, result: &Result<(), Error>) -> TaskState {
762 let warn_count = self.data.lock().unwrap().warn_count;
764 let endtime = proxmox::tools::time::epoch_i64();
766 if let Err(err) = result {
767 TaskState::Error { message: err.to_string(), endtime }
768 } else if warn_count > 0 {
769 TaskState::Warning { count: warn_count, endtime }
771 TaskState::OK { endtime }
775 /// Log task result, remove task from running list
776 pub fn log_result(&self, result: &Result<(), Error>) {
777 let state = self.create_state(result);
778 self.log(state.result_text());
780 WORKER_TASK_LIST.lock().unwrap().remove(&self.upid.task_id);
781 let _ = update_active_workers(None);
782 super::set_worker_count(WORKER_TASK_LIST.lock().unwrap().len());
786 pub fn log<S: AsRef<str>>(&self, msg: S) {
787 let mut data = self.data.lock().unwrap();
788 data.logger.log(msg);
791 /// Log a message as warning.
792 pub fn warn<S: AsRef<str>>(&self, msg: S) {
793 let mut data = self.data.lock().unwrap();
794 data.logger.log(format!("WARN
: {}
", msg.as_ref()));
795 data.warn_count += 1;
798 /// Set progress indicator
799 pub fn progress(&self, progress: f64) {
800 if progress >= 0.0 && progress <= 1.0 {
801 let mut data = self.data.lock().unwrap();
802 data.progress = progress;
804 // fixme: log!("task '{}'
: ignoring strange value
for progress '{}'
", self.upid, progress);
// Request cooperative abort: set the flag and wake all abort listeners.
809 pub fn request_abort(&self) {
810 eprintln!("set abort flag
for worker {}
", self.upid);
811 self.abort_requested.store(true, Ordering::SeqCst);
// Fire every registered abort listener exactly once.
// NOTE(review): the loop/match structure draining the listener list is only
// partially visible in this chunk.
813 let mut data = self.data.lock().unwrap();
815 match data.abort_listeners.pop() {
818 let _ = ch.send(()); // ignore errors here
824 /// Test if abort was requested.
825 pub fn abort_requested(&self) -> bool {
826 self.abort_requested.load(Ordering::SeqCst)
829 /// Fail if abort was requested.
830 pub fn fail_on_abort(&self) -> Result<(), Error> {
831 if self.abort_requested() {
832 bail!("abort requested
- aborting task
");
837 /// Get a future which resolves on task abort
838 pub fn abort_future(&self) -> oneshot::Receiver<()> {
839 let (tx, rx) = oneshot::channel::<()>();
// Register under the data lock so registration cannot race request_abort.
841 let mut data = self.data.lock().unwrap();
// If abort already happened the returned future must resolve immediately.
// NOTE(review): the branch completing `tx` right away and the final `rx`
// return are not visible in this chunk.
842 if self.abort_requested() {
845 data.abort_listeners.push(tx);
850 pub fn upid(&self) -> &UPID {
855 impl crate::task::TaskState for WorkerTask {
856 fn check_abort(&self) -> Result<(), Error> {
860 fn log(&self, level: log::Level, message: &std::fmt::Arguments) {
862 log::Level::Error => self.warn(&message.to_string()),
863 log::Level::Warn => self.warn(&message.to_string()),
864 log::Level::Info => self.log(&message.to_string()),
865 log::Level::Debug => self.log(&format!("DEBUG
: {}
", message)),
866 log::Level::Trace => self.log(&format!("TRACE
: {}
", message)),