output: &BuildOutput,
out_dir: &Path,
package_id: PackageId,
-) {
+) -> CargoResult<()> {
let library_paths = output
.library_paths
.iter()
out_dir,
}
.to_json_string();
- state.stdout(msg);
+ state.stdout(msg)?;
+ Ok(())
}
fn build_work(cx: &mut Context<'_, '_>, unit: &Unit) -> CargoResult<Job> {
warnings_in_case_of_panic.push(warning.to_owned());
}
if extra_verbose {
- state.stdout(format!("{}{}", prefix, stdout));
+ state.stdout(format!("{}{}", prefix, stdout))?;
}
Ok(())
},
&mut |stderr| {
if extra_verbose {
- state.stderr(format!("{}{}", prefix, stderr));
+ state.stderr(format!("{}{}", prefix, stderr))?;
}
Ok(())
},
BuildOutput::parse(&output.stdout, &pkg_name, &script_out_dir, &script_out_dir)?;
if json_messages {
- emit_build_output(state, &parsed_output, script_out_dir.as_path(), id);
+ emit_build_output(state, &parsed_output, script_out_dir.as_path(), id)?;
}
build_script_outputs
.lock()
};
if json_messages {
- emit_build_output(state, &output, script_out_dir.as_path(), id);
+ emit_build_output(state, &output, script_out_dir.as_path(), id)?;
}
build_script_outputs
/// Channel back to the main thread to coordinate messages and such.
messages: Arc<Queue<Message>>,
- /// Normally messages are handled in a bounded way. When the job is fresh
- /// however we need to immediately return to prevent a deadlock as the messages
- /// are processed on the same thread as they are sent from.
- messages_bounded: bool,
+ /// Normally output is sent to the job queue with backpressure. When the job is fresh,
+ /// however, the output must be displayed immediately to prevent a deadlock, because the
+ /// output messages are processed on the same thread they are sent from. `output`
+ /// specifies where to write the output in that case.
+ output: Option<&'a Config>,
/// The job id that this state is associated with, used when sending
/// messages back to the main thread.
.push(Message::BuildPlanMsg(module_name, cmd, filenames));
}
- pub fn stdout(&self, stdout: String) {
- if self.messages_bounded {
- self.messages.push_bounded(Message::Stdout(stdout));
+ pub fn stdout(&self, stdout: String) -> CargoResult<()> {
+ if let Some(config) = self.output {
+ writeln!(config.shell().out(), "{}", stdout)?;
} else {
- self.messages.push(Message::Stdout(stdout));
+ self.messages.push_bounded(Message::Stdout(stdout));
}
+ Ok(())
}
- pub fn stderr(&self, stderr: String) {
- if self.messages_bounded {
- self.messages.push_bounded(Message::Stderr(stderr));
+ pub fn stderr(&self, stderr: String) -> CargoResult<()> {
+ if let Some(config) = self.output {
+ let mut shell = config.shell();
+ shell.print_ansi(stderr.as_bytes())?;
+ shell.err().write_all(b"\n")?;
} else {
- self.messages.push(Message::Stderr(stderr));
+ self.messages.push_bounded(Message::Stderr(stderr));
}
+ Ok(())
}
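
To make the deadlock concern in the doc comment above concrete, here is a minimal, self-contained sketch (not Cargo code; `Sink` and `emit` are made-up names) of the two output paths: worker threads push into a bounded queue drained by the coordinating thread, while a job running on that same thread must write directly, since a blocking push from the draining thread could never be relieved.

```rust
use std::io::Write;
use std::sync::mpsc::{sync_channel, SyncSender};
use std::thread;

// Made-up stand-in for the two output paths: worker threads push into a
// bounded queue, while a job on the coordinating thread writes directly.
enum Sink<'a> {
    Queue(&'a SyncSender<String>),
    Direct,
}

fn emit(sink: &Sink<'_>, line: String) -> std::io::Result<()> {
    match sink {
        // The blocking send is the backpressure applied to worker threads.
        Sink::Queue(tx) => {
            tx.send(line).expect("coordinator dropped the receiver");
            Ok(())
        }
        // Pushing from the thread that drains the queue could block forever
        // once the queue fills up, so bypass it and print immediately.
        Sink::Direct => writeln!(std::io::stdout(), "{line}"),
    }
}

fn main() -> std::io::Result<()> {
    let (tx, rx) = sync_channel::<String>(4);

    // A "dirty" job on a worker thread goes through the bounded queue.
    let worker = thread::spawn(move || emit(&Sink::Queue(&tx), "from a worker".into()));

    // A "fresh" job runs on the coordinating thread and prints directly.
    emit(&Sink::Direct, "from the coordinator".into())?;

    // The coordinating thread drains the queue until all senders are gone.
    for line in rx {
        println!("drained: {line}");
    }
    worker.join().unwrap()
}
```

In the diff, `Sink::Direct` corresponds to `output: Some(config)` (the fresh, same-thread case) and `Sink::Queue` to `output: None` together with `push_bounded`.
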
/// A method used to signal to the coordinator thread that the rmeta file
self.note_working_on(cx.bcx.config, unit, fresh)?;
}
- let doit = move || {
- let state = JobState {
- id,
- messages: messages.clone(),
- messages_bounded: job.freshness() == Freshness::Dirty,
- rmeta_required: Cell::new(rmeta_required),
- _marker: marker::PhantomData,
- };
-
+ let doit = move |state: JobState<'_>| {
let mut sender = FinishOnDrop {
- messages: &messages,
+ messages: &state.messages,
id,
result: None,
};
// we need to make sure that the metadata is flagged as produced so
// send a synthetic message here.
if state.rmeta_required.get() && sender.result.as_ref().unwrap().is_ok() {
- messages.push(Message::Finish(id, Artifact::Metadata, Ok(())));
+ state
+ .messages
+ .push(Message::Finish(state.id, Artifact::Metadata, Ok(())));
}
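
The `FinishOnDrop` wrapper used here relies on the drop-guard pattern. As an illustration only (the `FinishGuard` and `Message` types below are simplified stand-ins, not Cargo's types), a guard like this guarantees that a `Finish` message reaches the coordinator even if the job panics or returns early:

```rust
use std::sync::mpsc::{channel, Sender};

enum Message {
    Finish(u32, Result<(), String>),
}

// Simplified drop guard: whatever happens inside the job (early return,
// panic, forgotten assignment), a Finish message is sent when the guard
// goes out of scope.
struct FinishGuard<'a> {
    messages: &'a Sender<Message>,
    id: u32,
    result: Option<Result<(), String>>,
}

impl Drop for FinishGuard<'_> {
    fn drop(&mut self) {
        let result = self
            .result
            .take()
            .unwrap_or_else(|| Err("job exited without reporting a result".to_string()));
        // If the coordinator is already gone, there is nobody left to notify.
        let _ = self.messages.send(Message::Finish(self.id, result));
    }
}

fn main() {
    let (tx, rx) = channel();
    {
        let mut guard = FinishGuard { messages: &tx, id: 7, result: None };
        // ... run the job ...
        guard.result = Some(Ok(()));
    } // dropped here: Finish(7, Ok(())) reaches the coordinator
    match rx.recv().unwrap() {
        Message::Finish(id, result) => println!("job {id} finished with {result:?}"),
    }
}
```
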
// Use a helper struct with a `Drop` implementation to guarantee
self.timings.add_fresh();
// Running a fresh job on the same thread is often much faster than spawning a new
// thread to run the job.
- doit();
+ doit(JobState {
+ id,
+ messages: messages.clone(),
+ output: Some(cx.bcx.config),
+ rmeta_required: Cell::new(rmeta_required),
+ _marker: marker::PhantomData,
+ });
}
Freshness::Dirty => {
self.timings.add_dirty();
- scope.spawn(move |_| doit());
+ scope.spawn(move |_| {
+ doit(JobState {
+ id,
+ messages: messages.clone(),
+ output: None,
+ rmeta_required: Cell::new(rmeta_required),
+ _marker: marker::PhantomData,
+ })
+ });
}
}
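
For readers skimming the diff, here is a minimal sketch of the dispatch above, under the assumption that `run_job`, `Freshness`, and the boolean `direct_output` stand in for the real job closure, `Freshness`, and `output: Option<&Config>`: fresh jobs run inline on the current thread with direct output, while dirty jobs are spawned onto a worker thread and go through the queue.

```rust
use std::thread;

enum Freshness {
    Fresh,
    Dirty,
}

// Stand-in for the state handed to `doit`; `direct_output` plays the role
// of the `output: Option<&Config>` field from the diff.
struct JobState {
    id: u32,
    direct_output: bool,
}

fn run_job(freshness: Freshness, id: u32, doit: impl FnOnce(JobState) + Send) {
    match freshness {
        // Fresh jobs are cheap: run them inline on the current thread and
        // let them write their output directly.
        Freshness::Fresh => doit(JobState { id, direct_output: true }),
        // Dirty jobs run on a worker thread and route output through the
        // coordinator's queue.
        Freshness::Dirty => {
            thread::scope(|s| {
                s.spawn(|| doit(JobState { id, direct_output: false }));
            });
        }
    }
}

fn main() {
    let job = |state: JobState| {
        println!("job {} running (direct_output: {})", state.id, state.direct_output);
    };
    run_job(Freshness::Fresh, 1, job);
    run_job(Freshness::Dirty, 2, job);
}
```
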
fresh,
}
.to_json_string();
- state.stdout(msg);
+ state.stdout(msg)?;
}
Ok(())
}))
_package_id: PackageId,
_target: &Target,
) -> CargoResult<()> {
- state.stdout(line.to_string());
+ state.stdout(line.to_string())?;
Ok(())
}
// something like that), so skip over everything that doesn't look like a
// JSON message.
if !line.starts_with('{') {
- state.stderr(line.to_string());
+ state.stderr(line.to_string())?;
return Ok(true);
}
// to stderr.
Err(e) => {
debug!("failed to parse json: {:?}", e);
- state.stderr(line.to_string());
+ state.stderr(line.to_string())?;
return Ok(true);
}
};
.map(|v| String::from_utf8(v).expect("utf8"))
.expect("strip should never fail")
};
- state.stderr(rendered);
+ state.stderr(rendered)?;
return Ok(true);
}
}
// Switch json lines from rustc/rustdoc that appear on stderr to stdout
// instead. We want the stdout of Cargo to always be machine parseable as
// stderr has our colorized human-readable messages.
- state.stdout(msg);
+ state.stdout(msg)?;
Ok(true)
}
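
The routing rule described in the comments above (skip anything that does not look like JSON, and keep Cargo's stdout machine-parseable by re-emitting JSON diagnostics there) can be summarized with a small standalone sketch. `route_line` is a hypothetical helper, not Cargo's `on_stderr_line`, and it uses `serde_json` (an assumed Cargo.toml dependency) only to check that a line really is JSON:

```rust
use std::io::Write;

// Human-readable diagnostics stay on stderr; valid JSON messages are
// re-emitted on stdout so that stdout remains machine parseable.
fn route_line(line: &str) -> std::io::Result<()> {
    if !line.starts_with('{') || serde_json::from_str::<serde_json::Value>(line).is_err() {
        // Not JSON (or unparseable JSON): pass it through verbatim on stderr.
        writeln!(std::io::stderr(), "{line}")
    } else {
        writeln!(std::io::stdout(), "{line}")
    }
}

fn main() -> std::io::Result<()> {
    route_line("warning: unused variable `x`")?;
    route_line(r#"{"reason":"compiler-message","message":{}}"#)?;
    Ok(())
}
```
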