// proxmox-backup.git: src/server/rest.rs
// (blob at commit: "move ApiConfig, FileLogger and CommandoSocket to proxmox-rest-server workspace")
use std::collections::HashMap;
use std::future::Future;
use std::hash::BuildHasher;
use std::path::{Path, PathBuf};
use std::pin::Pin;
use std::sync::{Arc, Mutex};
use std::task::{Context, Poll};

use anyhow::{bail, format_err, Error};
use futures::future::{self, FutureExt, TryFutureExt};
use futures::stream::TryStreamExt;
use hyper::body::HttpBody;
use hyper::header::{self, HeaderMap};
use hyper::http::request::Parts;
use hyper::{Body, Request, Response, StatusCode};
use lazy_static::lazy_static;
use regex::Regex;
use serde_json::{json, Value};
use tokio::fs::File;
use tokio::time::Instant;
use url::form_urlencoded;

use proxmox::api::schema::{
    parse_parameter_strings, parse_simple_value, verify_json_object, ObjectSchemaType,
    ParameterSchema,
};
use proxmox::api::{
    check_api_permission, ApiHandler, ApiMethod, HttpError, Permission, RpcEnvironment,
    RpcEnvironmentType,
};
use proxmox::http_err;
use proxmox::tools::fs::CreateOptions;

use pbs_tools::compression::{DeflateEncoder, Level};
use pbs_tools::stream::AsyncReaderStream;
use pbs_api_types::{Authid, Userid};
use proxmox_rest_server::{ApiConfig, FileLogger, FileLogOptions, AuthError};

use super::environment::RestEnvironment;
use super::formatter::*;

use crate::auth_helpers::*;
use pbs_config::CachedUserInfo;
use crate::tools;
use crate::tools::compression::CompressionMethod;

extern "C" {
    fn tzset();
}

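/// Hyper REST server wrapping a shared [`ApiConfig`].
///
/// Implements `tower_service::Service` for accepted connections (TLS, plain TCP
/// and unix/vsock streams), yielding one [`ApiService`] per peer.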
pub struct RestServer {
    pub api_config: Arc<ApiConfig>,
}

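// MAX_URI_QUERY_LENGTH caps path + query length (longer requests get 414 URI Too Long,
// access-log lines are truncated to it); CHUNK_SIZE_LIMIT is the file size up to which
// static files are read into memory instead of being streamed.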
const MAX_URI_QUERY_LENGTH: usize = 3072;
const CHUNK_SIZE_LIMIT: u64 = 32 * 1024;

impl RestServer {
    pub fn new(api_config: ApiConfig) -> Self {
        Self {
            api_config: Arc::new(api_config),
        }
    }
}

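// Connection factories: each `call()` resolves to an `ApiService` bound to the peer
// address of the accepted TLS, plain TCP or unix (vsock) stream.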
impl tower_service::Service<&Pin<Box<tokio_openssl::SslStream<tokio::net::TcpStream>>>>
    for RestServer
{
    type Response = ApiService;
    type Error = Error;
    type Future = Pin<Box<dyn Future<Output = Result<ApiService, Error>> + Send>>;

    fn poll_ready(&mut self, _cx: &mut Context) -> Poll<Result<(), Self::Error>> {
        Poll::Ready(Ok(()))
    }

    fn call(
        &mut self,
        ctx: &Pin<Box<tokio_openssl::SslStream<tokio::net::TcpStream>>>,
    ) -> Self::Future {
        match ctx.get_ref().peer_addr() {
            Err(err) => future::err(format_err!("unable to get peer address - {}", err)).boxed(),
            Ok(peer) => future::ok(ApiService {
                peer,
                api_config: self.api_config.clone(),
            })
            .boxed(),
        }
    }
}

impl tower_service::Service<&tokio::net::TcpStream> for RestServer {
    type Response = ApiService;
    type Error = Error;
    type Future = Pin<Box<dyn Future<Output = Result<ApiService, Error>> + Send>>;

    fn poll_ready(&mut self, _cx: &mut Context) -> Poll<Result<(), Self::Error>> {
        Poll::Ready(Ok(()))
    }

    fn call(&mut self, ctx: &tokio::net::TcpStream) -> Self::Future {
        match ctx.peer_addr() {
            Err(err) => future::err(format_err!("unable to get peer address - {}", err)).boxed(),
            Ok(peer) => future::ok(ApiService {
                peer,
                api_config: self.api_config.clone(),
            })
            .boxed(),
        }
    }
}

impl tower_service::Service<&tokio::net::UnixStream> for RestServer {
    type Response = ApiService;
    type Error = Error;
    type Future = Pin<Box<dyn Future<Output = Result<ApiService, Error>> + Send>>;

    fn poll_ready(&mut self, _cx: &mut Context) -> Poll<Result<(), Self::Error>> {
        Poll::Ready(Ok(()))
    }

    fn call(&mut self, _ctx: &tokio::net::UnixStream) -> Self::Future {
        // TODO: Find a way to actually represent the vsock peer in the ApiService struct - for now
        // it doesn't really matter, so just use a fake IP address
        let fake_peer = "0.0.0.0:807".parse().unwrap();
        future::ok(ApiService {
            peer: fake_peer,
            api_config: self.api_config.clone(),
        })
        .boxed()
    }
}

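/// Per-connection HTTP service: dispatches each request against the shared
/// [`ApiConfig`] and logs the result for the connected peer.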
pub struct ApiService {
    pub peer: std::net::SocketAddr,
    pub api_config: Arc<ApiConfig>,
}

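/// Write one access-log entry (and an error line via `log::error!` for failed requests),
/// unless the response carries a `NoLogExtension`.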
fn log_response(
    logfile: Option<&Arc<Mutex<FileLogger>>>,
    peer: &std::net::SocketAddr,
    method: hyper::Method,
    path_query: &str,
    resp: &Response<Body>,
    user_agent: Option<String>,
) {
    if resp.extensions().get::<NoLogExtension>().is_some() {
        return;
    };

    // we also log URL-too-long requests, so avoid messages bigger than PIPE_BUF (4k on Linux)
    // to profit from atomicity guarantees for O_APPEND opened logfiles
    let path = &path_query[..MAX_URI_QUERY_LENGTH.min(path_query.len())];

    let status = resp.status();
    if !(status.is_success() || status.is_informational()) {
        let reason = status.canonical_reason().unwrap_or("unknown reason");

        let message = match resp.extensions().get::<ErrorMessageExtension>() {
            Some(data) => &data.0,
            None => "request failed",
        };

        log::error!(
            "{} {}: {} {}: [client {}] {}",
            method.as_str(),
            path,
            status.as_str(),
            reason,
            peer,
            message
        );
    }
    if let Some(logfile) = logfile {
        let auth_id = match resp.extensions().get::<Authid>() {
            Some(auth_id) => auth_id.to_string(),
            None => "-".to_string(),
        };
        let now = proxmox::tools::time::epoch_i64();
        // time format which apache/nginx use (by default), copied from pve-http-server
        let datetime = proxmox::tools::time::strftime_local("%d/%m/%Y:%H:%M:%S %z", now)
            .unwrap_or_else(|_| "-".to_string());

        logfile.lock().unwrap().log(format!(
            "{} - {} [{}] \"{} {}\" {} {} {}",
            peer.ip(),
            auth_id,
            datetime,
            method.as_str(),
            path,
            status.as_str(),
            resp.body().size_hint().lower(),
            user_agent.unwrap_or_else(|| "-".to_string()),
        ));
    }
}
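/// Open the API authentication log (append mode, owned by the backup user).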
pub fn auth_logger() -> Result<FileLogger, Error> {
    let backup_user = pbs_config::backup_user()?;

    let file_opts = CreateOptions::new()
        .owner(backup_user.uid)
        .group(backup_user.gid);

    let logger_options = FileLogOptions {
        append: true,
        prefix_time: true,
        file_opts,
        ..Default::default()
    };
    FileLogger::new(pbs_buildcfg::API_AUTH_LOG_FN, logger_options)
}

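/// Extract the original client address from a `Forwarded: for="..."` header, if present.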
fn get_proxied_peer(headers: &HeaderMap) -> Option<std::net::SocketAddr> {
    lazy_static! {
        static ref RE: Regex = Regex::new(r#"for="([^"]+)""#).unwrap();
    }
    let forwarded = headers.get(header::FORWARDED)?.to_str().ok()?;
    let capture = RE.captures(&forwarded)?;
    let rhost = capture.get(1)?.as_str();

    rhost.parse().ok()
}

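/// Return the request's `User-Agent` header, truncated to at most 128 bytes.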
fn get_user_agent(headers: &HeaderMap) -> Option<String> {
    let agent = headers.get(header::USER_AGENT)?.to_str();
    agent
        .map(|s| {
            let mut s = s.to_owned();
            s.truncate(128);
            s
        })
        .ok()
}

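// Request entry point: run the request handler, turn errors into HTTP error responses
// and log the outcome for this peer.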
impl tower_service::Service<Request<Body>> for ApiService {
    type Response = Response<Body>;
    type Error = Error;
    #[allow(clippy::type_complexity)]
    type Future = Pin<Box<dyn Future<Output = Result<Self::Response, Self::Error>> + Send>>;

    fn poll_ready(&mut self, _cx: &mut Context) -> Poll<Result<(), Self::Error>> {
        Poll::Ready(Ok(()))
    }

    fn call(&mut self, req: Request<Body>) -> Self::Future {
        let path = req.uri().path_and_query().unwrap().as_str().to_owned();
        let method = req.method().clone();
        let user_agent = get_user_agent(req.headers());

        let config = Arc::clone(&self.api_config);
        let peer = match get_proxied_peer(req.headers()) {
            Some(proxied_peer) => proxied_peer,
            None => self.peer,
        };
        async move {
            let response = match handle_request(Arc::clone(&config), req, &peer).await {
                Ok(response) => response,
                Err(err) => {
                    let (err, code) = match err.downcast_ref::<HttpError>() {
                        Some(apierr) => (apierr.message.clone(), apierr.code),
                        _ => (err.to_string(), StatusCode::BAD_REQUEST),
                    };
                    Response::builder()
                        .status(code)
                        .extension(ErrorMessageExtension(err.to_string()))
                        .body(err.into())?
                }
            };
            let logger = config.get_file_log();
            log_response(logger, &peer, method, &path, &response, user_agent);
            Ok(response)
        }
        .boxed()
    }
}

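/// Build the parameter object from urlencoded body data, the URI query string and the
/// matched path parameters, validated against the method's parameter schema.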
fn parse_query_parameters<S: 'static + BuildHasher + Send>(
    param_schema: ParameterSchema,
    form: &str, // x-www-form-urlencoded body data
    parts: &Parts,
    uri_param: &HashMap<String, String, S>,
) -> Result<Value, Error> {
    let mut param_list: Vec<(String, String)> = vec![];

    if !form.is_empty() {
        for (k, v) in form_urlencoded::parse(form.as_bytes()).into_owned() {
            param_list.push((k, v));
        }
    }

    if let Some(query_str) = parts.uri.query() {
        for (k, v) in form_urlencoded::parse(query_str.as_bytes()).into_owned() {
            if k == "_dc" {
                continue;
            } // skip extjs "disable cache" parameter
            param_list.push((k, v));
        }
    }

    for (k, v) in uri_param {
        param_list.push((k.clone(), v.clone()));
    }

    let params = parse_parameter_strings(&param_list, param_schema, true)?;

    Ok(params)
}

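/// Read and parse the request body (JSON or x-www-form-urlencoded, limited to 64 KiB)
/// into a parameter object validated against the method's parameter schema.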
async fn get_request_parameters<S: 'static + BuildHasher + Send>(
    param_schema: ParameterSchema,
    parts: Parts,
    req_body: Body,
    uri_param: HashMap<String, String, S>,
) -> Result<Value, Error> {
    let mut is_json = false;

    if let Some(value) = parts.headers.get(header::CONTENT_TYPE) {
        match value.to_str().map(|v| v.split(';').next()) {
            Ok(Some("application/x-www-form-urlencoded")) => {
                is_json = false;
            }
            Ok(Some("application/json")) => {
                is_json = true;
            }
            _ => bail!("unsupported content type {:?}", value.to_str()),
        }
    }

    let body = TryStreamExt::map_err(req_body, |err| {
        http_err!(BAD_REQUEST, "Problems reading request body: {}", err)
    })
    .try_fold(Vec::new(), |mut acc, chunk| async move {
        // FIXME: max request body size?
        if acc.len() + chunk.len() < 64 * 1024 {
            acc.extend_from_slice(&*chunk);
            Ok(acc)
        } else {
            Err(http_err!(BAD_REQUEST, "Request body too large"))
        }
    })
    .await?;

    let utf8_data =
        std::str::from_utf8(&body).map_err(|err| format_err!("Request body not UTF-8: {}", err))?;

    if is_json {
        let mut params: Value = serde_json::from_str(utf8_data)?;
        for (k, v) in uri_param {
            if let Some((_optional, prop_schema)) = param_schema.lookup(&k) {
                params[&k] = parse_simple_value(&v, prop_schema)?;
            }
        }
        verify_json_object(&params, &param_schema)?;
        return Ok(params);
    } else {
        parse_query_parameters(param_schema, utf8_data, &parts, &uri_param)
    }
}

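/// Response extension marking a response that must not show up in the access log.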
struct NoLogExtension();

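/// Forward a request for a `protected` API method to the privileged local daemon on
/// 127.0.0.1:82, passing the client address along in a `Forwarded` header.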
async fn proxy_protected_request(
    info: &'static ApiMethod,
    mut parts: Parts,
    req_body: Body,
    peer: &std::net::SocketAddr,
) -> Result<Response<Body>, Error> {
    let mut uri_parts = parts.uri.clone().into_parts();

    uri_parts.scheme = Some(http::uri::Scheme::HTTP);
    uri_parts.authority = Some(http::uri::Authority::from_static("127.0.0.1:82"));
    let new_uri = http::Uri::from_parts(uri_parts).unwrap();

    parts.uri = new_uri;

    let mut request = Request::from_parts(parts, req_body);
    request.headers_mut().insert(
        header::FORWARDED,
        format!("for=\"{}\";", peer).parse().unwrap(),
    );

    let reload_timezone = info.reload_timezone;

    let resp = hyper::client::Client::new()
        .request(request)
        .map_err(Error::from)
        .map_ok(|mut resp| {
            resp.extensions_mut().insert(NoLogExtension());
            resp
        })
        .await?;

    if reload_timezone {
        unsafe {
            tzset();
        }
    }

    Ok(resp)
}

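/// Run an API handler (sync, async or "async http") and turn its result into an HTTP
/// response, applying the negotiated compression and delaying UNAUTHORIZED responses
/// until 3 seconds after the request started.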
pub async fn handle_api_request<Env: RpcEnvironment, S: 'static + BuildHasher + Send>(
    mut rpcenv: Env,
    info: &'static ApiMethod,
    formatter: &'static OutputFormatter,
    parts: Parts,
    req_body: Body,
    uri_param: HashMap<String, String, S>,
) -> Result<Response<Body>, Error> {
    let delay_unauth_time = std::time::Instant::now() + std::time::Duration::from_millis(3000);
    let compression = extract_compression_method(&parts.headers);

    let result = match info.handler {
        ApiHandler::AsyncHttp(handler) => {
            let params = parse_query_parameters(info.parameters, "", &parts, &uri_param)?;
            (handler)(parts, req_body, params, info, Box::new(rpcenv)).await
        }
        ApiHandler::Sync(handler) => {
            let params =
                get_request_parameters(info.parameters, parts, req_body, uri_param).await?;
            (handler)(params, info, &mut rpcenv).map(|data| (formatter.format_data)(data, &rpcenv))
        }
        ApiHandler::Async(handler) => {
            let params =
                get_request_parameters(info.parameters, parts, req_body, uri_param).await?;
            (handler)(params, info, &mut rpcenv)
                .await
                .map(|data| (formatter.format_data)(data, &rpcenv))
        }
    };

    let mut resp = match result {
        Ok(resp) => resp,
        Err(err) => {
            if let Some(httperr) = err.downcast_ref::<HttpError>() {
                if httperr.code == StatusCode::UNAUTHORIZED {
                    tokio::time::sleep_until(Instant::from_std(delay_unauth_time)).await;
                }
            }
            (formatter.format_error)(err)
        }
    };

    let resp = match compression {
        Some(CompressionMethod::Deflate) => {
            resp.headers_mut().insert(
                header::CONTENT_ENCODING,
                CompressionMethod::Deflate.content_encoding(),
            );
            resp.map(|body| {
                Body::wrap_stream(DeflateEncoder::with_quality(
                    TryStreamExt::map_err(body, |err| {
                        proxmox::io_format_err!("error during compression: {}", err)
                    }),
                    Level::Default,
                ))
            })
        }
        None => resp,
    };

    if info.reload_timezone {
        unsafe {
            tzset();
        }
    }

    Ok(resp)
}

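/// Render the GUI index (or console) template with node name, user name, CSRF token,
/// language and debug settings.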
fn get_index(
    userid: Option<Userid>,
    csrf_token: Option<String>,
    language: Option<String>,
    api: &Arc<ApiConfig>,
    parts: Parts,
) -> Response<Body> {
    let nodename = proxmox::tools::nodename();
    let user = userid.as_ref().map(|u| u.as_str()).unwrap_or("");

    let csrf_token = csrf_token.unwrap_or_else(|| String::from(""));

    let mut debug = false;
    let mut template_file = "index";

    if let Some(query_str) = parts.uri.query() {
        for (k, v) in form_urlencoded::parse(query_str.as_bytes()).into_owned() {
            if k == "debug" && v != "0" && v != "false" {
                debug = true;
            } else if k == "console" {
                template_file = "console";
            }
        }
    }

    let mut lang = String::from("");
    if let Some(language) = language {
        if Path::new(&format!("/usr/share/pbs-i18n/pbs-lang-{}.js", language)).exists() {
            lang = language;
        }
    }

    let data = json!({
        "NodeName": nodename,
        "UserName": user,
        "CSRFPreventionToken": csrf_token,
        "language": lang,
        "debug": debug,
    });

    let (ct, index) = match api.render_template(template_file, &data) {
        Ok(index) => ("text/html", index),
        Err(err) => ("text/plain", format!("Error rendering template: {}", err)),
    };

    let mut resp = Response::builder()
        .status(StatusCode::OK)
        .header(header::CONTENT_TYPE, ct)
        .body(index.into())
        .unwrap();

    if let Some(userid) = userid {
        resp.extensions_mut().insert(Authid::from((userid, None)));
    }

    resp
}

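/// Map a file extension to its content type; the second tuple member disables
/// transparent compression for types that are already compressed.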
fn extension_to_content_type(filename: &Path) -> (&'static str, bool) {
    if let Some(ext) = filename.extension().and_then(|osstr| osstr.to_str()) {
        return match ext {
            "css" => ("text/css", false),
            "html" => ("text/html", false),
            "js" => ("application/javascript", false),
            "json" => ("application/json", false),
            "map" => ("application/json", false),
            "png" => ("image/png", true),
            "ico" => ("image/x-icon", true),
            "gif" => ("image/gif", true),
            "svg" => ("image/svg+xml", false),
            "jar" => ("application/java-archive", true),
            "woff" => ("application/font-woff", true),
            "woff2" => ("application/font-woff2", true),
544 "ttf" => ("application/font-snft", true),
545 "pdf" => ("application/pdf", true),
546 "epub" => ("application/epub+zip", true),
547 "mp3" => ("audio/mpeg", true),
548 "oga" => ("audio/ogg", true),
549 "tgz" => ("application/x-compressed-tar", true),
550 _ => ("application/octet-stream", false),
551 };
552 }
553
554 ("application/octet-stream", false)
555 }
556
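/// Serve a small static file by reading it completely into memory, optionally
/// deflate-compressed.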
async fn simple_static_file_download(
    filename: PathBuf,
    content_type: &'static str,
    compression: Option<CompressionMethod>,
) -> Result<Response<Body>, Error> {
    use tokio::io::AsyncReadExt;

    let mut file = File::open(filename)
        .await
        .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;

    let mut data: Vec<u8> = Vec::new();

    let mut response = match compression {
        Some(CompressionMethod::Deflate) => {
            let mut enc = DeflateEncoder::with_quality(data, Level::Default);
            enc.compress_vec(&mut file, CHUNK_SIZE_LIMIT as usize)
                .await?;
            let mut response = Response::new(enc.into_inner().into());
            response.headers_mut().insert(
                header::CONTENT_ENCODING,
                CompressionMethod::Deflate.content_encoding(),
            );
            response
        }
        None => {
            file.read_to_end(&mut data)
                .await
                .map_err(|err| http_err!(BAD_REQUEST, "File read failed: {}", err))?;
            Response::new(data.into())
        }
    };

    response.headers_mut().insert(
        header::CONTENT_TYPE,
        header::HeaderValue::from_static(content_type),
    );

    Ok(response)
}

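/// Serve a static file as a streamed response body, optionally deflate-compressed.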
async fn chunked_static_file_download(
    filename: PathBuf,
    content_type: &'static str,
    compression: Option<CompressionMethod>,
) -> Result<Response<Body>, Error> {
    let mut resp = Response::builder()
        .status(StatusCode::OK)
        .header(header::CONTENT_TYPE, content_type);

    let file = File::open(filename)
        .await
        .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;

    let body = match compression {
        Some(CompressionMethod::Deflate) => {
            resp = resp.header(
                header::CONTENT_ENCODING,
                CompressionMethod::Deflate.content_encoding(),
            );
            Body::wrap_stream(DeflateEncoder::with_quality(
                AsyncReaderStream::new(file),
                Level::Default,
            ))
        }
        None => Body::wrap_stream(AsyncReaderStream::new(file)),
    };

    Ok(resp.body(body).unwrap())
}

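/// Serve a static file, choosing between the in-memory and the streaming path based on
/// `CHUNK_SIZE_LIMIT`, and disabling compression for already compressed content types.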
async fn handle_static_file_download(
    filename: PathBuf,
    compression: Option<CompressionMethod>,
) -> Result<Response<Body>, Error> {
    let metadata = tokio::fs::metadata(filename.clone())
        .map_err(|err| http_err!(BAD_REQUEST, "File access problems: {}", err))
        .await?;

    let (content_type, nocomp) = extension_to_content_type(&filename);
    let compression = if nocomp { None } else { compression };

    if metadata.len() < CHUNK_SIZE_LIMIT {
        simple_static_file_download(filename, content_type, compression).await
    } else {
        chunked_static_file_download(filename, content_type, compression).await
    }
}

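/// Read the GUI language from the `PBSLangCookie` cookie, if set.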
fn extract_lang_header(headers: &http::HeaderMap) -> Option<String> {
    if let Some(Ok(cookie)) = headers.get("COOKIE").map(|v| v.to_str()) {
        return tools::extract_cookie(cookie, "PBSLangCookie");
    }
    None
}

// FIXME: support handling multiple compression methods
fn extract_compression_method(headers: &http::HeaderMap) -> Option<CompressionMethod> {
    if let Some(Ok(encodings)) = headers.get(header::ACCEPT_ENCODING).map(|v| v.to_str()) {
        for encoding in encodings.split(&[',', ' '][..]) {
            if let Ok(method) = encoding.parse() {
                return Some(method);
            }
        }
    }
    None
}

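/// Top-level request dispatcher: routes `/api2/{format}/...` calls through authentication,
/// permission checks and the API handlers, and serves the GUI index and static files for
/// everything else.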
async fn handle_request(
    api: Arc<ApiConfig>,
    req: Request<Body>,
    peer: &std::net::SocketAddr,
) -> Result<Response<Body>, Error> {
    let (parts, body) = req.into_parts();
    let method = parts.method.clone();
    let (path, components) = tools::normalize_uri_path(parts.uri.path())?;

    let comp_len = components.len();

    let query = parts.uri.query().unwrap_or_default();
    if path.len() + query.len() > MAX_URI_QUERY_LENGTH {
        return Ok(Response::builder()
            .status(StatusCode::URI_TOO_LONG)
            .body("".into())
            .unwrap());
    }

    let env_type = api.env_type();
    let mut rpcenv = RestEnvironment::new(env_type);

    rpcenv.set_client_ip(Some(*peer));

    let auth = &api.api_auth;

    let delay_unauth_time = std::time::Instant::now() + std::time::Duration::from_millis(3000);
    let access_forbidden_time = std::time::Instant::now() + std::time::Duration::from_millis(500);

    if comp_len >= 1 && components[0] == "api2" {
        if comp_len >= 2 {
            let format = components[1];

            let formatter = match format {
                "json" => &JSON_FORMATTER,
                "extjs" => &EXTJS_FORMATTER,
                _ => bail!("Unsupported output format '{}'.", format),
            };

            let mut uri_param = HashMap::new();
            let api_method = api.find_method(&components[2..], method.clone(), &mut uri_param);

            let mut auth_required = true;
            if let Some(api_method) = api_method {
                if let Permission::World = *api_method.access.permission {
                    auth_required = false; // no auth for endpoints with World permission
                }
            }

            if auth_required {
                match auth.check_auth(&parts.headers, &method) {
                    Ok(authid) => rpcenv.set_auth_id(Some(authid)),
                    Err(auth_err) => {
                        let err = match auth_err {
                            AuthError::Generic(err) => err,
                            AuthError::NoData => {
                                format_err!("no authentication credentials provided.")
                            }
                        };
                        let peer = peer.ip();
                        auth_logger()?.log(format!(
                            "authentication failure; rhost={} msg={}",
                            peer, err
                        ));

                        // always delay unauthorized calls by 3 seconds (from start of request)
                        let err = http_err!(UNAUTHORIZED, "authentication failed - {}", err);
                        tokio::time::sleep_until(Instant::from_std(delay_unauth_time)).await;
                        return Ok((formatter.format_error)(err));
                    }
                }
            }

            match api_method {
                None => {
                    let err = http_err!(NOT_FOUND, "Path '{}' not found.", path);
                    return Ok((formatter.format_error)(err));
                }
                Some(api_method) => {
                    let auth_id = rpcenv.get_auth_id();
                    let user_info = CachedUserInfo::new()?;

                    if !check_api_permission(
                        api_method.access.permission,
                        auth_id.as_deref(),
                        &uri_param,
                        user_info.as_ref(),
                    ) {
                        let err = http_err!(FORBIDDEN, "permission check failed");
                        tokio::time::sleep_until(Instant::from_std(access_forbidden_time)).await;
                        return Ok((formatter.format_error)(err));
                    }

                    let result = if api_method.protected && env_type == RpcEnvironmentType::PUBLIC {
                        proxy_protected_request(api_method, parts, body, peer).await
                    } else {
                        handle_api_request(rpcenv, api_method, formatter, parts, body, uri_param)
                            .await
                    };

                    let mut response = match result {
                        Ok(resp) => resp,
                        Err(err) => (formatter.format_error)(err),
                    };

                    if let Some(auth_id) = auth_id {
                        let auth_id: Authid = auth_id.parse()?;
                        response.extensions_mut().insert(auth_id);
                    }

                    return Ok(response);
                }
            }
        }
    } else {
        // no auth required for accessing files!

        if method != hyper::Method::GET {
            bail!("Unsupported HTTP method {}", method);
        }

        if comp_len == 0 {
            let language = extract_lang_header(&parts.headers);
            match auth.check_auth(&parts.headers, &method) {
                Ok(auth_id) => {
                    let auth_id: Authid = auth_id.parse()?;
                    if !auth_id.is_token() {
                        let userid = auth_id.user();
                        let new_csrf_token = assemble_csrf_prevention_token(csrf_secret(), userid);
                        return Ok(get_index(
                            Some(userid.clone()),
                            Some(new_csrf_token),
                            language,
                            &api,
                            parts,
                        ));
                    }
                }
                Err(AuthError::Generic(_)) => {
                    tokio::time::sleep_until(Instant::from_std(delay_unauth_time)).await;
                }
                Err(AuthError::NoData) => {}
            }
            return Ok(get_index(None, None, language, &api, parts));
        } else {
            let filename = api.find_alias(&components);
            let compression = extract_compression_method(&parts.headers);
            return handle_static_file_download(filename, compression).await;
        }
    }

    Err(http_err!(NOT_FOUND, "Path '{}' not found.", path))
}