// proxmox-rest-server/src/rest.rs
use std::collections::HashMap;
use std::future::Future;
use std::hash::BuildHasher;
use std::path::{Path, PathBuf};
use std::pin::Pin;
use std::sync::{Arc, Mutex};
use std::task::{Context, Poll};

use anyhow::{bail, format_err, Error};
use futures::future::{self, FutureExt, TryFutureExt};
use futures::stream::TryStreamExt;
use hyper::body::HttpBody;
use hyper::header::{self, HeaderMap};
use hyper::http::request::Parts;
use hyper::{Body, Request, Response, StatusCode};
use lazy_static::lazy_static;
use regex::Regex;
use serde_json::Value;
use tokio::fs::File;
use tokio::time::Instant;
use url::form_urlencoded;
use tower_service::Service;

use proxmox_router::{
    check_api_permission, ApiHandler, ApiMethod, HttpError, Permission, RpcEnvironment,
    RpcEnvironmentType, UserInformation,
};
use proxmox_router::http_err;
use proxmox_schema::{ObjectSchemaType, ParameterSchema};

use proxmox_http::client::RateLimitedStream;

use proxmox_async::compression::{DeflateEncoder, Level};
use proxmox_async::stream::AsyncReaderStream;

use crate::{
    ApiConfig, FileLogger, AuthError, RestEnvironment, CompressionMethod,
    normalize_uri_path, formatter::*,
};

extern "C" {
    fn tzset();
}

struct AuthStringExtension(String);

struct EmptyUserInformation {}

impl UserInformation for EmptyUserInformation {
    fn is_superuser(&self, _userid: &str) -> bool { false }
    fn is_group_member(&self, _userid: &str, _group: &str) -> bool { false }
    fn lookup_privs(&self, _userid: &str, _path: &[&str]) -> u64 { 0 }
}

/// REST server implementation (configured with [ApiConfig])
///
/// This struct implements the [Service] trait in order to use it with
/// [hyper::server::Builder::serve].
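///
/// A minimal usage sketch (illustrative, not part of the original sources); it
/// assumes an async context and a hypothetical `make_api_config()` helper standing
/// in for however the surrounding daemon builds its [ApiConfig]:
///
/// ```ignore
/// let config = make_api_config()?; // hypothetical helper
/// let rest_server = RestServer::new(config);
///
/// // Serve plain TCP connections via the `Service<&AddrStream>` impl below.
/// let addr = ([127, 0, 0, 1], 8007).into();
/// hyper::Server::bind(&addr).serve(rest_server).await?;
/// ```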
pub struct RestServer {
    api_config: Arc<ApiConfig>,
}

const MAX_URI_QUERY_LENGTH: usize = 3072;
const CHUNK_SIZE_LIMIT: u64 = 32 * 1024;

impl RestServer {
    /// Creates a new instance.
    pub fn new(api_config: ApiConfig) -> Self {
        Self {
            api_config: Arc::new(api_config),
        }
    }
}

impl Service<&Pin<Box<tokio_openssl::SslStream<RateLimitedStream<tokio::net::TcpStream>>>>>
    for RestServer
{
    type Response = ApiService;
    type Error = Error;
    type Future = Pin<Box<dyn Future<Output = Result<ApiService, Error>> + Send>>;

    fn poll_ready(&mut self, _cx: &mut Context) -> Poll<Result<(), Self::Error>> {
        Poll::Ready(Ok(()))
    }

    fn call(
        &mut self,
        ctx: &Pin<Box<tokio_openssl::SslStream<RateLimitedStream<tokio::net::TcpStream>>>>,
    ) -> Self::Future {
        match ctx.get_ref().peer_addr() {
            Err(err) => future::err(format_err!("unable to get peer address - {}", err)).boxed(),
            Ok(peer) => future::ok(ApiService {
                peer,
                api_config: self.api_config.clone(),
            })
            .boxed(),
        }
    }
}

impl Service<&Pin<Box<tokio_openssl::SslStream<tokio::net::TcpStream>>>>
    for RestServer
{
    type Response = ApiService;
    type Error = Error;
    type Future = Pin<Box<dyn Future<Output = Result<ApiService, Error>> + Send>>;

    fn poll_ready(&mut self, _cx: &mut Context) -> Poll<Result<(), Self::Error>> {
        Poll::Ready(Ok(()))
    }

    fn call(
        &mut self,
        ctx: &Pin<Box<tokio_openssl::SslStream<tokio::net::TcpStream>>>,
    ) -> Self::Future {
        match ctx.get_ref().peer_addr() {
            Err(err) => future::err(format_err!("unable to get peer address - {}", err)).boxed(),
            Ok(peer) => future::ok(ApiService {
                peer,
                api_config: self.api_config.clone(),
            })
            .boxed(),
        }
    }
}

impl Service<&hyper::server::conn::AddrStream> for RestServer {
    type Response = ApiService;
    type Error = Error;
    type Future = Pin<Box<dyn Future<Output = Result<ApiService, Error>> + Send>>;

    fn poll_ready(&mut self, _cx: &mut Context) -> Poll<Result<(), Self::Error>> {
        Poll::Ready(Ok(()))
    }

    fn call(&mut self, ctx: &hyper::server::conn::AddrStream) -> Self::Future {
        let peer = ctx.remote_addr();
        future::ok(ApiService {
            peer,
            api_config: self.api_config.clone(),
        })
        .boxed()
    }
}

impl Service<&tokio::net::UnixStream> for RestServer {
    type Response = ApiService;
    type Error = Error;
    type Future = Pin<Box<dyn Future<Output = Result<ApiService, Error>> + Send>>;

    fn poll_ready(&mut self, _cx: &mut Context) -> Poll<Result<(), Self::Error>> {
        Poll::Ready(Ok(()))
    }

    fn call(&mut self, _ctx: &tokio::net::UnixStream) -> Self::Future {
        // TODO: Find a way to actually represent the vsock peer in the ApiService struct - for now
        // it doesn't really matter, so just use a fake IP address
        let fake_peer = "0.0.0.0:807".parse().unwrap();
        future::ok(ApiService {
            peer: fake_peer,
            api_config: self.api_config.clone(),
        })
        .boxed()
    }
}

// Helper [Service] containing the peer address
//
// The lower level connection [Service] implementation on
// [RestServer] extracts the peer address and returns an [ApiService].
//
// Rust wants this type 'pub' here (else we get 'private type `ApiService`
// in public interface'). The type is still private because the crate does
// not export it.
pub struct ApiService {
    pub peer: std::net::SocketAddr,
    pub api_config: Arc<ApiConfig>,
}

fn log_response(
    logfile: Option<&Arc<Mutex<FileLogger>>>,
    peer: &std::net::SocketAddr,
    method: hyper::Method,
    path_query: &str,
    resp: &Response<Body>,
    user_agent: Option<String>,
) {
    if resp.extensions().get::<NoLogExtension>().is_some() {
        return;
    };

    // we also log URL-too-long requests, so avoid messages bigger than PIPE_BUF (4k on Linux)
    // to profit from atomicity guarantees for O_APPEND opened logfiles
    let path = &path_query[..MAX_URI_QUERY_LENGTH.min(path_query.len())];

    let status = resp.status();
    if !(status.is_success() || status.is_informational()) {
        let reason = status.canonical_reason().unwrap_or("unknown reason");

        let message = match resp.extensions().get::<ErrorMessageExtension>() {
            Some(data) => &data.0,
            None => "request failed",
        };

        log::error!(
            "{} {}: {} {}: [client {}] {}",
            method.as_str(),
            path,
            status.as_str(),
            reason,
            peer,
            message
        );
    }
    if let Some(logfile) = logfile {
        let auth_id = match resp.extensions().get::<AuthStringExtension>() {
            Some(AuthStringExtension(auth_id)) => auth_id.clone(),
            None => "-".to_string(),
        };
        let now = proxmox_time::epoch_i64();
        // time format which apache/nginx use (by default), copied from pve-http-server
        let datetime = proxmox_time::strftime_local("%d/%m/%Y:%H:%M:%S %z", now)
            .unwrap_or_else(|_| "-".to_string());

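        // The resulting line resembles the common access-log style, e.g.
        // (illustrative values only):
        //   192.0.2.1 - root@pam [20/05/2022:12:00:00 +0200] "GET /api2/json/version" 200 57 curl/7.82.0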
        logfile.lock().unwrap().log(format!(
            "{} - {} [{}] \"{} {}\" {} {} {}",
            peer.ip(),
            auth_id,
            datetime,
            method.as_str(),
            path,
            status.as_str(),
            resp.body().size_hint().lower(),
            user_agent.unwrap_or_else(|| "-".to_string()),
        ));
    }
}

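// Extracts the original client address from an RFC 7239 `Forwarded` header,
// e.g. `Forwarded: for="192.0.2.60:4711"` (illustrative value); anything that
// does not parse as a socket address is ignored.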
fn get_proxied_peer(headers: &HeaderMap) -> Option<std::net::SocketAddr> {
    lazy_static! {
        static ref RE: Regex = Regex::new(r#"for="([^"]+)""#).unwrap();
    }
    let forwarded = headers.get(header::FORWARDED)?.to_str().ok()?;
    let capture = RE.captures(forwarded)?;
    let rhost = capture.get(1)?.as_str();

    rhost.parse().ok()
}

fn get_user_agent(headers: &HeaderMap) -> Option<String> {
    let agent = headers.get(header::USER_AGENT)?.to_str();
    agent
        .map(|s| {
            let mut s = s.to_owned();
            s.truncate(128);
            s
        })
        .ok()
}

impl Service<Request<Body>> for ApiService {
    type Response = Response<Body>;
    type Error = Error;
    #[allow(clippy::type_complexity)]
    type Future = Pin<Box<dyn Future<Output = Result<Self::Response, Self::Error>> + Send>>;

    fn poll_ready(&mut self, _cx: &mut Context) -> Poll<Result<(), Self::Error>> {
        Poll::Ready(Ok(()))
    }

    fn call(&mut self, req: Request<Body>) -> Self::Future {
        let path = req.uri().path_and_query().unwrap().as_str().to_owned();
        let method = req.method().clone();
        let user_agent = get_user_agent(req.headers());

        let config = Arc::clone(&self.api_config);
        let peer = match get_proxied_peer(req.headers()) {
            Some(proxied_peer) => proxied_peer,
            None => self.peer,
        };
        async move {
            let response = match handle_request(Arc::clone(&config), req, &peer).await {
                Ok(response) => response,
                Err(err) => {
                    let (err, code) = match err.downcast_ref::<HttpError>() {
                        Some(apierr) => (apierr.message.clone(), apierr.code),
                        _ => (err.to_string(), StatusCode::BAD_REQUEST),
                    };
                    Response::builder()
                        .status(code)
                        .extension(ErrorMessageExtension(err.to_string()))
                        .body(err.into())?
                }
            };
            let logger = config.get_access_log();
            log_response(logger, &peer, method, &path, &response, user_agent);
            Ok(response)
        }
        .boxed()
    }
}

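// Collects API call parameters from three sources - an x-www-form-urlencoded
// request body, the URI query string (minus the ExtJS `_dc` cache-buster) and
// the parameters matched from the URI path - and validates them against the
// method's parameter schema.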
fn parse_query_parameters<S: 'static + BuildHasher + Send>(
    param_schema: ParameterSchema,
    form: &str, // x-www-form-urlencoded body data
    parts: &Parts,
    uri_param: &HashMap<String, String, S>,
) -> Result<Value, Error> {
    let mut param_list: Vec<(String, String)> = vec![];

    if !form.is_empty() {
        for (k, v) in form_urlencoded::parse(form.as_bytes()).into_owned() {
            param_list.push((k, v));
        }
    }

    if let Some(query_str) = parts.uri.query() {
        for (k, v) in form_urlencoded::parse(query_str.as_bytes()).into_owned() {
            if k == "_dc" {
                // skip extjs "disable cache" parameter
                continue;
            }
            param_list.push((k, v));
        }
    }

    for (k, v) in uri_param {
        param_list.push((k.clone(), v.clone()));
    }

    let params = param_schema.parse_parameter_strings(&param_list, true)?;

    Ok(params)
}

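// Reads and parses the request body: `application/json` bodies are decoded as
// JSON (with URI path parameters merged in), `application/x-www-form-urlencoded`
// bodies go through parse_query_parameters(). The body size is limited to
// roughly 64 KiB.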
async fn get_request_parameters<S: 'static + BuildHasher + Send>(
    param_schema: ParameterSchema,
    parts: Parts,
    req_body: Body,
    uri_param: HashMap<String, String, S>,
) -> Result<Value, Error> {
    let mut is_json = false;

    if let Some(value) = parts.headers.get(header::CONTENT_TYPE) {
        match value.to_str().map(|v| v.split(';').next()) {
            Ok(Some("application/x-www-form-urlencoded")) => {
                is_json = false;
            }
            Ok(Some("application/json")) => {
                is_json = true;
            }
            _ => bail!("unsupported content type {:?}", value.to_str()),
        }
    }

    let body = TryStreamExt::map_err(req_body, |err| {
        http_err!(BAD_REQUEST, "Problems reading request body: {}", err)
    })
    .try_fold(Vec::new(), |mut acc, chunk| async move {
        // FIXME: max request body size?
        if acc.len() + chunk.len() < 64 * 1024 {
            acc.extend_from_slice(&*chunk);
            Ok(acc)
        } else {
            Err(http_err!(BAD_REQUEST, "Request body too large"))
        }
    })
    .await?;

    let utf8_data =
        std::str::from_utf8(&body).map_err(|err| format_err!("Request body not utf8: {}", err))?;

    if is_json {
        let mut params: Value = serde_json::from_str(utf8_data)?;
        for (k, v) in uri_param {
            if let Some((_optional, prop_schema)) = param_schema.lookup(&k) {
                params[&k] = prop_schema.parse_simple_value(&v)?;
            }
        }
        param_schema.verify_json(&params)?;
        return Ok(params);
    } else {
        parse_query_parameters(param_schema, utf8_data, &parts, &uri_param)
    }
}

struct NoLogExtension();

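// Forwards a "protected" API call over plain HTTP to the local service on
// 127.0.0.1:82, adding a `Forwarded` header with the original peer. The response
// is tagged with NoLogExtension so it is not logged a second time here.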
async fn proxy_protected_request(
    info: &'static ApiMethod,
    mut parts: Parts,
    req_body: Body,
    peer: &std::net::SocketAddr,
) -> Result<Response<Body>, Error> {
    let mut uri_parts = parts.uri.clone().into_parts();

    uri_parts.scheme = Some(http::uri::Scheme::HTTP);
    uri_parts.authority = Some(http::uri::Authority::from_static("127.0.0.1:82"));
    let new_uri = http::Uri::from_parts(uri_parts).unwrap();

    parts.uri = new_uri;

    let mut request = Request::from_parts(parts, req_body);
    request.headers_mut().insert(
        header::FORWARDED,
        format!("for=\"{}\";", peer).parse().unwrap(),
    );

    let reload_timezone = info.reload_timezone;

    let resp = hyper::client::Client::new()
        .request(request)
        .map_err(Error::from)
        .map_ok(|mut resp| {
            resp.extensions_mut().insert(NoLogExtension());
            resp
        })
        .await?;

    if reload_timezone {
        unsafe {
            tzset();
        }
    }

    Ok(resp)
}

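// Runs a matched API method: AsyncHttp handlers get the raw body plus parsed query
// parameters, Sync/Async handlers get fully parsed parameters. UNAUTHORIZED errors
// are delayed until ~3 seconds after the request started, and the response body is
// optionally deflate-compressed depending on the client's Accept-Encoding.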
pub(crate) async fn handle_api_request<Env: RpcEnvironment, S: 'static + BuildHasher + Send>(
    mut rpcenv: Env,
    info: &'static ApiMethod,
    formatter: &'static dyn OutputFormatter,
    parts: Parts,
    req_body: Body,
    uri_param: HashMap<String, String, S>,
) -> Result<Response<Body>, Error> {
    let delay_unauth_time = std::time::Instant::now() + std::time::Duration::from_millis(3000);
    let compression = extract_compression_method(&parts.headers);

    let result = match info.handler {
        ApiHandler::AsyncHttp(handler) => {
            let params = parse_query_parameters(info.parameters, "", &parts, &uri_param)?;
            (handler)(parts, req_body, params, info, Box::new(rpcenv)).await
        }
        ApiHandler::Sync(handler) => {
            let params =
                get_request_parameters(info.parameters, parts, req_body, uri_param).await?;
            (handler)(params, info, &mut rpcenv).map(|data| formatter.format_data(data, &rpcenv))
        }
        ApiHandler::Async(handler) => {
            let params =
                get_request_parameters(info.parameters, parts, req_body, uri_param).await?;
            (handler)(params, info, &mut rpcenv)
                .await
                .map(|data| formatter.format_data(data, &rpcenv))
        }
    };

    let mut resp = match result {
        Ok(resp) => resp,
        Err(err) => {
            if let Some(httperr) = err.downcast_ref::<HttpError>() {
                if httperr.code == StatusCode::UNAUTHORIZED {
                    tokio::time::sleep_until(Instant::from_std(delay_unauth_time)).await;
                }
            }
            formatter.format_error(err)
        }
    };

    let resp = match compression {
        Some(CompressionMethod::Deflate) => {
            resp.headers_mut().insert(
                header::CONTENT_ENCODING,
                CompressionMethod::Deflate.content_encoding(),
            );
            resp.map(|body| {
                Body::wrap_stream(DeflateEncoder::with_quality(
                    TryStreamExt::map_err(body, |err| {
                        proxmox_sys::io_format_err!("error during compression: {}", err)
                    }),
                    Level::Default,
                ))
            })
        }
        None => resp,
    };

    if info.reload_timezone {
        unsafe {
            tzset();
        }
    }

    Ok(resp)
}

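// Maps a file extension to a Content-Type plus a flag telling the download helpers
// that the content should not be compressed any further (e.g. images or archives).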
fn extension_to_content_type(filename: &Path) -> (&'static str, bool) {
    if let Some(ext) = filename.extension().and_then(|osstr| osstr.to_str()) {
        return match ext {
            "css" => ("text/css", false),
            "html" => ("text/html", false),
            "js" => ("application/javascript", false),
            "json" => ("application/json", false),
            "map" => ("application/json", false),
            "png" => ("image/png", true),
            "ico" => ("image/x-icon", true),
            "gif" => ("image/gif", true),
            "svg" => ("image/svg+xml", false),
            "jar" => ("application/java-archive", true),
            "woff" => ("application/font-woff", true),
            "woff2" => ("application/font-woff2", true),
513 "ttf" => ("application/font-snft", true),
514 "pdf" => ("application/pdf", true),
515 "epub" => ("application/epub+zip", true),
516 "mp3" => ("audio/mpeg", true),
517 "oga" => ("audio/ogg", true),
518 "tgz" => ("application/x-compressed-tar", true),
519 _ => ("application/octet-stream", false),
520 };
521 }
522
523 ("application/octet-stream", false)
524 }
525
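// Serves a small static file by reading it completely into memory (optionally
// deflate-compressing it first); used for files below CHUNK_SIZE_LIMIT.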
async fn simple_static_file_download(
    filename: PathBuf,
    content_type: &'static str,
    compression: Option<CompressionMethod>,
) -> Result<Response<Body>, Error> {
    use tokio::io::AsyncReadExt;

    let mut file = File::open(filename)
        .await
        .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;

    let mut data: Vec<u8> = Vec::new();

    let mut response = match compression {
        Some(CompressionMethod::Deflate) => {
            let mut enc = DeflateEncoder::with_quality(data, Level::Default);
            enc.compress_vec(&mut file, CHUNK_SIZE_LIMIT as usize)
                .await?;
            let mut response = Response::new(enc.into_inner().into());
            response.headers_mut().insert(
                header::CONTENT_ENCODING,
                CompressionMethod::Deflate.content_encoding(),
            );
            response
        }
        None => {
            file.read_to_end(&mut data)
                .await
                .map_err(|err| http_err!(BAD_REQUEST, "File read failed: {}", err))?;
            Response::new(data.into())
        }
    };

    response.headers_mut().insert(
        header::CONTENT_TYPE,
        header::HeaderValue::from_static(content_type),
    );

    Ok(response)
}

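// Streams a larger static file as the response body (optionally through a deflate
// encoder) instead of buffering it in memory.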
async fn chunked_static_file_download(
    filename: PathBuf,
    content_type: &'static str,
    compression: Option<CompressionMethod>,
) -> Result<Response<Body>, Error> {
    let mut resp = Response::builder()
        .status(StatusCode::OK)
        .header(header::CONTENT_TYPE, content_type);

    let file = File::open(filename)
        .await
        .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;

    let body = match compression {
        Some(CompressionMethod::Deflate) => {
            resp = resp.header(
                header::CONTENT_ENCODING,
                CompressionMethod::Deflate.content_encoding(),
            );
            Body::wrap_stream(DeflateEncoder::with_quality(
                AsyncReaderStream::new(file),
                Level::Default,
            ))
        }
        None => Body::wrap_stream(AsyncReaderStream::new(file)),
    };

    Ok(resp.body(body).unwrap())
}

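// Stats the file and picks buffered vs. streaming delivery based on CHUNK_SIZE_LIMIT;
// types flagged as incompressible are never deflate-compressed.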
async fn handle_static_file_download(
    filename: PathBuf,
    compression: Option<CompressionMethod>,
) -> Result<Response<Body>, Error> {
    let metadata = tokio::fs::metadata(filename.clone())
        .map_err(|err| http_err!(BAD_REQUEST, "File access problems: {}", err))
        .await?;

    let (content_type, nocomp) = extension_to_content_type(&filename);
    let compression = if nocomp { None } else { compression };

    if metadata.len() < CHUNK_SIZE_LIMIT {
        simple_static_file_download(filename, content_type, compression).await
    } else {
        chunked_static_file_download(filename, content_type, compression).await
    }
}

// FIXME: support handling multiple compression methods
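// Picks the first entry in the `Accept-Encoding` request header that parses as a
// supported CompressionMethod; e.g. a header of `deflate, gzip` (illustrative)
// would select deflate here.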
fn extract_compression_method(headers: &http::HeaderMap) -> Option<CompressionMethod> {
    if let Some(Ok(encodings)) = headers.get(header::ACCEPT_ENCODING).map(|v| v.to_str()) {
        for encoding in encodings.split(&[',', ' '][..]) {
            if let Ok(method) = encoding.parse() {
                return Some(method);
            }
        }
    }
    None
}

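// Main request dispatcher: `/api2/<format>/...` paths are routed to API methods
// (with authentication, permission checks and optional proxying of protected
// calls), everything else is served as the index page or a static file (GET only).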
async fn handle_request(
    api: Arc<ApiConfig>,
    req: Request<Body>,
    peer: &std::net::SocketAddr,
) -> Result<Response<Body>, Error> {
    let (parts, body) = req.into_parts();
    let method = parts.method.clone();
    let (path, components) = normalize_uri_path(parts.uri.path())?;

    let comp_len = components.len();

    let query = parts.uri.query().unwrap_or_default();
    if path.len() + query.len() > MAX_URI_QUERY_LENGTH {
        return Ok(Response::builder()
            .status(StatusCode::URI_TOO_LONG)
            .body("".into())
            .unwrap());
    }

    let env_type = api.env_type();
    let mut rpcenv = RestEnvironment::new(env_type, Arc::clone(&api));

    rpcenv.set_client_ip(Some(*peer));

    let delay_unauth_time = std::time::Instant::now() + std::time::Duration::from_millis(3000);
    let access_forbidden_time = std::time::Instant::now() + std::time::Duration::from_millis(500);

    if comp_len >= 1 && components[0] == "api2" {
        if comp_len >= 2 {
            let format = components[1];

            let formatter: &dyn OutputFormatter = match format {
                "json" => JSON_FORMATTER,
                "extjs" => EXTJS_FORMATTER,
                _ => bail!("Unsupported output format '{}'.", format),
            };

            let mut uri_param = HashMap::new();
            let api_method = api.find_method(&components[2..], method.clone(), &mut uri_param);

            let mut auth_required = true;
            if let Some(api_method) = api_method {
                if let Permission::World = *api_method.access.permission {
                    auth_required = false; // no auth for endpoints with World permission
                }
            }

            let mut user_info: Box<dyn UserInformation + Send + Sync> = Box::new(EmptyUserInformation {});

            if auth_required {
                match api.check_auth(&parts.headers, &method).await {
                    Ok((authid, info)) => {
                        rpcenv.set_auth_id(Some(authid));
                        user_info = info;
                    }
                    Err(auth_err) => {
                        let err = match auth_err {
                            AuthError::Generic(err) => err,
                            AuthError::NoData => {
                                format_err!("no authentication credentials provided.")
                            }
                        };
                        // fixme: log Username??
                        rpcenv.log_failed_auth(None, &err.to_string());

                        // always delay unauthorized calls by 3 seconds (from start of request)
                        let err = http_err!(UNAUTHORIZED, "authentication failed - {}", err);
                        tokio::time::sleep_until(Instant::from_std(delay_unauth_time)).await;
                        return Ok(formatter.format_error(err));
                    }
                }
            }

            match api_method {
                None => {
                    let err = http_err!(NOT_FOUND, "Path '{}' not found.", path);
                    return Ok(formatter.format_error(err));
                }
                Some(api_method) => {
                    let auth_id = rpcenv.get_auth_id();
                    let user_info = user_info;

                    if !check_api_permission(
                        api_method.access.permission,
                        auth_id.as_deref(),
                        &uri_param,
                        user_info.as_ref(),
                    ) {
                        let err = http_err!(FORBIDDEN, "permission check failed");
                        tokio::time::sleep_until(Instant::from_std(access_forbidden_time)).await;
                        return Ok(formatter.format_error(err));
                    }

                    let result = if api_method.protected && env_type == RpcEnvironmentType::PUBLIC {
                        proxy_protected_request(api_method, parts, body, peer).await
                    } else {
                        handle_api_request(rpcenv, api_method, formatter, parts, body, uri_param)
                            .await
                    };

                    let mut response = match result {
                        Ok(resp) => resp,
                        Err(err) => formatter.format_error(err),
                    };

                    if let Some(auth_id) = auth_id {
                        response.extensions_mut().insert(AuthStringExtension(auth_id));
                    }

                    return Ok(response);
                }
            }
        }
    } else {
        // no auth required for accessing files!

        if method != hyper::Method::GET {
            bail!("Unsupported HTTP method {}", method);
        }

        if comp_len == 0 {
            match api.check_auth(&parts.headers, &method).await {
                Ok((auth_id, _user_info)) => {
                    rpcenv.set_auth_id(Some(auth_id));
                    return Ok(api.get_index(rpcenv, parts).await);
                }
                Err(AuthError::Generic(_)) => {
                    tokio::time::sleep_until(Instant::from_std(delay_unauth_time)).await;
                }
                Err(AuthError::NoData) => {}
            }
            return Ok(api.get_index(rpcenv, parts).await);
        } else {
            let filename = api.find_alias(&components);
            let compression = extract_compression_method(&parts.headers);
            return handle_static_file_download(filename, compression).await;
        }
    }

    Err(http_err!(NOT_FOUND, "Path '{}' not found.", path))
767 }