// proxmox-backup: src/server/rest.rs
use std::collections::HashMap;
use std::future::Future;
use std::hash::BuildHasher;
use std::path::{Path, PathBuf};
use std::pin::Pin;
use std::sync::{Arc, Mutex};
use std::task::{Context, Poll};

use anyhow::{bail, format_err, Error};
use futures::future::{self, FutureExt, TryFutureExt};
use futures::stream::TryStreamExt;
use hyper::body::HttpBody;
use hyper::header::{self, HeaderMap};
use hyper::http::request::Parts;
use hyper::{Body, Request, Response, StatusCode};
use lazy_static::lazy_static;
use regex::Regex;
use serde_json::Value;
use tokio::fs::File;
use tokio::time::Instant;
use url::form_urlencoded;

use proxmox::api::schema::{
    parse_parameter_strings, parse_simple_value, verify_json_object, ObjectSchemaType,
    ParameterSchema,
};
use proxmox::api::{
    check_api_permission, ApiHandler, ApiMethod, HttpError, Permission, RpcEnvironment,
    RpcEnvironmentType,
};
use proxmox::http_err;
use proxmox::tools::fs::CreateOptions;

use pbs_tools::compression::{DeflateEncoder, Level};
use pbs_tools::stream::AsyncReaderStream;
use pbs_api_types::Authid;
use proxmox_rest_server::{
    ApiConfig, FileLogger, FileLogOptions, AuthError, RestEnvironment, CompressionMethod,
    extract_cookie, normalize_uri_path,
};
use proxmox_rest_server::formatter::*;

use pbs_config::CachedUserInfo;

extern "C" {
    fn tzset();
}

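/// REST server implementation - a hyper "make service" around a shared [`ApiConfig`].
///
/// The `tower_service::Service` impls below create one [`ApiService`] per accepted
/// connection (TLS stream, plain TCP stream or unix socket).
///
/// A minimal usage sketch (for illustration only; the acceptor setup and the
/// `ApiConfig` construction are assumptions, not part of this file):
///
/// ```ignore
/// let rest_server = RestServer::new(api_config);
/// hyper::Server::builder(acceptor) // anything yielding TCP/TLS connections
///     .serve(rest_server)
///     .await?;
/// ```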
pub struct RestServer {
    pub api_config: Arc<ApiConfig>,
}

const MAX_URI_QUERY_LENGTH: usize = 3072;
const CHUNK_SIZE_LIMIT: u64 = 32 * 1024;

impl RestServer {
    pub fn new(api_config: ApiConfig) -> Self {
        Self {
            api_config: Arc::new(api_config),
        }
    }
}

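// Connection-level `Service` impls: given a freshly accepted connection, each resolves
// the peer address (or substitutes a placeholder for unix/vsock peers) and returns an
// `ApiService` that handles the HTTP requests on that connection.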
impl tower_service::Service<&Pin<Box<tokio_openssl::SslStream<tokio::net::TcpStream>>>>
    for RestServer
{
    type Response = ApiService;
    type Error = Error;
    type Future = Pin<Box<dyn Future<Output = Result<ApiService, Error>> + Send>>;

    fn poll_ready(&mut self, _cx: &mut Context) -> Poll<Result<(), Self::Error>> {
        Poll::Ready(Ok(()))
    }

    fn call(
        &mut self,
        ctx: &Pin<Box<tokio_openssl::SslStream<tokio::net::TcpStream>>>,
    ) -> Self::Future {
        match ctx.get_ref().peer_addr() {
            Err(err) => future::err(format_err!("unable to get peer address - {}", err)).boxed(),
            Ok(peer) => future::ok(ApiService {
                peer,
                api_config: self.api_config.clone(),
            })
            .boxed(),
        }
    }
}

impl tower_service::Service<&tokio::net::TcpStream> for RestServer {
    type Response = ApiService;
    type Error = Error;
    type Future = Pin<Box<dyn Future<Output = Result<ApiService, Error>> + Send>>;

    fn poll_ready(&mut self, _cx: &mut Context) -> Poll<Result<(), Self::Error>> {
        Poll::Ready(Ok(()))
    }

    fn call(&mut self, ctx: &tokio::net::TcpStream) -> Self::Future {
        match ctx.peer_addr() {
            Err(err) => future::err(format_err!("unable to get peer address - {}", err)).boxed(),
            Ok(peer) => future::ok(ApiService {
                peer,
                api_config: self.api_config.clone(),
            })
            .boxed(),
        }
    }
}

impl tower_service::Service<&tokio::net::UnixStream> for RestServer {
    type Response = ApiService;
    type Error = Error;
    type Future = Pin<Box<dyn Future<Output = Result<ApiService, Error>> + Send>>;

    fn poll_ready(&mut self, _cx: &mut Context) -> Poll<Result<(), Self::Error>> {
        Poll::Ready(Ok(()))
    }

    fn call(&mut self, _ctx: &tokio::net::UnixStream) -> Self::Future {
        // TODO: Find a way to actually represent the vsock peer in the ApiService struct - for now
        // it doesn't really matter, so just use a fake IP address
        let fake_peer = "0.0.0.0:807".parse().unwrap();
        future::ok(ApiService {
            peer: fake_peer,
            api_config: self.api_config.clone(),
        })
        .boxed()
    }
}

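/// Per-connection HTTP service: remembers the peer address and shares the [`ApiConfig`];
/// each incoming `Request<Body>` is dispatched to `handle_request`.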
pub struct ApiService {
    pub peer: std::net::SocketAddr,
    pub api_config: Arc<ApiConfig>,
}

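// Log failed requests to the system log and, if a request log file is configured, write
// one access-log line per response in an apache/nginx-like access log format.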
fn log_response(
    logfile: Option<&Arc<Mutex<FileLogger>>>,
    peer: &std::net::SocketAddr,
    method: hyper::Method,
    path_query: &str,
    resp: &Response<Body>,
    user_agent: Option<String>,
) {
    if resp.extensions().get::<NoLogExtension>().is_some() {
        return;
    };

    // we also log URL-too-long requests, so avoid messages bigger than PIPE_BUF (4k on Linux)
    // to profit from the atomicity guarantees for O_APPEND opened logfiles
    let path = &path_query[..MAX_URI_QUERY_LENGTH.min(path_query.len())];

    let status = resp.status();
    if !(status.is_success() || status.is_informational()) {
        let reason = status.canonical_reason().unwrap_or("unknown reason");

        let message = match resp.extensions().get::<ErrorMessageExtension>() {
            Some(data) => &data.0,
            None => "request failed",
        };

        log::error!(
            "{} {}: {} {}: [client {}] {}",
            method.as_str(),
            path,
            status.as_str(),
            reason,
            peer,
            message
        );
    }
    if let Some(logfile) = logfile {
        let auth_id = match resp.extensions().get::<Authid>() {
            Some(auth_id) => auth_id.to_string(),
            None => "-".to_string(),
        };
        let now = proxmox::tools::time::epoch_i64();
        // time format which apache/nginx use (by default), copied from pve-http-server
        let datetime = proxmox::tools::time::strftime_local("%d/%m/%Y:%H:%M:%S %z", now)
            .unwrap_or_else(|_| "-".to_string());

        logfile.lock().unwrap().log(format!(
            "{} - {} [{}] \"{} {}\" {} {} {}",
            peer.ip(),
            auth_id,
            datetime,
            method.as_str(),
            path,
            status.as_str(),
            resp.body().size_hint().lower(),
            user_agent.unwrap_or_else(|| "-".to_string()),
        ));
    }
}
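
/// Open the authentication log file (owned by the backup user) in append mode, with a
/// time prefix on every logged line.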
pub fn auth_logger() -> Result<FileLogger, Error> {
    let backup_user = pbs_config::backup_user()?;

    let file_opts = CreateOptions::new()
        .owner(backup_user.uid)
        .group(backup_user.gid);

    let logger_options = FileLogOptions {
        append: true,
        prefix_time: true,
        file_opts,
        ..Default::default()
    };
    FileLogger::new(pbs_buildcfg::API_AUTH_LOG_FN, logger_options)
}

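// Extract the original client address from an RFC 7239 `Forwarded` header
// (the `for="..."` parameter), if one is present and parseable.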
fn get_proxied_peer(headers: &HeaderMap) -> Option<std::net::SocketAddr> {
    lazy_static! {
        static ref RE: Regex = Regex::new(r#"for="([^"]+)""#).unwrap();
    }
    let forwarded = headers.get(header::FORWARDED)?.to_str().ok()?;
    let capture = RE.captures(&forwarded)?;
    let rhost = capture.get(1)?.as_str();

    rhost.parse().ok()
}

fn get_user_agent(headers: &HeaderMap) -> Option<String> {
    let agent = headers.get(header::USER_AGENT)?.to_str();
    agent
        .map(|s| {
            let mut s = s.to_owned();
            s.truncate(128);
            s
        })
        .ok()
}

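// Request-level service: determine the effective peer (preferring a proxied `Forwarded`
// header over the connection's peer address), dispatch to `handle_request`, turn errors
// into plain HTTP error responses and log the result.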
impl tower_service::Service<Request<Body>> for ApiService {
    type Response = Response<Body>;
    type Error = Error;
    #[allow(clippy::type_complexity)]
    type Future = Pin<Box<dyn Future<Output = Result<Self::Response, Self::Error>> + Send>>;

    fn poll_ready(&mut self, _cx: &mut Context) -> Poll<Result<(), Self::Error>> {
        Poll::Ready(Ok(()))
    }

    fn call(&mut self, req: Request<Body>) -> Self::Future {
        let path = req.uri().path_and_query().unwrap().as_str().to_owned();
        let method = req.method().clone();
        let user_agent = get_user_agent(req.headers());

        let config = Arc::clone(&self.api_config);
        let peer = match get_proxied_peer(req.headers()) {
            Some(proxied_peer) => proxied_peer,
            None => self.peer,
        };
        async move {
            let response = match handle_request(Arc::clone(&config), req, &peer).await {
                Ok(response) => response,
                Err(err) => {
                    let (err, code) = match err.downcast_ref::<HttpError>() {
                        Some(apierr) => (apierr.message.clone(), apierr.code),
                        _ => (err.to_string(), StatusCode::BAD_REQUEST),
                    };
                    Response::builder()
                        .status(code)
                        .extension(ErrorMessageExtension(err.to_string()))
                        .body(err.into())?
                }
            };
            let logger = config.get_file_log();
            log_response(logger, &peer, method, &path, &response, user_agent);
            Ok(response)
        }
        .boxed()
    }
}

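// Collect parameters from the urlencoded body, the URI query string (skipping the extjs
// `_dc` cache-buster) and the matched URI path segments, then validate them against the
// method's parameter schema.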
fn parse_query_parameters<S: 'static + BuildHasher + Send>(
    param_schema: ParameterSchema,
    form: &str, // x-www-form-urlencoded body data
    parts: &Parts,
    uri_param: &HashMap<String, String, S>,
) -> Result<Value, Error> {
    let mut param_list: Vec<(String, String)> = vec![];

    if !form.is_empty() {
        for (k, v) in form_urlencoded::parse(form.as_bytes()).into_owned() {
            param_list.push((k, v));
        }
    }

    if let Some(query_str) = parts.uri.query() {
        for (k, v) in form_urlencoded::parse(query_str.as_bytes()).into_owned() {
            if k == "_dc" {
                continue;
            } // skip extjs "disable cache" parameter
            param_list.push((k, v));
        }
    }

    for (k, v) in uri_param {
        param_list.push((k.clone(), v.clone()));
    }

    let params = parse_parameter_strings(&param_list, param_schema, true)?;

    Ok(params)
}

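// Read the request body (JSON or x-www-form-urlencoded, limited to 64 KiB), merge it
// with query-string and URI parameters and verify the result against the parameter
// schema.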
async fn get_request_parameters<S: 'static + BuildHasher + Send>(
    param_schema: ParameterSchema,
    parts: Parts,
    req_body: Body,
    uri_param: HashMap<String, String, S>,
) -> Result<Value, Error> {
    let mut is_json = false;

    if let Some(value) = parts.headers.get(header::CONTENT_TYPE) {
        match value.to_str().map(|v| v.split(';').next()) {
            Ok(Some("application/x-www-form-urlencoded")) => {
                is_json = false;
            }
            Ok(Some("application/json")) => {
                is_json = true;
            }
            _ => bail!("unsupported content type {:?}", value.to_str()),
        }
    }

    let body = TryStreamExt::map_err(req_body, |err| {
        http_err!(BAD_REQUEST, "Problems reading request body: {}", err)
    })
    .try_fold(Vec::new(), |mut acc, chunk| async move {
        // FIXME: max request body size?
        if acc.len() + chunk.len() < 64 * 1024 {
            acc.extend_from_slice(&*chunk);
            Ok(acc)
        } else {
            Err(http_err!(BAD_REQUEST, "Request body too large"))
        }
    })
    .await?;

    let utf8_data =
        std::str::from_utf8(&body).map_err(|err| format_err!("Request body not utf8: {}", err))?;

    if is_json {
        let mut params: Value = serde_json::from_str(utf8_data)?;
        for (k, v) in uri_param {
            if let Some((_optional, prop_schema)) = param_schema.lookup(&k) {
                params[&k] = parse_simple_value(&v, prop_schema)?;
            }
        }
        verify_json_object(&params, &param_schema)?;
        return Ok(params);
    } else {
        parse_query_parameters(param_schema, utf8_data, &parts, &uri_param)
    }
}

struct NoLogExtension();

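// Forward a `protected` API call to the privileged local daemon on 127.0.0.1:82, passing
// the original client address along in a `Forwarded` header. The proxied response is
// tagged with `NoLogExtension` so it does not get logged a second time.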
async fn proxy_protected_request(
    info: &'static ApiMethod,
    mut parts: Parts,
    req_body: Body,
    peer: &std::net::SocketAddr,
) -> Result<Response<Body>, Error> {
    let mut uri_parts = parts.uri.clone().into_parts();

    uri_parts.scheme = Some(http::uri::Scheme::HTTP);
    uri_parts.authority = Some(http::uri::Authority::from_static("127.0.0.1:82"));
    let new_uri = http::Uri::from_parts(uri_parts).unwrap();

    parts.uri = new_uri;

    let mut request = Request::from_parts(parts, req_body);
    request.headers_mut().insert(
        header::FORWARDED,
        format!("for=\"{}\";", peer).parse().unwrap(),
    );

    let reload_timezone = info.reload_timezone;

    let resp = hyper::client::Client::new()
        .request(request)
        .map_err(Error::from)
        .map_ok(|mut resp| {
            resp.extensions_mut().insert(NoLogExtension());
            resp
        })
        .await?;

    if reload_timezone {
        unsafe {
            tzset();
        }
    }

    Ok(resp)
}

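/// Run a resolved API method: parse its parameters, invoke the sync, async or low-level
/// HTTP handler, format the result (or error) with the given output formatter, and apply
/// deflate compression when the client accepts it. Unauthorized errors are delayed by
/// roughly 3 seconds from the start of the call.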
pub async fn handle_api_request<Env: RpcEnvironment, S: 'static + BuildHasher + Send>(
    mut rpcenv: Env,
    info: &'static ApiMethod,
    formatter: &'static OutputFormatter,
    parts: Parts,
    req_body: Body,
    uri_param: HashMap<String, String, S>,
) -> Result<Response<Body>, Error> {
    let delay_unauth_time = std::time::Instant::now() + std::time::Duration::from_millis(3000);
    let compression = extract_compression_method(&parts.headers);

    let result = match info.handler {
        ApiHandler::AsyncHttp(handler) => {
            let params = parse_query_parameters(info.parameters, "", &parts, &uri_param)?;
            (handler)(parts, req_body, params, info, Box::new(rpcenv)).await
        }
        ApiHandler::Sync(handler) => {
            let params =
                get_request_parameters(info.parameters, parts, req_body, uri_param).await?;
            (handler)(params, info, &mut rpcenv).map(|data| (formatter.format_data)(data, &rpcenv))
        }
        ApiHandler::Async(handler) => {
            let params =
                get_request_parameters(info.parameters, parts, req_body, uri_param).await?;
            (handler)(params, info, &mut rpcenv)
                .await
                .map(|data| (formatter.format_data)(data, &rpcenv))
        }
    };

    let mut resp = match result {
        Ok(resp) => resp,
        Err(err) => {
            if let Some(httperr) = err.downcast_ref::<HttpError>() {
                if httperr.code == StatusCode::UNAUTHORIZED {
                    tokio::time::sleep_until(Instant::from_std(delay_unauth_time)).await;
                }
            }
            (formatter.format_error)(err)
        }
    };

    let resp = match compression {
        Some(CompressionMethod::Deflate) => {
            resp.headers_mut().insert(
                header::CONTENT_ENCODING,
                CompressionMethod::Deflate.content_encoding(),
            );
            resp.map(|body| {
                Body::wrap_stream(DeflateEncoder::with_quality(
                    TryStreamExt::map_err(body, |err| {
                        proxmox::io_format_err!("error during compression: {}", err)
                    }),
                    Level::Default,
                ))
            })
        }
        None => resp,
    };

    if info.reload_timezone {
        unsafe {
            tzset();
        }
    }

    Ok(resp)
}

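// Map a file extension to its content type plus a flag marking content that is already
// compressed and should not be deflated again.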
fn extension_to_content_type(filename: &Path) -> (&'static str, bool) {
    if let Some(ext) = filename.extension().and_then(|osstr| osstr.to_str()) {
        return match ext {
            "css" => ("text/css", false),
            "html" => ("text/html", false),
            "js" => ("application/javascript", false),
            "json" => ("application/json", false),
            "map" => ("application/json", false),
            "png" => ("image/png", true),
            "ico" => ("image/x-icon", true),
            "gif" => ("image/gif", true),
            "svg" => ("image/svg+xml", false),
            "jar" => ("application/java-archive", true),
            "woff" => ("application/font-woff", true),
            "woff2" => ("application/font-woff2", true),
            "ttf" => ("application/font-sfnt", true),
            "pdf" => ("application/pdf", true),
            "epub" => ("application/epub+zip", true),
            "mp3" => ("audio/mpeg", true),
            "oga" => ("audio/ogg", true),
            "tgz" => ("application/x-compressed-tar", true),
            _ => ("application/octet-stream", false),
        };
    }

    ("application/octet-stream", false)
}

async fn simple_static_file_download(
    filename: PathBuf,
    content_type: &'static str,
    compression: Option<CompressionMethod>,
) -> Result<Response<Body>, Error> {
    use tokio::io::AsyncReadExt;

    let mut file = File::open(filename)
        .await
        .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;

    let mut data: Vec<u8> = Vec::new();

    let mut response = match compression {
        Some(CompressionMethod::Deflate) => {
            let mut enc = DeflateEncoder::with_quality(data, Level::Default);
            enc.compress_vec(&mut file, CHUNK_SIZE_LIMIT as usize)
                .await?;
            let mut response = Response::new(enc.into_inner().into());
            response.headers_mut().insert(
                header::CONTENT_ENCODING,
                CompressionMethod::Deflate.content_encoding(),
            );
            response
        }
        None => {
            file.read_to_end(&mut data)
                .await
                .map_err(|err| http_err!(BAD_REQUEST, "File read failed: {}", err))?;
            Response::new(data.into())
        }
    };

    response.headers_mut().insert(
        header::CONTENT_TYPE,
        header::HeaderValue::from_static(content_type),
    );

    Ok(response)
}

async fn chunked_static_file_download(
    filename: PathBuf,
    content_type: &'static str,
    compression: Option<CompressionMethod>,
) -> Result<Response<Body>, Error> {
    let mut resp = Response::builder()
        .status(StatusCode::OK)
        .header(header::CONTENT_TYPE, content_type);

    let file = File::open(filename)
        .await
        .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;

    let body = match compression {
        Some(CompressionMethod::Deflate) => {
            resp = resp.header(
                header::CONTENT_ENCODING,
                CompressionMethod::Deflate.content_encoding(),
            );
            Body::wrap_stream(DeflateEncoder::with_quality(
                AsyncReaderStream::new(file),
                Level::Default,
            ))
        }
        None => Body::wrap_stream(AsyncReaderStream::new(file)),
    };

    Ok(resp.body(body).unwrap())
}

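// Serve a static file: files smaller than CHUNK_SIZE_LIMIT are read into memory, larger
// ones are streamed chunk by chunk, optionally compressed with deflate.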
async fn handle_static_file_download(
    filename: PathBuf,
    compression: Option<CompressionMethod>,
) -> Result<Response<Body>, Error> {
    let metadata = tokio::fs::metadata(filename.clone())
        .map_err(|err| http_err!(BAD_REQUEST, "File access problems: {}", err))
        .await?;

    let (content_type, nocomp) = extension_to_content_type(&filename);
    let compression = if nocomp { None } else { compression };

    if metadata.len() < CHUNK_SIZE_LIMIT {
        simple_static_file_download(filename, content_type, compression).await
    } else {
        chunked_static_file_download(filename, content_type, compression).await
    }
}

fn extract_lang_header(headers: &http::HeaderMap) -> Option<String> {
    if let Some(Ok(cookie)) = headers.get("COOKIE").map(|v| v.to_str()) {
        return extract_cookie(cookie, "PBSLangCookie");
    }
    None
}

// FIXME: support handling multiple compression methods
fn extract_compression_method(headers: &http::HeaderMap) -> Option<CompressionMethod> {
    if let Some(Ok(encodings)) = headers.get(header::ACCEPT_ENCODING).map(|v| v.to_str()) {
        for encoding in encodings.split(&[',', ' '][..]) {
            if let Ok(method) = encoding.parse() {
                return Some(method);
            }
        }
    }
    None
}

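// Main dispatcher: requests below `/api2/{format}/` are authenticated, permission-checked
// and routed to the matching API method (protected methods get proxied to the privileged
// daemon), while all other GET requests serve the index page or static files.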
async fn handle_request(
    api: Arc<ApiConfig>,
    req: Request<Body>,
    peer: &std::net::SocketAddr,
) -> Result<Response<Body>, Error> {
    let (parts, body) = req.into_parts();
    let method = parts.method.clone();
    let (path, components) = normalize_uri_path(parts.uri.path())?;

    let comp_len = components.len();

    let query = parts.uri.query().unwrap_or_default();
    if path.len() + query.len() > MAX_URI_QUERY_LENGTH {
        return Ok(Response::builder()
            .status(StatusCode::URI_TOO_LONG)
            .body("".into())
            .unwrap());
    }

    let env_type = api.env_type();
    let mut rpcenv = RestEnvironment::new(env_type);

    rpcenv.set_client_ip(Some(*peer));

    let auth = &api.api_auth;

    let delay_unauth_time = std::time::Instant::now() + std::time::Duration::from_millis(3000);
    let access_forbidden_time = std::time::Instant::now() + std::time::Duration::from_millis(500);

    if comp_len >= 1 && components[0] == "api2" {
        if comp_len >= 2 {
            let format = components[1];

            let formatter = match format {
                "json" => &JSON_FORMATTER,
                "extjs" => &EXTJS_FORMATTER,
                _ => bail!("Unsupported output format '{}'.", format),
            };

            let mut uri_param = HashMap::new();
            let api_method = api.find_method(&components[2..], method.clone(), &mut uri_param);

            let mut auth_required = true;
            if let Some(api_method) = api_method {
                if let Permission::World = *api_method.access.permission {
                    auth_required = false; // no auth for endpoints with World permission
                }
            }

            if auth_required {
                match auth.check_auth(&parts.headers, &method) {
                    Ok(authid) => rpcenv.set_auth_id(Some(authid)),
                    Err(auth_err) => {
                        let err = match auth_err {
                            AuthError::Generic(err) => err,
                            AuthError::NoData => {
                                format_err!("no authentication credentials provided.")
                            }
                        };
                        let peer = peer.ip();
                        auth_logger()?.log(format!(
                            "authentication failure; rhost={} msg={}",
                            peer, err
                        ));

                        // always delay unauthorized calls by 3 seconds (from start of request)
                        let err = http_err!(UNAUTHORIZED, "authentication failed - {}", err);
                        tokio::time::sleep_until(Instant::from_std(delay_unauth_time)).await;
                        return Ok((formatter.format_error)(err));
                    }
                }
            }

            match api_method {
                None => {
                    let err = http_err!(NOT_FOUND, "Path '{}' not found.", path);
                    return Ok((formatter.format_error)(err));
                }
                Some(api_method) => {
                    let auth_id = rpcenv.get_auth_id();
                    let user_info = CachedUserInfo::new()?;

                    if !check_api_permission(
                        api_method.access.permission,
                        auth_id.as_deref(),
                        &uri_param,
                        user_info.as_ref(),
                    ) {
                        let err = http_err!(FORBIDDEN, "permission check failed");
                        tokio::time::sleep_until(Instant::from_std(access_forbidden_time)).await;
                        return Ok((formatter.format_error)(err));
                    }

                    let result = if api_method.protected && env_type == RpcEnvironmentType::PUBLIC {
                        proxy_protected_request(api_method, parts, body, peer).await
                    } else {
                        handle_api_request(rpcenv, api_method, formatter, parts, body, uri_param)
                            .await
                    };

                    let mut response = match result {
                        Ok(resp) => resp,
                        Err(err) => (formatter.format_error)(err),
                    };

                    if let Some(auth_id) = auth_id {
                        let auth_id: Authid = auth_id.parse()?;
                        response.extensions_mut().insert(auth_id);
                    }

                    return Ok(response);
                }
            }
        }
    } else {
        // no auth required for accessing files!

        if method != hyper::Method::GET {
            bail!("Unsupported HTTP method {}", method);
        }

        if comp_len == 0 {
            let language = extract_lang_header(&parts.headers);
            match auth.check_auth(&parts.headers, &method) {
                Ok(auth_id) => {
                    return Ok(api.get_index(Some(auth_id), language, parts));
                }
                Err(AuthError::Generic(_)) => {
                    tokio::time::sleep_until(Instant::from_std(delay_unauth_time)).await;
                }
                Err(AuthError::NoData) => {}
            }
            return Ok(api.get_index(None, language, parts));
        } else {
            let filename = api.find_alias(&components);
            let compression = extract_compression_method(&parts.headers);
            return handle_static_file_download(filename, compression).await;
        }
    }

    Err(http_err!(NOT_FOUND, "Path '{}' not found.", path))
}