]>
Commit | Line | Data |
---|---|---|
1 | use std::collections::HashSet; | |
2 | use std::io::Write; | |
3 | use std::sync::atomic::{AtomicUsize, Ordering}; | |
4 | use std::sync::{Arc, Mutex}; | |
5 | ||
6 | use chrono::{DateTime, Utc}; | |
7 | use failure::*; | |
8 | use futures::*; | |
9 | use futures::stream::Stream; | |
10 | use http::Uri; | |
11 | use http::header::HeaderValue; | |
12 | use http::{Request, Response}; | |
13 | use hyper::Body; | |
14 | use hyper::client::{Client, HttpConnector}; | |
15 | use openssl::ssl::{SslConnector, SslMethod}; | |
16 | use serde_json::{json, Value}; | |
17 | use tokio::io::AsyncReadExt; | |
18 | use tokio::sync::{mpsc, oneshot}; | |
19 | use url::percent_encoding::{percent_encode, DEFAULT_ENCODE_SET}; | |
20 | use xdg::BaseDirectories; | |
21 | ||
22 | use proxmox::tools::{ | |
23 | digest_to_hex, | |
24 | fs::{file_get_json, file_set_contents}, | |
25 | }; | |
26 | ||
27 | use super::merge_known_chunks::{MergedChunkInfo, MergeKnownChunks}; | |
28 | use super::pipe_to_stream::PipeToSendStream; | |
29 | use crate::backup::*; | |
30 | use crate::tools::async_io::EitherStream; | |
31 | use crate::tools::futures::{cancellable, Canceller}; | |
32 | use crate::tools::{self, tty, BroadcastFuture}; | |
33 | ||
/// Authentication state returned by a successful login.
///
/// Cloneable so it can be broadcast to every waiter of the shared
/// login future (see `HttpClient::auth`).
#[derive(Clone)]
pub struct AuthInfo {
    // Account name as confirmed by the server.
    username: String,
    // Session ticket; sent as the `PBSAuthCookie` cookie on requests.
    ticket: String,
    // CSRF prevention token; sent via the `CSRFPreventionToken` header.
    token: String,
}
40 | ||
/// HTTP(S) API client
///
/// Holds a hyper client over a TLS connector plus the target server
/// name. `auth` is a shared login future: login happens once, on
/// demand, and the resulting `AuthInfo` is broadcast to all callers.
pub struct HttpClient {
    client: Client<HttpsConnector>,
    server: String,
    auth: BroadcastFuture<AuthInfo>,
}
47 | ||
48 | /// Delete stored ticket data (logout) | |
49 | pub fn delete_ticket_info(server: &str, username: &str) -> Result<(), Error> { | |
50 | ||
51 | let base = BaseDirectories::with_prefix("proxmox-backup")?; | |
52 | ||
53 | // usually /run/user/<uid>/... | |
54 | let path = base.place_runtime_file("tickets")?; | |
55 | ||
56 | let mode = nix::sys::stat::Mode::from_bits_truncate(0o0600); | |
57 | ||
58 | let mut data = file_get_json(&path, Some(json!({})))?; | |
59 | ||
60 | if let Some(map) = data[server].as_object_mut() { | |
61 | map.remove(username); | |
62 | } | |
63 | ||
64 | file_set_contents(path, data.to_string().as_bytes(), Some(mode))?; | |
65 | ||
66 | Ok(()) | |
67 | } | |
68 | ||
69 | fn store_ticket_info(server: &str, username: &str, ticket: &str, token: &str) -> Result<(), Error> { | |
70 | ||
71 | let base = BaseDirectories::with_prefix("proxmox-backup")?; | |
72 | ||
73 | // usually /run/user/<uid>/... | |
74 | let path = base.place_runtime_file("tickets")?; | |
75 | ||
76 | let mode = nix::sys::stat::Mode::from_bits_truncate(0o0600); | |
77 | ||
78 | let mut data = file_get_json(&path, Some(json!({})))?; | |
79 | ||
80 | let now = Utc::now().timestamp(); | |
81 | ||
82 | data[server][username] = json!({ "timestamp": now, "ticket": ticket, "token": token}); | |
83 | ||
84 | let mut new_data = json!({}); | |
85 | ||
86 | let ticket_lifetime = tools::ticket::TICKET_LIFETIME - 60; | |
87 | ||
88 | let empty = serde_json::map::Map::new(); | |
89 | for (server, info) in data.as_object().unwrap_or(&empty) { | |
90 | for (_user, uinfo) in info.as_object().unwrap_or(&empty) { | |
91 | if let Some(timestamp) = uinfo["timestamp"].as_i64() { | |
92 | let age = now - timestamp; | |
93 | if age < ticket_lifetime { | |
94 | new_data[server][username] = uinfo.clone(); | |
95 | } | |
96 | } | |
97 | } | |
98 | } | |
99 | ||
100 | file_set_contents(path, new_data.to_string().as_bytes(), Some(mode))?; | |
101 | ||
102 | Ok(()) | |
103 | } | |
104 | ||
105 | fn load_ticket_info(server: &str, username: &str) -> Option<(String, String)> { | |
106 | let base = BaseDirectories::with_prefix("proxmox-backup").ok()?; | |
107 | ||
108 | // usually /run/user/<uid>/... | |
109 | let path = base.place_runtime_file("tickets").ok()?; | |
110 | let data = file_get_json(&path, None).ok()?; | |
111 | let now = Utc::now().timestamp(); | |
112 | let ticket_lifetime = tools::ticket::TICKET_LIFETIME - 60; | |
113 | let uinfo = data[server][username].as_object()?; | |
114 | let timestamp = uinfo["timestamp"].as_i64()?; | |
115 | let age = now - timestamp; | |
116 | ||
117 | if age < ticket_lifetime { | |
118 | let ticket = uinfo["ticket"].as_str()?; | |
119 | let token = uinfo["token"].as_str()?; | |
120 | Some((ticket.to_owned(), token.to_owned())) | |
121 | } else { | |
122 | None | |
123 | } | |
124 | } | |
125 | ||
126 | impl HttpClient { | |
127 | ||
    /// Create a client for `server`, preparing (but not executing) a login.
    ///
    /// A cached runtime ticket is preferred as the "password"; otherwise
    /// the password comes from `PBS_PASSWORD` or an interactive prompt.
    /// The login itself only runs when the shared `auth` future is first
    /// awaited.
    pub fn new(server: &str, username: &str) -> Result<Self, Error> {
        let client = Self::build_client();

        let password = if let Some((ticket, _token)) = load_ticket_info(server, username) {
            ticket
        } else {
            Self::get_password(&username)?
        };

        let login = Self::credentials(client.clone(), server.to_owned(), username.to_owned(), password);

        Ok(Self {
            client,
            server: String::from(server),
            auth: BroadcastFuture::new(login),
        })
    }
145 | ||
    /// Login future
    ///
    /// Login is done on demand, so this is only required if you need
    /// access to authentication data in 'AuthInfo'.
    pub fn login(&self) -> impl Future<Output = Result<AuthInfo, Error>> {
        self.auth.listen()
    }
153 | ||
    /// Obtain a password: `PBS_PASSWORD` environment variable first,
    /// then an interactive TTY prompt; fails if neither is available.
    fn get_password(_username: &str) -> Result<String, Error> {
        use std::env::VarError::*;
        match std::env::var("PBS_PASSWORD") {
            Ok(p) => return Ok(p),
            Err(NotUnicode(_)) => bail!("PBS_PASSWORD contains bad characters"),
            Err(NotPresent) => {
                // Try another method
            }
        }

        // If we're on a TTY, query the user for a password
        if tty::stdin_isatty() {
            return Ok(String::from_utf8(tty::read_password("Password: ")?)?);
        }

        bail!("no password input mechanism available");
    }
171 | ||
    /// Build the shared hyper client over an OpenSSL TLS connector.
    ///
    /// NOTE(review): certificate verification is disabled below
    /// (`SslVerifyMode::NONE`) - the `fixme!` marks this as a known,
    /// deliberate gap, not an oversight to "fix" silently here.
    fn build_client() -> Client<HttpsConnector> {

        let mut ssl_connector_builder = SslConnector::builder(SslMethod::tls()).unwrap();

        ssl_connector_builder.set_verify(openssl::ssl::SslVerifyMode::NONE); // fixme!

        let mut httpc = hyper::client::HttpConnector::new();
        httpc.set_nodelay(true); // important for h2 download performance!
        httpc.set_recv_buffer_size(Some(1024*1024)); //important for h2 download performance!
        httpc.enforce_http(false); // we want https...

        let https = HttpsConnector::with_connector(httpc, ssl_connector_builder.build());

        Client::builder()
            //.http2_initial_stream_window_size( (1 << 31) - 2)
            //.http2_initial_connection_window_size( (1 << 31) - 2)
            .build::<_, Body>(https)
    }
190 | ||
    /// Execute `req` after login, attaching the auth ticket cookie and
    /// CSRF prevention token headers, and decode the JSON response.
    pub fn request(&self, mut req: Request<Body>) -> impl Future<Output = Result<Value, Error>> {

        let login = self.auth.listen();

        let client = self.client.clone();

        login.and_then(move |auth| {

            // Ticket travels as a percent-encoded cookie value.
            let enc_ticket = format!("PBSAuthCookie={}", percent_encode(auth.ticket.as_bytes(), DEFAULT_ENCODE_SET));
            req.headers_mut().insert("Cookie", HeaderValue::from_str(&enc_ticket).unwrap());
            req.headers_mut().insert("CSRFPreventionToken", HeaderValue::from_str(&auth.token).unwrap());

            let request = Self::api_request(client, req);

            request
        })
    }
208 | ||
    /// Send a GET request to `path`; `data` is encoded as URL query
    /// parameters.
    pub fn get(
        &self,
        path: &str,
        data: Option<Value>,
    ) -> impl Future<Output = Result<Value, Error>> {
        // unwrap: only fails on a malformed path/query built by the caller
        let req = Self::request_builder(&self.server, "GET", path, data).unwrap();
        self.request(req)
    }
217 | ||
218 | pub fn delete( | |
219 | &mut self, | |
220 | path: &str, | |
221 | data: Option<Value>, | |
222 | ) -> impl Future<Output = Result<Value, Error>> { | |
223 | let req = Self::request_builder(&self.server, "DELETE", path, data).unwrap(); | |
224 | self.request(req) | |
225 | } | |
226 | ||
227 | pub fn post( | |
228 | &mut self, | |
229 | path: &str, | |
230 | data: Option<Value>, | |
231 | ) -> impl Future<Output = Result<Value, Error>> { | |
232 | let req = Self::request_builder(&self.server, "POST", path, data).unwrap(); | |
233 | self.request(req) | |
234 | } | |
235 | ||
236 | pub fn download<W: Write + Send + 'static>( | |
237 | &mut self, | |
238 | path: &str, | |
239 | output: W, | |
240 | ) -> impl Future<Output = Result<W, Error>> { | |
241 | let mut req = Self::request_builder(&self.server, "GET", path, None).unwrap(); | |
242 | ||
243 | let login = self.auth.listen(); | |
244 | ||
245 | let client = self.client.clone(); | |
246 | ||
247 | login.and_then(move |auth| { | |
248 | ||
249 | let enc_ticket = format!("PBSAuthCookie={}", percent_encode(auth.ticket.as_bytes(), DEFAULT_ENCODE_SET)); | |
250 | req.headers_mut().insert("Cookie", HeaderValue::from_str(&enc_ticket).unwrap()); | |
251 | ||
252 | client.request(req) | |
253 | .map_err(Error::from) | |
254 | .and_then(|resp| { | |
255 | let status = resp.status(); | |
256 | if !status.is_success() { | |
257 | future::Either::Left( | |
258 | HttpClient::api_response(resp) | |
259 | .map(|_| Err(format_err!("unknown error"))) | |
260 | ) | |
261 | } else { | |
262 | future::Either::Right( | |
263 | resp.into_body() | |
264 | .map_err(Error::from) | |
265 | .try_fold(output, move |mut acc, chunk| async move { | |
266 | acc.write_all(&chunk)?; | |
267 | Ok::<_, Error>(acc) | |
268 | }) | |
269 | ) | |
270 | } | |
271 | }) | |
272 | }) | |
273 | } | |
274 | ||
275 | pub fn upload( | |
276 | &mut self, | |
277 | content_type: &str, | |
278 | body: Body, | |
279 | path: &str, | |
280 | data: Option<Value>, | |
281 | ) -> impl Future<Output = Result<Value, Error>> { | |
282 | ||
283 | let path = path.trim_matches('/'); | |
284 | let mut url = format!("https://{}:8007/{}", &self.server, path); | |
285 | ||
286 | if let Some(data) = data { | |
287 | let query = tools::json_object_to_query(data).unwrap(); | |
288 | url.push('?'); | |
289 | url.push_str(&query); | |
290 | } | |
291 | ||
292 | let url: Uri = url.parse().unwrap(); | |
293 | ||
294 | let req = Request::builder() | |
295 | .method("POST") | |
296 | .uri(url) | |
297 | .header("User-Agent", "proxmox-backup-client/1.0") | |
298 | .header("Content-Type", content_type) | |
299 | .body(body).unwrap(); | |
300 | ||
301 | self.request(req) | |
302 | } | |
303 | ||
    /// Start a backup (writer) session: upgrade the connection to the
    /// proxmox backup protocol and wrap the resulting HTTP/2 channel in
    /// a `BackupClient`.
    pub fn start_backup(
        &self,
        datastore: &str,
        backup_type: &str,
        backup_id: &str,
        backup_time: DateTime<Utc>,
        debug: bool,
    ) -> impl Future<Output = Result<Arc<BackupClient>, Error>> {

        let param = json!({
            "backup-type": backup_type,
            "backup-id": backup_id,
            "backup-time": backup_time.timestamp(),
            "store": datastore,
            "debug": debug
        });

        let req = Self::request_builder(&self.server, "GET", "/api2/json/backup", Some(param)).unwrap();

        self.start_h2_connection(req, String::from(PROXMOX_BACKUP_PROTOCOL_ID_V1!()))
            .map_ok(|(h2, canceller)| BackupClient::new(h2, canceller))
    }
326 | ||
    /// Start a restore (reader) session: like `start_backup`, but
    /// upgrades to the reader protocol and yields a `BackupReader`.
    pub fn start_backup_reader(
        &self,
        datastore: &str,
        backup_type: &str,
        backup_id: &str,
        backup_time: DateTime<Utc>,
        debug: bool,
    ) -> impl Future<Output = Result<Arc<BackupReader>, Error>> {

        let param = json!({
            "backup-type": backup_type,
            "backup-id": backup_id,
            "backup-time": backup_time.timestamp(),
            "store": datastore,
            "debug": debug,
        });
        let req = Self::request_builder(&self.server, "GET", "/api2/json/reader", Some(param)).unwrap();

        self.start_h2_connection(req, String::from(PROXMOX_BACKUP_READER_PROTOCOL_ID_V1!()))
            .map_ok(|(h2, canceller)| BackupReader::new(h2, canceller))
    }
348 | ||
    /// Upgrade an authenticated HTTP/1 request to a raw HTTP/2
    /// connection speaking `protocol_name`.
    ///
    /// Flow: login -> send request with `UPGRADE` header -> expect
    /// `101 Switching Protocols` -> h2 client handshake on the upgraded
    /// stream -> spawn the connection driver -> wait for send capacity.
    /// The returned `Canceller` aborts the spawned connection task.
    pub fn start_h2_connection(
        &self,
        mut req: Request<Body>,
        protocol_name: String,
    ) -> impl Future<Output = Result<(H2Client, Canceller), Error>> {

        let login = self.auth.listen();
        let client = self.client.clone();

        login.and_then(move |auth| {

            let enc_ticket = format!("PBSAuthCookie={}", percent_encode(auth.ticket.as_bytes(), DEFAULT_ENCODE_SET));
            req.headers_mut().insert("Cookie", HeaderValue::from_str(&enc_ticket).unwrap());
            req.headers_mut().insert("UPGRADE", HeaderValue::from_str(&protocol_name).unwrap());

            client.request(req)
                .map_err(Error::from)
                .and_then(|resp| {

                    let status = resp.status();
                    if status != http::StatusCode::SWITCHING_PROTOCOLS {
                        // decode the body as an API response to surface its error
                        future::Either::Left(
                            Self::api_response(resp)
                                .map(|_| Err(format_err!("unknown error")))
                        )
                    } else {
                        future::Either::Right(
                            resp
                                .into_body()
                                .on_upgrade()
                                .map_err(Error::from)
                        )
                    }
                })
                .and_then(|upgraded| {
                    // use (near-)maximum window sizes for throughput
                    let max_window_size = (1 << 31) - 2;

                    h2::client::Builder::new()
                        .initial_connection_window_size(max_window_size)
                        .initial_window_size(max_window_size)
                        .max_frame_size(4*1024*1024)
                        .handshake(upgraded)
                        .map_err(Error::from)
                })
                .and_then(|(h2, connection)| async move {
                    // NOTE(review): a connection error panics the driver
                    // task here instead of being reported - looks
                    // intentional as a loud failure, but worth confirming.
                    let connection = connection
                        .map_err(|_| panic!("HTTP/2.0 connection failed"));

                    let (connection, canceller) = cancellable(connection)?;
                    // A cancellable future returns an Option which is None when cancelled and
                    // Some when it finished instead, since we don't care about the return type we
                    // need to map it away:
                    let connection = connection.map(|_| ());

                    // Spawn a new task to drive the connection state
                    hyper::rt::spawn(connection);

                    // Wait until the `SendRequest` handle has available capacity.
                    let c = h2.ready().await?;
                    Ok((H2Client::new(c), canceller))
                }.boxed())
        })
    }
412 | ||
    /// Perform the actual login: POST username/password to
    /// `/api2/json/access/ticket` and extract the auth data.
    ///
    /// The fresh ticket is cached best-effort (errors ignored) for
    /// reuse by later invocations via `load_ticket_info`.
    fn credentials(
        client: Client<HttpsConnector>,
        server: String,
        username: String,
        password: String,
    ) -> Box<dyn Future<Output = Result<AuthInfo, Error>> + Send> {
        Box::new(async move {
            let data = json!({ "username": username, "password": password });
            let req = Self::request_builder(&server, "POST", "/api2/json/access/ticket", Some(data)).unwrap();
            let cred = Self::api_request(client, req).await?;
            // NOTE(review): these unwraps trust the server reply to
            // contain all three fields - a malformed reply panics.
            let auth = AuthInfo {
                username: cred["data"]["username"].as_str().unwrap().to_owned(),
                ticket: cred["data"]["ticket"].as_str().unwrap().to_owned(),
                token: cred["data"]["CSRFPreventionToken"].as_str().unwrap().to_owned(),
            };

            let _ = store_ticket_info(&server, &auth.username, &auth.ticket, &auth.token);

            Ok(auth)
        })
    }
434 | ||
435 | async fn api_response(response: Response<Body>) -> Result<Value, Error> { | |
436 | let status = response.status(); | |
437 | let data = response | |
438 | .into_body() | |
439 | .try_concat() | |
440 | .await?; | |
441 | ||
442 | let text = String::from_utf8(data.to_vec()).unwrap(); | |
443 | if status.is_success() { | |
444 | if text.len() > 0 { | |
445 | let value: Value = serde_json::from_str(&text)?; | |
446 | Ok(value) | |
447 | } else { | |
448 | Ok(Value::Null) | |
449 | } | |
450 | } else { | |
451 | bail!("HTTP Error {}: {}", status, text); | |
452 | } | |
453 | } | |
454 | ||
    /// Send `req` with the given client and decode the reply via
    /// `api_response` (no authentication headers added here).
    fn api_request(
        client: Client<HttpsConnector>,
        req: Request<Body>
    ) -> impl Future<Output = Result<Value, Error>> {

        client.request(req)
            .map_err(Error::from)
            .and_then(Self::api_response)
    }
464 | ||
465 | pub fn request_builder(server: &str, method: &str, path: &str, data: Option<Value>) -> Result<Request<Body>, Error> { | |
466 | let path = path.trim_matches('/'); | |
467 | let url: Uri = format!("https://{}:8007/{}", server, path).parse()?; | |
468 | ||
469 | if let Some(data) = data { | |
470 | if method == "POST" { | |
471 | let request = Request::builder() | |
472 | .method(method) | |
473 | .uri(url) | |
474 | .header("User-Agent", "proxmox-backup-client/1.0") | |
475 | .header(hyper::header::CONTENT_TYPE, "application/json") | |
476 | .body(Body::from(data.to_string()))?; | |
477 | return Ok(request); | |
478 | } else { | |
479 | let query = tools::json_object_to_query(data)?; | |
480 | let url: Uri = format!("https://{}:8007/{}?{}", server, path, query).parse()?; | |
481 | let request = Request::builder() | |
482 | .method(method) | |
483 | .uri(url) | |
484 | .header("User-Agent", "proxmox-backup-client/1.0") | |
485 | .header(hyper::header::CONTENT_TYPE, "application/x-www-form-urlencoded") | |
486 | .body(Body::empty())?; | |
487 | return Ok(request); | |
488 | } | |
489 | } | |
490 | ||
491 | let request = Request::builder() | |
492 | .method(method) | |
493 | .uri(url) | |
494 | .header("User-Agent", "proxmox-backup-client/1.0") | |
495 | .header(hyper::header::CONTENT_TYPE, "application/x-www-form-urlencoded") | |
496 | .body(Body::empty())?; | |
497 | ||
498 | Ok(request) | |
499 | } | |
500 | } | |
501 | ||
502 | ||
/// Handle for a backup reader (restore) session over HTTP/2.
pub struct BackupReader {
    // request channel of the upgraded HTTP/2 connection
    h2: H2Client,
    // aborts the spawned connection driver task
    canceller: Canceller,
}
507 | ||
impl Drop for BackupReader {

    // Tear down the HTTP/2 connection when the handle goes away.
    fn drop(&mut self) {
        self.canceller.cancel();
    }
}
514 | ||
515 | impl BackupReader { | |
516 | ||
517 | pub fn new(h2: H2Client, canceller: Canceller) -> Arc<Self> { | |
518 | Arc::new(Self { h2, canceller: canceller }) | |
519 | } | |
520 | ||
521 | pub fn get( | |
522 | &self, | |
523 | path: &str, | |
524 | param: Option<Value>, | |
525 | ) -> impl Future<Output = Result<Value, Error>> { | |
526 | self.h2.get(path, param) | |
527 | } | |
528 | ||
529 | pub fn put( | |
530 | &self, | |
531 | path: &str, | |
532 | param: Option<Value>, | |
533 | ) -> impl Future<Output = Result<Value, Error>> { | |
534 | self.h2.put(path, param) | |
535 | } | |
536 | ||
537 | pub fn post( | |
538 | &self, | |
539 | path: &str, | |
540 | param: Option<Value>, | |
541 | ) -> impl Future<Output = Result<Value, Error>> { | |
542 | self.h2.post(path, param) | |
543 | } | |
544 | ||
545 | pub fn download<W: Write + Send + 'static>( | |
546 | &self, | |
547 | file_name: &str, | |
548 | output: W, | |
549 | ) -> impl Future<Output = Result<W, Error>> { | |
550 | let path = "download"; | |
551 | let param = json!({ "file-name": file_name }); | |
552 | self.h2.download(path, Some(param), output) | |
553 | } | |
554 | ||
555 | pub fn speedtest<W: Write + Send + 'static>( | |
556 | &self, | |
557 | output: W, | |
558 | ) -> impl Future<Output = Result<W, Error>> { | |
559 | self.h2.download("speedtest", None, output) | |
560 | } | |
561 | ||
562 | pub fn download_chunk<W: Write + Send + 'static>( | |
563 | &self, | |
564 | digest: &[u8; 32], | |
565 | output: W, | |
566 | ) -> impl Future<Output = Result<W, Error>> { | |
567 | let path = "chunk"; | |
568 | let param = json!({ "digest": digest_to_hex(digest) }); | |
569 | self.h2.download(path, Some(param), output) | |
570 | } | |
571 | ||
572 | pub fn force_close(self) { | |
573 | self.canceller.cancel(); | |
574 | } | |
575 | } | |
576 | ||
/// Handle for a backup writer session over HTTP/2.
pub struct BackupClient {
    // request channel of the upgraded HTTP/2 connection
    h2: H2Client,
    // aborts the spawned connection driver task
    canceller: Canceller,
}
581 | ||
impl Drop for BackupClient {

    // Tear down the HTTP/2 connection when the handle goes away.
    fn drop(&mut self) {
        self.canceller.cancel();
    }
}
588 | ||
/// Result of a single upload: encoded size and SHA-256 checksum of the
/// uploaded data.
pub struct BackupStats {
    pub size: u64,
    pub csum: [u8; 32],
}
593 | ||
594 | impl BackupClient { | |
    /// Wrap an established writer session. The `Canceller` tears the
    /// connection down on drop, `finish`, or `force_close`.
    pub fn new(h2: H2Client, canceller: Canceller) -> Arc<Self> {
        Arc::new(Self { h2, canceller })
    }
598 | ||
    /// GET request on the backup channel.
    pub fn get(
        &self,
        path: &str,
        param: Option<Value>,
    ) -> impl Future<Output = Result<Value, Error>> {
        self.h2.get(path, param)
    }

    /// PUT request on the backup channel.
    pub fn put(
        &self,
        path: &str,
        param: Option<Value>,
    ) -> impl Future<Output = Result<Value, Error>> {
        self.h2.put(path, param)
    }

    /// POST request on the backup channel.
    pub fn post(
        &self,
        path: &str,
        param: Option<Value>,
    ) -> impl Future<Output = Result<Value, Error>> {
        self.h2.post(path, param)
    }
622 | ||
    /// Finish the backup on the server, then close the connection.
    /// Consumes the (Arc'd) client - no further requests are possible.
    pub fn finish(self: Arc<Self>) -> impl Future<Output = Result<(), Error>> {
        self.h2.clone()
            .post("finish", None)
            .map_ok(move |_| {
                self.canceller.cancel();
            })
    }
630 | ||
    /// Abort the session immediately without finishing the backup.
    pub fn force_close(self) {
        self.canceller.cancel();
    }
634 | ||
    /// Upload raw (already encoded) blob data read from `reader` as
    /// `file_name`, returning size and SHA-256 of the uploaded bytes.
    pub fn upload_blob<R: std::io::Read>(
        &self,
        mut reader: R,
        file_name: &str,
    ) -> impl Future<Output = Result<BackupStats, Error>> {

        let h2 = self.h2.clone();
        let file_name = file_name.to_owned();

        async move {
            let mut raw_data = Vec::new();
            // fixme: avoid loading into memory
            reader.read_to_end(&mut raw_data)?;

            let csum = openssl::sha::sha256(&raw_data);
            let param = json!({"encoded-size": raw_data.len(), "file-name": file_name });
            let size = raw_data.len() as u64; // fixme: should be decoded size instead??
            let _value = h2.upload("blob", Some(param), raw_data).await?;
            Ok(BackupStats { size, csum })
        }
    }
656 | ||
657 | pub fn upload_blob_from_data( | |
658 | &self, | |
659 | data: Vec<u8>, | |
660 | file_name: &str, | |
661 | crypt_config: Option<Arc<CryptConfig>>, | |
662 | compress: bool, | |
663 | sign_only: bool, | |
664 | ) -> impl Future<Output = Result<BackupStats, Error>> { | |
665 | ||
666 | let h2 = self.h2.clone(); | |
667 | let file_name = file_name.to_owned(); | |
668 | let size = data.len() as u64; | |
669 | ||
670 | async move { | |
671 | let blob = if let Some(crypt_config) = crypt_config { | |
672 | if sign_only { | |
673 | DataBlob::create_signed(&data, crypt_config, compress)? | |
674 | } else { | |
675 | DataBlob::encode(&data, Some(crypt_config.clone()), compress)? | |
676 | } | |
677 | } else { | |
678 | DataBlob::encode(&data, None, compress)? | |
679 | }; | |
680 | ||
681 | let raw_data = blob.into_inner(); | |
682 | ||
683 | let csum = openssl::sha::sha256(&raw_data); | |
684 | let param = json!({"encoded-size": raw_data.len(), "file-name": file_name }); | |
685 | let _value = h2.upload("blob", Some(param), raw_data).await?; | |
686 | Ok(BackupStats { size, csum }) | |
687 | } | |
688 | } | |
689 | ||
    /// Read `src_path`, encode it as a blob (optionally encrypted,
    /// optionally compressed) and upload it as `file_name`.
    ///
    /// Returned `size` is the raw file length; `csum` is the SHA-256
    /// of the encoded blob actually sent.
    pub fn upload_blob_from_file<P: AsRef<std::path::Path>>(
        &self,
        src_path: P,
        file_name: &str,
        crypt_config: Option<Arc<CryptConfig>>,
        compress: bool,
    ) -> impl Future<Output = Result<BackupStats, Error>> {

        let h2 = self.h2.clone();
        let file_name = file_name.to_owned();
        let src_path = src_path.as_ref().to_owned();

        async move {
            let mut file = tokio::fs::File::open(src_path.clone())
                .await
                .map_err(move |err| format_err!("unable to open file {:?} - {}", src_path, err))?;

            let mut contents = Vec::new();
            file.read_to_end(&mut contents).await.map_err(Error::from)?;

            let size: u64 = contents.len() as u64;
            let blob = DataBlob::encode(&contents, crypt_config, compress)?;
            let raw_data = blob.into_inner();
            let csum = openssl::sha::sha256(&raw_data);
            let param = json!({
                "encoded-size": raw_data.len(),
                "file-name": file_name,
            });
            h2.upload("blob", Some(param), raw_data).await?;
            Ok(BackupStats { size, csum })
        }
    }
722 | ||
    /// Upload a chunked archive stream as `archive_name`.
    ///
    /// Flow: fetch the server's known-chunk list (so duplicates are
    /// only referenced, not re-uploaded) -> create the index writer
    /// (`{prefix}_index`) -> stream chunks through
    /// `upload_chunk_info_stream` -> close the writer
    /// (`{prefix}_close`) with chunk count and total size.
    ///
    /// `prefix` selects the index flavor (presumably "fixed" or
    /// "dynamic" - confirm against the server API); `fixed_size` is
    /// passed through when the total size is known up front.
    pub fn upload_stream(
        &self,
        archive_name: &str,
        stream: impl Stream<Item = Result<bytes::BytesMut, Error>>,
        prefix: &str,
        fixed_size: Option<u64>,
        crypt_config: Option<Arc<CryptConfig>>,
    ) -> impl Future<Output = Result<BackupStats, Error>> {
        // digests the server already has; shared with the upload stream
        let known_chunks = Arc::new(Mutex::new(HashSet::new()));

        let mut param = json!({ "archive-name": archive_name });
        if let Some(size) = fixed_size {
            param["size"] = size.into();
        }

        let index_path = format!("{}_index", prefix);
        let close_path = format!("{}_close", prefix);

        let prefix = prefix.to_owned();

        let h2 = self.h2.clone();

        let download_future =
            Self::download_chunk_list(h2.clone(), &index_path, archive_name, known_chunks.clone());

        async move {
            download_future.await?;

            // the index POST returns the writer id used by all later calls
            let wid = h2.post(&index_path, Some(param)).await?.as_u64().unwrap();

            let (chunk_count, size, _speed, csum) = Self::upload_chunk_info_stream(
                h2.clone(),
                wid,
                stream,
                &prefix,
                known_chunks.clone(),
                crypt_config,
            )
            .await?;

            let param = json!({
                "wid": wid ,
                "chunk-count": chunk_count,
                "size": size,
            });
            let _value = h2.post(&close_path, Some(param)).await?;
            Ok(BackupStats {
                size: size as u64,
                csum,
            })
        }
    }
775 | ||
    /// Spawn a task that drains queued h2 response futures in order.
    ///
    /// Returns the sender for enqueuing responses and a oneshot that
    /// resolves with Ok(()) once the queue closes cleanly, or the first
    /// error. Channel capacity 100 bounds in-flight pipelined requests.
    fn response_queue() -> (
        mpsc::Sender<h2::client::ResponseFuture>,
        oneshot::Receiver<Result<(), Error>>
    ) {
        let (verify_queue_tx, verify_queue_rx) = mpsc::channel(100);
        let (verify_result_tx, verify_result_rx) = oneshot::channel();

        hyper::rt::spawn(
            verify_queue_rx
                .map(Ok::<_, Error>)
                .try_for_each(|response: h2::client::ResponseFuture| {
                    response
                        .map_err(Error::from)
                        .and_then(H2Client::h2api_response)
                        .map_ok(|result| println!("RESPONSE: {:?}", result))
                        .map_err(|err| format_err!("pipelined request failed: {}", err))
                })
                .map(|result| {
                    // receiver may be gone if the caller stopped waiting
                    let _ignore_closed_channel = verify_result_tx.send(result);
                })
        );

        (verify_queue_tx, verify_result_rx)
    }
800 | ||
    /// Spawn a task that appends uploaded/known chunks to the index
    /// writer `wid` via PUT requests to `path`.
    ///
    /// Queue items pair a `MergedChunkInfo::Known` list with an
    /// optional response future: `Some` means the chunk upload must
    /// complete first, `None` means the chunk was already known.
    /// `merge_known_chunks()` then batches consecutive entries so one
    /// PUT can append many (offset, digest) pairs.
    ///
    /// Returns the queue sender and a oneshot with the final result.
    fn append_chunk_queue(h2: H2Client, wid: u64, path: String) -> (
        mpsc::Sender<(MergedChunkInfo, Option<h2::client::ResponseFuture>)>,
        oneshot::Receiver<Result<(), Error>>
    ) {
        let (verify_queue_tx, verify_queue_rx) = mpsc::channel(64);
        let (verify_result_tx, verify_result_rx) = oneshot::channel();

        let h2_2 = h2.clone();

        hyper::rt::spawn(
            verify_queue_rx
                .map(Ok::<_, Error>)
                // Wait for a pending upload (if any) before the append.
                .and_then(move |(merged_chunk_info, response): (MergedChunkInfo, Option<h2::client::ResponseFuture>)| {
                    match (response, merged_chunk_info) {
                        (Some(response), MergedChunkInfo::Known(list)) => {
                            future::Either::Left(
                                response
                                    .map_err(Error::from)
                                    .and_then(H2Client::h2api_response)
                                    .and_then(move |_result| {
                                        future::ok(MergedChunkInfo::Known(list))
                                    })
                            )
                        }
                        (None, MergedChunkInfo::Known(list)) => {
                            future::Either::Right(future::ok(MergedChunkInfo::Known(list)))
                        }
                        // only Known variants are ever enqueued here
                        _ => unreachable!(),
                    }
                })
                .merge_known_chunks()
                // Issue one PUT per merged batch of (offset, digest) pairs.
                .and_then(move |merged_chunk_info| {
                    match merged_chunk_info {
                        MergedChunkInfo::Known(chunk_list) => {
                            let mut digest_list = vec![];
                            let mut offset_list = vec![];
                            for (offset, digest) in chunk_list {
                                //println!("append chunk {} (offset {})", proxmox::tools::digest_to_hex(&digest), offset);
                                digest_list.push(digest_to_hex(&digest));
                                offset_list.push(offset);
                            }
                            println!("append chunks list len ({})", digest_list.len());
                            let param = json!({ "wid": wid, "digest-list": digest_list, "offset-list": offset_list });
                            let mut request = H2Client::request_builder("localhost", "PUT", &path, None).unwrap();
                            request.headers_mut().insert(hyper::header::CONTENT_TYPE, HeaderValue::from_static("application/json"));
                            let param_data = bytes::Bytes::from(param.to_string().as_bytes());
                            let upload_data = Some(param_data);
                            h2_2.send_request(request, upload_data)
                                .and_then(move |response| {
                                    response
                                        .map_err(Error::from)
                                        .and_then(H2Client::h2api_response)
                                        .map_ok(|_| ())
                                })
                                .map_err(|err| format_err!("pipelined request failed: {}", err))
                        }
                        _ => unreachable!(),
                    }
                })
                .try_for_each(|_| future::ok(()))
                .map(|result| {
                    // receiver may be gone if the caller stopped waiting
                    let _ignore_closed_channel = verify_result_tx.send(result);
                })
        );

        (verify_queue_tx, verify_result_rx)
    }
868 | ||
    /// Fetch the digest list of chunks the server already has for
    /// `archive_name` and insert each into `known_chunks`.
    ///
    /// Streams the body through `DigestListDecoder`, releasing h2 flow
    /// control capacity per chunk so the download keeps flowing.
    fn download_chunk_list(
        h2: H2Client,
        path: &str,
        archive_name: &str,
        known_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
    ) -> impl Future<Output = Result<(), Error>> {

        let param = json!({ "archive-name": archive_name });
        let request = H2Client::request_builder("localhost", "GET", path, Some(param)).unwrap();

        h2.send_request(request, None)
            .and_then(move |response| {
                response
                    .map_err(Error::from)
                    .and_then(move |resp| {
                        let status = resp.status();

                        if !status.is_success() {
                            // surface the server's error message
                            future::Either::Left(
                                H2Client::h2api_response(resp)
                                    .map(|_| Err(format_err!("unknown error")))
                            )
                        } else {
                            future::Either::Right(future::ok(resp.into_body()))
                        }
                    })
                    .and_then(move |mut body| {

                        let mut release_capacity = body.release_capacity().clone();

                        DigestListDecoder::new(body.map_err(Error::from))
                            .try_for_each(move |chunk| {
                                // return flow-control credit for the consumed bytes
                                let _ = release_capacity.release_capacity(chunk.len());
                                println!("GOT DOWNLOAD {}", digest_to_hex(&chunk));
                                known_chunks.lock().unwrap().insert(chunk);
                                futures::future::ok(())
                            })
                    })
            })
    }
909 | ||
/// Upload a stream of data chunks to the backup server over an
/// established HTTP/2 connection.
///
/// Each `BytesMut` item from `stream` is compressed (and, when
/// `crypt_config` is given, encrypted) into a chunk. Chunks whose digest
/// is already in `known_chunks` are only registered in the index; new
/// chunks are POSTed to `{prefix}_chunk` and their digest inserted into
/// `known_chunks`. Index entries are appended via the queue created by
/// `Self::append_chunk_queue` for the `{prefix}_index` path.
///
/// Resolves to `(chunk_count, stream_len_bytes, speed, index_csum)`,
/// where `index_csum` is a SHA-256 over each chunk's little-endian end
/// offset followed by its digest.
fn upload_chunk_info_stream(
    h2: H2Client,
    wid: u64,
    stream: impl Stream<Item = Result<bytes::BytesMut, Error>>,
    prefix: &str,
    known_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
    crypt_config: Option<Arc<CryptConfig>>,
) -> impl Future<Output = Result<(usize, usize, usize, [u8; 32]), Error>> {

    // Shared counters (chunk count / total byte count); cloned so both the
    // per-chunk closure and the final statistics step can access them.
    let repeat = Arc::new(AtomicUsize::new(0));
    let repeat2 = repeat.clone();

    let stream_len = Arc::new(AtomicUsize::new(0));
    let stream_len2 = stream_len.clone();

    let append_chunk_path = format!("{}_index", prefix);
    let upload_chunk_path = format!("{}_chunk", prefix);

    // Queue feeding index-append requests; `upload_result` resolves once
    // every queued entry has been processed.
    let (upload_queue, upload_result) =
        Self::append_chunk_queue(h2.clone(), wid, append_chunk_path.to_owned());

    let start_time = std::time::Instant::now();

    // Running SHA-256 over the index entries; kept in an Option so it can
    // be `take()`n for `finish()` at the end.
    let index_csum = Arc::new(Mutex::new(Some(openssl::sha::Sha256::new())));
    let index_csum_2 = index_csum.clone();

    stream
        .and_then(move |data| {

            let chunk_len = data.len();

            repeat.fetch_add(1, Ordering::SeqCst);
            // Byte offset of this chunk within the overall stream.
            let offset = stream_len.fetch_add(chunk_len, Ordering::SeqCst) as u64;

            let mut chunk_builder = DataChunkBuilder::new(data.as_ref())
                .compress(true);

            if let Some(ref crypt_config) = crypt_config {
                chunk_builder = chunk_builder.crypt_config(crypt_config);
            }

            let mut known_chunks = known_chunks.lock().unwrap();
            let digest = chunk_builder.digest();

            let mut guard = index_csum.lock().unwrap();
            let csum = guard.as_mut().unwrap();

            let chunk_end = offset + chunk_len as u64;

            // The index checksum covers each chunk's end offset and digest.
            csum.update(&chunk_end.to_le_bytes());
            csum.update(digest);

            let chunk_is_known = known_chunks.contains(digest);
            if chunk_is_known {
                // Server already has this chunk - only register its position.
                future::ok(MergedChunkInfo::Known(vec![(offset, *digest)]))
            } else {
                // New chunk - remember the digest and build the encoded data.
                known_chunks.insert(*digest);
                future::ready(chunk_builder
                    .build()
                    .map(move |chunk| MergedChunkInfo::New(ChunkInfo {
                        chunk,
                        chunk_len: chunk_len as u64,
                        offset,
                    }))
                )
            }
        })
        .merge_known_chunks()
        .try_for_each(move |merged_chunk_info| {

            if let MergedChunkInfo::New(chunk_info) = merged_chunk_info {
                let offset = chunk_info.offset;
                let digest = *chunk_info.chunk.digest();
                let digest_str = digest_to_hex(&digest);

                println!("upload new chunk {} ({} bytes, offset {})", digest_str,
                    chunk_info.chunk_len, offset);

                let chunk_data = chunk_info.chunk.raw_data();
                let param = json!({
                    "wid": wid,
                    "digest": digest_str,
                    "size": chunk_info.chunk_len,
                    "encoded-size": chunk_data.len(),
                });

                let request = H2Client::request_builder("localhost", "POST", &upload_chunk_path, Some(param)).unwrap();
                let upload_data = Some(bytes::Bytes::from(chunk_data));

                // Once the upload finishes, the chunk counts as known; queue
                // its index entry together with the pending response.
                let new_info = MergedChunkInfo::Known(vec![(offset, digest)]);

                let mut upload_queue = upload_queue.clone();
                future::Either::Left(h2
                    .send_request(request, upload_data)
                    .and_then(move |response| async move {
                        upload_queue
                            .send((new_info, Some(response)))
                            .await
                            .map_err(Error::from)
                    })
                )
            } else {
                // Known chunk(s): nothing to upload, just queue index entries.
                let mut upload_queue = upload_queue.clone();
                future::Either::Right(async move {
                    upload_queue
                        .send((merged_chunk_info, None))
                        .await
                        .map_err(Error::from)
                })
            }
        })
        .then(move |result| async move {
            // Wait for the append queue to drain; report whichever side failed.
            upload_result.await?.and(result)
        }.boxed())
        .and_then(move |_| {
            let repeat = repeat2.load(Ordering::SeqCst);
            let stream_len = stream_len2.load(Ordering::SeqCst);
            // bytes -> MB/s: scale to microsecond granularity before dividing.
            let speed = ((stream_len*1000000)/(1024*1024))/(start_time.elapsed().as_micros() as usize);
            println!("Uploaded {} chunks in {} seconds ({} MB/s).", repeat, start_time.elapsed().as_secs(), speed);
            if repeat > 0 {
                println!("Average chunk size was {} bytes.", stream_len/repeat);
                println!("Time per request: {} microseconds.", (start_time.elapsed().as_micros())/(repeat as u128));
            }

            let mut guard = index_csum_2.lock().unwrap();
            let csum = guard.take().unwrap().finish();

            futures::future::ok((repeat, stream_len, speed, csum))
        })
}
1040 | ||
/// Measure upload throughput by repeatedly POSTing a fixed 4 MiB buffer
/// to the `speedtest` endpoint for roughly five seconds.
///
/// Resolves to the measured speed (MB/s, integer arithmetic).
pub fn upload_speedtest(&self) -> impl Future<Output = Result<usize, Error>> {

    let mut data = vec![];
    // generate pseudo random byte sequence (4 little-endian bytes per
    // counter value -> 4 MiB total)
    for i in 0..1024*1024 {
        for j in 0..4 {
            let byte = ((i >> (j<<3))&0xff) as u8;
            data.push(byte);
        }
    }

    let item_len = data.len();

    // Request counter, shared with the final statistics closure.
    let repeat = Arc::new(AtomicUsize::new(0));
    let repeat2 = repeat.clone();

    // Queue that awaits the server responses in the background.
    let (upload_queue, upload_result) = Self::response_queue();

    let start_time = std::time::Instant::now();

    let h2 = self.h2.clone();

    futures::stream::repeat(data)
        .take_while(move |_| {
            // Keep sending until ~5 seconds have elapsed; every iteration
            // bumps the request counter.
            let repeat = Arc::clone(&repeat);
            async move {
                repeat.fetch_add(1, Ordering::SeqCst);
                start_time.elapsed().as_secs() < 5
            }
        })
        .map(Ok)
        .try_for_each(move |data| {
            let h2 = h2.clone();

            let mut upload_queue = upload_queue.clone();

            println!("send test data ({} bytes)", data.len());
            let request = H2Client::request_builder("localhost", "POST", "speedtest", None).unwrap();
            h2.send_request(request, Some(bytes::Bytes::from(data)))
                .and_then(move |response| async move {
                    upload_queue
                        .send(response)
                        .await
                        .map_err(Error::from)
                })
        })
        .then(move |result| async move {
            println!("RESULT {:?}", result);
            // Drain the response queue; fail if either side failed.
            upload_result.await?.and(result)
        })
        .and_then(move |_| {
            let repeat = repeat2.load(Ordering::SeqCst);
            println!("Uploaded {} chunks in {} seconds.", repeat, start_time.elapsed().as_secs());
            // bytes -> MB/s, scaled to microsecond granularity.
            let speed = ((item_len*1000000*(repeat as usize))/(1024*1024))/(start_time.elapsed().as_micros() as usize);
            if repeat > 0 {
                println!("Time per request: {} microseconds.", (start_time.elapsed().as_micros())/(repeat as u128));
            }
            futures::future::ok(speed)
        })
}
1101 | } | |
1102 | ||
/// Thin, cloneable wrapper around an established HTTP/2 connection's
/// request sender.
#[derive(Clone)]
pub struct H2Client {
    // Handle used to open new request streams on the shared connection.
    h2: h2::client::SendRequest<bytes::Bytes>,
}
1107 | ||
1108 | impl H2Client { | |
1109 | ||
1110 | pub fn new(h2: h2::client::SendRequest<bytes::Bytes>) -> Self { | |
1111 | Self { h2 } | |
1112 | } | |
1113 | ||
1114 | pub fn get(&self, path: &str, param: Option<Value>) -> impl Future<Output = Result<Value, Error>> { | |
1115 | let req = Self::request_builder("localhost", "GET", path, param).unwrap(); | |
1116 | self.request(req) | |
1117 | } | |
1118 | ||
1119 | pub fn put(&self, path: &str, param: Option<Value>) -> impl Future<Output = Result<Value, Error>> { | |
1120 | let req = Self::request_builder("localhost", "PUT", path, param).unwrap(); | |
1121 | self.request(req) | |
1122 | } | |
1123 | ||
1124 | pub fn post(&self, path: &str, param: Option<Value>) -> impl Future<Output = Result<Value, Error>> { | |
1125 | let req = Self::request_builder("localhost", "POST", path, param).unwrap(); | |
1126 | self.request(req) | |
1127 | } | |
1128 | ||
1129 | pub fn download<W: Write + Send + 'static>( | |
1130 | &self, | |
1131 | path: &str, | |
1132 | param: Option<Value>, | |
1133 | output: W, | |
1134 | ) -> impl Future<Output = Result<W, Error>> { | |
1135 | let request = Self::request_builder("localhost", "GET", path, param).unwrap(); | |
1136 | ||
1137 | self.send_request(request, None) | |
1138 | .and_then(move |response| { | |
1139 | response | |
1140 | .map_err(Error::from) | |
1141 | .and_then(move |resp| { | |
1142 | let status = resp.status(); | |
1143 | if !status.is_success() { | |
1144 | future::Either::Left( | |
1145 | H2Client::h2api_response(resp) | |
1146 | .map(|_| Err(format_err!("unknown error"))) | |
1147 | ) | |
1148 | } else { | |
1149 | let mut body = resp.into_body(); | |
1150 | let release_capacity = body.release_capacity().clone(); | |
1151 | ||
1152 | future::Either::Right( | |
1153 | body | |
1154 | .map_err(Error::from) | |
1155 | .try_fold(output, move |mut acc, chunk| { | |
1156 | let mut release_capacity = release_capacity.clone(); | |
1157 | async move { | |
1158 | let _ = release_capacity.release_capacity(chunk.len()); | |
1159 | acc.write_all(&chunk)?; | |
1160 | Ok::<_, Error>(acc) | |
1161 | } | |
1162 | }) | |
1163 | ) | |
1164 | } | |
1165 | }) | |
1166 | }) | |
1167 | } | |
1168 | ||
1169 | pub fn upload( | |
1170 | &self, | |
1171 | path: &str, | |
1172 | param: Option<Value>, | |
1173 | data: Vec<u8>, | |
1174 | ) -> impl Future<Output = Result<Value, Error>> { | |
1175 | let request = Self::request_builder("localhost", "POST", path, param).unwrap(); | |
1176 | ||
1177 | self.h2.clone() | |
1178 | .ready() | |
1179 | .map_err(Error::from) | |
1180 | .and_then(move |mut send_request| { | |
1181 | let (response, stream) = send_request.send_request(request, false).unwrap(); | |
1182 | PipeToSendStream::new(bytes::Bytes::from(data), stream) | |
1183 | .and_then(|_| { | |
1184 | response | |
1185 | .map_err(Error::from) | |
1186 | .and_then(Self::h2api_response) | |
1187 | }) | |
1188 | }) | |
1189 | } | |
1190 | ||
1191 | fn request( | |
1192 | &self, | |
1193 | request: Request<()>, | |
1194 | ) -> impl Future<Output = Result<Value, Error>> { | |
1195 | ||
1196 | self.send_request(request, None) | |
1197 | .and_then(move |response| { | |
1198 | response | |
1199 | .map_err(Error::from) | |
1200 | .and_then(Self::h2api_response) | |
1201 | }) | |
1202 | } | |
1203 | ||
1204 | fn send_request( | |
1205 | &self, | |
1206 | request: Request<()>, | |
1207 | data: Option<bytes::Bytes>, | |
1208 | ) -> impl Future<Output = Result<h2::client::ResponseFuture, Error>> { | |
1209 | ||
1210 | self.h2.clone() | |
1211 | .ready() | |
1212 | .map_err(Error::from) | |
1213 | .and_then(move |mut send_request| { | |
1214 | if let Some(data) = data { | |
1215 | let (response, stream) = send_request.send_request(request, false).unwrap(); | |
1216 | future::Either::Left(PipeToSendStream::new(data, stream) | |
1217 | .and_then(move |_| { | |
1218 | future::ok(response) | |
1219 | })) | |
1220 | } else { | |
1221 | let (response, _stream) = send_request.send_request(request, true).unwrap(); | |
1222 | future::Either::Right(future::ok(response)) | |
1223 | } | |
1224 | }) | |
1225 | } | |
1226 | ||
1227 | fn h2api_response( | |
1228 | response: Response<h2::RecvStream>, | |
1229 | ) -> impl Future<Output = Result<Value, Error>> { | |
1230 | let status = response.status(); | |
1231 | ||
1232 | let (_head, mut body) = response.into_parts(); | |
1233 | ||
1234 | // The `release_capacity` handle allows the caller to manage | |
1235 | // flow control. | |
1236 | // | |
1237 | // Whenever data is received, the caller is responsible for | |
1238 | // releasing capacity back to the server once it has freed | |
1239 | // the data from memory. | |
1240 | let mut release_capacity = body.release_capacity().clone(); | |
1241 | ||
1242 | body | |
1243 | .map_ok(move |chunk| { | |
1244 | // Let the server send more data. | |
1245 | let _ = release_capacity.release_capacity(chunk.len()); | |
1246 | chunk | |
1247 | }) | |
1248 | .try_concat() | |
1249 | .map_err(Error::from) | |
1250 | .and_then(move |data| async move { | |
1251 | let text = String::from_utf8(data.to_vec()).unwrap(); | |
1252 | if status.is_success() { | |
1253 | if text.len() > 0 { | |
1254 | let mut value: Value = serde_json::from_str(&text)?; | |
1255 | if let Some(map) = value.as_object_mut() { | |
1256 | if let Some(data) = map.remove("data") { | |
1257 | return Ok(data); | |
1258 | } | |
1259 | } | |
1260 | bail!("got result without data property"); | |
1261 | } else { | |
1262 | Ok(Value::Null) | |
1263 | } | |
1264 | } else { | |
1265 | bail!("HTTP Error {}: {}", status, text); | |
1266 | } | |
1267 | }.boxed()) | |
1268 | } | |
1269 | ||
1270 | // Note: We always encode parameters with the url | |
1271 | pub fn request_builder(server: &str, method: &str, path: &str, data: Option<Value>) -> Result<Request<()>, Error> { | |
1272 | let path = path.trim_matches('/'); | |
1273 | ||
1274 | if let Some(data) = data { | |
1275 | let query = tools::json_object_to_query(data)?; | |
1276 | // We detected problem with hyper around 6000 characters - seo we try to keep on the safe side | |
1277 | if query.len() > 4096 { bail!("h2 query data too large ({} bytes) - please encode data inside body", query.len()); } | |
1278 | let url: Uri = format!("https://{}:8007/{}?{}", server, path, query).parse()?; | |
1279 | let request = Request::builder() | |
1280 | .method(method) | |
1281 | .uri(url) | |
1282 | .header("User-Agent", "proxmox-backup-client/1.0") | |
1283 | .header(hyper::header::CONTENT_TYPE, "application/x-www-form-urlencoded") | |
1284 | .body(())?; | |
1285 | return Ok(request); | |
1286 | } else { | |
1287 | let url: Uri = format!("https://{}:8007/{}", server, path).parse()?; | |
1288 | let request = Request::builder() | |
1289 | .method(method) | |
1290 | .uri(url) | |
1291 | .header("User-Agent", "proxmox-backup-client/1.0") | |
1292 | .header(hyper::header::CONTENT_TYPE, "application/x-www-form-urlencoded") | |
1293 | .body(())?; | |
1294 | ||
1295 | Ok(request) | |
1296 | } | |
1297 | } | |
1298 | } | |
1299 | ||
/// Hyper connector that uses plain TCP for `http` destinations and an
/// openssl TLS session for `https` destinations.
pub struct HttpsConnector {
    // Underlying TCP connector (with the http-only scheme check disabled).
    http: HttpConnector,
    // TLS configuration used to wrap https connections.
    ssl_connector: SslConnector,
}
1304 | ||
1305 | impl HttpsConnector { | |
1306 | pub fn with_connector(mut http: HttpConnector, ssl_connector: SslConnector) -> Self { | |
1307 | http.enforce_http(false); | |
1308 | ||
1309 | Self { | |
1310 | http, | |
1311 | ssl_connector, | |
1312 | } | |
1313 | } | |
1314 | } | |
1315 | ||
/// Transport that is either a plain TCP stream (http) or a TLS-wrapped
/// TCP stream (https).
type MaybeTlsStream = EitherStream<
    tokio::net::TcpStream,
    tokio_openssl::SslStream<tokio::net::TcpStream>,
>;
1320 | ||
impl hyper::client::connect::Connect for HttpsConnector {
    type Transport = MaybeTlsStream;
    type Error = Error;
    // Boxed twice: the inner `Box::pin` yields a pinned box (which is
    // `Unpin`), so the outer trait-object `Box` can require `Unpin`.
    type Future = Box<dyn Future<Output = Result<(
        Self::Transport,
        hyper::client::connect::Connected,
    ), Error>> + Send + Unpin + 'static>;

    /// Open a TCP connection to `dst`; for `https` destinations the stream
    /// is additionally wrapped in a TLS session (handshake host taken from
    /// `dst.host()`).
    fn connect(&self, dst: hyper::client::connect::Destination) -> Self::Future {
        let is_https = dst.scheme() == "https";
        let host = dst.host().to_string();

        // Capture the TLS config and the TCP connect future up front so the
        // async block below does not borrow `self`.
        let config = self.ssl_connector.configure();
        let conn = self.http.connect(dst);

        Box::new(Box::pin(async move {
            let (conn, connected) = conn.await?;
            if is_https {
                let conn = tokio_openssl::connect(config?, &host, conn).await?;
                Ok((MaybeTlsStream::Right(conn), connected))
            } else {
                Ok((MaybeTlsStream::Left(conn), connected))
            }
        }))
    }
}