1 use std
::collections
::HashSet
;
3 use std
::sync
::atomic
::{AtomicUsize, Ordering}
;
4 use std
::sync
::{Arc, Mutex}
;
6 use chrono
::{DateTime, Utc}
;
9 use futures
::stream
::Stream
;
11 use http
::header
::HeaderValue
;
12 use http
::{Request, Response}
;
14 use hyper
::client
::{Client, HttpConnector}
;
15 use openssl
::ssl
::{SslConnector, SslMethod}
;
16 use serde_json
::{json, Value}
;
17 use tokio
::io
::AsyncReadExt
;
18 use tokio
::sync
::{mpsc, oneshot}
;
19 use url
::percent_encoding
::{percent_encode, DEFAULT_ENCODE_SET}
;
20 use xdg
::BaseDirectories
;
24 fs
::{file_get_json, file_set_contents}
,
27 use super::merge_known_chunks
::{MergedChunkInfo, MergeKnownChunks}
;
28 use super::pipe_to_stream
::PipeToSendStream
;
30 use crate::tools
::async_io
::EitherStream
;
31 use crate::tools
::futures
::{cancellable, Canceller}
;
32 use crate::tools
::{self, tty, BroadcastFuture}
;
41 /// HTTP(S) API client
42 pub struct HttpClient
{
43 client
: Client
<HttpsConnector
>,
45 auth
: BroadcastFuture
<AuthInfo
>,
48 /// Delete stored ticket data (logout)
49 pub fn delete_ticket_info(server
: &str, username
: &str) -> Result
<(), Error
> {
51 let base
= BaseDirectories
::with_prefix("proxmox-backup")?
;
53 // usually /run/user/<uid>/...
54 let path
= base
.place_runtime_file("tickets")?
;
56 let mode
= nix
::sys
::stat
::Mode
::from_bits_truncate(0o0600);
58 let mut data
= file_get_json(&path
, Some(json
!({}
)))?
;
60 if let Some(map
) = data
[server
].as_object_mut() {
64 file_set_contents(path
, data
.to_string().as_bytes(), Some(mode
))?
;
69 fn store_ticket_info(server
: &str, username
: &str, ticket
: &str, token
: &str) -> Result
<(), Error
> {
71 let base
= BaseDirectories
::with_prefix("proxmox-backup")?
;
73 // usually /run/user/<uid>/...
74 let path
= base
.place_runtime_file("tickets")?
;
76 let mode
= nix
::sys
::stat
::Mode
::from_bits_truncate(0o0600);
78 let mut data
= file_get_json(&path
, Some(json
!({}
)))?
;
80 let now
= Utc
::now().timestamp();
82 data
[server
][username
] = json
!({ "timestamp": now, "ticket": ticket, "token": token}
);
84 let mut new_data
= json
!({}
);
86 let ticket_lifetime
= tools
::ticket
::TICKET_LIFETIME
- 60;
88 let empty
= serde_json
::map
::Map
::new();
89 for (server
, info
) in data
.as_object().unwrap_or(&empty
) {
90 for (_user
, uinfo
) in info
.as_object().unwrap_or(&empty
) {
91 if let Some(timestamp
) = uinfo
["timestamp"].as_i64() {
92 let age
= now
- timestamp
;
93 if age
< ticket_lifetime
{
94 new_data
[server
][username
] = uinfo
.clone();
100 file_set_contents(path
, new_data
.to_string().as_bytes(), Some(mode
))?
;
105 fn load_ticket_info(server
: &str, username
: &str) -> Option
<(String
, String
)> {
106 let base
= BaseDirectories
::with_prefix("proxmox-backup").ok()?
;
108 // usually /run/user/<uid>/...
109 let path
= base
.place_runtime_file("tickets").ok()?
;
110 let data
= file_get_json(&path
, None
).ok()?
;
111 let now
= Utc
::now().timestamp();
112 let ticket_lifetime
= tools
::ticket
::TICKET_LIFETIME
- 60;
113 let uinfo
= data
[server
][username
].as_object()?
;
114 let timestamp
= uinfo
["timestamp"].as_i64()?
;
115 let age
= now
- timestamp
;
117 if age
< ticket_lifetime
{
118 let ticket
= uinfo
["ticket"].as_str()?
;
119 let token
= uinfo
["token"].as_str()?
;
120 Some((ticket
.to_owned(), token
.to_owned()))
128 pub fn new(server
: &str, username
: &str) -> Result
<Self, Error
> {
129 let client
= Self::build_client();
131 let password
= if let Some((ticket
, _token
)) = load_ticket_info(server
, username
) {
134 Self::get_password(&username
)?
137 let login
= Self::credentials(client
.clone(), server
.to_owned(), username
.to_owned(), password
);
141 server
: String
::from(server
),
142 auth
: BroadcastFuture
::new(login
),
148 /// Login is done on demand, so this is only required if you need
149 /// access to authentication data in 'AuthInfo'.
150 pub fn login(&self) -> impl Future
<Output
= Result
<AuthInfo
, Error
>> {
154 fn get_password(_username
: &str) -> Result
<String
, Error
> {
155 use std
::env
::VarError
::*;
156 match std
::env
::var("PBS_PASSWORD") {
157 Ok(p
) => return Ok(p
),
158 Err(NotUnicode(_
)) => bail
!("PBS_PASSWORD contains bad characters"),
160 // Try another method
164 // If we're on a TTY, query the user for a password
165 if tty
::stdin_isatty() {
166 return Ok(String
::from_utf8(tty
::read_password("Password: ")?
)?
);
169 bail
!("no password input mechanism available");
172 fn build_client() -> Client
<HttpsConnector
> {
174 let mut ssl_connector_builder
= SslConnector
::builder(SslMethod
::tls()).unwrap();
176 ssl_connector_builder
.set_verify(openssl
::ssl
::SslVerifyMode
::NONE
); // fixme!
178 let mut httpc
= hyper
::client
::HttpConnector
::new();
179 httpc
.set_nodelay(true); // important for h2 download performance!
180 httpc
.set_recv_buffer_size(Some(1024*1024)); //important for h2 download performance!
181 httpc
.enforce_http(false); // we want https...
183 let https
= HttpsConnector
::with_connector(httpc
, ssl_connector_builder
.build());
186 //.http2_initial_stream_window_size( (1 << 31) - 2)
187 //.http2_initial_connection_window_size( (1 << 31) - 2)
188 .build
::<_
, Body
>(https
)
191 pub fn request(&self, mut req
: Request
<Body
>) -> impl Future
<Output
= Result
<Value
, Error
>> {
193 let login
= self.auth
.listen();
195 let client
= self.client
.clone();
197 login
.and_then(move |auth
| {
199 let enc_ticket
= format
!("PBSAuthCookie={}", percent_encode(auth
.ticket
.as_bytes(), DEFAULT_ENCODE_SET
));
200 req
.headers_mut().insert("Cookie", HeaderValue
::from_str(&enc_ticket
).unwrap());
201 req
.headers_mut().insert("CSRFPreventionToken", HeaderValue
::from_str(&auth
.token
).unwrap());
203 let request
= Self::api_request(client
, req
);
213 ) -> impl Future
<Output
= Result
<Value
, Error
>> {
214 let req
= Self::request_builder(&self.server
, "GET", path
, data
).unwrap();
222 ) -> impl Future
<Output
= Result
<Value
, Error
>> {
223 let req
= Self::request_builder(&self.server
, "DELETE", path
, data
).unwrap();
231 ) -> impl Future
<Output
= Result
<Value
, Error
>> {
232 let req
= Self::request_builder(&self.server
, "POST", path
, data
).unwrap();
236 pub fn download
<W
: Write
+ Send
+ '
static>(
240 ) -> impl Future
<Output
= Result
<W
, Error
>> {
241 let mut req
= Self::request_builder(&self.server
, "GET", path
, None
).unwrap();
243 let login
= self.auth
.listen();
245 let client
= self.client
.clone();
247 login
.and_then(move |auth
| {
249 let enc_ticket
= format
!("PBSAuthCookie={}", percent_encode(auth
.ticket
.as_bytes(), DEFAULT_ENCODE_SET
));
250 req
.headers_mut().insert("Cookie", HeaderValue
::from_str(&enc_ticket
).unwrap());
253 .map_err(Error
::from
)
255 let status
= resp
.status();
256 if !status
.is_success() {
257 future
::Either
::Left(
258 HttpClient
::api_response(resp
)
259 .map(|_
| Err(format_err
!("unknown error")))
262 future
::Either
::Right(
264 .map_err(Error
::from
)
265 .try_fold(output
, move |mut acc
, chunk
| async
move {
266 acc
.write_all(&chunk
)?
;
281 ) -> impl Future
<Output
= Result
<Value
, Error
>> {
283 let path
= path
.trim_matches('
/'
);
284 let mut url
= format
!("https://{}:8007/{}", &self.server
, path
);
286 if let Some(data
) = data
{
287 let query
= tools
::json_object_to_query(data
).unwrap();
289 url
.push_str(&query
);
292 let url
: Uri
= url
.parse().unwrap();
294 let req
= Request
::builder()
297 .header("User-Agent", "proxmox-backup-client/1.0")
298 .header("Content-Type", content_type
)
299 .body(body
).unwrap();
309 backup_time
: DateTime
<Utc
>,
311 ) -> impl Future
<Output
= Result
<Arc
<BackupClient
>, Error
>> {
314 "backup-type": backup_type
,
315 "backup-id": backup_id
,
316 "backup-time": backup_time
.timestamp(),
321 let req
= Self::request_builder(&self.server
, "GET", "/api2/json/backup", Some(param
)).unwrap();
323 self.start_h2_connection(req
, String
::from(PROXMOX_BACKUP_PROTOCOL_ID_V1
!()))
324 .map_ok(|(h2
, canceller
)| BackupClient
::new(h2
, canceller
))
327 pub fn start_backup_reader(
332 backup_time
: DateTime
<Utc
>,
334 ) -> impl Future
<Output
= Result
<Arc
<BackupReader
>, Error
>> {
337 "backup-type": backup_type
,
338 "backup-id": backup_id
,
339 "backup-time": backup_time
.timestamp(),
343 let req
= Self::request_builder(&self.server
, "GET", "/api2/json/reader", Some(param
)).unwrap();
345 self.start_h2_connection(req
, String
::from(PROXMOX_BACKUP_READER_PROTOCOL_ID_V1
!()))
346 .map_ok(|(h2
, canceller
)| BackupReader
::new(h2
, canceller
))
349 pub fn start_h2_connection(
351 mut req
: Request
<Body
>,
352 protocol_name
: String
,
353 ) -> impl Future
<Output
= Result
<(H2Client
, Canceller
), Error
>> {
355 let login
= self.auth
.listen();
356 let client
= self.client
.clone();
358 login
.and_then(move |auth
| {
360 let enc_ticket
= format
!("PBSAuthCookie={}", percent_encode(auth
.ticket
.as_bytes(), DEFAULT_ENCODE_SET
));
361 req
.headers_mut().insert("Cookie", HeaderValue
::from_str(&enc_ticket
).unwrap());
362 req
.headers_mut().insert("UPGRADE", HeaderValue
::from_str(&protocol_name
).unwrap());
365 .map_err(Error
::from
)
368 let status
= resp
.status();
369 if status
!= http
::StatusCode
::SWITCHING_PROTOCOLS
{
370 future
::Either
::Left(
371 Self::api_response(resp
)
372 .map(|_
| Err(format_err
!("unknown error")))
375 future
::Either
::Right(
379 .map_err(Error
::from
)
383 .and_then(|upgraded
| {
384 let max_window_size
= (1 << 31) - 2;
386 h2
::client
::Builder
::new()
387 .initial_connection_window_size(max_window_size
)
388 .initial_window_size(max_window_size
)
389 .max_frame_size(4*1024*1024)
391 .map_err(Error
::from
)
393 .and_then(|(h2
, connection
)| async
move {
394 let connection
= connection
395 .map_err(|_
| panic
!("HTTP/2.0 connection failed"));
397 let (connection
, canceller
) = cancellable(connection
)?
;
398 // A cancellable future returns an Option which is None when cancelled and
399 // Some when it finished instead, since we don't care about the return type we
400 // need to map it away:
401 let connection
= connection
.map(|_
| ());
403 // Spawn a new task to drive the connection state
404 hyper
::rt
::spawn(connection
);
406 // Wait until the `SendRequest` handle has available capacity.
407 let c
= h2
.ready().await?
;
408 Ok((H2Client
::new(c
), canceller
))
414 client
: Client
<HttpsConnector
>,
418 ) -> Box
<dyn Future
<Output
= Result
<AuthInfo
, Error
>> + Send
> {
419 Box
::new(async
move {
420 let data
= json
!({ "username": username, "password": password }
);
421 let req
= Self::request_builder(&server
, "POST", "/api2/json/access/ticket", Some(data
)).unwrap();
422 let cred
= Self::api_request(client
, req
).await?
;
423 let auth
= AuthInfo
{
424 username
: cred
["data"]["username"].as_str().unwrap().to_owned(),
425 ticket
: cred
["data"]["ticket"].as_str().unwrap().to_owned(),
426 token
: cred
["data"]["CSRFPreventionToken"].as_str().unwrap().to_owned(),
429 let _
= store_ticket_info(&server
, &auth
.username
, &auth
.ticket
, &auth
.token
);
435 async
fn api_response(response
: Response
<Body
>) -> Result
<Value
, Error
> {
436 let status
= response
.status();
442 let text
= String
::from_utf8(data
.to_vec()).unwrap();
443 if status
.is_success() {
445 let value
: Value
= serde_json
::from_str(&text
)?
;
451 bail
!("HTTP Error {}: {}", status
, text
);
456 client
: Client
<HttpsConnector
>,
458 ) -> impl Future
<Output
= Result
<Value
, Error
>> {
461 .map_err(Error
::from
)
462 .and_then(Self::api_response
)
465 pub fn request_builder(server
: &str, method
: &str, path
: &str, data
: Option
<Value
>) -> Result
<Request
<Body
>, Error
> {
466 let path
= path
.trim_matches('
/'
);
467 let url
: Uri
= format
!("https://{}:8007/{}", server
, path
).parse()?
;
469 if let Some(data
) = data
{
470 if method
== "POST" {
471 let request
= Request
::builder()
474 .header("User-Agent", "proxmox-backup-client/1.0")
475 .header(hyper
::header
::CONTENT_TYPE
, "application/json")
476 .body(Body
::from(data
.to_string()))?
;
479 let query
= tools
::json_object_to_query(data
)?
;
480 let url
: Uri
= format
!("https://{}:8007/{}?{}", server
, path
, query
).parse()?
;
481 let request
= Request
::builder()
484 .header("User-Agent", "proxmox-backup-client/1.0")
485 .header(hyper
::header
::CONTENT_TYPE
, "application/x-www-form-urlencoded")
486 .body(Body
::empty())?
;
491 let request
= Request
::builder()
494 .header("User-Agent", "proxmox-backup-client/1.0")
495 .header(hyper
::header
::CONTENT_TYPE
, "application/x-www-form-urlencoded")
496 .body(Body
::empty())?
;
503 pub struct BackupReader
{
505 canceller
: Canceller
,
508 impl Drop
for BackupReader
{
511 self.canceller
.cancel();
517 pub fn new(h2
: H2Client
, canceller
: Canceller
) -> Arc
<Self> {
518 Arc
::new(Self { h2, canceller: canceller }
)
524 param
: Option
<Value
>,
525 ) -> impl Future
<Output
= Result
<Value
, Error
>> {
526 self.h2
.get(path
, param
)
532 param
: Option
<Value
>,
533 ) -> impl Future
<Output
= Result
<Value
, Error
>> {
534 self.h2
.put(path
, param
)
540 param
: Option
<Value
>,
541 ) -> impl Future
<Output
= Result
<Value
, Error
>> {
542 self.h2
.post(path
, param
)
545 pub fn download
<W
: Write
+ Send
+ '
static>(
549 ) -> impl Future
<Output
= Result
<W
, Error
>> {
550 let path
= "download";
551 let param
= json
!({ "file-name": file_name }
);
552 self.h2
.download(path
, Some(param
), output
)
555 pub fn speedtest
<W
: Write
+ Send
+ '
static>(
558 ) -> impl Future
<Output
= Result
<W
, Error
>> {
559 self.h2
.download("speedtest", None
, output
)
562 pub fn download_chunk
<W
: Write
+ Send
+ '
static>(
566 ) -> impl Future
<Output
= Result
<W
, Error
>> {
568 let param
= json
!({ "digest": digest_to_hex(digest) }
);
569 self.h2
.download(path
, Some(param
), output
)
572 pub fn force_close(self) {
573 self.canceller
.cancel();
577 pub struct BackupClient
{
579 canceller
: Canceller
,
582 impl Drop
for BackupClient
{
585 self.canceller
.cancel();
589 pub struct BackupStats
{
595 pub fn new(h2
: H2Client
, canceller
: Canceller
) -> Arc
<Self> {
596 Arc
::new(Self { h2, canceller }
)
602 param
: Option
<Value
>,
603 ) -> impl Future
<Output
= Result
<Value
, Error
>> {
604 self.h2
.get(path
, param
)
610 param
: Option
<Value
>,
611 ) -> impl Future
<Output
= Result
<Value
, Error
>> {
612 self.h2
.put(path
, param
)
618 param
: Option
<Value
>,
619 ) -> impl Future
<Output
= Result
<Value
, Error
>> {
620 self.h2
.post(path
, param
)
623 pub fn finish(self: Arc
<Self>) -> impl Future
<Output
= Result
<(), Error
>> {
625 .post("finish", None
)
627 self.canceller
.cancel();
631 pub fn force_close(self) {
632 self.canceller
.cancel();
635 pub fn upload_blob
<R
: std
::io
::Read
>(
639 ) -> impl Future
<Output
= Result
<BackupStats
, Error
>> {
641 let h2
= self.h2
.clone();
642 let file_name
= file_name
.to_owned();
645 let mut raw_data
= Vec
::new();
646 // fixme: avoid loading into memory
647 reader
.read_to_end(&mut raw_data
)?
;
649 let csum
= openssl
::sha
::sha256(&raw_data
);
650 let param
= json
!({"encoded-size": raw_data.len(), "file-name": file_name }
);
651 let size
= raw_data
.len() as u64; // fixme: should be decoded size instead??
652 let _value
= h2
.upload("blob", Some(param
), raw_data
).await?
;
653 Ok(BackupStats { size, csum }
)
657 pub fn upload_blob_from_data(
661 crypt_config
: Option
<Arc
<CryptConfig
>>,
664 ) -> impl Future
<Output
= Result
<BackupStats
, Error
>> {
666 let h2
= self.h2
.clone();
667 let file_name
= file_name
.to_owned();
668 let size
= data
.len() as u64;
671 let blob
= if let Some(crypt_config
) = crypt_config
{
673 DataBlob
::create_signed(&data
, crypt_config
, compress
)?
675 DataBlob
::encode(&data
, Some(crypt_config
.clone()), compress
)?
678 DataBlob
::encode(&data
, None
, compress
)?
681 let raw_data
= blob
.into_inner();
683 let csum
= openssl
::sha
::sha256(&raw_data
);
684 let param
= json
!({"encoded-size": raw_data.len(), "file-name": file_name }
);
685 let _value
= h2
.upload("blob", Some(param
), raw_data
).await?
;
686 Ok(BackupStats { size, csum }
)
690 pub fn upload_blob_from_file
<P
: AsRef
<std
::path
::Path
>>(
694 crypt_config
: Option
<Arc
<CryptConfig
>>,
696 ) -> impl Future
<Output
= Result
<BackupStats
, Error
>> {
698 let h2
= self.h2
.clone();
699 let file_name
= file_name
.to_owned();
700 let src_path
= src_path
.as_ref().to_owned();
703 let mut file
= tokio
::fs
::File
::open(src_path
.clone())
705 .map_err(move |err
| format_err
!("unable to open file {:?} - {}", src_path
, err
))?
;
707 let mut contents
= Vec
::new();
708 file
.read_to_end(&mut contents
).await
.map_err(Error
::from
)?
;
710 let size
: u64 = contents
.len() as u64;
711 let blob
= DataBlob
::encode(&contents
, crypt_config
, compress
)?
;
712 let raw_data
= blob
.into_inner();
713 let csum
= openssl
::sha
::sha256(&raw_data
);
715 "encoded-size": raw_data
.len(),
716 "file-name": file_name
,
718 h2
.upload("blob", Some(param
), raw_data
).await?
;
719 Ok(BackupStats { size, csum }
)
723 pub fn upload_stream(
726 stream
: impl Stream
<Item
= Result
<bytes
::BytesMut
, Error
>>,
728 fixed_size
: Option
<u64>,
729 crypt_config
: Option
<Arc
<CryptConfig
>>,
730 ) -> impl Future
<Output
= Result
<BackupStats
, Error
>> {
731 let known_chunks
= Arc
::new(Mutex
::new(HashSet
::new()));
733 let mut param
= json
!({ "archive-name": archive_name }
);
734 if let Some(size
) = fixed_size
{
735 param
["size"] = size
.into();
738 let index_path
= format
!("{}_index", prefix
);
739 let close_path
= format
!("{}_close", prefix
);
741 let prefix
= prefix
.to_owned();
743 let h2
= self.h2
.clone();
745 let download_future
=
746 Self::download_chunk_list(h2
.clone(), &index_path
, archive_name
, known_chunks
.clone());
749 download_future
.await?
;
751 let wid
= h2
.post(&index_path
, Some(param
)).await?
.as_u64().unwrap();
753 let (chunk_count
, size
, _speed
, csum
) = Self::upload_chunk_info_stream(
758 known_chunks
.clone(),
765 "chunk-count": chunk_count
,
768 let _value
= h2
.post(&close_path
, Some(param
)).await?
;
776 fn response_queue() -> (
777 mpsc
::Sender
<h2
::client
::ResponseFuture
>,
778 oneshot
::Receiver
<Result
<(), Error
>>
780 let (verify_queue_tx
, verify_queue_rx
) = mpsc
::channel(100);
781 let (verify_result_tx
, verify_result_rx
) = oneshot
::channel();
786 .try_for_each(|response
: h2
::client
::ResponseFuture
| {
788 .map_err(Error
::from
)
789 .and_then(H2Client
::h2api_response
)
790 .map_ok(|result
| println
!("RESPONSE: {:?}", result
))
791 .map_err(|err
| format_err
!("pipelined request failed: {}", err
))
794 let _ignore_closed_channel
= verify_result_tx
.send(result
);
798 (verify_queue_tx
, verify_result_rx
)
801 fn append_chunk_queue(h2
: H2Client
, wid
: u64, path
: String
) -> (
802 mpsc
::Sender
<(MergedChunkInfo
, Option
<h2
::client
::ResponseFuture
>)>,
803 oneshot
::Receiver
<Result
<(), Error
>>
805 let (verify_queue_tx
, verify_queue_rx
) = mpsc
::channel(64);
806 let (verify_result_tx
, verify_result_rx
) = oneshot
::channel();
808 let h2_2
= h2
.clone();
813 .and_then(move |(merged_chunk_info
, response
): (MergedChunkInfo
, Option
<h2
::client
::ResponseFuture
>)| {
814 match (response
, merged_chunk_info
) {
815 (Some(response
), MergedChunkInfo
::Known(list
)) => {
816 future
::Either
::Left(
818 .map_err(Error
::from
)
819 .and_then(H2Client
::h2api_response
)
820 .and_then(move |_result
| {
821 future
::ok(MergedChunkInfo
::Known(list
))
825 (None
, MergedChunkInfo
::Known(list
)) => {
826 future
::Either
::Right(future
::ok(MergedChunkInfo
::Known(list
)))
831 .merge_known_chunks()
832 .and_then(move |merged_chunk_info
| {
833 match merged_chunk_info
{
834 MergedChunkInfo
::Known(chunk_list
) => {
835 let mut digest_list
= vec
![];
836 let mut offset_list
= vec
![];
837 for (offset
, digest
) in chunk_list
{
838 //println!("append chunk {} (offset {})", proxmox::tools::digest_to_hex(&digest), offset);
839 digest_list
.push(digest_to_hex(&digest
));
840 offset_list
.push(offset
);
842 println
!("append chunks list len ({})", digest_list
.len());
843 let param
= json
!({ "wid": wid, "digest-list": digest_list, "offset-list": offset_list }
);
844 let mut request
= H2Client
::request_builder("localhost", "PUT", &path
, None
).unwrap();
845 request
.headers_mut().insert(hyper
::header
::CONTENT_TYPE
, HeaderValue
::from_static("application/json"));
846 let param_data
= bytes
::Bytes
::from(param
.to_string().as_bytes());
847 let upload_data
= Some(param_data
);
848 h2_2
.send_request(request
, upload_data
)
849 .and_then(move |response
| {
851 .map_err(Error
::from
)
852 .and_then(H2Client
::h2api_response
)
855 .map_err(|err
| format_err
!("pipelined request failed: {}", err
))
860 .try_for_each(|_
| future
::ok(()))
862 let _ignore_closed_channel
= verify_result_tx
.send(result
);
866 (verify_queue_tx
, verify_result_rx
)
869 fn download_chunk_list(
873 known_chunks
: Arc
<Mutex
<HashSet
<[u8;32]>>>,
874 ) -> impl Future
<Output
= Result
<(), Error
>> {
876 let param
= json
!({ "archive-name": archive_name }
);
877 let request
= H2Client
::request_builder("localhost", "GET", path
, Some(param
)).unwrap();
879 h2
.send_request(request
, None
)
880 .and_then(move |response
| {
882 .map_err(Error
::from
)
883 .and_then(move |resp
| {
884 let status
= resp
.status();
886 if !status
.is_success() {
887 future
::Either
::Left(
888 H2Client
::h2api_response(resp
)
889 .map(|_
| Err(format_err
!("unknown error")))
892 future
::Either
::Right(future
::ok(resp
.into_body()))
895 .and_then(move |mut body
| {
897 let mut release_capacity
= body
.release_capacity().clone();
899 DigestListDecoder
::new(body
.map_err(Error
::from
))
900 .try_for_each(move |chunk
| {
901 let _
= release_capacity
.release_capacity(chunk
.len());
902 println
!("GOT DOWNLOAD {}", digest_to_hex(&chunk
));
903 known_chunks
.lock().unwrap().insert(chunk
);
904 futures
::future
::ok(())
910 fn upload_chunk_info_stream(
913 stream
: impl Stream
<Item
= Result
<bytes
::BytesMut
, Error
>>,
915 known_chunks
: Arc
<Mutex
<HashSet
<[u8;32]>>>,
916 crypt_config
: Option
<Arc
<CryptConfig
>>,
917 ) -> impl Future
<Output
= Result
<(usize, usize, usize, [u8; 32]), Error
>> {
919 let repeat
= Arc
::new(AtomicUsize
::new(0));
920 let repeat2
= repeat
.clone();
922 let stream_len
= Arc
::new(AtomicUsize
::new(0));
923 let stream_len2
= stream_len
.clone();
925 let append_chunk_path
= format
!("{}_index", prefix
);
926 let upload_chunk_path
= format
!("{}_chunk", prefix
);
928 let (upload_queue
, upload_result
) =
929 Self::append_chunk_queue(h2
.clone(), wid
, append_chunk_path
.to_owned());
931 let start_time
= std
::time
::Instant
::now();
933 let index_csum
= Arc
::new(Mutex
::new(Some(openssl
::sha
::Sha256
::new())));
934 let index_csum_2
= index_csum
.clone();
937 .and_then(move |data
| {
939 let chunk_len
= data
.len();
941 repeat
.fetch_add(1, Ordering
::SeqCst
);
942 let offset
= stream_len
.fetch_add(chunk_len
, Ordering
::SeqCst
) as u64;
944 let mut chunk_builder
= DataChunkBuilder
::new(data
.as_ref())
947 if let Some(ref crypt_config
) = crypt_config
{
948 chunk_builder
= chunk_builder
.crypt_config(crypt_config
);
951 let mut known_chunks
= known_chunks
.lock().unwrap();
952 let digest
= chunk_builder
.digest();
954 let mut guard
= index_csum
.lock().unwrap();
955 let csum
= guard
.as_mut().unwrap();
957 let chunk_end
= offset
+ chunk_len
as u64;
959 csum
.update(&chunk_end
.to_le_bytes());
962 let chunk_is_known
= known_chunks
.contains(digest
);
964 future
::ok(MergedChunkInfo
::Known(vec
![(offset
, *digest
)]))
966 known_chunks
.insert(*digest
);
967 future
::ready(chunk_builder
969 .map(move |chunk
| MergedChunkInfo
::New(ChunkInfo
{
971 chunk_len
: chunk_len
as u64,
977 .merge_known_chunks()
978 .try_for_each(move |merged_chunk_info
| {
980 if let MergedChunkInfo
::New(chunk_info
) = merged_chunk_info
{
981 let offset
= chunk_info
.offset
;
982 let digest
= *chunk_info
.chunk
.digest();
983 let digest_str
= digest_to_hex(&digest
);
985 println
!("upload new chunk {} ({} bytes, offset {})", digest_str
,
986 chunk_info
.chunk_len
, offset
);
988 let chunk_data
= chunk_info
.chunk
.raw_data();
991 "digest": digest_str
,
992 "size": chunk_info
.chunk_len
,
993 "encoded-size": chunk_data
.len(),
996 let request
= H2Client
::request_builder("localhost", "POST", &upload_chunk_path
, Some(param
)).unwrap();
997 let upload_data
= Some(bytes
::Bytes
::from(chunk_data
));
999 let new_info
= MergedChunkInfo
::Known(vec
![(offset
, digest
)]);
1001 let mut upload_queue
= upload_queue
.clone();
1002 future
::Either
::Left(h2
1003 .send_request(request
, upload_data
)
1004 .and_then(move |response
| async
move {
1006 .send((new_info
, Some(response
)))
1008 .map_err(Error
::from
)
1012 let mut upload_queue
= upload_queue
.clone();
1013 future
::Either
::Right(async
move {
1015 .send((merged_chunk_info
, None
))
1017 .map_err(Error
::from
)
1021 .then(move |result
| async
move {
1022 upload_result
.await?
.and(result
)
1024 .and_then(move |_
| {
1025 let repeat
= repeat2
.load(Ordering
::SeqCst
);
1026 let stream_len
= stream_len2
.load(Ordering
::SeqCst
);
1027 let speed
= ((stream_len
*1000000)/(1024*1024))/(start_time
.elapsed().as_micros() as usize);
1028 println
!("Uploaded {} chunks in {} seconds ({} MB/s).", repeat
, start_time
.elapsed().as_secs(), speed
);
1030 println
!("Average chunk size was {} bytes.", stream_len
/repeat
);
1031 println
!("Time per request: {} microseconds.", (start_time
.elapsed().as_micros())/(repeat
as u128
));
1034 let mut guard
= index_csum_2
.lock().unwrap();
1035 let csum
= guard
.take().unwrap().finish();
1037 futures
::future
::ok((repeat
, stream_len
, speed
, csum
))
1041 pub fn upload_speedtest(&self) -> impl Future
<Output
= Result
<usize, Error
>> {
1043 let mut data
= vec
![];
1044 // generate pseudo random byte sequence
1045 for i
in 0..1024*1024 {
1047 let byte
= ((i
>> (j
<<3))&0xff) as u8;
1052 let item_len
= data
.len();
1054 let repeat
= Arc
::new(AtomicUsize
::new(0));
1055 let repeat2
= repeat
.clone();
1057 let (upload_queue
, upload_result
) = Self::response_queue();
1059 let start_time
= std
::time
::Instant
::now();
1061 let h2
= self.h2
.clone();
1063 futures
::stream
::repeat(data
)
1064 .take_while(move |_
| {
1065 let repeat
= Arc
::clone(&repeat
);
1067 repeat
.fetch_add(1, Ordering
::SeqCst
);
1068 start_time
.elapsed().as_secs() < 5
1072 .try_for_each(move |data
| {
1073 let h2
= h2
.clone();
1075 let mut upload_queue
= upload_queue
.clone();
1077 println
!("send test data ({} bytes)", data
.len());
1078 let request
= H2Client
::request_builder("localhost", "POST", "speedtest", None
).unwrap();
1079 h2
.send_request(request
, Some(bytes
::Bytes
::from(data
)))
1080 .and_then(move |response
| async
move {
1084 .map_err(Error
::from
)
1087 .then(move |result
| async
move {
1088 println
!("RESULT {:?}", result
);
1089 upload_result
.await?
.and(result
)
1091 .and_then(move |_
| {
1092 let repeat
= repeat2
.load(Ordering
::SeqCst
);
1093 println
!("Uploaded {} chunks in {} seconds.", repeat
, start_time
.elapsed().as_secs());
1094 let speed
= ((item_len
*1000000*(repeat
as usize))/(1024*1024))/(start_time
.elapsed().as_micros() as usize);
1096 println
!("Time per request: {} microseconds.", (start_time
.elapsed().as_micros())/(repeat
as u128
));
1098 futures
::future
::ok(speed
)
1104 pub struct H2Client
{
1105 h2
: h2
::client
::SendRequest
<bytes
::Bytes
>,
1110 pub fn new(h2
: h2
::client
::SendRequest
<bytes
::Bytes
>) -> Self {
1114 pub fn get(&self, path
: &str, param
: Option
<Value
>) -> impl Future
<Output
= Result
<Value
, Error
>> {
1115 let req
= Self::request_builder("localhost", "GET", path
, param
).unwrap();
1119 pub fn put(&self, path
: &str, param
: Option
<Value
>) -> impl Future
<Output
= Result
<Value
, Error
>> {
1120 let req
= Self::request_builder("localhost", "PUT", path
, param
).unwrap();
1124 pub fn post(&self, path
: &str, param
: Option
<Value
>) -> impl Future
<Output
= Result
<Value
, Error
>> {
1125 let req
= Self::request_builder("localhost", "POST", path
, param
).unwrap();
1129 pub fn download
<W
: Write
+ Send
+ '
static>(
1132 param
: Option
<Value
>,
1134 ) -> impl Future
<Output
= Result
<W
, Error
>> {
1135 let request
= Self::request_builder("localhost", "GET", path
, param
).unwrap();
1137 self.send_request(request
, None
)
1138 .and_then(move |response
| {
1140 .map_err(Error
::from
)
1141 .and_then(move |resp
| {
1142 let status
= resp
.status();
1143 if !status
.is_success() {
1144 future
::Either
::Left(
1145 H2Client
::h2api_response(resp
)
1146 .map(|_
| Err(format_err
!("unknown error")))
1149 let mut body
= resp
.into_body();
1150 let release_capacity
= body
.release_capacity().clone();
1152 future
::Either
::Right(
1154 .map_err(Error
::from
)
1155 .try_fold(output
, move |mut acc
, chunk
| {
1156 let mut release_capacity
= release_capacity
.clone();
1158 let _
= release_capacity
.release_capacity(chunk
.len());
1159 acc
.write_all(&chunk
)?
;
1172 param
: Option
<Value
>,
1174 ) -> impl Future
<Output
= Result
<Value
, Error
>> {
1175 let request
= Self::request_builder("localhost", "POST", path
, param
).unwrap();
1179 .map_err(Error
::from
)
1180 .and_then(move |mut send_request
| {
1181 let (response
, stream
) = send_request
.send_request(request
, false).unwrap();
1182 PipeToSendStream
::new(bytes
::Bytes
::from(data
), stream
)
1185 .map_err(Error
::from
)
1186 .and_then(Self::h2api_response
)
1193 request
: Request
<()>,
1194 ) -> impl Future
<Output
= Result
<Value
, Error
>> {
1196 self.send_request(request
, None
)
1197 .and_then(move |response
| {
1199 .map_err(Error
::from
)
1200 .and_then(Self::h2api_response
)
1206 request
: Request
<()>,
1207 data
: Option
<bytes
::Bytes
>,
1208 ) -> impl Future
<Output
= Result
<h2
::client
::ResponseFuture
, Error
>> {
1212 .map_err(Error
::from
)
1213 .and_then(move |mut send_request
| {
1214 if let Some(data
) = data
{
1215 let (response
, stream
) = send_request
.send_request(request
, false).unwrap();
1216 future
::Either
::Left(PipeToSendStream
::new(data
, stream
)
1217 .and_then(move |_
| {
1218 future
::ok(response
)
1221 let (response
, _stream
) = send_request
.send_request(request
, true).unwrap();
1222 future
::Either
::Right(future
::ok(response
))
1228 response
: Response
<h2
::RecvStream
>,
1229 ) -> impl Future
<Output
= Result
<Value
, Error
>> {
1230 let status
= response
.status();
1232 let (_head
, mut body
) = response
.into_parts();
1234 // The `release_capacity` handle allows the caller to manage
1237 // Whenever data is received, the caller is responsible for
1238 // releasing capacity back to the server once it has freed
1239 // the data from memory.
1240 let mut release_capacity
= body
.release_capacity().clone();
1243 .map_ok(move |chunk
| {
1244 // Let the server send more data.
1245 let _
= release_capacity
.release_capacity(chunk
.len());
1249 .map_err(Error
::from
)
1250 .and_then(move |data
| async
move {
1251 let text
= String
::from_utf8(data
.to_vec()).unwrap();
1252 if status
.is_success() {
1254 let mut value
: Value
= serde_json
::from_str(&text
)?
;
1255 if let Some(map
) = value
.as_object_mut() {
1256 if let Some(data
) = map
.remove("data") {
1260 bail
!("got result without data property");
1265 bail
!("HTTP Error {}: {}", status
, text
);
1270 // Note: We always encode parameters with the url
1271 pub fn request_builder(server
: &str, method
: &str, path
: &str, data
: Option
<Value
>) -> Result
<Request
<()>, Error
> {
1272 let path
= path
.trim_matches('
/'
);
1274 if let Some(data
) = data
{
1275 let query
= tools
::json_object_to_query(data
)?
;
1276 // We detected a problem with hyper around 6000 characters - so we try to keep on the safe side
1277 if query
.len() > 4096 { bail!("h2 query data too large ({} bytes
) - please encode data inside body
", query.len()); }
1278 let url: Uri = format!("https
://{}:8007/{}?{}", server, path, query).parse()?;
1279 let request
= Request
::builder()
1282 .header("User-Agent", "proxmox-backup-client/1.0")
1283 .header(hyper
::header
::CONTENT_TYPE
, "application/x-www-form-urlencoded")
1287 let url
: Uri
= format
!("https://{}:8007/{}", server
, path
).parse()?
;
1288 let request
= Request
::builder()
1291 .header("User-Agent", "proxmox-backup-client/1.0")
1292 .header(hyper
::header
::CONTENT_TYPE
, "application/x-www-form-urlencoded")
1300 pub struct HttpsConnector
{
1301 http
: HttpConnector
,
1302 ssl_connector
: SslConnector
,
1305 impl HttpsConnector
{
1306 pub fn with_connector(mut http
: HttpConnector
, ssl_connector
: SslConnector
) -> Self {
1307 http
.enforce_http(false);
1316 type MaybeTlsStream
= EitherStream
<
1317 tokio
::net
::TcpStream
,
1318 tokio_openssl
::SslStream
<tokio
::net
::TcpStream
>,
1321 impl hyper
::client
::connect
::Connect
for HttpsConnector
{
1322 type Transport
= MaybeTlsStream
;
1324 type Future
= Box
<dyn Future
<Output
= Result
<(
1326 hyper
::client
::connect
::Connected
,
1327 ), Error
>> + Send
+ Unpin
+ '
static>;
1329 fn connect(&self, dst
: hyper
::client
::connect
::Destination
) -> Self::Future
{
1330 let is_https
= dst
.scheme() == "https";
1331 let host
= dst
.host().to_string();
1333 let config
= self.ssl_connector
.configure();
1334 let conn
= self.http
.connect(dst
);
1336 Box
::new(Box
::pin(async
move {
1337 let (conn
, connected
) = conn
.await?
;
1339 let conn
= tokio_openssl
::connect(config?
, &host
, conn
).await?
;
1340 Ok((MaybeTlsStream
::Right(conn
), connected
))
1342 Ok((MaybeTlsStream
::Left(conn
), connected
))