From 4428818412632eb4c9b955aa568152d7963fbee9 Mon Sep 17 00:00:00 2001
From: =?utf8?q?Fabian=20Gr=C3=BCnbichler?=
Date: Fri, 15 Jan 2021 14:38:27 +0100
Subject: [PATCH] clippy: remove unnecessary clones
MIME-Version: 1.0
Content-Type: text/plain; charset=utf8
Content-Transfer-Encoding: 8bit

and from::(T)

Signed-off-by: Fabian Grünbichler
---
 src/api2/access/role.rs              |  2 +-
 src/api2/access/user.rs              |  2 +-
 src/api2/admin/datastore.rs          | 12 ++++++------
 src/api2/admin/sync.rs               |  2 +-
 src/api2/backup.rs                   |  2 +-
 src/api2/config/datastore.rs         |  2 +-
 src/api2/config/remote.rs            |  2 +-
 src/api2/config/sync.rs              |  4 ++--
 src/api2/config/verify.rs            |  2 +-
 src/api2/helpers.rs                  |  2 +-
 src/api2/node/disks/directory.rs     |  2 +-
 src/api2/node/subscription.rs        |  2 +-
 src/api2/tape/backup.rs              |  2 +-
 src/api2/tape/restore.rs             |  3 +--
 src/backup/checksum_writer.rs        |  2 +-
 src/backup/chunk_store.rs            |  5 ++---
 src/backup/datastore.rs              |  4 +---
 src/backup/dynamic_index.rs          |  2 +-
 src/backup/key_derivation.rs         |  6 +++---
 src/backup/manifest.rs               |  2 +-
 src/bin/proxmox-backup-client.rs     |  2 +-
 src/bin/proxmox_backup_client/key.rs |  2 +-
 src/config/network.rs                |  2 +-
 src/config/sync.rs                   |  2 +-
 src/config/tape_encryption_keys.rs   |  4 ++--
 src/server/email_notifications.rs    |  2 +-
 src/server/environment.rs            |  2 +-
 src/server/h2service.rs              |  2 +-
 src/server/rest.rs                   |  2 +-
 src/server/verify_job.rs             |  2 +-
 src/tape/helpers/snapshot_reader.rs  |  2 +-
 src/tools/broadcast_future.rs        |  2 +-
 src/tools/runtime.rs                 |  2 +-
 tests/prune.rs                       |  6 +++---
 tests/worker-task-abort.rs           |  2 +-
 35 files changed, 47 insertions(+), 51 deletions(-)

diff --git a/src/api2/access/role.rs b/src/api2/access/role.rs
index 2b4c6b82..43863d40 100644
--- a/src/api2/access/role.rs
+++ b/src/api2/access/role.rs
@@ -46,7 +46,7 @@ fn list_roles() -> Result {
         let mut priv_list = Vec::new();
         for (name, privilege) in PRIVILEGES.iter() {
             if privs & privilege > 0 {
-                priv_list.push(name.clone());
+                priv_list.push(name);
             }
         }
         list.push(json!({ "roleid": role, "privs": priv_list, "comment": comment }));
diff --git a/src/api2/access/user.rs b/src/api2/access/user.rs
index f03389f8..ea583b1d 100644
--- a/src/api2/access/user.rs
+++ b/src/api2/access/user.rs
@@ -603,7 +603,7 @@ pub fn generate_token(
     token_shadow::set_secret(&tokenid, &secret)?;

     let token = user::ApiToken {
-        tokenid: tokenid.clone(),
+        tokenid,
         comment,
         enable,
         expire,
diff --git a/src/api2/admin/datastore.rs b/src/api2/admin/datastore.rs
index 5b9a1e84..9ce096f5 100644
--- a/src/api2/admin/datastore.rs
+++ b/src/api2/admin/datastore.rs
@@ -440,8 +440,8 @@ pub fn list_snapshots (
         let files = info
             .files
             .into_iter()
-            .map(|x| BackupContent {
-                filename: x.to_string(),
+            .map(|filename| BackupContent {
+                filename,
                 size: None,
                 crypt_mode: None,
             })
@@ -666,7 +666,7 @@ pub fn verify(

     let upid_str = WorkerTask::new_thread(
         worker_type,
-        Some(worker_id.clone()),
+        Some(worker_id),
         auth_id.clone(),
         to_stdout,
         move |worker| {
@@ -855,7 +855,7 @@ fn prune(

     // We use a WorkerTask just to have a task log, but run synchrounously
-    let worker = WorkerTask::new("prune", Some(worker_id), auth_id.clone(), true)?;
+    let worker = WorkerTask::new("prune", Some(worker_id), auth_id, true)?;

     if keep_all {
         worker.log("No prune selection - keeping all files.");
@@ -1009,7 +1009,7 @@ fn get_datastore_list(
         }
     }

-    Ok(list.into())
+    Ok(list)
 }

 #[sortable]
@@ -1066,7 +1066,7 @@ fn download_file(
             .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;

         let payload = tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
-            .map_ok(|bytes| hyper::body::Bytes::from(bytes.freeze()))
+            .map_ok(|bytes| bytes.freeze())
             .map_err(move |err| {
                 eprintln!("error during streaming of '{:?}' - {}", &path, err);
                 err
diff --git a/src/api2/admin/sync.rs b/src/api2/admin/sync.rs
index c9b9e145..3a808b22 100644
--- a/src/api2/admin/sync.rs
+++ b/src/api2/admin/sync.rs
@@ -58,7 +58,7 @@ pub fn list_sync_jobs(
             }
         })
         .filter(|job: &SyncJobStatus| {
-            let as_config: SyncJobConfig = job.clone().into();
+            let as_config: SyncJobConfig = job.into();
             check_sync_job_read_access(&user_info, &auth_id, &as_config)
         }).collect();

diff --git a/src/api2/backup.rs b/src/api2/backup.rs
index bf9c1465..9c773ecf 100644
--- a/src/api2/backup.rs
+++ b/src/api2/backup.rs
@@ -138,7 +138,7 @@ async move {
         }
     };

-    let backup_dir = BackupDir::with_group(backup_group.clone(), backup_time)?;
+    let backup_dir = BackupDir::with_group(backup_group, backup_time)?;

     let _last_guard = if let Some(last) = &last_backup {
         if backup_dir.backup_time() <= last.backup_dir.backup_time() {
diff --git a/src/api2/config/datastore.rs b/src/api2/config/datastore.rs
index a4d25989..b8b420e0 100644
--- a/src/api2/config/datastore.rs
+++ b/src/api2/config/datastore.rs
@@ -120,7 +120,7 @@ pub fn create_datastore(param: Value) -> Result<(), Error> {

     let _lock = open_file_locked(datastore::DATASTORE_CFG_LOCKFILE, std::time::Duration::new(10, 0), true)?;

-    let datastore: datastore::DataStoreConfig = serde_json::from_value(param.clone())?;
+    let datastore: datastore::DataStoreConfig = serde_json::from_value(param)?;

     let (mut config, _digest) = datastore::config()?;
diff --git a/src/api2/config/remote.rs b/src/api2/config/remote.rs
index 49db68ce..5c29d28a 100644
--- a/src/api2/config/remote.rs
+++ b/src/api2/config/remote.rs
@@ -96,7 +96,7 @@ pub fn create_remote(password: String, param: Value) -> Result<(), Error> {

     let _lock = open_file_locked(remote::REMOTE_CFG_LOCKFILE, std::time::Duration::new(10, 0), true)?;

-    let mut data = param.clone();
+    let mut data = param;
     data["password"] = Value::from(base64::encode(password.as_bytes()));

     let remote: remote::Remote = serde_json::from_value(data)?;
diff --git a/src/api2/config/sync.rs b/src/api2/config/sync.rs
index 1192d15f..c0f40909 100644
--- a/src/api2/config/sync.rs
+++ b/src/api2/config/sync.rs
@@ -154,7 +154,7 @@ pub fn create_sync_job(

     let _lock = open_file_locked(sync::SYNC_CFG_LOCKFILE, std::time::Duration::new(10, 0), true)?;

-    let sync_job: sync::SyncJobConfig = serde_json::from_value(param.clone())?;
+    let sync_job: sync::SyncJobConfig = serde_json::from_value(param)?;
     if !check_sync_job_modify_access(&user_info, &auth_id, &sync_job) {
         bail!("permission check failed");
     }
@@ -514,7 +514,7 @@ acl:1:/remote/remote1/remotestore1:write@pbs:RemoteSyncOperator

     // unless they have Datastore.Modify as well
     job.store = "localstore3".to_string();
-    job.owner = Some(read_auth_id.clone());
+    job.owner = Some(read_auth_id);
     assert_eq!(check_sync_job_modify_access(&user_info, &write_auth_id, &job), true);
     job.owner = None;
     assert_eq!(check_sync_job_modify_access(&user_info, &write_auth_id, &job), true);
diff --git a/src/api2/config/verify.rs b/src/api2/config/verify.rs
index 2be2772d..7d59893a 100644
--- a/src/api2/config/verify.rs
+++ b/src/api2/config/verify.rs
@@ -98,7 +98,7 @@ pub fn create_verification_job(
     let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
     let user_info = CachedUserInfo::new()?;

-    let verification_job: verify::VerificationJobConfig = serde_json::from_value(param.clone())?;
+    let verification_job: verify::VerificationJobConfig = serde_json::from_value(param)?;

     user_info.check_privs(&auth_id, &["datastore", &verification_job.store], PRIV_DATASTORE_VERIFY, false)?;

diff --git a/src/api2/helpers.rs b/src/api2/helpers.rs
index 3a11340b..2a822654 100644
--- a/src/api2/helpers.rs
+++ b/src/api2/helpers.rs
@@ -16,7 +16,7 @@ pub async fn create_download_response(path: PathBuf) -> Result, E
     };

     let payload = tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
-        .map_ok(|bytes| hyper::body::Bytes::from(bytes.freeze()));
+        .map_ok(|bytes| bytes.freeze());

     let body = Body::wrap_stream(payload);

diff --git a/src/api2/node/disks/directory.rs b/src/api2/node/disks/directory.rs
index da16e753..32628c4a 100644
--- a/src/api2/node/disks/directory.rs
+++ b/src/api2/node/disks/directory.rs
@@ -164,7 +164,7 @@ pub fn create_datastore_disk(

             let manager = DiskManage::new();

-            let disk = manager.clone().disk_by_name(&disk)?;
+            let disk = manager.disk_by_name(&disk)?;

             let partition = create_single_linux_partition(&disk)?;
             create_file_system(&partition, filesystem)?;
diff --git a/src/api2/node/subscription.rs b/src/api2/node/subscription.rs
index c935fa2c..0559a2e5 100644
--- a/src/api2/node/subscription.rs
+++ b/src/api2/node/subscription.rs
@@ -137,7 +137,7 @@ pub fn set_subscription(

     let server_id = tools::get_hardware_address()?;

-    let info = subscription::check_subscription(key, server_id.to_owned())?;
+    let info = subscription::check_subscription(key, server_id)?;

     subscription::write_subscription(info)
         .map_err(|e| format_err!("Error writing subscription status - {}", e))?;
diff --git a/src/api2/tape/backup.rs b/src/api2/tape/backup.rs
index 56622e64..5e05e599 100644
--- a/src/api2/tape/backup.rs
+++ b/src/api2/tape/backup.rs
@@ -94,7 +94,7 @@ pub fn backup(

     let upid_str = WorkerTask::new_thread(
         "tape-backup",
-        Some(store.clone()),
+        Some(store),
         auth_id,
         to_stdout,
         move |worker| {
diff --git a/src/api2/tape/restore.rs b/src/api2/tape/restore.rs
index 1131f181..441cbd1a 100644
--- a/src/api2/tape/restore.rs
+++ b/src/api2/tape/restore.rs
@@ -128,7 +128,7 @@ pub fn restore(

     let members = inventory.compute_media_set_members(&media_set_uuid)?;

-    let media_list = members.media_list().clone();
+    let media_list = members.media_list();

     let mut media_id_list = Vec::new();

@@ -234,7 +234,6 @@ pub fn restore_media(
             Some(reader) => reader,
         };

-        let target = target.clone();
         restore_archive(worker, reader, current_file_number, target, &mut catalog, verbose)?;
     }

diff --git a/src/backup/checksum_writer.rs b/src/backup/checksum_writer.rs
index 1167be6c..5aac6511 100644
--- a/src/backup/checksum_writer.rs
+++ b/src/backup/checksum_writer.rs
@@ -18,7 +18,7 @@ impl ChecksumWriter {
         let hasher = crc32fast::Hasher::new();
         let signer = match config {
             Some(config) => {
-                let tied_signer = Tied::new(config.clone(), |config| {
+                let tied_signer = Tied::new(config, |config| {
                     Box::new(unsafe { (*config).data_signer() })
                 });
                 Some(tied_signer)
diff --git a/src/backup/chunk_store.rs b/src/backup/chunk_store.rs
index 34d6f655..99d62a2e 100644
--- a/src/backup/chunk_store.rs
+++ b/src/backup/chunk_store.rs
@@ -80,7 +80,7 @@ impl ChunkStore {

         let default_options = CreateOptions::new();

-        match create_path(&base, Some(default_options.clone()), Some(options.clone())) {
+        match create_path(&base, Some(default_options), Some(options.clone())) {
             Err(err) => bail!("unable to create chunk store '{}' at {:?} - {}", name, base, err),
             Ok(res) => if ! res { nix::unistd::chown(&base, Some(uid), Some(gid))? },
         }
@@ -113,9 +113,8 @@ impl ChunkStore {
     }

     fn lockfile_path>(base: P) -> PathBuf {
-        let base: PathBuf = base.into();
+        let mut lockfile_path: PathBuf = base.into();

-        let mut lockfile_path = base.clone();
         lockfile_path.push(".lock");

         lockfile_path
diff --git a/src/backup/datastore.rs b/src/backup/datastore.rs
index 19e7ccba..b384d88a 100644
--- a/src/backup/datastore.rs
+++ b/src/backup/datastore.rs
@@ -334,9 +334,7 @@ impl DataStore {
         auth_id: &Authid,
     ) -> Result<(Authid, DirLockGuard), Error> {
         // create intermediate path first:
-        let base_path = self.base_path();
-
-        let mut full_path = base_path.clone();
+        let mut full_path = self.base_path();
         full_path.push(backup_group.backup_type());
         std::fs::create_dir_all(&full_path)?;

diff --git a/src/backup/dynamic_index.rs b/src/backup/dynamic_index.rs
index df651906..54561205 100644
--- a/src/backup/dynamic_index.rs
+++ b/src/backup/dynamic_index.rs
@@ -229,7 +229,7 @@ impl IndexFile for DynamicIndexReader {

         Some(ChunkReadInfo {
             range: start..end,
-            digest: self.index[pos].digest.clone(),
+            digest: self.index[pos].digest,
         })
     }

diff --git a/src/backup/key_derivation.rs b/src/backup/key_derivation.rs
index b0647618..73478c00 100644
--- a/src/backup/key_derivation.rs
+++ b/src/backup/key_derivation.rs
@@ -233,7 +233,7 @@ pub fn decrypt_key_config(
     let mut result = [0u8; 32];
     result.copy_from_slice(&key);

-    let crypt_config = CryptConfig::new(result.clone())?;
+    let crypt_config = CryptConfig::new(result)?;
     let fingerprint = crypt_config.fingerprint();
     if let Some(ref stored_fingerprint) = key_config.fingerprint {
         if &fingerprint != stored_fingerprint {
@@ -313,9 +313,9 @@ fn encrypt_decrypt_test() -> Result<(), Error> {
         ])),
     };

-    let encrypted = rsa_encrypt_key_config(public.clone(), &key).expect("encryption failed");
+    let encrypted = rsa_encrypt_key_config(public, &key).expect("encryption failed");
     let (decrypted, created, fingerprint) =
-        rsa_decrypt_key_config(private.clone(), &encrypted, &passphrase)
+        rsa_decrypt_key_config(private, &encrypted, &passphrase)
             .expect("decryption failed");

     assert_eq!(key.created, created);
diff --git a/src/backup/manifest.rs b/src/backup/manifest.rs
index d9a55655..47f9cadc 100644
--- a/src/backup/manifest.rs
+++ b/src/backup/manifest.rs
@@ -186,7 +186,7 @@ impl BackupManifest {
             manifest["unprotected"]["key-fingerprint"] = serde_json::to_value(fingerprint)?;
         }

-        let manifest = serde_json::to_string_pretty(&manifest).unwrap().into();
+        let manifest = serde_json::to_string_pretty(&manifest).unwrap();

         Ok(manifest)
     }
diff --git a/src/bin/proxmox-backup-client.rs b/src/bin/proxmox-backup-client.rs
index 2cd00c2e..96e47588 100644
--- a/src/bin/proxmox-backup-client.rs
+++ b/src/bin/proxmox-backup-client.rs
@@ -917,7 +917,7 @@ async fn create_backup(
             let (key, created, fingerprint) = decrypt_key(&key, &key::get_encryption_key_password)?;
             println!("Encryption key fingerprint: {}", fingerprint);

-            let crypt_config = CryptConfig::new(key.clone())?;
+            let crypt_config = CryptConfig::new(key)?;

             match key::find_master_pubkey()? {
                 Some(ref path) if path.exists() => {
diff --git a/src/bin/proxmox_backup_client/key.rs b/src/bin/proxmox_backup_client/key.rs
index 109f0384..06104feb 100644
--- a/src/bin/proxmox_backup_client/key.rs
+++ b/src/bin/proxmox_backup_client/key.rs
@@ -118,7 +118,7 @@ fn create(kdf: Option, path: Option) -> Result<(), Error> {
     let mut key_array = [0u8; 32];
     proxmox::sys::linux::fill_with_random_data(&mut key_array)?;

-    let crypt_config = CryptConfig::new(key_array.clone())?;
+    let crypt_config = CryptConfig::new(key_array)?;
     let key = key_array.to_vec();

     match kdf {
diff --git a/src/config/network.rs b/src/config/network.rs
index 458dfe11..d6790afe 100644
--- a/src/config/network.rs
+++ b/src/config/network.rs
@@ -577,7 +577,7 @@ pub fn complete_port_list(arg: &str, _param: &HashMap) -> Vec
        return vec![],
     };
-    let arg = arg.clone().trim();
+    let arg = arg.trim();
     let prefix = if let Some(idx) = arg.rfind(",") { &arg[..idx+1] } else { "" };
     ports.iter().map(|port| format!("{}{}", prefix, port)).collect()
 }

diff --git a/src/config/sync.rs b/src/config/sync.rs
index d2e945a1..0d9be9dc 100644
--- a/src/config/sync.rs
+++ b/src/config/sync.rs
@@ -79,7 +79,7 @@ impl From<&SyncJobStatus> for SyncJobConfig {
             owner: job_status.owner.clone(),
             remote: job_status.remote.clone(),
             remote_store: job_status.remote_store.clone(),
-            remove_vanished: job_status.remove_vanished.clone(),
+            remove_vanished: job_status.remove_vanished,
             comment: job_status.comment.clone(),
             schedule: job_status.schedule.clone(),
         }
diff --git a/src/config/tape_encryption_keys.rs b/src/config/tape_encryption_keys.rs
index ff565349..284d0ed3 100644
--- a/src/config/tape_encryption_keys.rs
+++ b/src/config/tape_encryption_keys.rs
@@ -61,7 +61,7 @@ pub struct EncryptionKeyConfig {
 }

 pub fn compute_tape_key_fingerprint(key: &[u8; 32]) -> Result {
-    let crypt_config = CryptConfig::new(key.clone())?;
+    let crypt_config = CryptConfig::new(*key)?;
     Ok(crypt_config.fingerprint())
 }

@@ -228,7 +228,7 @@ pub fn insert_key(key: [u8;32], key_config: KeyConfig, hint: String) -> Result<(
     save_keys(key_map)?;

     let item = EncryptionKeyConfig::new(key_config, hint);
-    config_map.insert(fingerprint.clone(), item);
+    config_map.insert(fingerprint, item);
     save_key_configs(config_map)?;

     Ok(())
diff --git a/src/server/email_notifications.rs b/src/server/email_notifications.rs
index 8bfc5f55..9a31f628 100644
--- a/src/server/email_notifications.rs
+++ b/src/server/email_notifications.rs
@@ -403,7 +403,7 @@ fn lookup_user_email(userid: &Userid) -> Option {

     if let Ok(user_config) = user::cached_config() {
         if let Ok(user) = user_config.lookup::("user", userid.as_str()) {
-            return user.email.clone();
+            return user.email;
         }
     }

diff --git a/src/server/environment.rs b/src/server/environment.rs
index 2577c379..0548b5bc 100644
--- a/src/server/environment.rs
+++ b/src/server/environment.rs
@@ -48,6 +48,6 @@ impl RpcEnvironment for RestEnvironment {
     }

     fn get_client_ip(&self) -> Option {
-        self.client_ip.clone()
+        self.client_ip
     }
 }
diff --git a/src/server/h2service.rs b/src/server/h2service.rs
index 0bb735b8..989618ec 100644
--- a/src/server/h2service.rs
+++ b/src/server/h2service.rs
@@ -97,7 +97,7 @@ impl tower_service::Service> for H2Ser
         let method = req.method().clone();
         let worker = self.worker.clone();

-        std::pin::Pin::from(self.handle_request(req))
+        self.handle_request(req)
             .map(move |result| match result {
                 Ok(res) => {
                     Self::log_response(worker, method, &path, &res);
diff --git a/src/server/rest.rs b/src/server/rest.rs
index 79c9c3d0..67fbbbb2 100644
--- a/src/server/rest.rs
+++ b/src/server/rest.rs
@@ -517,7 +517,7 @@ async fn chuncked_static_file_download(filename: PathBuf) -> Result BroadcastFuture {
         let task = source.map(move |value| {
             match value {
-                Ok(value) => Self::notify_listeners(inner1, Ok(value.clone())),
+                Ok(value) => Self::notify_listeners(inner1, Ok(value)),
                 Err(err) => Self::notify_listeners(inner1, Err(err.to_string())),
             }
         });
diff --git a/src/tools/runtime.rs b/src/tools/runtime.rs
index 46564e76..477d26d6 100644
--- a/src/tools/runtime.rs
+++ b/src/tools/runtime.rs
@@ -73,7 +73,7 @@ pub fn get_runtime_with_builder runtime::Builder>(get_builder: F) ->
     let runtime = builder.build().expect("failed to spawn tokio runtime");

     let rt = Arc::new(runtime);
-    *guard = Arc::downgrade(&rt.clone());
+    *guard = Arc::downgrade(&rt);

     rt
 }
diff --git a/tests/prune.rs b/tests/prune.rs
index d9758ea7..da516a0e 100644
--- a/tests/prune.rs
+++ b/tests/prune.rs
@@ -63,7 +63,7 @@ fn test_prune_hourly() -> Result<(), Error> {
     ];
     assert_eq!(remove_list, expect);

-    let list = orig_list.clone();
+    let list = orig_list;
     let options = PruneOptions::new().keep_hourly(Some(2));
     let remove_list = get_prune_list(list, true, &options);
     let expect: Vec = vec![
@@ -126,7 +126,7 @@ fn test_prune_simple2() -> Result<(), Error> {
     ];
     assert_eq!(remove_list, expect);

-    let list = orig_list.clone();
+    let list = orig_list;
     let options = PruneOptions::new().keep_monthly(Some(1)).keep_yearly(Some(1));
     let remove_list = get_prune_list(list, true, &options);
     let expect: Vec = vec![
@@ -266,7 +266,7 @@ fn test_prune_simple() -> Result<(), Error> {
     assert_eq!(remove_list, expect);

     // keep-weekly + keep-monthly + keep-yearly
-    let list = orig_list.clone();
+    let list = orig_list;
     let options = PruneOptions::new().keep_weekly(Some(5)).keep_monthly(Some(6)).keep_yearly(Some(7));
     let remove_list = get_prune_list(list, false, &options);
     // all backup are within one week, so we only keep a single file
diff --git a/tests/worker-task-abort.rs b/tests/worker-task-abort.rs
index 7ec3958c..1a0d938d 100644
--- a/tests/worker-task-abort.rs
+++ b/tests/worker-task-abort.rs
@@ -25,7 +25,7 @@ fn garbage_collection(worker: &server::WorkerTask) -> Result<(), Error> {

     worker.log("end garbage collection");

-    Ok(()).into()
+    Ok(())
 }

-- 
2.39.2