X-Git-Url: https://git.proxmox.com/?a=blobdiff_plain;f=PVE%2FStorage.pm;h=81f9b67508538cd4de065cdda781bc0acda60aee;hb=76c1e57be7c8495586e38205611290db049896e0;hp=c06983dee43d87981a0751ff3378048a5cd8501d;hpb=3d4949692a7e00c59e219804ced3b782d81cf7f0;p=pve-storage.git

diff --git a/PVE/Storage.pm b/PVE/Storage.pm
index c06983d..81f9b67 100755
--- a/PVE/Storage.pm
+++ b/PVE/Storage.pm
@@ -7,6 +7,7 @@ use Data::Dumper;
 use POSIX;
 use IO::Select;
 use IO::File;
+use IO::Socket::IP;
 use File::Basename;
 use File::Path;
 use Cwd 'abs_path';
@@ -24,15 +25,16 @@ use PVE::Storage::DirPlugin;
 use PVE::Storage::LVMPlugin;
 use PVE::Storage::LvmThinPlugin;
 use PVE::Storage::NFSPlugin;
+use PVE::Storage::CIFSPlugin;
 use PVE::Storage::ISCSIPlugin;
 use PVE::Storage::RBDPlugin;
+use PVE::Storage::CephFSPlugin;
 use PVE::Storage::SheepdogPlugin;
 use PVE::Storage::ISCSIDirectPlugin;
 use PVE::Storage::GlusterfsPlugin;
 use PVE::Storage::ZFSPoolPlugin;
 use PVE::Storage::ZFSPlugin;
 use PVE::Storage::DRBDPlugin;
-use PVE::ReplicationTools;

 # Storage API version. Icrement it on changes in storage API interface.
 use constant APIVER => 1;
@@ -42,8 +44,10 @@ PVE::Storage::DirPlugin->register();
 PVE::Storage::LVMPlugin->register();
 PVE::Storage::LvmThinPlugin->register();
 PVE::Storage::NFSPlugin->register();
+PVE::Storage::CIFSPlugin->register();
 PVE::Storage::ISCSIPlugin->register();
 PVE::Storage::RBDPlugin->register();
+PVE::Storage::CephFSPlugin->register();
 PVE::Storage::SheepdogPlugin->register();
 PVE::Storage::ISCSIDirectPlugin->register();
 PVE::Storage::GlusterfsPlugin->register();
@@ -145,6 +149,17 @@ sub storage_check_enabled {
     return storage_check_node($cfg, $storeid, $node, $noerr);
 }

+# storage_can_replicate:
+# return true if storage supports replication
+# (volumes allocated with vdisk_alloc() have the replication feature)
+sub storage_can_replicate {
+    my ($cfg, $storeid, $format) = @_;
+
+    my $scfg = storage_config($cfg, $storeid);
+    my $plugin = PVE::Storage::Plugin->lookup($scfg->{type});
+    return $plugin->storage_can_replicate($scfg, $storeid, $format);
+}
+
 sub storage_ids {
     my ($cfg) = @_;

@@ -264,20 +279,19 @@ sub volume_has_feature {
 }

 sub volume_snapshot_list {
-    my ($cfg, $volid, $prefix, $ip) = @_;
+    my ($cfg, $volid) = @_;

     my ($storeid, $volname) = parse_volume_id($volid, 1);
     if ($storeid) {
         my $scfg = storage_config($cfg, $storeid);
         my $plugin = PVE::Storage::Plugin->lookup($scfg->{type});
-        return $plugin->volume_snapshot_list($scfg, $storeid, $volname, $prefix, $ip);
+        return $plugin->volume_snapshot_list($scfg, $storeid, $volname);
     } elsif ($volid =~ m|^(/.+)$| && -e $volid) {
         die "send file/device '$volid' is not possible\n";
     } else {
         die "unable to parse volume ID '$volid'\n";
     }
     # return an empty array if dataset does not exist.
-    # youngest snap first
 }

 sub get_image_dir {
@@ -526,7 +540,7 @@ sub abs_filesystem_path {
 }

 sub storage_migrate {
-    my ($cfg, $volid, $target_host, $target_storeid, $target_volname) = @_;
+    my ($cfg, $volid, $target_sshinfo, $target_storeid, $target_volname, $base_snapshot, $snapshot, $ratelimit_bps, $insecure, $with_snapshots, $logfunc) = @_;

     my ($storeid, $volname) = parse_volume_id($volid);
     $target_volname = $volname if !$target_volname;
@@ -540,155 +554,85 @@ sub storage_migrate {

     my $target_volid = "${target_storeid}:${target_volname}";

-    my $errstr = "unable to migrate '$volid' to '${target_volid}' on host '$target_host'";
-
-    my $sshoptions = "-o 'BatchMode=yes'";
-    my $ssh = "/usr/bin/ssh $sshoptions";
-
-    local $ENV{RSYNC_RSH} = $ssh;
+    my $target_ip = $target_sshinfo->{ip};
+    my $errstr = "unable to migrate '$volid' to '${target_volid}' on host '$target_sshinfo->{name}'";

-    # only implemented for file system based storage
-    if ($scfg->{path}) {
-        if ($tcfg->{path}) {
+    my $ssh = PVE::Cluster::ssh_info_to_command($target_sshinfo);
+    my $ssh_base = PVE::Cluster::ssh_info_to_command_base($target_sshinfo);
+    local $ENV{RSYNC_RSH} = PVE::Tools::cmd2string($ssh_base);

-            my $src_plugin = PVE::Storage::Plugin->lookup($scfg->{type});
-            my $dst_plugin = PVE::Storage::Plugin->lookup($tcfg->{type});
-            my $src = $src_plugin->path($scfg, $volname, $storeid);
-            my $dst = $dst_plugin->path($tcfg, $target_volname, $target_storeid);
-
-            my $dirname = dirname($dst);
-
-            if ($tcfg->{shared}) { # we can do a local copy
-
-                run_command(['/bin/mkdir', '-p', $dirname]);
-
-                run_command(['/bin/cp', $src, $dst]);
-
-            } else {
-                run_command(['/usr/bin/ssh', "root\@${target_host}",
-                             '/bin/mkdir', '-p', $dirname]);
-
-                # we use rsync with --sparse, so we can't use --inplace,
-                # so we remove file on the target if it already exists to
-                # save space
-                my ($size, $format) = PVE::Storage::Plugin::file_size_info($src);
-                if ($format && ($format eq 'raw') && $size) {
-                    run_command(['/usr/bin/ssh', "root\@${target_host}",
-                                 'rm', '-f', $dst],
-                                outfunc => sub {});
-                }
+    my @cstream = ([ '/usr/bin/cstream', '-t', $ratelimit_bps ])
+        if defined($ratelimit_bps);

-                my $cmd;
-                if ($format eq 'subvol') {
-                    $cmd = ['/usr/bin/rsync', '--progress', '-X', '-A', '--numeric-ids',
-                            '-aH', '--delete', '--no-whole-file', '--inplace',
-                            '--one-file-system', "$src/", "[root\@${target_host}]:$dst"];
-                } else {
-                    $cmd = ['/usr/bin/rsync', '--progress', '--sparse', '--whole-file',
-                            $src, "[root\@${target_host}]:$dst"];
-                }
-
-                my $percent = -1;
-
-                run_command($cmd, outfunc => sub {
-                    my $line = shift;
-
-                    if ($line =~ m/^\s*(\d+\s+(\d+)%\s.*)$/) {
-                        if ($2 > $percent) {
-                            $percent = $2;
-                            print "rsync status: $1\n";
-                            *STDOUT->flush();
-                        }
-                    } else {
-                        print "$line\n";
-                        *STDOUT->flush();
-                    }
-                });
-            }
-        } else {
-            die "$errstr - target type '$tcfg->{type}' not implemented\n";
+    my $migration_snapshot;
+    if (!defined($snapshot)) {
+        if ($scfg->{type} eq 'zfspool') {
+            $migration_snapshot = 1;
+            $snapshot = '__migration__';
         }
+    }

-    } elsif ($scfg->{type} eq 'zfspool') {
-
-        if ($tcfg->{type} eq 'zfspool') {
-
-            die "$errstr - pool on target does not have the same name as on source!"
-                if $tcfg->{pool} ne $scfg->{pool};
-
-            my (undef, $volname) = parse_volname($cfg, $volid);
-
-            my $zfspath = "$scfg->{pool}\/$volname";
-
-            my $snap = ['zfs', 'snapshot', "$zfspath\@__migration__"];
-            my $send = ['zfs', 'send', '-Rpv'];
-            my $rec = ['ssh', "root\@$target_host", 'zfs', 'recv','-F' ,$zfspath];
+    my @formats = volume_transfer_formats($cfg, $volid, $volid, $snapshot, $base_snapshot, $with_snapshots);
+    die "cannot migrate from storage type '$scfg->{type}' to '$tcfg->{type}'\n" if !@formats;
+    my $format = $formats[0];

-            if (my $snapname = PVE::ReplicationTools::get_last_replica_snap($volid)) {
+    my @insecurecmd;
+    if ($insecure) {
+        @insecurecmd = ('pvecm', 'mtunnel', '-run-command', 1);
+        if (my $network = $target_sshinfo->{network}) {
+            push @insecurecmd, '-migration_network', $network;
+        }
+    }

-                #check if target snapshot exists.
-                my $checksnap = ['/usr/bin/ssh', "root\@${target_host}", "-o",
-                                 'BatchMode=yes', 'zfs', 'list', '-Hrt', 'snap',
-                                 "$zfspath\@$snapname "];
-                eval {
-                    run_command($checksnap);
-                    push @$send, '-I', "$zfspath\@$snapname";
-                };
+    $with_snapshots = $with_snapshots ? 1 : 0; # sanitize for passing as cli parameter
+    my $send = ['pvesm', 'export', $volid, $format, '-', '-with-snapshots', $with_snapshots];
+    my $recv = [@$ssh, @insecurecmd, '--', 'pvesm', 'import', $volid, $format, '-', '-with-snapshots', $with_snapshots];
+    if (defined($snapshot)) {
+        push @$send, '-snapshot', $snapshot
+    }
+    if ($migration_snapshot) {
+        push @$recv, '-delete-snapshot', $snapshot;
+    }

-            }
-            push @$send, "--", "$zfspath\@__migration__";
+    if (defined($base_snapshot)) {
+        # Check if the snapshot exists on the remote side:
+        push @$send, '-base', $base_snapshot;
+        push @$recv, '-base', $base_snapshot;
+    }

-            my $destroy_target = ['ssh', "root\@$target_host", 'zfs', 'destroy', "$zfspath\@__migration__"];
-            run_command($snap);
-            eval{
-                run_command([$send,$rec]);
-            };
-            my $err = $@;
-            warn "zfs send/receive failed, cleaning up snapshot(s)..\n" if $err;
-            eval { run_command(['zfs', 'destroy', "$zfspath\@__migration__"]); };
-            warn "could not remove source snapshot: $@\n" if $@;
-            eval { run_command($destroy_target); };
-            warn "could not remove target snapshot: $@\n" if $@;
-            die $err if $err;
-
-        } else {
-            die "$errstr - target type $tcfg->{type} is not valid\n";
-        }
-
-    } elsif ($scfg->{type} eq 'lvmthin' || $scfg->{type} eq 'lvm') {
-
-        if (($scfg->{type} eq $tcfg->{type}) &&
-            ($tcfg->{type} eq 'lvmthin' || $tcfg->{type} eq 'lvm')) {
-
-            my (undef, $volname, $vmid) = parse_volname($cfg, $volid);
-            my $size = volume_size_info($cfg, $volid, 5);
-            my $src = path($cfg, $volid);
-            my $dst = path($cfg, $target_volid);
-
-            run_command(['/usr/bin/ssh', "root\@${target_host}",
-                         'pvesm', 'alloc', $target_storeid, $vmid,
-                         $target_volname, int($size/1024)]);
-
-            eval {
-                if ($tcfg->{type} eq 'lvmthin') {
-                    run_command([["dd", "if=$src", "bs=4k"],["/usr/bin/ssh", "root\@${target_host}",
-                                 "dd", 'conv=sparse', "of=$dst", "bs=4k"]]);
-                } else {
-                    run_command([["dd", "if=$src", "bs=4k"],["/usr/bin/ssh", "root\@${target_host}",
-                                 "dd", "of=$dst", "bs=4k"]]);
-                }
-            };
-            if (my $err = $@) {
-                run_command(['/usr/bin/ssh', "root\@${target_host}",
-                             'pvesm', 'free', $target_volid]);
-                die $err;
+    volume_snapshot($cfg, $volid, $snapshot) if $migration_snapshot;
+    eval {
+        if ($insecure) {
+            open(my $info, '-|', @$recv)
+                or die "receive command failed: $!\n";
+            my ($ip) = <$info> =~ /^($PVE::Tools::IPRE)$/ or die "no tunnel IP received\n";
+            my ($port) = <$info> =~ /^(\d+)$/ or die "no tunnel port received\n";
+            my $socket = IO::Socket::IP->new(PeerHost => $ip, PeerPort => $port, Type => SOCK_STREAM)
+                or die "failed to connect to tunnel at $ip:$port\n";
+            # we won't be reading from the socket
+            shutdown($socket, 0);
+            run_command([$send, @cstream], output => '>&'.fileno($socket));
+            # don't close the connection entirely otherwise the receiving end
+            # might not get all buffered data (and fails with 'connection reset by peer')
+            shutdown($socket, 1);
+            1 while <$info>; # wait for the remote process to finish
+            # now close the socket
+            close($socket);
+            if (!close($info)) { # does waitpid()
+                die "import failed: $!\n" if $!;
+                die "import failed: exit code ".($?>>8)."\n";
             }
         } else {
-            die "$errstr - migrate from source type '$scfg->{type}' to '$tcfg->{type}' not implemented\n";
+            run_command([$send, @cstream, $recv], logfunc => $logfunc);
         }
-    } else {
-        die "$errstr - source type '$scfg->{type}' not implemented\n";
+    };
+    my $err = $@;
+    warn "send/receive failed, cleaning up snapshot(s)..\n" if $err;
+    if ($migration_snapshot) {
+        eval { volume_snapshot_delete($cfg, $volid, $snapshot, 0) };
+        warn "could not remove source snapshot: $@\n" if $@;
     }
+    die $err if $err;
 }

 sub vdisk_clone {
@@ -1061,7 +1005,7 @@ sub deactivate_volumes {
 }

 sub storage_info {
-    my ($cfg, $content) = @_;
+    my ($cfg, $content, $includeformat) = @_;

     my $ids = $cfg->{ids};

@@ -1071,8 +1015,7 @@ sub storage_info {

     my $slist = [];
     foreach my $storeid (keys %$ids) {
-
-        next if !storage_check_enabled($cfg, $storeid, undef, 1);
+        my $storage_enabled = defined(storage_check_enabled($cfg, $storeid, undef, 1));

         if (defined($content)) {
            my $want_ctype = 0;
@@ -1082,7 +1025,7 @@ sub storage_info {
                    last;
                }
            }
-           next if !$want_ctype;
+           next if !$want_ctype || !$storage_enabled;
        }

        my $type = $ids->{$storeid}->{type};
@@ -1095,6 +1038,7 @@ sub storage_info {
            shared => $ids->{$storeid}->{shared} ? 1 : 0,
            content => PVE::Storage::Plugin::content_hash_to_string($ids->{$storeid}->{content}),
            active => 0,
+           enabled => $storage_enabled ? 1 : 0,
        };

        push @$slist, $storeid;
@@ -1104,7 +1048,18 @@ sub storage_info {
     foreach my $storeid (keys %$ids) {
        my $scfg = $ids->{$storeid};

+       next if !$info->{$storeid};
+       next if !$info->{$storeid}->{enabled};
+
+       my $plugin = PVE::Storage::Plugin->lookup($scfg->{type});
+       if ($includeformat) {
+           my $pd = $plugin->plugindata();
+           $info->{$storeid}->{format} = $pd->{format}
+               if $pd->{format};
+           $info->{$storeid}->{select_existing} = $pd->{select_existing}
+               if $pd->{select_existing};
+       }

        eval { activate_storage($cfg, $storeid, $cache); };
        if (my $err = $@) {
@@ -1112,9 +1067,7 @@ sub storage_info {
            next;
        }

-       my $plugin = PVE::Storage::Plugin->lookup($scfg->{type});
-       my ($total, $avail, $used, $active);
-       eval { ($total, $avail, $used, $active) = $plugin->status($storeid, $scfg, $cache); };
+       my ($total, $avail, $used, $active) = eval { $plugin->status($storeid, $scfg, $cache); };
        warn $@ if $@;
        next if !$active;
        $info->{$storeid}->{total} = int($total);
@@ -1164,6 +1117,41 @@ sub scan_nfs {
     return $res;
 }

+sub scan_cifs {
+    my ($server_in, $user, $password, $domain) = @_;
+
+    my $server;
+    if (!($server = resolv_server ($server_in))) {
+        die "unable to resolve address for server '${server_in}'\n";
+    }
+
+    # we support only Windows greater than 2012 cifsscan so use smb3
+    my $cmd = ['/usr/bin/smbclient', '-m', 'smb3', '-d', '0', '-L', $server];
+    if (defined($user)) {
+        die "password is required" if !defined($password);
+        push @$cmd, '-U', "$user\%$password";
+        push @$cmd, '-W', $domain if defined($domain);
+    } else {
+        push @$cmd, '-N';
+    }
+
+    my $res = {};
+    run_command($cmd,
+                outfunc => sub {
+                    my $line = shift;
+                    if ($line =~ m/(\S+)\s*Disk\s*(\S*)/) {
+                        $res->{$1} = $2;
+                    } elsif ($line =~ m/(NT_STATUS_(\S*))/) {
+                        $res->{$1} = '';
+                    }
+                },
+                errfunc => sub {},
+                noerr => 1
+    );
+
+    return $res;
+}
+
 sub scan_zfs {

     my $cmd = ['zfs', 'list', '-t', 'filesystem', '-H', '-o', 'name,avail,used'];
@@ -1467,6 +1455,60 @@ sub extract_vzdump_config {
     }
 }

+sub volume_export {
+    my ($cfg, $fh, $volid, $format, $snapshot, $base_snapshot, $with_snapshots) = @_;
+
+    my ($storeid, $volname) = parse_volume_id($volid, 1);
+    die "cannot export volume '$volid'\n" if !$storeid;
+    my $scfg = storage_config($cfg, $storeid);
+    my $plugin = PVE::Storage::Plugin->lookup($scfg->{type});
+    return $plugin->volume_export($scfg, $storeid, $fh, $volname, $format,
+                                  $snapshot, $base_snapshot, $with_snapshots);
+}
+
+sub volume_import {
+    my ($cfg, $fh, $volid, $format, $base_snapshot, $with_snapshots) = @_;
+
+    my ($storeid, $volname) = parse_volume_id($volid, 1);
+    die "cannot import into volume '$volid'\n" if !$storeid;
+    my $scfg = storage_config($cfg, $storeid);
+    my $plugin = PVE::Storage::Plugin->lookup($scfg->{type});
+    return $plugin->volume_import($scfg, $storeid, $fh, $volname, $format,
+                                  $base_snapshot, $with_snapshots);
+}
+
+sub volume_export_formats {
+    my ($cfg, $volid, $snapshot, $base_snapshot, $with_snapshots) = @_;
+
+    my ($storeid, $volname) = parse_volume_id($volid, 1);
+    return if !$storeid;
+    my $scfg = storage_config($cfg, $storeid);
+    my $plugin = PVE::Storage::Plugin->lookup($scfg->{type});
+    return $plugin->volume_export_formats($scfg, $storeid, $volname,
+                                          $snapshot, $base_snapshot,
+                                          $with_snapshots);
+}
+
+sub volume_import_formats {
+    my ($cfg, $volid, $base_snapshot, $with_snapshots) = @_;
+
+    my ($storeid, $volname) = parse_volume_id($volid, 1);
+    return if !$storeid;
+    my $scfg = storage_config($cfg, $storeid);
+    my $plugin = PVE::Storage::Plugin->lookup($scfg->{type});
+    return $plugin->volume_import_formats($scfg, $storeid, $volname,
+                                          $base_snapshot, $with_snapshots);
+}
+
+sub volume_transfer_formats {
+    my ($cfg, $src_volid, $dst_volid, $snapshot, $base_snapshot, $with_snapshots) = @_;
+    my @export_formats = volume_export_formats($cfg, $src_volid, $snapshot, $base_snapshot, $with_snapshots);
+    my @import_formats = volume_import_formats($cfg, $dst_volid, $base_snapshot, $with_snapshots);
+    my %import_hash = map { $_ => 1 } @import_formats;
+    my @common = grep { $import_hash{$_} } @export_formats;
+    return @common;
+}
+
 # bash completion helper

 sub complete_storage {
@@ -1530,4 +1572,91 @@ sub complete_volume {
     return $res;
 }

+# Various io-heavy operations require io/bandwidth limits which can be
+# configured on multiple levels: The global defaults in datacenter.cfg, and
+# per-storage overrides. When we want to do a restore from storage A to storage
+# B, we should take the smaller limit defined for storages A and B, and if no
+# such limit was specified, use the one from datacenter.cfg.
+sub get_bandwidth_limit {
+    my ($operation, $storage_list, $override) = @_;
+
+    # called for each limit (global, per-storage) with the 'default' and the
+    # $operation limit and should update $override for every limit affecting
+    # us.
+    my $use_global_limits = 0;
+    my $apply_limit = sub {
+        my ($bwlimit) = @_;
+        if (defined($bwlimit)) {
+            my $limits = PVE::JSONSchema::parse_property_string('bwlimit', $bwlimit);
+            my $limit = $limits->{$operation} // $limits->{default};
+            if (defined($limit)) {
+                if (!$override || $limit < $override) {
+                    $override = $limit;
+                }
+                return;
+            }
+        }
+        # If there was no applicable limit, try to apply the global ones.
+        $use_global_limits = 1;
+    };
+
+    my ($rpcenv, $authuser);
+    if (defined($override)) {
+        $rpcenv = PVE::RPCEnvironment->get();
+        $authuser = $rpcenv->get_user();
+    }
+
+    # Apply per-storage limits - if there are storages involved.
+    if (@$storage_list) {
+        my $config = config();
+
+        # The Datastore.Allocate permission allows us to modify the per-storage
+        # limits, therefore it also allows us to override them.
+        # Since we have most likely multiple storages to check, do a quick check on
+        # the general '/storage' path to see if we can skip the checks entirely:
+        return $override if $rpcenv && $rpcenv->check($authuser, '/storage', ['Datastore.Allocate'], 1);
+
+        my %done;
+        foreach my $storage (@$storage_list) {
+            # Avoid duplicate checks:
+            next if $done{$storage};
+            $done{$storage} = 1;
+
+            # Otherwise we may still have individual /storage/$ID permissions:
+            if (!$rpcenv || !$rpcenv->check($authuser, "/storage/$storage", ['Datastore.Allocate'], 1)) {
+                # And if not: apply the limits.
+                my $storecfg = storage_config($config, $storage);
+                $apply_limit->($storecfg->{bwlimit});
+            }
+        }
+
+        # Storage limits take precedence over the datacenter defaults, so if
+        # a limit was applied:
+        return $override if !$use_global_limits;
+    }
+
+    # Sys.Modify on '/' means we can change datacenter.cfg which contains the
+    # global default limits.
+    if (!$rpcenv || !$rpcenv->check($authuser, '/', ['Sys.Modify'], 1)) {
+        # So if we cannot modify global limits, apply them to our currently
+        # requested override.
+        my $dc = cfs_read_file('datacenter.cfg');
+        $apply_limit->($dc->{bwlimit});
+    }
+
+    return $override;
+}
+
+# checks if the storage id is available and dies if not
+sub check_available {
+    my ($id) = @_;
+
+    my $cfg = config();
+    if (my $scfg = storage_config($cfg, $id, 1)) {
+        die "storage ID '$id' already defined\n";
+    }
+
+    return undef;
+}
+
 1;
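
Usage note (editor's sketch, not part of the diff): the rewritten storage_migrate() above streams a volume through the new 'pvesm export' / 'pvesm import' pair, picking a stream format both sides support via volume_transfer_formats() and, when $insecure is set, tunnelling the data through 'pvecm mtunnel'. The Perl sketch below shows how a caller might drive it under stated assumptions: the volume ID, storage ID and node name are placeholders, and PVE::Cluster::get_ssh_info() is assumed to return the ssh-info hash that ssh_info_to_command() expects.

#!/usr/bin/perl
# Minimal sketch: push one volume to another node with the new migration API.
use strict;
use warnings;

use PVE::Storage;
use PVE::Cluster;

my $volid          = 'local-zfs:vm-100-disk-1';  # placeholder volume ID
my $target_storeid = 'local-zfs';                # placeholder target storage ID

my $cfg = PVE::Storage::config();
# Assumption: get_ssh_info() supplies the {ip, name, network} hash used by
# ssh_info_to_command()/ssh_info_to_command_base() in the diff above.
my $target_sshinfo = PVE::Cluster::get_ssh_info('node2');

# Negotiate a stream format both plugins understand (e.g. 'zfs', 'raw+size').
my @formats = PVE::Storage::volume_transfer_formats($cfg, $volid, $volid, undef, undef, 0);
die "no common transfer format\n" if !@formats;

# Arguments follow the new signature:
# ($cfg, $volid, $target_sshinfo, $target_storeid, $target_volname,
#  $base_snapshot, $snapshot, $ratelimit_bps, $insecure, $with_snapshots, $logfunc)
PVE::Storage::storage_migrate(
    $cfg, $volid, $target_sshinfo, $target_storeid,
    undef,              # keep the source volume name
    undef,              # no incremental base snapshot
    undef,              # zfspool sources get a '__migration__' snapshot automatically
    10 * 1024 * 1024,   # rate limit in bytes/s, enforced through cstream -t
    0,                  # secure transport (plain SSH pipe, no pvecm mtunnel)
    0,                  # do not include snapshots
    sub { print "$_[0]\n" },
);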