X-Git-Url: https://git.proxmox.com/?a=blobdiff_plain;f=PVE%2FStorage.pm;h=298976f8d669f108cdd0a8d3bc0787f723ea9f0c;hb=cfdffd8a20a1bc21b2059c1be59188340ecba603;hp=4772c6c06c1f2b64b8b4903d457ba77ffbb25481;hpb=e8a7e764afbee31609583ecce4300ba24cfff199;p=pve-storage.git

diff --git a/PVE/Storage.pm b/PVE/Storage.pm
index 4772c6c..298976f 100755
--- a/PVE/Storage.pm
+++ b/PVE/Storage.pm
@@ -25,9 +25,10 @@ use PVE::Storage::DirPlugin;
 use PVE::Storage::LVMPlugin;
 use PVE::Storage::LvmThinPlugin;
 use PVE::Storage::NFSPlugin;
+use PVE::Storage::CIFSPlugin;
 use PVE::Storage::ISCSIPlugin;
 use PVE::Storage::RBDPlugin;
-use PVE::Storage::SheepdogPlugin;
+use PVE::Storage::CephFSPlugin;
 use PVE::Storage::ISCSIDirectPlugin;
 use PVE::Storage::GlusterfsPlugin;
 use PVE::Storage::ZFSPoolPlugin;
@@ -35,16 +36,21 @@ use PVE::Storage::ZFSPlugin;
 use PVE::Storage::DRBDPlugin;
 
 # Storage API version. Increment it on changes in the storage API interface.
-use constant APIVER => 1;
+use constant APIVER => 3;
+# Age is the number of versions we're backward compatible with.
+# This is like having 'current=APIVER' and age='APIAGE' in libtool,
+# see https://www.gnu.org/software/libtool/manual/html_node/Libtool-versioning.html
+use constant APIAGE => 2;
 
 # load standard plugins
 PVE::Storage::DirPlugin->register();
 PVE::Storage::LVMPlugin->register();
 PVE::Storage::LvmThinPlugin->register();
 PVE::Storage::NFSPlugin->register();
+PVE::Storage::CIFSPlugin->register();
 PVE::Storage::ISCSIPlugin->register();
 PVE::Storage::RBDPlugin->register();
-PVE::Storage::SheepdogPlugin->register();
+PVE::Storage::CephFSPlugin->register();
 PVE::Storage::ISCSIDirectPlugin->register();
 PVE::Storage::GlusterfsPlugin->register();
 PVE::Storage::ZFSPoolPlugin->register();
@@ -61,18 +67,29 @@ if ( -d '/usr/share/perl5/PVE/Storage/Custom' ) {
        eval {
            require $file;
+
+           # Check perl interface:
+           die "not derived from PVE::Storage::Plugin\n"
+               if !$modname->isa('PVE::Storage::Plugin');
+           die "does not provide an api() method\n"
+               if !$modname->can('api');
+           # Check storage API version and that file is really storage plugin.
+           my $version = $modname->api();
+           die "implements an API version newer than current ($version > " . APIVER . ")\n"
+               if $version > APIVER;
+           my $min_version = (APIVER - APIAGE);
+           die "API version too old, please update the plugin ($version < $min_version)\n"
+               if $version < $min_version;
+           import $file;
+           $modname->register();
+
+           # If we got this far and the API version is not the same, make some
+           # noise:
+           warn "Plugin \"$modname\" is implementing an older storage API, an upgrade is recommended\n"
+               if $version != APIVER;
        };
        if ($@) {
-           warn $@;
-       # Check storage API version and that file is really storage plugin.
-       } elsif ($modname->isa('PVE::Storage::Plugin') && $modname->can('api') && $modname->api() == APIVER) {
-           eval {
-               import $file;
-               $modname->register();
-           };
-           warn $@ if $@;
-       } else {
-           warn "Error loading storage plugin \"$modname\" because of API version mismatch. Please, update it.\n"
+           warn "Error loading storage plugin \"$modname\": $@";
        }
    });
 }
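With APIVER at 3 and APIAGE at 2, the loader above accepts any third-party plugin whose api() returns a value between APIVER - APIAGE and APIVER (currently 1..3), warns when the reported version is older than current, and rejects everything outside that window with a clear error. A minimal skeleton that would pass these checks could look like the sketch below, dropped into /usr/share/perl5/PVE/Storage/Custom/; the module name, storage type and returned version are illustrative, and a real plugin must also implement the PVE::Storage::Plugin methods it actually needs:

    package PVE::Storage::Custom::ExamplePlugin;    # hypothetical plugin

    use strict;
    use warnings;

    use base qw(PVE::Storage::Plugin);

    # Storage API version this plugin was written against; anything in the
    # range [APIVER - APIAGE, APIVER] is accepted by the loader above.
    sub api {
        return 3;
    }

    sub type {
        return 'example';
    }

    # plugindata(), properties(), options() and the volume handling methods
    # the plugin actually needs would follow here.

    1;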
@@ -82,6 +99,8 @@ PVE::Storage::Plugin->init();
 
 my $UDEVADM = '/sbin/udevadm';
 
+our $iso_extension_re = qr/\.(?:iso|img)/i;
+
 # PVE::Storage utility functions
 
 sub config {
@@ -145,6 +164,17 @@ sub storage_check_enabled {
     return storage_check_node($cfg, $storeid, $node, $noerr);
 }
 
+# storage_can_replicate:
+# return true if storage supports replication
+# (volumes allocated with vdisk_alloc() have the replication feature)
+sub storage_can_replicate {
+    my ($cfg, $storeid, $format) = @_;
+
+    my $scfg = storage_config($cfg, $storeid);
+    my $plugin = PVE::Storage::Plugin->lookup($scfg->{type});
+    return $plugin->storage_can_replicate($scfg, $storeid, $format);
+}
+
 sub storage_ids {
     my ($cfg) = @_;
@@ -369,7 +399,8 @@ sub check_volume_access {
        if ($sid) {
            my ($vtype, undef, $ownervm) = parse_volname($cfg, $volid);
            if ($vtype eq 'iso' || $vtype eq 'vztmpl') {
-               # we simply allow access
+               # require at least read access to storage, (custom) templates/ISOs could be sensitive
+               $rpcenv->check_any($user, "/storage/$sid", ['Datastore.AllocateSpace', 'Datastore.Audit']);
            } elsif (defined($ownervm) && defined($vmid) && ($ownervm == $vmid)) {
                # we are owner - allow access
            } elsif ($vtype eq 'backup' && $ownervm) {
@@ -472,7 +503,7 @@ sub path_to_volume_id {
                return ('images', $info->{volid});
            }
        }
-    } elsif ($path =~ m!^$isodir/([^/]+\.[Ii][Ss][Oo])$!) {
+    } elsif ($path =~ m!^$isodir/([^/]+$iso_extension_re)$!) {
        my $name = $1;
        return ('iso', "$sid:iso/$name");
    } elsif ($path =~ m!^$tmpldir/([^/]+\.tar\.gz)$!) {
@@ -525,7 +556,7 @@ sub abs_filesystem_path {
 }
 
 sub storage_migrate {
-    my ($cfg, $volid, $target_sshinfo, $target_storeid, $target_volname, $base_snapshot, $snapshot, $ratelimit_bps, $insecure, $with_snapshots) = @_;
+    my ($cfg, $volid, $target_sshinfo, $target_storeid, $target_volname, $base_snapshot, $snapshot, $ratelimit_bps, $insecure, $with_snapshots, $logfunc) = @_;
 
     my ($storeid, $volname) = parse_volume_id($volid);
     $target_volname = $volname if !$target_volname;
@@ -557,21 +588,19 @@ sub storage_migrate {
        }
    }
 
-    my @formats = volume_transfer_formats($cfg, $volid, $volid, $snapshot, $base_snapshot, $with_snapshots);
+    my @formats = volume_transfer_formats($cfg, $volid, $target_volid, $snapshot, $base_snapshot, $with_snapshots);
     die "cannot migrate from storage type '$scfg->{type}' to '$tcfg->{type}'\n" if !@formats;
     my $format = $formats[0];
 
-    my @insecurecmd;
+    my $import_fn = '-'; # let pvesm import read from stdin per default
     if ($insecure) {
-       @insecurecmd = ('pvecm', 'mtunnel', '-run-command', 1);
-       if (my $network = $target_sshinfo->{network}) {
-           push @insecurecmd, '-migration_network', $network;
-       }
+       my $net = $target_sshinfo->{network} // $target_sshinfo->{ip};
+       $import_fn = "tcp://$net";
     }
 
     $with_snapshots = $with_snapshots ? 1 : 0; # sanitize for passing as cli parameter
 
     my $send = ['pvesm', 'export', $volid, $format, '-', '-with-snapshots', $with_snapshots];
-    my $recv = [@$ssh, @insecurecmd, '--', 'pvesm', 'import', $volid, $format, '-', '-with-snapshots', $with_snapshots];
+    my $recv = [@$ssh, '--', 'pvesm', 'import', $target_volid, $format, $import_fn, '-with-snapshots', $with_snapshots];
 
     if (defined($snapshot)) {
        push @$send, '-snapshot', $snapshot
     }
@@ -608,7 +637,7 @@
                die "import failed: exit code ".($?>>8)."\n";
            }
        } else {
-           run_command([$send, @cstream, $recv]);
+           run_command([$send, @cstream, $recv], logfunc => $logfunc);
        }
    };
    my $err = $@;
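The reworked storage_migrate() now always streams through 'pvesm export' on the source into 'pvesm import' on the target: the receiving side reads from stdin when going over the SSH tunnel, or from a tcp:// endpoint derived from the target's migration network (or IP) when $insecure is set, and the new $logfunc is handed to run_command() so callers can log the pipeline's output. A hedged sketch of a caller using the new signature (node, storage and volume names are made up; get_ssh_info() is assumed to return the usual ssh info hash):

    # stream a local volume to another node, logging progress line by line
    my $cfg = PVE::Storage::config();
    my $target_sshinfo = PVE::Cluster::get_ssh_info('othernode');   # assumption

    PVE::Storage::storage_migrate(
        $cfg,
        'local-lvm:vm-100-disk-0',          # source volid (illustrative)
        $target_sshinfo,
        'local-lvm',                        # target storage
        undef,                              # keep the volume name
        undef,                              # no base snapshot
        undef,                              # no snapshot
        undef,                              # no rate limit
        0,                                  # secure: tunnel over SSH
        0,                                  # without snapshots
        sub { print "migrate: $_[0]\n" },   # new: $logfunc gets each output line
    );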
@@ -656,6 +685,30 @@ sub vdisk_create_base {
     });
 }
 
+sub map_volume {
+    my ($cfg, $volid, $snapname) = @_;
+
+    my ($storeid, $volname) = parse_volume_id($volid);
+
+    my $scfg = storage_config($cfg, $storeid);
+
+    my $plugin = PVE::Storage::Plugin->lookup($scfg->{type});
+
+    return $plugin->map_volume($storeid, $scfg, $volname, $snapname);
+}
+
+sub unmap_volume {
+    my ($cfg, $volid, $snapname) = @_;
+
+    my ($storeid, $volname) = parse_volume_id($volid);
+
+    my $scfg = storage_config($cfg, $storeid);
+
+    my $plugin = PVE::Storage::Plugin->lookup($scfg->{type});
+
+    return $plugin->unmap_volume($storeid, $scfg, $volname, $snapname);
+}
+
 sub vdisk_alloc {
     my ($cfg, $storeid, $vmid, $fmt, $name, $size) = @_;
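map_volume() and unmap_volume() give callers a uniform way to ask a plugin for a host-side path or device for a volume (for example a krbd block device) before accessing it directly, and to release that mapping afterwards; plugins that need no special mapping step can simply hand back the volume's regular path. A hedged usage sketch (the volume ID is made up):

    my $cfg = PVE::Storage::config();
    my $volid = 'mystorage:vm-100-disk-0';          # illustrative

    my $path = PVE::Storage::map_volume($cfg, $volid, undef);
    eval {
        # ... read from or write to $path ...
    };
    my $err = $@;
    PVE::Storage::unmap_volume($cfg, $volid, undef);
    die $err if $err;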
@@ -718,74 +771,6 @@ sub vdisk_free {
     $rpcenv->fork_worker('imgdel', undef, $authuser, $cleanup_worker);
 }
 
-#list iso or openvz template ($tt = <iso|vztmpl|backup>)
-sub template_list {
-    my ($cfg, $storeid, $tt) = @_;
-
-    die "unknown template type '$tt'\n"
-       if !($tt eq 'iso' || $tt eq 'vztmpl' || $tt eq 'backup');
-
-    my $ids = $cfg->{ids};
-
-    storage_check_enabled($cfg, $storeid) if ($storeid);
-
-    my $res = {};
-
-    # query the storage
-
-    foreach my $sid (keys %$ids) {
-       next if $storeid && $storeid ne $sid;
-
-       my $scfg = $ids->{$sid};
-       my $type = $scfg->{type};
-
-       next if !storage_check_enabled($cfg, $sid, undef, 1);
-
-       next if $tt eq 'iso' && !$scfg->{content}->{iso};
-       next if $tt eq 'vztmpl' && !$scfg->{content}->{vztmpl};
-       next if $tt eq 'backup' && !$scfg->{content}->{backup};
-
-       activate_storage($cfg, $sid);
-
-       if ($scfg->{path}) {
-           my $plugin = PVE::Storage::Plugin->lookup($scfg->{type});
-
-           my $path = $plugin->get_subdir($scfg, $tt);
-
-           foreach my $fn (<$path/*>) {
-
-               my $info;
-
-               if ($tt eq 'iso') {
-                   next if $fn !~ m!/([^/]+\.[Ii][Ss][Oo])$!;
-
-                   $info = { volid => "$sid:iso/$1", format => 'iso' };
-
-               } elsif ($tt eq 'vztmpl') {
-                   next if $fn !~ m!/([^/]+\.tar\.([gx]z))$!;
-
-                   $info = { volid => "$sid:vztmpl/$1", format => "t$2" };
-
-               } elsif ($tt eq 'backup') {
-                   next if $fn !~ m!/([^/]+\.(tar|tar\.gz|tar\.lzo|tgz|vma|vma\.gz|vma\.lzo))$!;
-
-                   $info = { volid => "$sid:backup/$1", format => $2 };
-               }
-
-               $info->{size} = -s $fn;
-
-               push @{$res->{$sid}}, $info;
-           }
-
-       }
-
-       @{$res->{$sid}} = sort {lc($a->{volid}) cmp lc ($b->{volid}) } @{$res->{$sid}} if $res->{$sid};
-    }
-
-    return $res;
-}
-
-
 sub vdisk_list {
     my ($cfg, $storeid, $vmid, $vollist) = @_;
@@ -830,40 +815,53 @@
 
     return $res;
 }
 
+sub template_list {
+    my ($cfg, $storeid, $tt) = @_;
+
+    die "unknown template type '$tt'\n"
+       if !($tt eq 'iso' || $tt eq 'vztmpl' || $tt eq 'backup' || $tt eq 'snippets');
+
+    my $ids = $cfg->{ids};
+
+    storage_check_enabled($cfg, $storeid) if ($storeid);
+
+    my $res = {};
+
+    # query the storage
+    foreach my $sid (keys %$ids) {
+       next if $storeid && $storeid ne $sid;
+
+       my $scfg = $ids->{$sid};
+       my $type = $scfg->{type};
+
+       next if !$scfg->{content}->{$tt};
+
+       next if !storage_check_enabled($cfg, $sid, undef, 1);
+
+       $res->{$sid} = volume_list($cfg, $sid, undef, $tt);
+    }
+
+    return $res;
+}
+
 sub volume_list {
     my ($cfg, $storeid, $vmid, $content) = @_;
 
-    my @ctypes = qw(images vztmpl iso backup);
+    my @ctypes = qw(rootdir images vztmpl iso backup snippets);
 
     my $cts = $content ? [ $content ] : [ @ctypes ];
 
     my $scfg = PVE::Storage::storage_config($cfg, $storeid);
 
-    my $res = [];
-    foreach my $ct (@$cts) {
-       my $data;
-       if ($ct eq 'images') {
-           $data = vdisk_list($cfg, $storeid, $vmid);
-       } elsif ($ct eq 'iso' && !defined($vmid)) {
-           $data = template_list($cfg, $storeid, 'iso');
-       } elsif ($ct eq 'vztmpl'&& !defined($vmid)) {
-           $data = template_list ($cfg, $storeid, 'vztmpl');
-       } elsif ($ct eq 'backup') {
-           $data = template_list ($cfg, $storeid, 'backup');
-           foreach my $item (@{$data->{$storeid}}) {
-               if (defined($vmid)) {
-                   @{$data->{$storeid}} = grep { $_->{volid} =~ m/\S+-$vmid-\S+/ } @{$data->{$storeid}};
-               }
-           }
-       }
+    $cts = [ grep { defined($scfg->{content}->{$_}) } @$cts ];
 
-       next if !$data || !$data->{$storeid};
+    my $plugin = PVE::Storage::Plugin->lookup($scfg->{type});
 
-       foreach my $item (@{$data->{$storeid}}) {
-           $item->{content} = $ct;
-           push @$res, $item;
-       }
-    }
+    activate_storage($cfg, $storeid);
+
+    my $res = $plugin->list_volumes($storeid, $scfg, $vmid, $cts);
+
+    @$res = sort {lc($a->{volid}) cmp lc ($b->{volid}) } @$res;
 
     return $res;
 }
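Listing is now delegated to the plugin's list_volumes() method: volume_list() filters the requested content types against what the storage is configured for, activates the storage, and returns one flat, sorted array of volume hashes, while template_list() has become a thin wrapper that calls volume_list() for every storage offering the requested content type. A hedged example (the storage ID and the exact set of returned fields are illustrative; volid and the content filter come straight from the code above):

    my $cfg = PVE::Storage::config();

    # all ISO images on storage 'local'
    my $isos = PVE::Storage::volume_list($cfg, 'local', undef, 'iso');
    foreach my $entry (@$isos) {
        # e.g. { volid => 'local:iso/debian.iso', content => 'iso',
        #        format => 'iso', size => 123456789 }
        print "$entry->{volid}\n";
    }

    # backups on every storage that has the 'backup' content type configured
    my $backups = PVE::Storage::template_list($cfg, undef, 'backup');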
@@ -990,7 +988,7 @@ sub deactivate_volumes {
 }
 
 sub storage_info {
-    my ($cfg, $content) = @_;
+    my ($cfg, $content, $includeformat) = @_;
 
     my $ids = $cfg->{ids};
 
@@ -1000,8 +998,7 @@ sub storage_info {
     my $slist = [];
     foreach my $storeid (keys %$ids) {
-
-       next if !storage_check_enabled($cfg, $storeid, undef, 1);
+       my $storage_enabled = defined(storage_check_enabled($cfg, $storeid, undef, 1));
 
        if (defined($content)) {
            my $want_ctype = 0;
@@ -1011,7 +1008,7 @@
                    last;
                }
            }
-           next if !$want_ctype;
+           next if !$want_ctype || !$storage_enabled;
        }
 
        my $type = $ids->{$storeid}->{type};
@@ -1024,6 +1021,7 @@
            shared => $ids->{$storeid}->{shared} ? 1 : 0,
            content => PVE::Storage::Plugin::content_hash_to_string($ids->{$storeid}->{content}),
            active => 0,
+           enabled => $storage_enabled ? 1 : 0,
        };
 
        push @$slist, $storeid;
@@ -1033,7 +1031,18 @@
     foreach my $storeid (keys %$ids) {
        my $scfg = $ids->{$storeid};
 
+       next if !$info->{$storeid};
+       next if !$info->{$storeid}->{enabled};
+
+       my $plugin = PVE::Storage::Plugin->lookup($scfg->{type});
+       if ($includeformat) {
+           my $pd = $plugin->plugindata();
+           $info->{$storeid}->{format} = $pd->{format}
+               if $pd->{format};
+           $info->{$storeid}->{select_existing} = $pd->{select_existing}
+               if $pd->{select_existing};
+       }
 
        eval { activate_storage($cfg, $storeid, $cache); };
        if (my $err = $@) {
@@ -1041,9 +1050,7 @@
            next;
        }
 
-       my $plugin = PVE::Storage::Plugin->lookup($scfg->{type});
-       my ($total, $avail, $used, $active);
-       eval { ($total, $avail, $used, $active) = $plugin->status($storeid, $scfg, $cache); };
+       my ($total, $avail, $used, $active) = eval { $plugin->status($storeid, $scfg, $cache); };
        warn $@ if $@;
        next if !$active;
        $info->{$storeid}->{total} = int($total);
@@ -1093,6 +1100,41 @@ sub scan_nfs {
     return $res;
 }
 
+sub scan_cifs {
+    my ($server_in, $user, $password, $domain) = @_;
+
+    my $server;
+    if (!($server = resolv_server ($server_in))) {
+       die "unable to resolve address for server '${server_in}'\n";
+    }
+
+    # we only support Windows greater than 2012 for the CIFS scan, so use smb3
+    my $cmd = ['/usr/bin/smbclient', '-m', 'smb3', '-d', '0', '-L', $server];
+    if (defined($user)) {
+       die "password is required" if !defined($password);
+       push @$cmd, '-U', "$user\%$password";
+       push @$cmd, '-W', $domain if defined($domain);
+    } else {
+       push @$cmd, '-N';
+    }
+
+    my $res = {};
+    run_command($cmd,
+               outfunc => sub {
+                   my $line = shift;
+                   if ($line =~ m/(\S+)\s*Disk\s*(\S*)/) {
+                       $res->{$1} = $2;
+                   } elsif ($line =~ m/(NT_STATUS_(\S*))/) {
+                       $res->{$1} = '';
+                   }
+               },
+               errfunc => sub {},
+               noerr => 1
+    );
+
+    return $res;
+}
+
 sub scan_zfs {
 
     my $cmd = ['zfs', 'list', '-t', 'filesystem', '-H', '-o', 'name,avail,used'];
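scan_cifs() shells out to smbclient -L (forcing SMB3) and collects the shares of type 'Disk' from the listing into a hash of share name to comment; NT_STATUS_* tokens from a failed connection end up as keys with an empty value, so callers can detect errors. A hedged sketch of how a caller might use it (server, credentials and the resulting share names are made up):

    my $password = 'secret';
    my $shares = PVE::Storage::scan_cifs('fileserver.example.com', 'backupuser', $password, 'EXAMPLE');

    # $shares could now look like:
    #   { 'iso' => 'Installation media', 'backup' => 'VM backups' }
    # or, on a failed logon, something like:
    #   { 'NT_STATUS_LOGON_FAILURE' => '' }
    foreach my $share (sort keys %$shares) {
        next if $share =~ m/^NT_STATUS_/;
        print "found share: $share\n";
    }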
@@ -1131,62 +1173,6 @@ sub resolv_portal {
     raise_param_exc({ portal => "unable to resolve portal address '$portal'" });
 }
 
-# idea is from usbutils package (/usr/bin/usb-devices) script
-sub __scan_usb_device {
-    my ($res, $devpath, $parent, $level) = @_;
-
-    return if ! -d $devpath;
-    return if $level && $devpath !~ m/^.*[-.](\d+)$/;
-    my $port = $level ? int($1 - 1) : 0;
-
-    my $busnum = int(file_read_firstline("$devpath/busnum"));
-    my $devnum = int(file_read_firstline("$devpath/devnum"));
-
-    my $d = {
-       port => $port,
-       level => $level,
-       busnum => $busnum,
-       devnum => $devnum,
-       speed => file_read_firstline("$devpath/speed"),
-       class => hex(file_read_firstline("$devpath/bDeviceClass")),
-       vendid => file_read_firstline("$devpath/idVendor"),
-       prodid => file_read_firstline("$devpath/idProduct"),
-    };
-
-    if ($level) {
-       my $usbpath = $devpath;
-       $usbpath =~ s|^.*/\d+\-||;
-       $d->{usbpath} = $usbpath;
-    }
-
-    my $product = file_read_firstline("$devpath/product");
-    $d->{product} = $product if $product;
-
-    my $manu = file_read_firstline("$devpath/manufacturer");
-    $d->{manufacturer} = $manu if $manu;
-
-    my $serial => file_read_firstline("$devpath/serial");
-    $d->{serial} = $serial if $serial;
-
-    push @$res, $d;
-
-    foreach my $subdev (<$devpath/$busnum-*>) {
-       next if $subdev !~ m|/$busnum-[0-9]+(\.[0-9]+)*$|;
-       __scan_usb_device($res, $subdev, $devnum, $level + 1);
-    }
-
-};
-
-sub scan_usb {
-
-    my $devlist = [];
-
-    foreach my $device (</sys/bus/usb/devices/usb*>) {
-       __scan_usb_device($devlist, $device, 0, 0);
-    }
-
-    return $devlist;
-}
 
 sub scan_iscsi {
     my ($portal_in) = @_;
@@ -1476,7 +1462,7 @@ sub complete_storage_enabled {
 sub complete_content_type {
     my ($cmdname, $pname, $cvalue) = @_;
 
-    return [qw(rootdir images vztmpl iso backup)];
+    return [qw(rootdir images vztmpl iso backup snippets)];
 }
 
 sub complete_volume {
@@ -1513,4 +1499,92 @@ sub complete_volume {
     return $res;
 }
 
+# Various io-heavy operations require io/bandwidth limits which can be
+# configured on multiple levels: the global defaults in datacenter.cfg, and
+# per-storage overrides. When we want to do a restore from storage A to storage
+# B, we should take the smaller limit defined for storages A and B, and if no
+# such limit was specified, use the one from datacenter.cfg.
+sub get_bandwidth_limit {
+    my ($operation, $storage_list, $override) = @_;
+
+    # called for each limit (global, per-storage) with the 'default' and the
+    # $operation limit and should update $override for every limit affecting
+    # us.
+    my $use_global_limits = 0;
+    my $apply_limit = sub {
+       my ($bwlimit) = @_;
+       if (defined($bwlimit)) {
+           my $limits = PVE::JSONSchema::parse_property_string('bwlimit', $bwlimit);
+           my $limit = $limits->{$operation} // $limits->{default};
+           if (defined($limit)) {
+               if (!$override || $limit < $override) {
+                   $override = $limit;
+               }
+               return;
+           }
+       }
+       # If there was no applicable limit, try to apply the global ones.
+       $use_global_limits = 1;
+    };
+
+    my ($rpcenv, $authuser);
+    if (defined($override)) {
+       $rpcenv = PVE::RPCEnvironment->get();
+       $authuser = $rpcenv->get_user();
+    }
+
+    # Apply per-storage limits - if there are storages involved.
+    if (defined($storage_list) && @$storage_list) {
+       my $config = config();
+
+       # The Datastore.Allocate permission allows us to modify the per-storage
+       # limits, therefore it also allows us to override them.
+       # Since we most likely have multiple storages to check, do a quick check on
+       # the general '/storage' path to see if we can skip the checks entirely:
+       return $override if $rpcenv && $rpcenv->check($authuser, '/storage', ['Datastore.Allocate'], 1);
+
+       my %done;
+       foreach my $storage (@$storage_list) {
+           next if !defined($storage);
+           # Avoid duplicate checks:
+           next if $done{$storage};
+           $done{$storage} = 1;
+
+           # Otherwise we may still have individual /storage/$ID permissions:
+           if (!$rpcenv || !$rpcenv->check($authuser, "/storage/$storage", ['Datastore.Allocate'], 1)) {
+               # And if not: apply the limits.
+               my $storecfg = storage_config($config, $storage);
+               $apply_limit->($storecfg->{bwlimit});
+           }
+       }
+
+       # Storage limits take precedence over the datacenter defaults, so if
+       # a limit was applied:
+       return $override if !$use_global_limits;
+    }
+
+    # Sys.Modify on '/' means we can change datacenter.cfg which contains the
+    # global default limits.
+    if (!$rpcenv || !$rpcenv->check($authuser, '/', ['Sys.Modify'], 1)) {
+       # So if we cannot modify global limits, apply them to our currently
+       # requested override.
+       my $dc = cfs_read_file('datacenter.cfg');
+       $apply_limit->($dc->{bwlimit});
+    }
+
+    return $override;
+}
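get_bandwidth_limit() resolves the effective limit for one operation from three sources: the caller's requested override, the per-storage 'bwlimit' options, and the global defaults in datacenter.cfg, honouring the permission rules spelled out in the comments above (users allowed to change the respective limits keep their override). A hedged usage sketch for a restore path (the storage IDs and the numeric value are made up; 'restore' is one of the bwlimit property keys):

    my $requested = 50000;   # user-requested override, illustrative
    my $bwlimit = PVE::Storage::get_bandwidth_limit(
        'restore', ['backupstore', 'local-lvm'], $requested);

    # $bwlimit is now either the caller's override (if the user may override
    # limits), or the smallest applicable 'restore'/'default' limit from the
    # involved storages or datacenter.cfg; undef means unlimited.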
+
+# checks if the storage id is available and dies if not
+sub assert_sid_unused {
+    my ($sid) = @_;
+
+    my $cfg = config();
+    if (my $scfg = storage_config($cfg, $sid, 1)) {
+       die "storage ID '$sid' already defined\n";
+    }
+
+    return undef;
+}
+
 1;
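assert_sid_unused() is a small guard for code that creates new storage definitions: it dies with a clear message when the ID is already present in the configuration and returns undef otherwise. A minimal hedged sketch of a create path using it (the storage ID is made up):

    my $sid = 'mynewstore';                 # illustrative
    PVE::Storage::assert_sid_unused($sid);  # dies: "storage ID 'mynewstore' already defined"
    # ... safe to add the new section to storage.cfg ...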