use PVE::Cluster qw(cfs_read_file cfs_write_file);
use PVE::SafeSyslog;
use PVE::Tools qw(extract_param);
-use PVE::Exception qw(raise raise_param_exc);
+use PVE::Exception qw(raise raise_param_exc raise_perm_exc);
use PVE::Storage;
use PVE::JSONSchema qw(get_standard_option);
use PVE::RESTHandler;
});
};
-my $check_storage_access_copy = sub {
+my $check_storage_access_clone = sub {
my ($rpcenv, $authuser, $storecfg, $conf, $storage) = @_;
my $sharedvm = 1;
return $res;
}});
+
+
__PACKAGE__->register_method({
name => 'create_vm',
path => '',
method => 'POST',
description => "Create or restore a virtual machine.",
permissions => {
- description => "You need 'VM.Allocate' permissions on /vms/{vmid} or on the VM pool /pool/{pool}. If you create disks you need 'Datastore.AllocateSpace' on any used storage.",
- check => [ 'or',
- [ 'perm', '/vms/{vmid}', ['VM.Allocate']],
- [ 'perm', '/pool/{pool}', ['VM.Allocate'], require_param => 'pool'],
- ],
+ description => "You need 'VM.Allocate' permissions on /vms/{vmid} or on the VM pool /pool/{pool}. " .
+ "For restore (option 'archive'), it is enough if the user has 'VM.Backup' permission and the VM already exists. " .
+ "If you create disks you need 'Datastore.AllocateSpace' on any used storage.",
+ user => 'all', # check inside
},
protected => 1,
proxyto => 'node',
$rpcenv->check($authuser, "/storage/$storage", ['Datastore.AllocateSpace'])
if defined($storage);
+ if ($rpcenv->check($authuser, "/vms/$vmid", ['VM.Allocate'], 1)) {
+ # OK
+ } elsif ($pool && $rpcenv->check($authuser, "/pool/$pool", ['VM.Allocate'], 1)) {
+ # OK
+ } elsif ($archive && $force && (-f $filename) &&
+ $rpcenv->check($authuser, "/vms/$vmid", ['VM.Backup'], 1)) {
+ # OK: user has VM.Backup permissions, and wants to restore an existing VM
+ } else {
+ raise_perm_exc();
+ }
+
if (!$archive) {
&$resolve_cdrom_alias($param);
}
}
- my $addVMtoPoolFn = sub {
- my $usercfg = cfs_read_file("user.cfg");
- if (my $data = $usercfg->{pools}->{$pool}) {
- $data->{vms}->{$vmid} = 1;
- $usercfg->{vms}->{$vmid} = $pool;
- cfs_write_file("user.cfg", $usercfg);
- }
- };
-
my $restorefn = sub {
# fixme: this test does not work if VM exists on other node!
pool => $pool,
unique => $unique });
- PVE::AccessControl::lock_user_config($addVMtoPoolFn, "can't add VM to pool") if $pool;
+ PVE::AccessControl::add_vm_to_pool($vmid, $pool) if $pool;
};
return $rpcenv->fork_worker('qmrestore', $vmid, $authuser, $realcmd);
die "create failed - $err";
}
- PVE::AccessControl::lock_user_config($addVMtoPoolFn, "can't add VM to pool") if $pool;
+ PVE::AccessControl::add_vm_to_pool($vmid, $pool) if $pool;
};
return $rpcenv->fork_worker('qmcreate', $vmid, $authuser, $realcmd);
if (!PVE::QemuServer::drive_is_cdrom($drive)) {
my $volid = $drive->{file};
+
if (&$vm_is_volid_owner($storecfg, $vmid, $volid)) {
- if ($force || $key =~ m/^unused/) {
- eval { PVE::Storage::vdisk_free($storecfg, $volid); };
+ if ($force || $key =~ m/^unused/) {
+ eval {
+ # check if the disk is really unused
+ my $used_paths = PVE::QemuServer::get_used_paths($vmid, $storecfg, $conf, 1, $key);
+ my $path = PVE::Storage::path($storecfg, $volid);
+
+ die "unable to delete '$volid' - volume is still in use (snapshot?)\n"
+ if $used_paths->{$path};
+
+ PVE::Storage::vdisk_free($storecfg, $volid);
+ };
die $@ if $@;
} else {
PVE::QemuServer::add_unused_volume($conf, $volid, $vmid);
my $balloon = defined($param->{balloon}) ? $param->{balloon} : $conf->{balloon};
die "balloon value too large (must be smaller than assigned memory)\n"
- if $balloon > $maxmem;
+ if $balloon && $balloon > $maxmem;
}
PVE::Cluster::log_msg('info', $authuser, "update VM $vmid: " . join (' ', @paramarr));
PVE::QemuServer::vm_destroy($storecfg, $vmid, $skiplock);
- PVE::AccessControl::lock_user_config($delVMfromPoolFn, "pool cleanup failed");
+ PVE::AccessControl::remove_vm_from_pool($vmid);
};
return $rpcenv->fork_worker('qmdestroy', $vmid, $authuser, $realcmd);
feature => {
description => "Feature to check.",
type => 'string',
- enum => [ 'snapshot', 'clone' ],
+ enum => [ 'snapshot', 'clone', 'copy' ],
},
snapname => get_standard_option('pve-snapshot-name', {
optional => 1,
},
},
returns => {
- type => 'boolean'
+ type => "object",
+ properties => {
+ hasFeature => { type => 'boolean' },
+ nodes => {
+ type => 'array',
+ items => { type => 'string' },
+ }
+ },
},
code => sub {
my ($param) = @_;
}
my $storecfg = PVE::Storage::config();
- my $hasfeature = PVE::QemuServer::has_feature($feature, $conf, $storecfg, $snapname, $running);
- my $res = $hasfeature ? 1 : 0 ;
- return $res;
+ my $nodelist = PVE::QemuServer::shared_nodes($conf, $storecfg);
+ my $hasFeature = PVE::QemuServer::has_feature($feature, $conf, $storecfg, $snapname, $running);
+
+ return {
+ hasFeature => $hasFeature,
+ nodes => [ keys %$nodelist ],
+ };
}});
__PACKAGE__->register_method({
- name => 'copy_vm',
- path => '{vmid}/copy',
+ name => 'clone_vm',
+ path => '{vmid}/clone',
method => 'POST',
protected => 1,
proxyto => 'node',
description => "Create a copy of virtual machine/template.",
permissions => {
- description => "You need 'VM.Copy' permissions on /vms/{vmid}, and 'VM.Allocate' permissions " .
+ description => "You need 'VM.Clone' permissions on /vms/{vmid}, and 'VM.Allocate' permissions " .
"on /vms/{newid} (or on the VM pool /pool/{pool}). You also need " .
"'Datastore.AllocateSpace' on any used storage.",
check =>
[ 'and',
- ['perm', '/vms/{vmid}', [ 'VM.Copy' ]],
+ ['perm', '/vms/{vmid}', [ 'VM.Clone' ]],
[ 'or',
[ 'perm', '/vms/{newid}', ['VM.Allocate']],
[ 'perm', '/pool/{pool}', ['VM.Allocate'], require_param => 'pool'],
properties => {
node => get_standard_option('pve-node'),
vmid => get_standard_option('pve-vmid'),
- newid => get_standard_option('pve-vmid', { description => 'VMID for the copy.' }),
+ newid => get_standard_option('pve-vmid', { description => 'VMID for the clone.' }),
name => {
optional => 1,
type => 'string', format => 'dns-name',
optional => 1,
}),
storage => get_standard_option('pve-storage-id', {
- description => "Target storage for full copy.",
+ description => "Target storage for full clone.",
requires => 'full',
optional => 1,
}),
optional => 1,
type => 'boolean',
description => "Create a full copy of all disk. This is always done when " .
- "you copy a normal VM. For VM templates, we try to create a linked copy by default.",
+ "you clone a normal VM. For VM templates, we try to create a linked clone by default.",
default => 0,
},
target => get_standard_option('pve-node', {
my $newid = extract_param($param, 'newid');
- # fixme: update pool after create
my $pool = extract_param($param, 'pool');
if (defined($pool)) {
my $localnode = PVE::INotify::nodename();
- undef $target if $target eq $localnode || $target eq 'localhost';
+ undef $target if $target && ($target eq $localnode || $target eq 'localhost');
PVE::Cluster::check_node_exists($target) if $target;
my $storecfg = PVE::Storage::config();
+ if ($storage) {
+ # check if storage is enabled on local node
+ PVE::Storage::storage_check_enabled($storecfg, $storage);
+ if ($target) {
+ # check if storage is available on target node
+ PVE::Storage::storage_check_node($storecfg, $storage, $target);
+ # clone only works if target storage is shared
+ my $scfg = PVE::Storage::storage_config($storecfg, $storage);
+ die "can't clone to non-shared storage '$storage'\n" if !$scfg->{shared};
+ }
+ }
+
PVE::Cluster::check_cfs_quorum();
my $running = PVE::QemuServer::check_running($vmid) || 0;
- die "Copy running VM $vmid not implemented\n" if $running; # fixme: implement this
-
# exclusive lock if VM is running - else shared lock is enough;
my $shared_lock = $running ? 0 : 1;
- # fixme: do early checks - re-check after lock
-
- # fixme: impl. target node parameter (mv VM config if all storages are shared)
+ my $clonefn = sub {
- my $copyfn = sub {
+ # do all tests after lock
+ # we also try to do all tests before we fork the worker
- # all tests after lock
my $conf = PVE::QemuServer::load_config($vmid);
PVE::QemuServer::check_lock($conf);
my $oldconf = $snapname ? $conf->{snapshots}->{$snapname} : $conf;
- my $sharedvm = &$check_storage_access_copy($rpcenv, $authuser, $storecfg, $oldconf, $storage);
+ my $sharedvm = &$check_storage_access_clone($rpcenv, $authuser, $storecfg, $oldconf, $storage);
- die "can't copy VM to node '$target' (VM uses local storage)\n" if $target && !$sharedvm;
+ die "can't clone VM to node '$target' (VM uses local storage)\n" if $target && !$sharedvm;
my $conffile = PVE::QemuServer::config_file($newid);
die "unable to create VM $newid: config file already exists\n"
if -f $conffile;
- # create empty/temp config - this fails if VM already exists on other node
- PVE::Tools::file_set_contents($conffile, "# qmcopy temporary file\nlock: copy\n");
-
- my $realcmd = sub {
- my $upid = shift;
-
- my $newvollist = [];
-
- eval {
- my $newconf = { lock => 'copy' };
- my $drives = {};
- my $vollist = [];
-
- foreach my $opt (keys %$oldconf) {
- my $value = $oldconf->{$opt};
-
- # do not copy snapshot related info
- next if $opt eq 'snapshots' || $opt eq 'parent' || $opt eq 'snaptime' ||
- $opt eq 'vmstate' || $opt eq 'snapstate';
-
- # always change MAC! address
- if ($opt =~ m/^net(\d+)$/) {
- my $net = PVE::QemuServer::parse_net($value);
- $net->{macaddr} = PVE::Tools::random_ether_addr();
- $newconf->{$opt} = PVE::QemuServer::print_net($net);
- } elsif (my $drive = PVE::QemuServer::parse_drive($opt, $value)) {
- if (PVE::QemuServer::drive_is_cdrom($drive)) {
- $newconf->{$opt} = $value; # simply copy configuration
- } else {
- $drives->{$opt} = $drive;
- push @$vollist, $drive->{file};
- }
- } else {
- # copy everything else
- $newconf->{$opt} = $value;
+ my $newconf = { lock => 'clone' };
+ my $drives = {};
+ my $vollist = [];
+
+ foreach my $opt (keys %$oldconf) {
+ my $value = $oldconf->{$opt};
+
+ # do not copy snapshot related info
+ next if $opt eq 'snapshots' || $opt eq 'parent' || $opt eq 'snaptime' ||
+ $opt eq 'vmstate' || $opt eq 'snapstate';
+
+ # always change the MAC address!
+ if ($opt =~ m/^net(\d+)$/) {
+ my $net = PVE::QemuServer::parse_net($value);
+ $net->{macaddr} = PVE::Tools::random_ether_addr();
+ $newconf->{$opt} = PVE::QemuServer::print_net($net);
+ } elsif (my $drive = PVE::QemuServer::parse_drive($opt, $value)) {
+ if (PVE::QemuServer::drive_is_cdrom($drive)) {
+ $newconf->{$opt} = $value; # simply copy configuration
+ } else {
+ if ($param->{full} || !PVE::Storage::volume_is_base($storecfg, $drive->{file})) {
+ die "Full clone feature is not available"
+ if !PVE::Storage::volume_has_feature($storecfg, 'copy', $drive->{file}, $snapname, $running);
+ $drive->{full} = 1;
}
+ $drives->{$opt} = $drive;
+ push @$vollist, $drive->{file};
}
+ } else {
+ # copy everything else
+ $newconf->{$opt} = $value;
+ }
+ }
- delete $newconf->{template};
+ delete $newconf->{template};
- if ($param->{name}) {
- $newconf->{name} = $param->{name};
- } else {
- $newconf->{name} = "Copy-of-$oldconf->{name}";
- }
-
- if ($param->{description}) {
- $newconf->{description} = $param->{description};
- }
+ if ($param->{name}) {
+ $newconf->{name} = $param->{name};
+ } else {
+ if ($oldconf->{name}) {
+ $newconf->{name} = "Copy-of-$oldconf->{name}";
+ } else {
+ $newconf->{name} = "Copy-of-VM-$vmid";
+ }
+ }
- PVE::Storage::activate_volumes($storecfg, $vollist);
+ if ($param->{description}) {
+ $newconf->{description} = $param->{description};
+ }
- eval {
- local $SIG{INT} = $SIG{TERM} = $SIG{QUIT} = $SIG{HUP} = sub { die "interrupted by signal\n"; };
+ # create empty/temp config - this fails if VM already exists on other node
+ PVE::Tools::file_set_contents($conffile, "# qmclone temporary file\nlock: clone\n");
- foreach my $opt (keys %$drives) {
- my $drive = $drives->{$opt};
+ my $realcmd = sub {
+ my $upid = shift;
- my $newvolid;
- if (!$param->{full} && PVE::Storage::volume_is_base($storecfg, $drive->{file})) {
- print "clone drive $opt ($drive->{file})\n";
- $newvolid = PVE::Storage::vdisk_clone($storecfg, $drive->{file}, $newid);
- } else {
- my ($storeid, $volname) = PVE::Storage::parse_volume_id($drive->{file});
- $storeid = $storage if $storage;
+ my $newvollist = [];
- my $fmt = undef;
- if($format){
- $fmt = $format;
- }else{
- my $defformat = PVE::Storage::storage_default_format($storecfg, $storeid);
- $fmt = $drive->{format} || $defformat;
- }
+ eval {
+ local $SIG{INT} = $SIG{TERM} = $SIG{QUIT} = $SIG{HUP} = sub { die "interrupted by signal\n"; };
- my ($size) = PVE::Storage::volume_size_info($storecfg, $drive->{file}, 3);
+ PVE::Storage::activate_volumes($storecfg, $vollist);
- print "copy drive $opt ($drive->{file})\n";
- $newvolid = PVE::Storage::vdisk_alloc($storecfg, $storeid, $newid, $fmt, undef, ($size/1024));
+ foreach my $opt (keys %$drives) {
+ my $drive = $drives->{$opt};
- PVE::QemuServer::qemu_img_convert($drive->{file}, $newvolid, $size, $snapname);
- }
+ my $newdrive = PVE::QemuServer::clone_disk($storecfg, $vmid, $running, $opt, $drive, $snapname,
+ $newid, $storage, $format, $drive->{full}, $newvollist);
- my ($size) = PVE::Storage::volume_size_info($storecfg, $newvolid, 3);
- my $disk = { file => $newvolid, size => $size };
- $newconf->{$opt} = PVE::QemuServer::print_drive($vmid, $disk);
- push @$newvollist, $newvolid;
+ $newconf->{$opt} = PVE::QemuServer::print_drive($vmid, $newdrive);
- PVE::QemuServer::update_config_nolock($newid, $newconf, 1);
- }
- };
- die $@ if $@;
+ PVE::QemuServer::update_config_nolock($newid, $newconf, 1);
+ }
delete $newconf->{lock};
PVE::QemuServer::update_config_nolock($newid, $newconf, 1);
die "Failed to move config to node '$target' - rename failed: $!\n"
if !rename($conffile, $newconffile);
}
+
+ PVE::AccessControl::add_vm_to_pool($newid, $pool) if $pool;
};
if (my $err = $@) {
unlink $conffile;
eval { PVE::Storage::vdisk_free($storecfg, $volid); };
warn $@ if $@;
}
- die "copy failed: $err";
+ die "clone failed: $err";
}
return;
};
- return $rpcenv->fork_worker('qmcopy', $vmid, $authuser, $realcmd);
+ return $rpcenv->fork_worker('qmclone', $vmid, $authuser, $realcmd);
};
return PVE::QemuServer::lock_config_mode($vmid, 1, $shared_lock, sub {
# Acquire an exclusive lock for $newid
- return PVE::QemuServer::lock_config_full($newid, 1, $copyfn);
+ return PVE::QemuServer::lock_config_full($newid, 1, $clonefn);
});
}});
proxyto => 'node',
description => "Create a Template.",
permissions => {
- description => "You need 'VM.Allocate' permissions on /vms/{vmid} or on the VM pool /pool/{pool}.",
- check => [ 'or',
- [ 'perm', '/vms/{vmid}', ['VM.Allocate']],
- [ 'perm', '/pool/{pool}', ['VM.Allocate'], require_param => 'pool'],
- ],
+ description => "You need 'VM.Allocate' permissions on /vms/{vmid}",
+ check => [ 'perm', '/vms/{vmid}', ['VM.Allocate']],
},
parameters => {
additionalProperties => 0,
return undef;
}});
-
-
1;