use PVE::SafeSyslog;
use PVE::Cluster qw(cfs_read_file);
use PVE::INotify;
-use PVE::Exception qw(raise raise_perm_exc);
+use PVE::Exception qw(raise raise_perm_exc raise_param_exc);
use PVE::RESTHandler;
use PVE::RPCEnvironment;
use PVE::JSONSchema qw(get_standard_option);
use PVE::AccessControl;
use PVE::Storage;
use PVE::Firewall;
-use PVE::OpenVZ;
+use PVE::LXC;
use PVE::APLInfo;
+use PVE::Report;
+use PVE::HA::Env::PVE2;
+use PVE::HA::Config;
+use PVE::QemuConfig;
use PVE::QemuServer;
use PVE::API2::Subscription;
use PVE::API2::Services;
use PVE::API2::Storage::Scan;
use PVE::API2::Storage::Status;
use PVE::API2::Qemu;
-use PVE::API2::OpenVZ;
+use PVE::API2::LXC;
+use PVE::API2::LXC::Status;
use PVE::API2::VZDump;
use PVE::API2::APT;
use PVE::API2::Ceph;
use PVE::API2::Firewall::Host;
+use Digest::MD5;
+use Digest::SHA;
+use PVE::API2::Disks;
use JSON;
use base qw(PVE::RESTHandler);
});
__PACKAGE__->register_method ({
- subclass => "PVE::API2::Ceph",
- path => 'ceph',
+ subclass => "PVE::API2::LXC",
+ path => 'lxc',
});
__PACKAGE__->register_method ({
- subclass => "PVE::API2::OpenVZ",
- path => 'openvz',
+ subclass => "PVE::API2::Ceph",
+ path => 'ceph',
});
__PACKAGE__->register_method ({
path => 'storage',
});
+__PACKAGE__->register_method ({
+ subclass => "PVE::API2::Disks",
+ path => 'disks',
+});
+
__PACKAGE__->register_method ({
subclass => "PVE::API2::APT",
path => 'apt',
my $result = [
{ name => 'ceph' },
+ { name => 'disks' },
{ name => 'apt' },
{ name => 'version' },
{ name => 'syslog' },
- { name => 'bootlog' },
{ name => 'status' },
{ name => 'subscription' },
+ { name => 'report' },
{ name => 'tasks' },
{ name => 'rrd' }, # fixme: remove?
{ name => 'rrddata' },# fixme: remove?
{ name => 'scan' },
{ name => 'storage' },
{ name => 'qemu' },
- { name => 'openvz' },
+ { name => 'lxc' },
{ name => 'vzdump' },
- { name => 'ubcfailcnt' },
{ name => 'network' },
{ name => 'aplinfo' },
{ name => 'startall' },
return PVE::pvecfg::version_info();
}});
-__PACKAGE__->register_method({
- name => 'beancounters_failcnt',
- path => 'ubcfailcnt',
- permissions => {
- check => ['perm', '/nodes/{node}', [ 'Sys.Audit' ]],
- },
- method => 'GET',
- proxyto => 'node',
- protected => 1, # openvz /proc entries are only readable by root
- description => "Get user_beancounters failcnt for all active containers.",
- parameters => {
- additionalProperties => 0,
- properties => {
- node => get_standard_option('pve-node'),
- },
- },
- returns => {
- type => 'array',
- items => {
- type => "object",
- properties => {
- id => { type => 'string' },
- failcnt => { type => 'number' },
- },
- },
- },
- code => sub {
- my ($param) = @_;
-
- my $ubchash = PVE::OpenVZ::read_user_beancounters();
-
- my $res = [];
- foreach my $vmid (keys %$ubchash) {
- next if !$vmid;
- push @$res, { id => $vmid, failcnt => $ubchash->{$vmid}->{failcntsum} };
-
- }
- return $res;
- }});
-
__PACKAGE__->register_method({
name => 'status',
path => 'status',
my $netdev = PVE::ProcFSTools::read_proc_net_dev();
foreach my $dev (keys %$netdev) {
- next if $dev !~ m/^tap([1-9]\d*)i(\d+)$/;
+ next if $dev !~ m/^(?:tap|veth)([1-9]\d*)i(\d+)$/;
my $vmid = $1;
my $netid = $2;
minimum => 0,
optional => 1,
},
- },
- },
- returns => {
- type => 'array',
- items => {
- type => "object",
- properties => {
- n => {
- description=> "Line number",
- type=> 'integer',
- },
- t => {
- description=> "Line text",
- type => 'string',
- }
- }
- }
- },
- code => sub {
- my ($param) = @_;
-
- my $rpcenv = PVE::RPCEnvironment::get();
- my $user = $rpcenv->get_user();
- my $node = $param->{node};
-
- my ($count, $lines) = PVE::Tools::dump_logfile("/var/log/syslog", $param->{start}, $param->{limit});
-
- $rpcenv->set_result_attrib('total', $count);
-
- return $lines;
- }});
-
-__PACKAGE__->register_method({
- name => 'bootlog',
- path => 'bootlog',
- method => 'GET',
- description => "Read boot log",
- proxyto => 'node',
- permissions => {
- check => ['perm', '/nodes/{node}', [ 'Sys.Syslog' ]],
- },
- protected => 1,
- parameters => {
- additionalProperties => 0,
- properties => {
- node => get_standard_option('pve-node'),
- start => {
- type => 'integer',
- minimum => 0,
+ since => {
+ type=> 'string',
+ pattern => '^\d{4}-\d{2}-\d{2}( \d{2}:\d{2}(:\d{2})?)?$',
+ description => "Display all log since this date-time string.",
optional => 1,
},
- limit => {
- type => 'integer',
- minimum => 0,
+ until => {
+ type=> 'string',
+ pattern => '^\d{4}-\d{2}-\d{2}( \d{2}:\d{2}(:\d{2})?)?$',
+ description => "Display all log until this date-time string.",
optional => 1,
},
},
my $user = $rpcenv->get_user();
my $node = $param->{node};
- my ($count, $lines) = PVE::Tools::dump_logfile("/var/log/boot", $param->{start}, $param->{limit});
+ my ($count, $lines) = PVE::Tools::dump_journal($param->{start}, $param->{limit},
+ $param->{since}, $param->{until});
$rpcenv->set_result_attrib('total', $count);
-
- return $lines;
+
+ return $lines;
}});
my $sslcert;
optional => 1,
default => 0,
},
+ websocket => {
+ optional => 1,
+ type => 'boolean',
+ description => "use websocket instead of standard vnc.",
+ },
},
},
returns => {
$sslcert = PVE::Tools::file_get_contents("/etc/pve/pve-root-ca.pem", 8192)
if !$sslcert;
- my $port = PVE::Tools::next_vnc_port();
-
- my $remip;
+ my ($remip, $family);
if ($node ne PVE::INotify::nodename()) {
- $remip = PVE::Cluster::remote_node_ip($node);
+ ($remip, $family) = PVE::Cluster::remote_node_ip($node);
+ } else {
+ $family = PVE::Tools::get_host_address_family($node);
}
+ my $port = PVE::Tools::next_vnc_port($family);
+
# NOTE: vncterm VNC traffic is already TLS encrypted,
# so we select the fastest chipher here (or 'none'?)
my $remcmd = $remip ?
$upgradecmd = PVE::Tools::shellquote($upgradecmd) if $remip;
$shcmd = [ '/bin/bash', '-c', $upgradecmd ];
} else {
- $shcmd = [ '/bin/bash', '-l' ];
+ $shcmd = [ '/bin/login', '-f', 'root' ];
}
} else {
$shcmd = [ '/bin/login' ];
my $cmd = ['/usr/bin/vncterm', '-rfbport', $port,
'-timeout', $timeout, '-authpath', $authpath,
- '-perm', 'Sys.Console', '-c', @$remcmd, @$shcmd];
+ '-perm', 'Sys.Console'];
+
+ if ($param->{websocket}) {
+ $ENV{PVE_VNC_TICKET} = $ticket; # pass ticket to vncterm
+ push @$cmd, '-notls', '-listen', 'localhost';
+ }
+
+ push @$cmd, '-c', @$remcmd, @$shcmd;
my $realcmd = sub {
my $upid = shift;
eval {
foreach my $k (keys %ENV) {
- next if $k eq 'PATH' || $k eq 'TERM' || $k eq 'USER' || $k eq 'HOME';
+ next if $k eq 'PVE_VNC_TICKET';
+ next if $k eq 'PATH' || $k eq 'TERM' || $k eq 'USER' || $k eq 'HOME' || $k eq 'LANG' || $k eq 'LANGUAGE';
delete $ENV{$k};
}
$ENV{PWD} = '/';
- PVE::Tools::run_command($cmd, errmsg => "vncterm failed");
+ PVE::Tools::run_command($cmd, errmsg => "vncterm failed", keeplocale => 1);
};
if (my $err = $@) {
syslog ('err', $err);
};
}});
+# Handshake endpoint for websocket-tunnelled VNC: validates the one-time
+# VNC ticket issued by a previous vncproxy call and echoes back the port
+# the client should connect its websocket to.
+__PACKAGE__->register_method({
+ name => 'vncwebsocket',
+ path => 'vncwebsocket',
+ method => 'GET',
+ permissions => {
+ description => "Restricted to users on realm 'pam'. You also need to pass a valid ticket (vncticket).",
+ check => ['perm', '/nodes/{node}', [ 'Sys.Console' ]],
+ },
+ description => "Opens a websocket for VNC traffic.",
+ parameters => {
+ additionalProperties => 0,
+ properties => {
+ node => get_standard_option('pve-node'),
+ vncticket => {
+ description => "Ticket from previous call to vncproxy.",
+ type => 'string',
+ maxLength => 512,
+ },
+ port => {
+ description => "Port number returned by previous vncproxy call.",
+ type => 'integer',
+ minimum => 5900,
+ maximum => 5999,
+ },
+ },
+ },
+ returns => {
+ type => "object",
+ properties => {
+ port => { type => 'string' },
+ },
+ },
+ code => sub {
+ my ($param) = @_;
+
+ my $rpcenv = PVE::RPCEnvironment::get();
+
+ my ($user, undef, $realm) = PVE::AccessControl::verify_username($rpcenv->get_user());
+
+ # only local (pam) users may open a node shell websocket
+ raise_perm_exc("realm != pam") if $realm ne 'pam';
+
+ my $authpath = "/nodes/$param->{node}";
+
+ # dies if the ticket is invalid or expired for this user + path
+ PVE::AccessControl::verify_vnc_ticket($param->{vncticket}, $user, $authpath);
+
+ my $port = $param->{port};
+
+ return { port => $port };
+ }});
+
__PACKAGE__->register_method ({
name => 'spiceshell',
path => 'spiceshell',
my $upgradecmd = "pveupgrade --shell";
$shcmd = [ '/bin/bash', '-c', $upgradecmd ];
} else {
- $shcmd = [ '/bin/bash', '-l' ];
+ $shcmd = [ '/bin/login', '-f', 'root' ];
}
} else {
$shcmd = [ '/bin/login' ];
},
dns1 => {
description => 'First name server IP address.',
- type => 'string', format => 'ipv4',
+ type => 'string', format => 'ip',
optional => 1,
},
dns2 => {
description => 'Second name server IP address.',
- type => 'string', format => 'ipv4',
+ type => 'string', format => 'ip',
optional => 1,
},
dns3 => {
description => 'Third name server IP address.',
- type => 'string', format => 'ipv4',
+ type => 'string', format => 'ip',
optional => 1,
},
},
additionalProperties => 0,
properties => {
node => get_standard_option('pve-node'),
- storage => get_standard_option('pve-storage-id'),
- template => { type => 'string', maxLength => 255 },
+ storage => get_standard_option('pve-storage-id', {
+ description => "The storage where the template will be stored",
+ completion => \&PVE::Storage::complete_storage_enabled,
+ }),
+ template => { type => 'string',
+ description => "The template wich will downloaded",
+ maxLength => 255,
+ completion => \&complete_templet_repo,
+ },
},
},
returns => { type => "string" },
raise_param_exc({ template => "no such template"}) if !$pd;
- my $cfg = cfs_read_file("storage.cfg");
+ my $cfg = PVE::Storage::config();
my $scfg = PVE::Storage::storage_check_enabled($cfg, $param->{storage}, $node);
- die "cannot download to storage type '$scfg->{type}'"
- if !($scfg->{type} eq 'dir' || $scfg->{type} eq 'nfs');
-
- die "unknown template type '$pd->{type}'\n" if $pd->{type} ne 'openvz';
+ die "unknown template type '$pd->{type}'\n"
+ if !($pd->{type} eq 'openvz' || $pd->{type} eq 'lxc');
die "storage '$param->{storage}' does not support templates\n"
if !$scfg->{content}->{vztmpl};
print "starting template download from: $src\n";
print "target file: $dest\n";
- eval {
+ # Verify a downloaded template file against the checksum from the
+ # appliance index info: prefers sha512sum, falls back to md5sum.
+ # Returns ($digest, $matched) where $matched is a case-insensitive
+ # comparison result; dies on error unless $noerr is set.
+ my $check_hash = sub {
+ my ($template_info, $filename, $noerr) = @_;
+
+ my $digest;
+ my $expected;
+
+ eval {
+ open(my $fh, '<', $filename) or die "Can't open '$filename': $!";
+ binmode($fh);
+ if (defined($template_info->{sha512sum})) {
+ $expected = $template_info->{sha512sum};
+ $digest = Digest::SHA->new(512)->addfile($fh)->hexdigest;
+ } elsif (defined($template_info->{md5sum})) {
+ #fallback to MD5
+ $expected = $template_info->{md5sum};
+ $digest = Digest::MD5->new->addfile($fh)->hexdigest;
+ } else {
+ die "no expected checksum defined";
+ }
+ close($fh);
+ };
+
+ die "checking hash failed - $@\n" if $@ && !$noerr;
+
+ # $matched is 0 when no digest could be computed at all
+ return ($digest, $digest ? lc($digest) eq lc($expected) : 0);
+ };
+ eval {
if (-f $dest) {
- my $md5 = (split (/\s/, `md5sum '$dest'`))[0];
+ my ($hash, $correct) = &$check_hash($pd, $dest, 1);
- if ($md5 && (lc($md5) eq lc($pd->{md5sum}))) {
- print "file already exists $md5 - no need to download\n";
+ if ($hash && $correct) {
+ print "file already exists $hash - no need to download\n";
return;
}
}
die "download failed - $!\n";
}
- my $md5 = (split (/\s/, `md5sum '$tmpdest'`))[0];
+ my ($hash, $correct) = &$check_hash($pd, $tmpdest);
+
+ die "could not calculate checksum\n" if !$hash;
- if (!$md5 || (lc($md5) ne lc($pd->{md5sum}))) {
- die "wrong checksum: $md5 != $pd->{md5sum}\n";
+ if (!$correct) {
+ my $expected = $pd->{sha512sum} // $pd->{md5sum};
+ die "wrong checksum: $hash != $expected\n";
}
- if (system ('mv', $tmpdest, $dest) != 0) {
+ if (!rename($tmpdest, $dest)) {
die "unable to save file - $!\n";
}
};
return $rpcenv->fork_worker('download', undef, $user, $worker);
}});
-my $get_start_stop_list = sub {
- my ($nodename, $autostart) = @_;
+# Gather a plain-text diagnostic report for this node; the actual data
+# collection is implemented in PVE::Report::generate().
+__PACKAGE__->register_method({
+ name => 'report',
+ path => 'report',
+ method => 'GET',
+ permissions => {
+ check => ['perm', '/nodes/{node}', [ 'Sys.Audit' ]],
+ },
+ protected => 1,
+ description => "Gather various systems information about a node",
+ proxyto => 'node',
+ parameters => {
+ additionalProperties => 0,
+ properties => {
+ node => get_standard_option('pve-node'),
+ },
+ },
+ returns => {
+ type => 'string',
+ },
+ code => sub {
+ return PVE::Report::generate();
+ }});
+
+# returns a list of VMIDs, those can be filtered by
+# * current parent node
+# * vmid whitelist
+# * guest is a template (default: skip)
+# * guest is HA managed (default: skip)
+my $get_filtered_vmlist = sub {
+ my ($nodename, $vmfilter, $templates, $ha_managed) = @_;
- my $cc = PVE::Cluster::cfs_read_file('cluster.conf');
my $vmlist = PVE::Cluster::get_vmlist();
- my $resList = {};
- foreach my $vmid (keys %{$vmlist->{ids}}) {
- my $d = $vmlist->{ids}->{$vmid};
- my $startup;
+ my $vms_allowed = {};
+ if (defined($vmfilter)) {
+ foreach my $vmid (PVE::Tools::split_list($vmfilter)) {
+ $vms_allowed->{$vmid} = 1;
+ }
+ }
- eval {
- return if $d->{node} ne $nodename;
-
- my $bootorder = LONG_MAX;
+ my $res = {};
+ foreach my $vmid (keys %{$vmlist->{ids}}) {
+ next if %$vms_allowed && !$vms_allowed->{$vmid};
- if ($d->{type} eq 'openvz') {
- my $conf = PVE::OpenVZ::load_config($vmid);
- return if $autostart && !($conf->{onboot} && $conf->{onboot}->{value});
-
- if ($conf->{bootorder} && defined($conf->{bootorder}->{value})) {
- $bootorder = $conf->{bootorder}->{value};
- }
- $startup = { order => $bootorder };
+ my $d = $vmlist->{ids}->{$vmid};
+ next if $nodename && $d->{node} ne $nodename;
+ eval {
+ my $class;
+ if ($d->{type} eq 'lxc') {
+ $class = 'PVE::LXC::Config';
} elsif ($d->{type} eq 'qemu') {
- my $conf = PVE::QemuServer::load_config($vmid);
- return if $autostart && !$conf->{onboot};
-
- if ($conf->{startup}) {
- $startup = PVE::QemuServer::parse_startup($conf->{startup});
- $startup->{order} = $bootorder if !defined($startup->{order});
- } else {
- $startup = { order => $bootorder };
- }
+ $class = 'PVE::QemuConfig';
} else {
die "unknown VM type '$d->{type}'\n";
}
- # skip ha managed VMs (started by rgmanager)
- return if PVE::Cluster::cluster_conf_lookup_pvevm($cc, 0, $vmid, 1);
-
- $resList->{$startup->{order}}->{$vmid} = $startup;
- $resList->{$startup->{order}}->{$vmid}->{type} = $d->{type};
+ my $conf = $class->load_config($vmid);
+ return if !$templates && $class->is_template($conf);
+ return if !$ha_managed && PVE::HA::Config::vm_is_ha_managed($vmid);
+
+ $res->{$vmid} = $conf;
+ $res->{$vmid}->{type} = $d->{type};
};
warn $@ if $@;
}
+ return $res;
+};
+
+# return all VMs which should get started/stopped on power up/down
+my $get_start_stop_list = sub {
+ my ($nodename, $autostart, $vmfilter) = @_;
+
+ # templates and HA-managed guests are filtered out by default here
+ my $vmlist = &$get_filtered_vmlist($nodename, $vmfilter);
+
+ # group guest configs by startup order: { order => { vmid => startup } }
+ my $resList = {};
+ foreach my $vmid (keys %$vmlist) {
+ my $conf = $vmlist->{$vmid};
+
+ # with $autostart set, only guests marked 'onboot' are considered
+ next if $autostart && !$conf->{onboot};
+
+ my $startup = {};
+ if ($conf->{startup}) {
+ $startup = PVE::JSONSchema::pve_parse_startup_order($conf->{startup});
+ }
+
+ # guests without an explicit order are started/stopped last
+ $startup->{order} = LONG_MAX if !defined($startup->{order});
+
+ $resList->{$startup->{order}}->{$vmid} = $startup;
+ $resList->{$startup->{order}}->{$vmid}->{type} = $conf->{type};
+ }
+
 return $resList;
};
path => 'startall',
method => 'POST',
protected => 1,
+ permissions => {
+ check => ['perm', '/', [ 'VM.PowerMgmt' ]],
+ },
+ proxyto => 'node',
description => "Start all VMs and containers (when onboot=1).",
parameters => {
additionalProperties => 0,
properties => {
node => get_standard_option('pve-node'),
+ force => {
+ optional => 1,
+ type => 'boolean',
+ description => "force if onboot=0.",
+ },
+ vms => {
+ description => "Only consider Guests with these IDs.",
+ type => 'string', format => 'pve-vmid-list',
+ optional => 1,
+ },
},
},
returns => {
my $nodename = $param->{node};
$nodename = PVE::INotify::nodename() if $nodename eq 'localhost';
+ my $force = $param->{force};
+
my $code = sub {
$rpcenv->{type} = 'priv'; # to start tasks in background
- # wait up to 10 seconds for quorum
- for (my $i = 10; $i >= 0; $i--) {
- last if PVE::Cluster::check_cfs_quorum($i != 0 ? 1 : 0);
- sleep(1);
+ if (!PVE::Cluster::check_cfs_quorum(1)) {
+ print "waiting for quorum ...\n";
+ do {
+ sleep(1);
+ } while (!PVE::Cluster::check_cfs_quorum(1));
+ print "got quorum\n";
}
-
- my $startList = &$get_start_stop_list($nodename, 1);
+ my $autostart = $force ? undef : 1;
+ my $startList = &$get_start_stop_list($nodename, $autostart, $param->{vms});
# Note: use numeric sorting with <=>
foreach my $order (sort {$a <=> $b} keys %$startList) {
my $d = $vmlist->{$vmid};
PVE::Cluster::check_cfs_quorum(); # abort when we loose quorum
-
+
eval {
my $default_delay = 0;
my $upid;
- if ($d->{type} eq 'openvz') {
- return if PVE::OpenVZ::check_running($vmid);
+ if ($d->{type} eq 'lxc') {
+ return if PVE::LXC::check_running($vmid);
print STDERR "Starting CT $vmid\n";
- $upid = PVE::API2::OpenVZ->vm_start({node => $nodename, vmid => $vmid });
+ $upid = PVE::API2::LXC::Status->vm_start({node => $nodename, vmid => $vmid });
} elsif ($d->{type} eq 'qemu') {
- $default_delay = 3; # to redruce load
+ $default_delay = 3; # to reduce load
return if PVE::QemuServer::check_running($vmid, 1);
print STDERR "Starting VM $vmid\n";
$upid = PVE::API2::Qemu->vm_start({node => $nodename, vmid => $vmid });
}
}
} else {
- if ($d->{type} eq 'openvz') {
+ if ($d->{type} eq 'lxc') {
print STDERR "Starting CT $vmid failed: $status\n";
} elsif ($d->{type} eq 'qemu') {
print STDERR "Starting VM $vmid failed: status\n";
my ($nodename, $type, $vmid, $down_timeout) = @_;
my $upid;
- if ($type eq 'openvz') {
- return if !PVE::OpenVZ::check_running($vmid);
+ if ($type eq 'lxc') {
+ return if !PVE::LXC::check_running($vmid);
my $timeout = defined($down_timeout) ? int($down_timeout) : 60;
print STDERR "Stopping CT $vmid (timeout = $timeout seconds)\n";
- $upid = PVE::API2::OpenVZ->vm_shutdown({node => $nodename, vmid => $vmid,
- timeout => $timeout, forceStop => 1 });
+ $upid = PVE::API2::LXC::Status->vm_shutdown({node => $nodename, vmid => $vmid,
+ timeout => $timeout, forceStop => 1 });
} elsif ($type eq 'qemu') {
return if !PVE::QemuServer::check_running($vmid, 1);
my $timeout = defined($down_timeout) ? int($down_timeout) : 60*3;
die "unknown VM type '$type'\n";
}
- my $res = PVE::Tools::upid_decode($upid);
-
- return $res->{pid};
+ return $upid;
};
__PACKAGE__->register_method ({
path => 'stopall',
method => 'POST',
protected => 1,
+ permissions => {
+ check => ['perm', '/', [ 'VM.PowerMgmt' ]],
+ },
+ proxyto => 'node',
description => "Stop all VMs and Containers.",
parameters => {
additionalProperties => 0,
properties => {
node => get_standard_option('pve-node'),
+ vms => {
+ description => "Only consider Guests with these IDs.",
+ type => 'string', format => 'pve-vmid-list',
+ optional => 1,
+ },
},
},
returns => {
$rpcenv->{type} = 'priv'; # to start tasks in background
- my $stopList = &$get_start_stop_list($nodename);
+ my $stopList = &$get_start_stop_list($nodename, undef, $param->{vms});
my $cpuinfo = PVE::ProcFSTools::read_cpuinfo();
- my $maxWorkers = $cpuinfo->{cpus};
+ my $datacenterconfig = cfs_read_file('datacenter.cfg');
+ # if not set by user spawn max cpu count number of workers
+ my $maxWorkers = $datacenterconfig->{max_workers} || $cpuinfo->{cpus};
foreach my $order (sort {$b <=> $a} keys %$stopList) {
my $vmlist = $stopList->{$order};
my $workers = {};
+
+ my $finish_worker = sub {
+ my $pid = shift;
+ my $d = $workers->{$pid};
+ return if !$d;
+ delete $workers->{$pid};
+
+ syslog('info', "end task $d->{upid}");
+ };
+
foreach my $vmid (sort {$b <=> $a} keys %$vmlist) {
my $d = $vmlist->{$vmid};
- my $pid;
- eval { $pid = &$create_stop_worker($nodename, $d->{type}, $vmid, $d->{down}); };
+ my $upid;
+ eval { $upid = &$create_stop_worker($nodename, $d->{type}, $vmid, $d->{down}); };
warn $@ if $@;
- next if !$pid;
-
- $workers->{$pid} = 1;
+ next if !$upid;
+
+ my $res = PVE::Tools::upid_decode($upid, 1);
+ next if !$res;
+
+ my $pid = $res->{pid};
+
+ $workers->{$pid} = { type => $d->{type}, upid => $upid, vmid => $vmid };
while (scalar(keys %$workers) >= $maxWorkers) {
foreach my $p (keys %$workers) {
if (!PVE::ProcFSTools::check_process_running($p)) {
- delete $workers->{$p};
+ &$finish_worker($p);
}
}
sleep(1);
while (scalar(keys %$workers)) {
foreach my $p (keys %$workers) {
if (!PVE::ProcFSTools::check_process_running($p)) {
- delete $workers->{$p};
+ &$finish_worker($p);
}
}
sleep(1);
}
}
+
+ syslog('info', "all VMs and CTs stopped");
+
return;
};
return $rpcenv->fork_worker('stopall', undef, $authuser, $code);
-
}});
+# Kick off a migration task for a single guest (CT or VM) to $target and
+# return the PID of the forked worker; requests online migration when the
+# guest is currently running.
+my $create_migrate_worker = sub {
+ my ($nodename, $type, $vmid, $target) = @_;
+
+ my $upid;
+ if ($type eq 'lxc') {
+ my $online = PVE::LXC::check_running($vmid) ? 1 : 0;
+ print STDERR "Migrating CT $vmid\n";
+ $upid = PVE::API2::LXC->migrate_vm({node => $nodename, vmid => $vmid, target => $target,
+ online => $online });
+ } elsif ($type eq 'qemu') {
+ my $online = PVE::QemuServer::check_running($vmid, 1) ? 1 : 0;
+ print STDERR "Migrating VM $vmid\n";
+ $upid = PVE::API2::Qemu->migrate_vm({node => $nodename, vmid => $vmid, target => $target,
+ online => $online });
+ } else {
+ die "unknown VM type '$type'\n";
+ }
+
+ my $res = PVE::Tools::upid_decode($upid);
+
+ return $res->{pid};
+};
+
+# Migrate all (or a filtered set of) guests on this node to a target
+# node, running at most $maxWorkers migrations in parallel.
+__PACKAGE__->register_method ({
+ name => 'migrateall',
+ path => 'migrateall',
+ method => 'POST',
+ proxyto => 'node',
+ protected => 1,
+ permissions => {
+ check => ['perm', '/', [ 'VM.Migrate' ]],
+ },
+ description => "Migrate all VMs and Containers.",
+ parameters => {
+ additionalProperties => 0,
+ properties => {
+ node => get_standard_option('pve-node'),
+ target => get_standard_option('pve-node', { description => "Target node." }),
+ maxworkers => {
+ description => "Maximal number of parallel migration job." .
+ " If not set use 'max_workers' from datacenter.cfg," .
+ " one of both must be set!",
+ optional => 1,
+ type => 'integer',
+ minimum => 1
+ },
+ vms => {
+ description => "Only consider Guests with these IDs.",
+ type => 'string', format => 'pve-vmid-list',
+ optional => 1,
+ },
+ },
+ },
+ returns => {
+ type => 'string',
+ },
+ code => sub {
+ my ($param) = @_;
+
+ my $rpcenv = PVE::RPCEnvironment::get();
+ my $authuser = $rpcenv->get_user();
+
+ my $nodename = $param->{node};
+ $nodename = PVE::INotify::nodename() if $nodename eq 'localhost';
+
+ my $target = $param->{target};
+
+ my $datacenterconfig = cfs_read_file('datacenter.cfg');
+ # prefer parameter over datacenter cfg settings
+ my $maxWorkers = $param->{maxworkers} || $datacenterconfig->{max_workers} ||
+ die "either 'maxworkers' parameter or max_workers in datacenter.cfg must be set!\n";
+
+ my $code = sub {
+ $rpcenv->{type} = 'priv'; # to start tasks in background
+
+ # include templates and HA-managed guests (last two args = 1)
+ my $vmlist = &$get_filtered_vmlist($nodename, $param->{vms}, 1, 1);
+
+ # throttle: never run more than $maxWorkers migrations at once
+ my $workers = {};
+ foreach my $vmid (sort keys %$vmlist) {
+ my $d = $vmlist->{$vmid};
+ my $pid;
+ eval { $pid = &$create_migrate_worker($nodename, $d->{type}, $vmid, $target); };
+ warn $@ if $@;
+ next if !$pid;
+
+ $workers->{$pid} = 1;
+ while (scalar(keys %$workers) >= $maxWorkers) {
+ foreach my $p (keys %$workers) {
+ if (!PVE::ProcFSTools::check_process_running($p)) {
+ delete $workers->{$p};
+ }
+ }
+ sleep(1);
+ }
+ }
+ # wait for all remaining migration workers to finish
+ while (scalar(keys %$workers)) {
+ foreach my $p (keys %$workers) {
+ if (!PVE::ProcFSTools::check_process_running($p)) {
+ delete $workers->{$p};
+ }
+ }
+ sleep(1);
+ }
+ return;
+ };
+
+ return $rpcenv->fork_worker('migrateall', undef, $authuser, $code);
+
+ }});
+
+# bash completion helper
+
+# Bash completion helper for the aplinfo download 'template' parameter:
+# lists all known appliance templates whose name starts with the typed
+# prefix. NOTE: the historical 'templet' spelling is kept because the
+# parameter schema above references this sub by name.
+sub complete_templet_repo {
+ my ($cmdname, $pname, $cvalue) = @_;
+
+ my $repo = PVE::APLInfo::load_data();
+ my $res = [];
+ foreach my $templ (keys %{$repo->{all}}) {
+ # \Q...\E: treat the user-typed prefix as a literal string, not a
+ # regex, so metacharacters in $cvalue cannot break or widen the match
+ next if $templ !~ m/^\Q$cvalue\E/;
+ push @$res, $templ;
+ }
+
+ return $res;
+}
+
package PVE::API2::Nodes;
use strict;