my ($cfg, $group) = @_;
die "no such ha group '$group'\n" if !$cfg->{ids}->{$group};
-
+
my $group_cfg = dclone($cfg->{ids}->{$group});
$group_cfg->{group} = $group;
$group_cfg->{digest} = $cfg->{digest};
};
__PACKAGE__->register_method ({
- name => 'index',
+ name => 'index',
path => '',
method => 'GET',
description => "Get HA groups.",
# create /etc/pve/ha directory
PVE::Cluster::check_cfs_quorum();
mkdir("/etc/pve/ha");
-
+
my $group = extract_param($param, 'group');
my $type = 'group';
-
+
if (my $param_type = extract_param($param, 'type')) {
# useless, but do it anyway
die "types does not match\n" if $param_type ne $type;
my $rcfg = PVE::HA::Config::read_resources_config();
foreach my $sid (keys %{$rcfg->{ids}}) {
my $sg = $rcfg->{ids}->{$sid}->{group};
- die "ha group is used by service '$sid'\n"
+ die "ha group is used by service '$sid'\n"
if ($sg && $sg eq $group);
}
# create /etc/pve/ha directory
PVE::Cluster::check_cfs_quorum();
mkdir("/etc/pve/ha");
-
+
my ($sid, $type, $name) = PVE::HA::Config::parse_sid(extract_param($param, 'sid'));
if (my $param_type = extract_param($param, 'type')) {
check_service_state($sid);
PVE::HA::Config::queue_crm_commands("migrate $sid $param->{node}");
-
+
return undef;
}});
check_service_state($sid);
PVE::HA::Config::queue_crm_commands("relocate $sid $param->{node}");
-
+
return undef;
}});
};
__PACKAGE__->register_method ({
- name => 'index',
- path => '',
+ name => 'index',
+ path => '',
method => 'GET',
permissions => { user => 'all' },
description => "Directory index.",
},
code => sub {
my ($param) = @_;
-
+
my $result = [
{ name => 'current' },
{ name => 'manager_status' },
}});
__PACKAGE__->register_method ({
- name => 'status',
+ name => 'status',
path => 'current',
method => 'GET',
description => "Get HA manger status.",
next if $req_state eq 'freeze';
$active_count->{$sd->{node}}++;
}
-
+
foreach my $node (sort keys %{$status->{node_status}}) {
my $lrm_status = PVE::HA::Config::read_lrm_status($node);
my $id = "lrm:$node";
if (!$lrm_status->{timestamp}) {
- push @$res, { id => $id, type => 'lrm', node => $node,
- status => "$node (unable to read lrm status)"};
+ push @$res, { id => $id, type => 'lrm', node => $node,
+ status => "$node (unable to read lrm status)"};
} else {
my $status_str = &$timestamp_to_status($ctime, $lrm_status->{timestamp});
if ($status_str eq 'active') {
my $time_str = localtime($lrm_status->{timestamp});
my $status_text = "$node ($status_str, $time_str)";
- push @$res, { id => $id, type => 'lrm', node => $node,
- status => $status_text, timestamp => $lrm_status->{timestamp} };
+ push @$res, { id => $id, type => 'lrm', node => $node,
+ status => $status_text, timestamp => $lrm_status->{timestamp} };
}
}
}});
__PACKAGE__->register_method ({
- name => 'manager_status',
+ name => 'manager_status',
path => 'manager_status',
method => 'GET',
description => "Get full HA manger status, including LRM status.",
my ($param) = @_;
my $status = PVE::HA::Config::read_manager_status();
-
+
my $data = { manager_status => $status };
$data->{quorum} = {
node => $nodename,
quorate => PVE::Cluster::check_cfs_quorum(1),
};
-
+
foreach my $node (sort keys %{$status->{node_status}}) {
my $lrm_status = PVE::HA::Config::read_lrm_status($node);
$data->{lrm_status}->{$node} = $lrm_status;
}, $class;
$self->set_local_status({ state => 'wait_for_quorum' });
-
+
return $self;
}
my $old = $self->{status};
- # important: only update if if really changed
+    # important: only update if it really changed
return if $old->{state} eq $new->{state};
$haenv->log('info', "status change $old->{state} => $new->{state}");
my $starttime = $haenv->get_time();
for (;;) {
-
+
if ($haenv->get_ha_manager_lock()) {
if ($self->{ha_manager_wd}) {
$haenv->watchdog_update($self->{ha_manager_wd});
}
return 1;
}
-
+
    last if ++$count > 5; # try max 5 times
my $delay = $haenv->get_time() - $starttime;
$haenv->sleep(1);
}
-
+
return 0;
}
my $status = $self->get_local_status();
my $state = $status->{state};
- # do state changes first
+ # do state changes first
if ($state eq 'wait_for_quorum') {
$self->set_local_status({ state => 'lost_manager_lock'});
}
}
-
+
$status = $self->get_local_status();
$state = $status->{state};
return 0 if $self->{shutdown_request};
$haenv->sleep(5);
-
+
} elsif ($state eq 'master') {
my $manager = $self->{manager};
$haenv->sleep_until($startime + $max_time);
} elsif ($state eq 'lost_manager_lock') {
-
+
if ($self->{ha_manager_wd}) {
$haenv->watchdog_close($self->{ha_manager_wd});
delete $self->{ha_manager_wd};
my $crm_commands_filename = "ha/crm_commands";
my $ha_fence_config = "ha/fence.cfg";
-cfs_register_file($crm_commands_filename,
+cfs_register_file($crm_commands_filename,
sub { my ($fn, $raw) = @_; return defined($raw) ? $raw : ''; },
sub { my ($fn, $raw) = @_; return $raw; });
-cfs_register_file($ha_groups_config,
+cfs_register_file($ha_groups_config,
sub { PVE::HA::Groups->parse_config(@_); },
sub { PVE::HA::Groups->write_config(@_); });
-cfs_register_file($ha_resources_config,
+cfs_register_file($ha_resources_config,
sub { PVE::HA::Resources->parse_config(@_); },
sub { PVE::HA::Resources->write_config(@_); });
-cfs_register_file($manager_status_filename,
- \&json_reader,
+cfs_register_file($manager_status_filename,
+ \&json_reader,
\&json_writer);
cfs_register_file($ha_fence_config,
\&PVE::HA::FenceConfig::parse_config,
my $filename = "/etc/pve/nodes/$node/lrm_status";
- PVE::HA::Tools::write_json_to_file($filename, $status_obj);
+ PVE::HA::Tools::write_json_to_file($filename, $status_obj);
}
sub parse_groups_config {
sub parse_resources_config {
my ($filename, $raw) = @_;
-
+
return PVE::HA::Resources->parse_config($filename, $raw);
}
sub loop_end_hook {
my ($self, @args) = @_;
-
+
return $self->{plug}->loop_end_hook(@args);
}
restricted => {
description => "Resources bound to restricted groups may only run on nodes defined by the group.",
verbose_description => "Resources bound to restricted groups may only run on nodes defined by the group. The resource will be placed in the stopped state if no group node member is online. Resources on unrestricted groups may run on any cluster node if all group members are offline, but they will migrate back as soon as a group member comes online. One can implement a 'preferred node' behavior using an unrestricted group with only one member.",
- type => 'boolean',
+ type => 'boolean',
optional => 1,
default => 0,
},
nofailback => {
description => "The CRM tries to run services on the node with the highest priority. If a node with higher priority comes online, the CRM migrates the service to that node. Enabling nofailback prevents that behavior.",
- type => 'boolean',
+ type => 'boolean',
optional => 1,
- default => 0,
+ default => 0,
},
- comment => {
+ comment => {
description => "Description.",
- type => 'string',
+ type => 'string',
optional => 1,
maxLength => 4096,
},
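# Example group definition using the properties above (group name, node names
# and priorities are hypothetical; the syntax follows the groups.cfg
# section-config format, cf. the 'group: prefer_node1' test fixture below):
#
#   group: prefer_node1
#       nodes node1:2,node2:1
#       restricted 0
#       nofailback 0
#       comment "prefer node1, fall back to node2 only when needed"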
sub encode_value {
my ($class, $type, $key, $value) = @_;
-
+
if ($key eq 'nodes') {
return join(',', keys(%$value));
}
-
+
return $value;
}
cluster_state_update => 0,
}, $class;
- $self->set_local_status({ state => 'wait_for_agent_lock' });
+ $self->set_local_status({ state => 'wait_for_agent_lock' });
return $self;
}
my $old = $self->{status};
- # important: only update if if really changed
+    # important: only update if it really changed
return if $old->{state} eq $new->{state};
$haenv->log('info', "status change $old->{state} => $new->{state}");
my $haenv = $self->{haenv};
return 0 if !$haenv->quorate();
-
- my $lrm_status = {
+
+ my $lrm_status = {
state => $self->{status}->{state},
mode => $self->{mode},
results => $self->{results},
timestamp => $haenv->get_time(),
};
-
+
eval { $haenv->write_lrm_status($lrm_status); };
if (my $err = $@) {
$haenv->log('err', "unable to write lrm status file - $err");
my $starttime = $haenv->get_time();
for (;;) {
-
+
if ($haenv->get_ha_agent_lock()) {
if ($self->{ha_agent_wd}) {
$haenv->watchdog_update($self->{ha_agent_wd});
}
return 1;
}
-
+
    last if ++$count > 5; # try max 5 times
my $delay = $haenv->get_time() - $starttime;
$haenv->sleep(1);
}
-
+
return 0;
}
sub active_service_count {
my ($self) = @_;
-
+
my $haenv = $self->{haenv};
my $nodename = $haenv->nodename();
my $ss = $self->{service_status};
my $count = 0;
-
+
foreach my $sid (keys %$ss) {
my $sd = $ss->{$sid};
next if !$sd->{node};
$count++;
}
-
+
return $count;
}
return $self->{shutdown_request} ? 0 : 1;
}
}
-
+
my $status = $self->get_local_status();
my $state = $status->{state};
$self->update_service_status();
my $fence_request = PVE::HA::Tools::count_fenced_services($self->{service_status}, $haenv->nodename());
-
- # do state changes first
+
+ # do state changes first
my $ctime = $haenv->get_time();
$self->set_local_status({ state => 'active' });
}
}
-
+
} elsif ($state eq 'lost_agent_lock') {
if (!$fence_request && $haenv->quorate()) {
} elsif ($state eq 'active') {
- if ($fence_request) {
+ if ($fence_request) {
$haenv->log('err', "node need to be fenced - releasing agent_lock\n");
- $self->set_local_status({ state => 'lost_agent_lock'});
+ $self->set_local_status({ state => 'lost_agent_lock'});
} elsif (!$self->get_protected_ha_agent_lock()) {
$self->set_local_status({ state => 'lost_agent_lock'});
}
if ($state eq 'wait_for_agent_lock') {
return 0 if $self->{shutdown_request};
-
+
$self->update_lrm_status();
-
+
$haenv->sleep(5);
-
+
} elsif ($state eq 'active') {
my $startime = $haenv->get_time();
}
$self->update_lrm_status();
-
+
return 0 if $shutdown;
$haenv->sleep_until($startime + $max_time);
} elsif ($state eq 'lost_agent_lock') {
-
+
    # Note: watchdog is active and will trigger soon!
# so we hope to get the lock back soon!
my $service_count = $self->active_service_count();
if ($service_count > 0) {
- $haenv->log('err', "get shutdown request in state 'lost_agent_lock' - " .
+ $haenv->log('err', "get shutdown request in state 'lost_agent_lock' - " .
"detected $service_count running services");
} else {
$haenv->watchdog_close($self->{ha_agent_wd});
delete $self->{ha_agent_wd};
}
-
+
return 0;
}
}
if (my $err = $@) {
$haenv->log('err', $err);
POSIX::_exit(-1);
- }
- POSIX::_exit($res);
+ }
+ POSIX::_exit($res);
} else {
$count++;
$w->{pid} = $pid;
}
}
}
-
+
return $count;
}
my $exit_code = -1;
if ($status == -1) {
- $haenv->log('err', "resource agent $sid finished - failed to execute");
+ $haenv->log('err', "resource agent $sid finished - failed to execute");
} elsif (my $sig = ($status & 127)) {
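        # the low 7 bits of the wait status hold the number of the signal
        # that terminated the agent, so this branch only triggers when the
        # agent was killed by a signal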
$haenv->log('err', "resource agent $sid finished - got signal $sig");
} else {
$ms->{node_status} = $ns->{status};
$ms->{service_status} = $ss;
$ms->{timestamp} = $haenv->get_time();
-
+
$haenv->write_manager_status($ms);
-}
+}
sub get_service_group {
my ($groups, $online_node_usage, $service_conf) = @_;
}
# overwrite default if service is bound to a specific group
- $group = $groups->{ids}->{$service_conf->{group}} if $service_conf->{group} &&
+ $group = $groups->{ids}->{$service_conf->{group}} if $service_conf->{group} &&
$groups->{ids}->{$service_conf->{group}};
return $group;
}
}
- my @nodes = sort {
+ my @nodes = sort {
$online_node_usage->{$a} <=> $online_node_usage->{$b} || $a cmp $b
} keys %{$pri_groups->{$top_pri}};
sub compute_new_uuid {
my ($state) = @_;
-
+
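    # hash the given state string together with the process id, the current
    # time and a per-process counter, so repeated calls within the same
    # second still yield distinct ids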
$uid_counter++;
return md5_base64($state . $$ . time() . $uid_counter);
}
my $sd = $self->{ss}->{$sid};
my $state = $sd->{state};
if (defined($online_node_usage->{$sd->{node}})) {
- if (($state eq 'started') || ($state eq 'request_stop') ||
+ if (($state eq 'started') || ($state eq 'request_stop') ||
($state eq 'fence') || ($state eq 'freeze') || ($state eq 'error')) {
$online_node_usage->{$sd->{node}}++;
} elsif (($state eq 'migrate') || ($state eq 'relocate')) {
}
};
-# read LRM status for all nodes
+# read LRM status for all nodes
sub read_lrm_status {
my ($self) = @_;
}
}
-
+
return ($results, $modes);
}
chomp $cmd;
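    # commands queued via queue_crm_commands() have the form
    # '<migrate|relocate> <sid> <node>', cf. the API handlers above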
if ($cmd =~ m/^(migrate|relocate)\s+(\S+)\s+(\S+)$/) {
- my ($task, $sid, $node) = ($1, $2, $3);
+ my ($task, $sid, $node) = ($1, $2, $3);
if (my $sd = $ss->{$sid}) {
if (!$ns->node_is_online($node)) {
$haenv->log('err', "crm command error - node not online: $cmd");
} else {
if ($node eq $sd->{node}) {
$haenv->log('info', "ignore crm command - service already on target node: $cmd");
- } else {
+ } else {
$haenv->log('info', "got crm command: $cmd");
$ss->{$sid}->{cmd} = [ $task, $node];
}
for (;;) {
my $repeat = 0;
-
+
$self->recompute_online_node_usage();
foreach my $sid (sort keys %$ss) {
# this can happen if we fence a node with active migrations
# hack: modify $sd (normally this should be considered read-only)
$haenv->log('info', "fixup service '$sid' location ($sd->{node} => $cd->{node})");
- $sd->{node} = $cd->{node};
+ $sd->{node} = $cd->{node};
}
if ($sd->{cmd}) {
return;
}
} else {
- $haenv->log('err', "unknown command '$cmd' for service '$sid'");
+ $haenv->log('err', "unknown command '$cmd' for service '$sid'");
}
}
}
return;
}
-
+
if ($cd->{state} eq 'disabled' || $cd->{state} eq 'stopped') {
&$change_service_state($self, $sid, 'request_stop');
return;
&$change_service_state($self, $sid, $cmd, node => $sd->{node}, target => $target);
}
} else {
- $haenv->log('err', "unknown command '$cmd' for service '$sid'");
+ $haenv->log('err', "unknown command '$cmd' for service '$sid'");
}
} else {
sub get_node_state {
my ($self, $node) = @_;
- $self->{status}->{$node} = 'unknown'
+ $self->{status}->{$node} = 'unknown'
if !$self->{status}->{$node};
return $self->{status}->{$node};
my ($class, $type, $sectionId) = @_;
my (undef, $name) = split(':', $sectionId, 2);
-
+
return "$type: $name\n";
}
use strict;
use warnings;
use POSIX qw(strftime EINTR);
-use JSON;
+use JSON;
use IO::File;
use Fcntl qw(:DEFAULT :flock);
use File::Copy;
my $filename = "$self->{statusdir}/lrm_status_$node";
- return PVE::HA::Tools::read_json_from_file($filename, {});
+ return PVE::HA::Tools::read_json_from_file($filename, {});
}
sub write_lrm_status {
my $filename = "$self->{statusdir}/lrm_status_$node";
- PVE::HA::Tools::write_json_to_file($filename, $status_obj);
+ PVE::HA::Tools::write_json_to_file($filename, $status_obj);
}
sub read_hardware_status_nolock {
my ($self) = @_;
my $filename = "$self->{statusdir}/service_config";
- my $conf = PVE::HA::Tools::read_json_from_file($filename);
+ my $conf = PVE::HA::Tools::read_json_from_file($filename);
foreach my $sid (keys %$conf) {
my $d = $conf->{$sid};
die "no such service '$sid'\n" if !$conf->{$sid};
- die "current_node for '$sid' does not match ($current_node != $conf->{$sid}->{node})\n"
+ die "current_node for '$sid' does not match ($current_node != $conf->{$sid}->{node})\n"
if $current_node ne $conf->{$sid}->{node};
-
+
$conf->{$sid}->{node} = $new_node;
$self->write_service_config($conf);
my ($self, $cmd) = @_;
my $code = sub { $self->queue_crm_commands_nolock($cmd); };
-
+
$self->global_lock($code);
return undef;
return $data;
};
-
+
return $self->global_lock($code);
}
my ($self, $node) = @_;
my $filename = "$self->{statusdir}/service_status_$node";
- return PVE::HA::Tools::read_json_from_file($filename);
+ return PVE::HA::Tools::read_json_from_file($filename);
}
sub write_service_status {
# fixme: add test if a service runs on two nodes!!!
return $res;
-}
+}
my $default_group_config = <<__EOD;
group: prefer_node1
if (-f "$testdir/service_status_$node") {
copy("$testdir/service_status_$node", "$statusdir/service_status_$node");
- } else {
+ } else {
$self->write_service_status($node, {});
}
}
eval { $res = &$code($fh, @param) };
my $err = $@;
-
+
close($fh);
die $err if $err;
-
+
return $res;
}
}
my $quorate = ($online_count > int($node_count/2)) ? 1 : 0;
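    # e.g. with 5 configured nodes at least 3 must be online ($online_count > 2)
    # for the cluster to count as quorate, i.e. a strict majority is required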
-
+
if (!$quorate) {
foreach my $node (keys %$cstatus) {
my $d = $cstatus->{$node};
my $update_cmd = sub {
my $filename = "$self->{statusdir}/watchdog_status";
-
+
my ($res, $wdstatus);
if (-f $filename) {
} else {
$wdstatus = {};
}
-
+
($wdstatus, $res) = &$code($wdstatus);
PVE::Tools::file_set_contents($filename, encode_json($wdstatus));
foreach my $id (keys %$wdstatus) {
delete $wdstatus->{$id} if $wdstatus->{$id}->{node} eq $node;
}
-
+
PVE::Tools::file_set_contents($filename, encode_json($wdstatus));
}
}
delete $wdstatus->{$wfh};
}
}
-
+
return ($wdstatus, $res);
};
my $tdiff = $ctime - $wd->{update_time};
die "watchdog expired" if $tdiff > $watchdog_timeout;
-
+
$wd->{update_time} = $ctime;
return ($wdstatus);
use strict;
use warnings;
use POSIX qw(strftime EINTR);
-use JSON;
+use JSON;
use IO::File;
use Fcntl qw(:DEFAULT :flock);
sub new {
my ($this, $nodename, $hardware, $log_id) = @_;
-
+
my $class = ref($this) || $this;
my $self = $class->SUPER::new($nodename, $hardware, $log_id);
my $time = $self->get_time();
- printf("%-5s %10s %12s: $msg\n", $level, strftime("%H:%M:%S", localtime($time)),
+ printf("%-5s %10s %12s: $msg\n", $level, strftime("%H:%M:%S", localtime($time)),
"$self->{nodename}/$self->{log_id}");
}
my ($self) = @_;
my $delay = $self->get_time() - $self->{loop_start};
-
+
die "loop take too long ($delay seconds)\n" if $delay > 30;
}
$id = 'hardware' if !$id;
- my $text = sprintf("%-5s %10s %12s: $msg\n", $level,
+ my $text = sprintf("%-5s %10s %12s: $msg\n", $level,
strftime("%H:%M:%S", localtime($time)), $id);
$self->append_text($text);
# fixme: duplicate code in Env?
sub read_manager_status {
my ($self) = @_;
-
+
my $filename = "$self->{statusdir}/manager_status";
- return PVE::HA::Tools::read_json_from_file($filename, {});
+ return PVE::HA::Tools::read_json_from_file($filename, {});
}
sub fork_daemon {
my $pid = fork();
die "fork failed" if ! defined($pid);
- if ($pid == 0) {
+ if ($pid == 0) {
close($lockfh) if defined($lockfh); # unlock global lock
-
+
POSIX::close($psync[0]);
my $outfh = $psync[1];
close STDIN;
POSIX::close(0) if $fd != 0;
- die "unable to redirect STDIN - $!"
+ die "unable to redirect STDIN - $!"
if !open(STDIN, "</dev/null");
# redirect STDOUT
close STDOUT;
POSIX::close (1) if $fd != 1;
- die "unable to redirect STDOUT - $!"
+ die "unable to redirect STDOUT - $!"
if !open(STDOUT, ">&", $outfh);
STDOUT->autoflush (1);
close STDERR;
POSIX::close(2) if $fd != 2;
- die "unable to redirect STDERR - $!"
+ die "unable to redirect STDERR - $!"
if !open(STDERR, ">&1");
-
+
STDERR->autoflush(1);
if ($type eq 'crm') {
}
return 1;
} else {
- POSIX::close($fd);
+ POSIX::close($fd);
return 0;
}
});
-
+
return $pid;
}
my $d = $self->{nodes}->{$node} || die "no such node '$node'";
my $action = $d->{power_btn}->get_active() ? 'on' : 'off';
-
- $self->sim_hardware_cmd("power $node $action");
+
+ $self->sim_hardware_cmd("power $node $action");
}
sub set_network_state {
my $d = $self->{nodes}->{$node} || die "no such node '$node'";
my $action = $d->{network_btn}->get_active() ? 'on' : 'off';
-
- $self->sim_hardware_cmd("network $node $action");
+
+ $self->sim_hardware_cmd("network $node $action");
}
sub create_node_control {
my ($self) = @_;
- my $ngrid = Gtk3::Grid->new();
+ my $ngrid = Gtk3::Grid->new();
$ngrid->set_row_spacing(2);
$ngrid->set_column_spacing(5);
$ngrid->set('margin-left', 5);
$w->set_size_request(150, -1);
$w->set_alignment (0, 0.5);
$ngrid->attach($w, 3, 0, 1, 1);
-
+
my $row = 1;
my @nodes = sort keys %{$self->{nodes}};
my $target = '';
$w->signal_connect('notify::active' => sub {
my $w = shift;
-
+
my $sel = $w->get_active();
return if $sel < 0;
});
$grid->attach($w, 1, 0, 1, 1);
- my $relocate_btn = Gtk3::CheckButton->new_with_label("stop service (relocate)");
+ my $relocate_btn = Gtk3::CheckButton->new_with_label("stop service (relocate)");
$grid->attach($relocate_btn, 1, 1, 1, 1);
my $contarea = $dialog->get_content_area();
sub create_service_control {
my ($self) = @_;
- my $sgrid = Gtk3::Grid->new();
+ my $sgrid = Gtk3::Grid->new();
$sgrid->set_row_spacing(2);
$sgrid->set_column_spacing(5);
$sgrid->set('margin', 5);
my $vbox = Gtk3::VBox->new(0, 0);
$grid->attach($vbox, 1, 0, 1, 1);
-
- my $ngrid = $self->create_node_control();
+
+ my $ngrid = $self->create_node_control();
$vbox->pack_start($ngrid, 0, 0, 0);
my $sep = Gtk3::HSeparator->new;
use strict;
use warnings;
use POSIX qw(strftime EINTR);
-use JSON;
+use JSON;
use IO::File;
use Fcntl qw(:DEFAULT :flock);
sub new {
my ($this, $nodename, $hardware, $log_id) = @_;
-
+
my $class = ref($this) || $this;
my $self = $class->SUPER::new($nodename, $hardware, $log_id);
my $line = sprintf("%-5s %5d %12s: $msg\n", $level, $time, "$self->{nodename}/$self->{log_id}");
print $line;
-
+
$self->{logfh}->print($line);
$self->{logfh}->flush();
}
use strict;
use warnings;
use POSIX qw(strftime EINTR);
-use JSON;
+use JSON;
use IO::File;
use Fcntl qw(:DEFAULT :flock);
use File::Copy;
my $d = $self->{nodes}->{$node};
- $d->{crm_env} =
+ $d->{crm_env} =
PVE::HA::Env->new('PVE::HA::Sim::TestEnv', $node, $self, 'crm');
- $d->{lrm_env} =
+ $d->{lrm_env} =
PVE::HA::Env->new('PVE::HA::Sim::TestEnv', $node, $self, 'lrm');
$d->{crm} = undef; # create on power on
my $last_command_time = 0;
my $next_cmd_at = 0;
-
+
for (;;) {
my $starttime = $self->get_time();
foreach my $node (@nodes) {
my $d = $self->{nodes}->{$node};
-
+
if (my $crm = $d->{crm}) {
my $exit_crm = !$crm->do_one_iteration();
}
}
-
- $self->{cur_time} = $starttime + $looptime
+
+ $self->{cur_time} = $starttime + $looptime
if ($self->{cur_time} - $starttime) < $looptime;
die "simulation end\n" if $self->{cur_time} > $max_sim_time;
}
next if $self->{cur_time} < $next_cmd_at;
-
+
    # apply new command after 5 loop iterations
if (($self->{loop_count} % 5) == 0) {
PVE::JSONSchema::register_standard_option('pve-ha-resource-id', {
description => "HA resource ID. This consists of a resource type followed by a resource specific name, separated with colon (example: vm:100 / ct:100).",
typetext => "<type>:<name>",
- type => 'string', format => 'pve-ha-resource-id',
+ type => 'string', format => 'pve-ha-resource-id',
});
PVE::JSONSchema::register_format('pve-ha-resource-or-vm-id', \&pve_verify_ha_resource_or_vm_id);
PVE::JSONSchema::register_standard_option('pve-ha-resource-or-vm-id', {
description => "HA resource ID. This consists of a resource type followed by a resource specific name, separated with colon (example: vm:100 / ct:100). For virtual machines and containers, you can simply use the VM or CT id as a shortcut (example: 100).",
typetext => "<type>:<name>",
- type => 'string', format => 'pve-ha-resource-or-vm-id',
+ type => 'string', format => 'pve-ha-resource-or-vm-id',
});
PVE::JSONSchema::register_format('pve-ha-group-node', \&pve_verify_ha_group_node);
my ($ss, $node) = @_;
my $count = 0;
-
+
foreach my $sid (keys %$ss) {
my $sd = $ss->{$sid};
next if !$sd->{node};
next;
}
}
-
+
return $count;
}