# PVE::HA::Sim::Env — recovered from a gitweb scrape; leading HTML residue removed.
package PVE::HA::Sim::Env;

# Simulation environment for the Proxmox HA stack: implements the HA
# environment interface on top of a simulated hardware model
# (PVE::HA::Sim::Hardware) for regression testing.

use strict;
use warnings;

use POSIX qw(strftime EINTR);
use Fcntl qw(:DEFAULT :flock);

# NOTE(review): the scrape dropped some 'use' lines here; PVE::HA::Tools is
# required for the fully-qualified calls below — verify against upstream.
use PVE::HA::Tools;
# Constructor.
#   $this     - class name (or object to re-bless from)
#   $nodename - simulated node name (required)
#   $hardware - PVE::HA::Sim::Hardware instance providing statusdir()
#   $log_id   - identifier prepended to log lines (required)
# NOTE(review): 'sub new {' header and 'return $self;' were missing from the
# scrape; both are forced by the constructor pattern — verify upstream.
sub new {
    my ($this, $nodename, $hardware, $log_id) = @_;

    die "missing nodename" if !$nodename;
    die "missing log_id" if !$log_id;

    my $class = ref($this) || $this;

    my $self = bless {}, $class;

    $self->{statusdir} = $hardware->statusdir();
    $self->{nodename} = $nodename;

    $self->{hardware} = $hardware;
    $self->{lock_timeout} = 120; # seconds until a cluster lock is considered stale

    $self->{log_id} = $log_id;

    return $self;
}
# Accessor: name of the simulated node this environment belongs to.
sub nodename {
    my ($self) = @_;

    return $self->{nodename};
}
# Simulate cluster-wide lock acquire/release via a shared JSON file in the
# status directory, executed under the hardware's global lock.
#   $lock_name - name of the lock
#   $unlock    - if true, release instead of acquire
# Returns 1 on success, 0 on failure, and 0 immediately without quorum.
# NOTE(review): several interior lines were missing from the scrape; the
# $res bookkeeping and the '$d->{time} = $ctime' renew/steal updates are
# reconstructed — verify against upstream pve-ha-manager.
sub sim_get_lock {
    my ($self, $lock_name, $unlock) = @_;

    return 0 if !$self->quorate();

    my $filename = "$self->{statusdir}/cluster_locks";

    my $code = sub {
        my $data = PVE::HA::Tools::read_json_from_file($filename, {});

        my $res;

        my $nodename = $self->nodename();
        my $ctime = $self->get_time();

        if ($unlock) {

            if (my $d = $data->{$lock_name}) {
                my $tdiff = $ctime - $d->{time};

                if ($tdiff > $self->{lock_timeout}) {
                    $res = 1; # lock already timed out - nothing to release
                } elsif (($tdiff <= $self->{lock_timeout}) && ($d->{node} eq $nodename)) {
                    delete $data->{$lock_name};
                    $res = 1;
                } else {
                    $res = 0; # held by another node - cannot release
                }
            }

        } else {

            if (my $d = $data->{$lock_name}) {

                my $tdiff = $ctime - $d->{time};

                if ($tdiff <= $self->{lock_timeout}) {
                    if ($d->{node} eq $nodename) {
                        $d->{time} = $ctime; # renew our own lock
                        $res = 1;
                    } else {
                        $res = 0; # lock held by another node
                    }
                } else {
                    # lock timed out - steal it
                    $self->log('info', "got lock '$lock_name'");
                    $d->{node} = $nodename;
                    $d->{time} = $ctime;
                    $res = 1;
                }

            } else {
                # no such lock yet - create it
                $data->{$lock_name} = {
                    node => $nodename,
                    time => $ctime,
                };
                $self->log('info', "got lock '$lock_name'");
                $res = 1;
            }
        }

        PVE::HA::Tools::write_json_to_file($filename, $data);

        return $res;
    };

    return $self->{hardware}->global_lock($code);
}
# Read the CRM manager status JSON from the status directory; returns an
# empty hashref if the file does not exist yet.
sub read_manager_status {
    my ($self) = @_;

    my $filename = "$self->{statusdir}/manager_status";

    return PVE::HA::Tools::read_json_from_file($filename, {});
}
# Persist the CRM manager status object as JSON into the status directory.
sub write_manager_status {
    my ($self, $status_obj) = @_;

    my $filename = "$self->{statusdir}/manager_status";

    PVE::HA::Tools::write_json_to_file($filename, $status_obj);
}
# Read the LRM status for $node (defaults to the local node), delegated to
# the simulated hardware.
sub read_lrm_status {
    my ($self, $node) = @_;

    $node = $self->{nodename} if !defined($node);

    return $self->{hardware}->read_lrm_status($node);
}
# Write the local node's LRM status object, delegated to the simulated
# hardware (always writes for the local node).
sub write_lrm_status {
    my ($self, $status_obj) = @_;

    my $node = $self->{nodename};

    return $self->{hardware}->write_lrm_status($node, $status_obj);
}
# Whether the local node is currently shutting down.
# NOTE(review): the sub header was missing from the scrape; reconstructed as
# is_node_shutdown per upstream pve-ha-manager — verify.
sub is_node_shutdown {
    my ($self) = @_;

    return 0; # default to freezing services if not overwritten by subclass
}
# Whether a service configuration exists at all.
# NOTE(review): the body was missing from the scrape; reconstructed as a
# constant true (the simulation always has a config) — verify upstream.
sub service_config_exists {
    my ($self) = @_;

    return 1;
}
# Read the full service configuration, delegated to the simulated hardware.
sub read_service_config {
    my ($self) = @_;

    return $self->{hardware}->read_service_config();
}
# Read the HA group configuration, delegated to the simulated hardware.
sub read_group_config {
    my ($self) = @_;

    return $self->{hardware}->read_group_config();
}
# Move service $sid from $current_node to $new_node in the simulated
# cluster state, delegated to the hardware model.
sub change_service_location {
    my ($self, $sid, $current_node, $new_node) = @_;

    return $self->{hardware}->change_service_location($sid, $current_node, $new_node);
}
# Queue a CRM command string for later processing by the manager, delegated
# to the simulated hardware.
sub queue_crm_commands {
    my ($self, $cmd) = @_;

    return $self->{hardware}->queue_crm_commands($cmd);
}
# Fetch (and consume) queued CRM commands, delegated to the simulated
# hardware.
sub read_crm_commands {
    my ($self) = @_;

    return $self->{hardware}->read_crm_commands();
}
# Print a log line formatted as "<level> <time> <nodename>/<log_id>: <msg>".
# NOTE(review): the sub header and the lines between the arg unpack and the
# get_time() call were missing from the scrape; 'chomp $msg' is reconstructed
# per upstream — verify.
sub log {
    my ($self, $level, $msg) = @_;

    chomp $msg;

    my $time = $self->get_time();

    printf("%-5s %5d %12s: $msg\n", $level, $time, "$self->{nodename}/$self->{log_id}");
}
# Return the current (simulated) time — must be provided by a subclass.
# NOTE(review): sub header missing from scrape; name reconstructed per
# upstream pve-ha-manager — verify.
sub get_time {
    my ($self) = @_;

    die "implement in subclass";
}
# Sleep for $delay (simulated) seconds — must be provided by a subclass.
# NOTE(review): sub header missing from scrape; name reconstructed per
# upstream pve-ha-manager — verify.
sub sleep {
    my ($self, $delay) = @_;

    die "implement in subclass";
}
# Sleep until the (simulated) time reaches $end_time — must be provided by
# a subclass.
# NOTE(review): sub header missing from scrape; name reconstructed per
# upstream pve-ha-manager — verify.
sub sleep_until {
    my ($self, $end_time) = @_;

    die "implement in subclass";
}
# Try to acquire the cluster-wide HA manager lock via the simulated
# cluster-lock mechanism.
sub get_ha_manager_lock {
    my ($self) = @_;

    return $self->sim_get_lock('ha_manager_lock');
}
# Build the per-node HA agent lock name; $node defaults to the local node.
sub get_ha_agent_lock_name {
    my ($self, $node) = @_;

    $node = $self->nodename() if !$node;

    return "ha_agent_${node}_lock";
}
# Try to acquire the HA agent lock for $node (defaults to local node) via
# the simulated cluster-lock mechanism.
sub get_ha_agent_lock {
    my ($self, $node) = @_;

    my $lck = $self->get_ha_agent_lock_name($node);
    return $self->sim_get_lock($lck);
}
# return true when cluster is quorate
# A node that is itself offline is never quorate, regardless of the
# cluster-wide quorum flag.
# NOTE(review): sub header and final 'return $quorate;' were missing from the
# scrape; both are forced by the captured $quorate value — verify upstream.
sub quorate {
    my ($self) = @_;

    my ($node_info, $quorate) = $self->{hardware}->get_node_info();
    my $node = $self->nodename();
    return 0 if !$node_info->{$node}->{online};

    return $quorate;
}
# Return ($node_info, $quorate) from the simulated hardware.
# NOTE(review): sub header missing from scrape; name forced by the
# delegation target — verify upstream.
sub get_node_info {
    my ($self) = @_;

    return $self->{hardware}->get_node_info();
}
# Hook invoked at the start of each CRM/LRM work loop; $starttime is the
# loop start timestamp.
sub loop_start_hook {
    my ($self, $starttime) = @_;

    # do nothing, overwrite in subclass
}
# Hook invoked at the end of each CRM/LRM work loop.
# NOTE(review): sub header missing from scrape; name reconstructed as the
# counterpart of loop_start_hook per upstream — verify.
sub loop_end_hook {
    my ($self) = @_;

    # do nothing, overwrite in subclass
}
# Open the simulated watchdog for the local node; returns the hardware's
# watchdog handle.
# NOTE(review): sub header missing from scrape; name forced by the
# delegation target — verify upstream.
sub watchdog_open {
    my ($self) = @_;

    my $node = $self->nodename();

    return $self->{hardware}->watchdog_open($node);
}
# Refresh (pet) the simulated watchdog identified by handle $wfh.
sub watchdog_update {
    my ($self, $wfh) = @_;

    return $self->{hardware}->watchdog_update($wfh);
}
# Close the simulated watchdog identified by handle $wfh.
# NOTE(review): sub header missing from scrape; name forced by the
# delegation target — verify upstream.
sub watchdog_close {
    my ($self, $wfh) = @_;

    return $self->{hardware}->watchdog_close($wfh);
}
# Simulated resource agent: execute $cmd for service $sid (service config
# $cd) on the local node.
#   $cmd: 'started', 'request_stop'/'stopped', 'migrate'/'relocate' (first
#   element of @params is the migration target node).
# Returns 0 on success; dies on error or unknown command.
# NOTE(review): many interior lines were missing from the scrape; the
# $ss->{$sid} state updates, the already-running/already-stopped early
# returns, and the 'return 0' success paths are reconstructed — verify
# against upstream pve-ha-manager.
sub exec_resource_agent {
    my ($self, $sid, $cd, $cmd, @params) = @_;

    my $hardware = $self->{hardware};

    my $nodename = $self->{nodename};

    # fixme: return valid_exit code (instead of using die)

    # per-node service status map: $ss->{$sid} is true when running
    my $ss = $hardware->read_service_status($nodename);

    if ($cmd eq 'started') {

        # fixme: return valid_exit code
        die "service '$sid' not on this node" if $cd->{node} ne $nodename;

        return 0 if $ss->{$sid}; # already running

        $self->log("info", "starting service $sid");

        $ss->{$sid} = 1;

        $hardware->write_service_status($nodename, $ss);

        $self->log("info", "service status $sid started");

        return 0;

    } elsif ($cmd eq 'request_stop' || $cmd eq 'stopped') {

        # fixme: return valid_exit code
        die "service '$sid' not on this node" if $cd->{node} ne $nodename;

        return 0 if !$ss->{$sid}; # already stopped

        $self->log("info", "stopping service $sid");

        $ss->{$sid} = 0;

        $hardware->write_service_status($nodename, $ss);

        $self->log("info", "service status $sid stopped");

        return 0;

    } elsif ($cmd eq 'migrate' || $cmd eq 'relocate') {

        my $target = $params[0];
        die "$cmd '$sid' failed - missing target\n" if !defined($target);

        if ($cd->{node} eq $target) {
            # already there - nothing to do
        } elsif ($cd->{node} eq $nodename) {

            $self->log("info", "service $sid - start $cmd to node '$target'");

            # relocate stops the service before moving it
            if ($cmd eq 'relocate' && $ss->{$sid}) {
                $self->log("info", "stopping service $sid (relocate)");

                $ss->{$sid} = 0;
                $hardware->write_service_status($nodename, $ss);

                $self->log("info", "service status $sid stopped");
            }

            $self->change_service_location($sid, $nodename, $target);
            $self->log("info", "service $sid - end $cmd to node '$target'");

        } else {
            die "migrate '$sid' failed - service is not on this node\n";
        }

        return 0;
    }

    die "implement me (cmd '$cmd')";
}