# src/PVE/HA/Sim/Env.pm — simulation environment for the Proxmox VE HA manager
package PVE::HA::Sim::Env;

# Extraction mangling fix: the scrape lost `use strict; use warnings;`
# (mandatory for this codebase) and wrapped package names across lines.
use strict;
use warnings;

use POSIX qw(strftime EINTR);
use Fcntl qw(:DEFAULT :flock);

use PVE::HA::Tools; # needed: this module calls PVE::HA::Tools::read/write_json_* below
use PVE::HA::Resources;
use PVE::HA::Sim::Resources::VirtVM;
use PVE::HA::Sim::Resources::VirtCT;

# Register the simulated resource types, then initialize the resource
# plugin system (standard PVE plugin registration pattern).
PVE::HA::Sim::Resources::VirtVM->register();
PVE::HA::Sim::Resources::VirtCT->register();

PVE::HA::Resources->init();
# Create a new simulation environment for one simulated cluster node.
# $this:     class name (or instance) to bless into
# $nodename: name of the simulated node (required)
# $hardware: simulation hardware object; must provide statusdir()
# $log_id:   identifier prefixed to every log line (required)
# NOTE(review): the `sub new {` header and `return $self;` were lost in
# extraction; reconstructed from the surviving body.
sub new {
    my ($this, $nodename, $hardware, $log_id) = @_;

    die "missing nodename" if !$nodename;
    die "missing log_id" if !$log_id;

    my $class = ref($this) || $this;

    my $self = bless {}, $class;

    $self->{statusdir} = $hardware->statusdir();
    $self->{nodename} = $nodename;
    $self->{hardware} = $hardware;
    $self->{lock_timeout} = 120; # seconds until a cluster lock is considered stale
    $self->{log_id} = $log_id;

    return $self;
}
# Return the name of the simulated node this environment represents.
sub nodename {
    my ($self) = @_;

    return $self->{nodename};
}
# Return the simulation hardware object backing this environment.
sub hardware {
    my ($self) = @_;

    return $self->{hardware};
}
# Simulate acquiring (or releasing, when $unlock is true) a cluster-wide
# lock named $lock_name, persisted as JSON in "$statusdir/cluster_locks".
# Returns 1 on success, 0 on failure (including when not quorate).
# A held lock expires after $self->{lock_timeout} seconds and may then be
# taken over by another node.
# NOTE(review): several interior lines (the $unlock branch, $res
# assignments, and the closure wrapper for global_lock) were lost in
# extraction; reconstructed from the surviving fragments — verify against
# the upstream repository.
sub sim_get_lock {
    my ($self, $lock_name, $unlock) = @_;

    return 0 if !$self->quorate();

    my $filename = "$self->{statusdir}/cluster_locks";

    my $code = sub {
        my $data = PVE::HA::Tools::read_json_from_file($filename, {});

        my $res;

        my $nodename = $self->nodename();
        my $ctime = $self->get_time();

        if ($unlock) {
            if (my $d = $data->{$lock_name}) {
                my $tdiff = $ctime - $d->{time};

                if ($tdiff > $self->{lock_timeout}) {
                    # lock already expired — treat release as successful
                    $res = 1;
                } elsif (($tdiff <= $self->{lock_timeout}) && ($d->{node} eq $nodename)) {
                    delete $data->{$lock_name};
                    $res = 1;
                } else {
                    # another node holds a live lock — cannot release it
                    $res = 0;
                }
            }
        } else {
            if (my $d = $data->{$lock_name}) {
                my $tdiff = $ctime - $d->{time};

                if ($tdiff <= $self->{lock_timeout}) {
                    if ($d->{node} eq $nodename) {
                        # we already hold the lock — refresh its timestamp
                        $d->{time} = $ctime;
                        $res = 1;
                    } else {
                        $res = 0;
                    }
                } else {
                    # stale lock — take it over
                    $self->log('info', "got lock '$lock_name'");
                    $d->{node} = $nodename;
                    $d->{time} = $ctime;
                    $res = 1;
                }
            } else {
                $data->{$lock_name} = {
                    node => $nodename,
                    time => $ctime,
                };
                $self->log('info', "got lock '$lock_name'");
                $res = 1;
            }
        }

        PVE::HA::Tools::write_json_to_file($filename, $data);

        return $res;
    };

    return $self->{hardware}->global_lock($code);
}
# Read the CRM manager status from the simulated status directory.
# Returns a hash ref; {} when the file does not exist yet.
sub read_manager_status {
    my ($self) = @_;

    my $filename = "$self->{statusdir}/manager_status";

    return PVE::HA::Tools::read_json_from_file($filename, {});
}
# Persist the CRM manager status object as JSON into the status directory.
sub write_manager_status {
    my ($self, $status_obj) = @_;

    my $filename = "$self->{statusdir}/manager_status";

    PVE::HA::Tools::write_json_to_file($filename, $status_obj);
}
# Read the LRM status of $node (defaults to this environment's own node).
# Delegates to the simulation hardware.
sub read_lrm_status {
    my ($self, $node) = @_;

    $node = $self->{nodename} if !defined($node);

    return $self->{hardware}->read_lrm_status($node);
}
# Write this node's LRM status object via the simulation hardware.
sub write_lrm_status {
    my ($self, $status_obj) = @_;

    my $node = $self->{nodename};

    return $self->{hardware}->write_lrm_status($node, $status_obj);
}
# Whether the local node is currently shutting down.
sub is_node_shutdown {
    my ($self) = @_;

    return 0; # default to freezing services if not overwritten by subclass
}
# Whether a service configuration exists at all.
# NOTE(review): the body of this sub was lost in extraction; in the
# simulation a service config is always available, so this presumably
# returned a constant true — TODO confirm against upstream.
sub service_config_exists {
    my ($self) = @_;

    return 1;
}
# Read the (simulated) service configuration via the hardware object.
sub read_service_config {
    my ($self) = @_;

    return $self->{hardware}->read_service_config();
}
# Read the (simulated) HA group configuration via the hardware object.
sub read_group_config {
    my ($self) = @_;

    return $self->{hardware}->read_group_config();
}
# Move service $sid from $current_node to $new_node in the simulated
# cluster state. Delegates to the hardware object.
sub change_service_location {
    my ($self, $sid, $current_node, $new_node) = @_;

    return $self->{hardware}->change_service_location($sid, $current_node, $new_node);
}
# Queue a command string for the CRM to pick up. Delegates to the hardware.
sub queue_crm_commands {
    my ($self, $cmd) = @_;

    return $self->{hardware}->queue_crm_commands($cmd);
}
# Fetch queued CRM commands. Delegates to the hardware object.
sub read_crm_commands {
    my ($self) = @_;

    return $self->{hardware}->read_crm_commands();
}
# Print a log line in the fixed simulator format:
#   <level> <time> <nodename/log_id>: <msg>
# NOTE(review): the `sub log {` header was lost in extraction;
# reconstructed around the surviving body.
sub log {
    my ($self, $level, $msg) = @_;

    chomp $msg; # keep each entry on exactly one line

    my $time = $self->get_time();

    printf("%-5s %5d %12s: $msg\n", $level, $time, "$self->{nodename}/$self->{log_id}");
}
# Current simulated time in seconds — abstract, must be overridden.
sub get_time {
    my ($self) = @_;

    die "implement in subclass";
}
# Advance simulated time by $delay seconds — abstract, must be overridden.
sub sleep {
    my ($self, $delay) = @_;

    die "implement in subclass";
}
# Sleep until simulated time $end_time — abstract, must be overridden.
sub sleep_until {
    my ($self, $end_time) = @_;

    die "implement in subclass";
}
# Try to acquire the cluster-wide HA manager lock; returns 1 on success.
sub get_ha_manager_lock {
    my ($self) = @_;

    return $self->sim_get_lock('ha_manager_lock');
}
# release the cluster wide manager lock.
# when released another CRM may step up and get the lock, thus this should only
# get called when shutting down/deactivating the current master
sub release_ha_manager_lock {
    my ($self) = @_;

    return $self->sim_get_lock('ha_manager_lock', 1);
}
# Build the per-node agent lock name; $node defaults to the local node.
sub get_ha_agent_lock_name {
    my ($self, $node) = @_;

    $node = $self->nodename() if !$node;

    return "ha_agent_${node}_lock";
}
# Try to acquire the agent lock for $node (defaults to the local node).
sub get_ha_agent_lock {
    my ($self, $node) = @_;

    my $lck = $self->get_ha_agent_lock_name($node);
    return $self->sim_get_lock($lck);
}
# release the respective node agent lock.
# this should only get called if the nodes LRM gracefully shuts down with
# all services already cleanly stopped!
sub release_ha_agent_lock {
    my ($self) = @_;

    my $node = $self->nodename();

    my $lock = $self->get_ha_agent_lock_name($node);
    return $self->sim_get_lock($lock, 1);
}
# return true when cluster is quorate AND the local node is online
# NOTE(review): the `sub quorate {` header and final return were lost in
# extraction; reconstructed from the surviving body.
sub quorate {
    my ($self) = @_;

    my ($node_info, $quorate) = $self->{hardware}->get_node_info();
    my $node = $self->nodename();
    return 0 if !$node_info->{$node}->{online};

    return $quorate;
}
# Return ($node_info, $quorate) from the simulation hardware.
sub get_node_info {
    my ($self) = @_;

    return $self->{hardware}->get_node_info();
}
# Hook invoked at the start of each CRM/LRM work loop iteration.
sub loop_start_hook {
    my ($self, $starttime) = @_;

    # do nothing, overwrite in subclass
}
# Hook invoked at the end of each work loop iteration.
# NOTE(review): the sub header was lost in extraction; only its comment
# survived — name inferred from the matching loop_start_hook, TODO confirm.
sub loop_end_hook {
    my ($self) = @_;

    # do nothing, overwrite in subclass
}
# Open the simulated watchdog for the local node; returns the hardware's
# watchdog handle.
sub watchdog_open {
    my ($self) = @_;

    my $node = $self->nodename();

    return $self->{hardware}->watchdog_open($node);
}
# Refresh the simulated watchdog identified by handle $wfh.
sub watchdog_update {
    my ($self, $wfh) = @_;

    return $self->{hardware}->watchdog_update($wfh);
}
# Close the simulated watchdog identified by handle $wfh.
sub watchdog_close {
    my ($self, $wfh) = @_;

    return $self->{hardware}->watchdog_close($wfh);
}
# Cleanup hook run in a freshly forked worker (real environments close
# inherited handles here).
# NOTE(review): only this sub's comment survived extraction; the sub name
# is inferred — TODO confirm against the upstream repository.
sub after_fork {
    my ($self) = @_;

    # nothing to clean up in the simulation environment
}

1; # module must return true