# src/PVE/HA/Env.pm (pve-ha-manager)
# abstract out the cluster environment for a single node
# Constructor: wrap a concrete environment plugin.
# $baseclass is the plugin class (e.g. PVE::HA::Env::PVE2 or the sim env);
# it is instantiated with ($node, @args) and stored as $self->{plug}, to
# which every other method of this class delegates.
sub new {
    my ($this, $baseclass, $node, @args) = @_;

    my $class = ref($this) || $this;

    my $plug = $baseclass->new($node, @args);

    my $self = bless { plug => $plug }, $class;

    return $self;
}
# Return the local node name (delegated to the plugin).
sub nodename {
    my ($self) = @_;

    return $self->{plug}->nodename();
}
# Return the plugin's hardware handle (delegated; presumably only
# meaningful for the simulation/test environment — verify in plugin).
sub hardware {
    my ($self) = @_;

    return $self->{plug}->hardware();
}
# manager status is stored on cluster, protected by ha_manager_lock
sub read_manager_status {
    my ($self) = @_;

    return $self->{plug}->read_manager_status();
}
# Persist the CRM manager status object (caller must hold ha_manager_lock).
sub write_manager_status {
    my ($self, $status_obj) = @_;

    return $self->{plug}->write_manager_status($status_obj);
}
# lrm status is written by LRM, protected by ha_agent_lock,
# but can be read by any node (CRM)
sub read_lrm_status {
    my ($self, $node) = @_;

    return $self->{plug}->read_lrm_status($node);
}
# Persist this node's LRM status object (caller must hold ha_agent_lock).
sub write_lrm_status {
    my ($self, $status_obj) = @_;

    return $self->{plug}->write_lrm_status($status_obj);
}
# check if we do node shutdown
# we used this to decide if services should be stopped or frozen
sub is_node_shutdown {
    my ($self) = @_;

    return $self->{plug}->is_node_shutdown();
}
# implement a way to send commands to the CRM master
sub queue_crm_commands {
    my ($self, $cmd) = @_;

    return $self->{plug}->queue_crm_commands($cmd);
}
# Fetch (and consume) queued CRM commands — counterpart of queue_crm_commands.
sub read_crm_commands {
    my ($self) = @_;

    return $self->{plug}->read_crm_commands();
}
# Read the HA service (resource) configuration from the plugin.
sub read_service_config {
    my ($self) = @_;

    return $self->{plug}->read_service_config();
}
# Record that service $sid moved from $current_node to $new_node.
sub change_service_location {
    my ($self, $sid, $current_node, $new_node) = @_;

    return $self->{plug}->change_service_location($sid, $current_node, $new_node);
}
# Read the HA group configuration from the plugin.
sub read_group_config {
    my ($self) = @_;

    return $self->{plug}->read_group_config();
}
# this should return a hash containing info about
# which nodes are members and online.
sub get_node_info {
    my ($self) = @_;

    return $self->{plug}->get_node_info();
}
# Log a message at the given level via the plugin's logging facility.
sub log {
    my ($self, $level, @args) = @_;

    return $self->{plug}->log($level, @args);
}
# acquire a cluster wide manager lock
sub get_ha_manager_lock {
    my ($self) = @_;

    return $self->{plug}->get_ha_manager_lock();
}
# release the cluster wide manager lock.
# when released another CRM may step up and get the lock, thus this should only
# get called when shutting down/deactivating the current master
sub release_ha_manager_lock {
    my ($self) = @_;

    return $self->{plug}->release_ha_manager_lock();
}
# acquire a cluster wide node agent lock
sub get_ha_agent_lock {
    my ($self, $node) = @_;

    return $self->{plug}->get_ha_agent_lock($node);
}
# release the respective node agent lock.
# this should only get called if the nodes LRM gracefully shuts down with
# all services already cleanly stopped!
sub release_ha_agent_lock {
    my ($self) = @_;

    return $self->{plug}->release_ha_agent_lock();
}
# return true when cluster is quorate
sub quorate {
    my ($self) = @_;

    return $self->{plug}->quorate();
}
# return current time
# overwrite that if you want to simulate
sub get_time {
    my ($self) = @_;

    return $self->{plug}->get_time();
}
# Sleep for $delay seconds (delegated so the simulator can fake time).
sub sleep {
    my ($self, $delay) = @_;

    return $self->{plug}->sleep($delay);
}
# Sleep until the absolute time $end_time (delegated, simulatable).
sub sleep_until {
    my ($self, $end_time) = @_;

    return $self->{plug}->sleep_until($end_time);
}
# Hook invoked at the start of each CRM/LRM main-loop iteration.
sub loop_start_hook {
    my ($self, @args) = @_;

    return $self->{plug}->loop_start_hook(@args);
}
# Hook invoked at the end of each CRM/LRM main-loop iteration.
sub loop_end_hook {
    my ($self, @args) = @_;

    return $self->{plug}->loop_end_hook(@args);
}
# Note: when using /dev/watchdog, make sure perl does not close
# the handle automatically at exit!!
sub watchdog_open {
    my ($self) = @_;

    return $self->{plug}->watchdog_open();
}
# Refresh the watchdog using the handle returned by watchdog_open().
sub watchdog_update {
    my ($self, $wfh) = @_;

    return $self->{plug}->watchdog_update($wfh);
}
# Close the watchdog handle returned by watchdog_open().
sub watchdog_close {
    my ($self, $wfh) = @_;

    return $self->{plug}->watchdog_close($wfh);
}
# Plugin hook run after fork() — presumably to reset inherited state
# such as file handles; verify exact semantics in the plugin classes.
sub after_fork {
    my ($self) = @_;

    return $self->{plug}->after_fork();
}
# maximal number of workers to fork,
# return 0 as a hack to support regression tests
sub get_max_workers {
    my ($self) = @_;

    return $self->{plug}->get_max_workers();
}