# Source: pve-ha-manager.git — src/PVE/HA/Env.pm (git.proxmox.com)
# abstract out the cluster environment for a single node

# Constructor: wrap a concrete environment implementation ("plug").
# $baseclass is the implementation class, $node and any @args are
# forwarded to its constructor. Every other method of this package
# simply delegates to the wrapped plug object.
sub new {
    my ($this, $baseclass, $node, @args) = @_;

    # allow calling as class or instance method
    my $class = ref($this) || $this;

    # instantiate the concrete environment backend
    my $plug = $baseclass->new($node, @args);

    my $self = bless { plug => $plug }, $class;

    return $self;
}
# Return the name of the local node (delegated to the plug).
sub nodename {
    my ($self) = @_;

    return $self->{plug}->nodename();
}

# Return the backing "hardware" object of the plug.
# NOTE(review): presumably only meaningful for the simulation/testing
# environment — confirm against the concrete Env implementations.
sub hardware {
    my ($self) = @_;

    return $self->{plug}->hardware();
}
# manager status is stored on cluster, protected by ha_manager_lock
sub read_manager_status {
    my ($self) = @_;

    return $self->{plug}->read_manager_status();
}

sub write_manager_status {
    my ($self, $status_obj) = @_;

    return $self->{plug}->write_manager_status($status_obj);
}

# lrm status is written by LRM, protected by ha_agent_lock,
# but can be read by any node (CRM)
sub read_lrm_status {
    my ($self, $node) = @_;

    return $self->{plug}->read_lrm_status($node);
}

sub write_lrm_status {
    my ($self, $status_obj) = @_;

    return $self->{plug}->write_lrm_status($status_obj);
}
# check if we do node shutdown
# we used this to decide if services should be stopped or freezed
sub is_node_shutdown {
    my ($self) = @_;

    return $self->{plug}->is_node_shutdown();
}

# implement a way to send commands to the CRM master
sub queue_crm_commands {
    my ($self, $cmd) = @_;

    return $self->{plug}->queue_crm_commands($cmd);
}

sub read_crm_commands {
    my ($self) = @_;

    return $self->{plug}->read_crm_commands();
}
sub read_service_config {
    my ($self) = @_;

    return $self->{plug}->read_service_config();
}

sub read_fence_config {
    my ($self) = @_;

    return $self->{plug}->read_fence_config();
}

sub fencing_mode {
    my ($self) = @_;

    return $self->{plug}->fencing_mode();
}

sub exec_fence_agent {
    my ($self, $agent, $node, @param) = @_;

    return $self->{plug}->exec_fence_agent($agent, $node, @param);
}
# this is normally only allowed by the master to recover a _fenced_ service
sub steal_service {
    my ($self, $sid, $current_node, $new_node) = @_;

    return $self->{plug}->steal_service($sid, $current_node, $new_node);
}

sub read_group_config {
    my ($self) = @_;

    return $self->{plug}->read_group_config();
}

# this should return a hash containing info
# what nodes are members and online.
sub get_node_info {
    my ($self) = @_;

    return $self->{plug}->get_node_info();
}

# forward a log message to the plug; $level plus message arguments
sub log {
    my ($self, $level, @args) = @_;

    return $self->{plug}->log($level, @args);
}
# acquire a cluster wide manager lock
sub get_ha_manager_lock {
    my ($self) = @_;

    return $self->{plug}->get_ha_manager_lock();
}

# release the cluster wide manager lock.
# when released another CRM may step up and get the lock, thus this should only
# get called when shutting down/deactivating the current master
sub release_ha_manager_lock {
    my ($self) = @_;

    return $self->{plug}->release_ha_manager_lock();
}

# acquire a cluster wide node agent lock
sub get_ha_agent_lock {
    my ($self, $node) = @_;

    return $self->{plug}->get_ha_agent_lock($node);
}

# release the respective node agent lock.
# this should only get called if the nodes LRM gracefully shuts down with
# all services already cleanly stopped!
sub release_ha_agent_lock {
    my ($self) = @_;

    return $self->{plug}->release_ha_agent_lock();
}
# return true when cluster is quorate
sub quorate {
    my ($self) = @_;

    return $self->{plug}->quorate();
}

# return current time
# overwrite that if you want to simulate
sub get_time {
    my ($self) = @_;

    return $self->{plug}->get_time();
}

# sleep for $delay seconds (simulated environments may fast-forward)
sub sleep {
    my ($self, $delay) = @_;

    return $self->{plug}->sleep($delay);
}

# sleep until the absolute timestamp $end_time is reached
sub sleep_until {
    my ($self, $end_time) = @_;

    return $self->{plug}->sleep_until($end_time);
}

sub loop_start_hook {
    my ($self, @args) = @_;

    return $self->{plug}->loop_start_hook(@args);
}

sub loop_end_hook {
    my ($self, @args) = @_;

    return $self->{plug}->loop_end_hook(@args);
}
# Note: when using /dev/watchdog, make sure perl does not close
# the handle automatically at exit!!
sub watchdog_open {
    my ($self) = @_;

    return $self->{plug}->watchdog_open();
}

sub watchdog_update {
    my ($self, $wfh) = @_;

    return $self->{plug}->watchdog_update($wfh);
}

sub watchdog_close {
    my ($self, $wfh) = @_;

    return $self->{plug}->watchdog_close($wfh);
}

# hook called in a child right after fork(); lets the plug reset
# state that must not be shared with the parent
sub after_fork {
    my ($self) = @_;

    return $self->{plug}->after_fork();
}
# maximal number of workers to fork,
# return 0 as a hack to support regression tests
sub get_max_workers {
    my ($self) = @_;

    return $self->{plug}->get_max_workers();
}