# git.proxmox.com Git - pve-ha-manager.git - src/PVE/HA/Env.pm
# Env, HW: add HW fencing related functions
1 package PVE::HA::Env;
2
3 use strict;
4 use warnings;
5
6 use PVE::SafeSyslog;
7 use PVE::Tools;
8
9 # abstract out the cluster environment for a single node
10
# Constructor: wrap a concrete environment implementation ($baseclass)
# for the given $node. All other methods of this class simply delegate
# to that plugged-in implementation.
sub new {
    my $this = shift;
    my ($baseclass, $node, @args) = @_;

    my $class = ref($this) || $this;

    # instantiate the backing implementation for this node
    my $plug = $baseclass->new($node, @args);

    return bless { plug => $plug }, $class;
}
22
# Return the name of the local node (delegated to the plugin).
sub nodename {
    my $self = shift;

    my $plug = $self->{plug};
    return $plug->nodename();
}
28
# Return the hardware abstraction object (delegated to the plugin).
sub hardware {
    my $self = shift;

    my $plug = $self->{plug};
    return $plug->hardware();
}
34
# Read the manager status. It is stored cluster-wide and protected by
# the ha_manager_lock; access is delegated to the plugin.
sub read_manager_status {
    my $self = shift;

    my $plug = $self->{plug};
    return $plug->read_manager_status();
}
41
# Persist the given manager status object (delegated to the plugin).
sub write_manager_status {
    my $self = shift;
    my ($status) = @_;

    return $self->{plug}->write_manager_status($status);
}
47
# Read the LRM status of $node. The LRM status is written by the LRM
# itself, protected by the ha_agent_lock, but may be read by any node
# (e.g. by the CRM).
sub read_lrm_status {
    my $self = shift;
    my ($lrm_node) = @_;

    return $self->{plug}->read_lrm_status($lrm_node);
}
56
# Persist the local node's LRM status object (delegated to the plugin).
sub write_lrm_status {
    my $self = shift;
    my ($status) = @_;

    return $self->{plug}->write_lrm_status($status);
}
62
# Check whether the node is currently shutting down.
# This is used to decide if services should be stopped or frozen.
sub is_node_shutdown {
    my $self = shift;

    my $plug = $self->{plug};
    return $plug->is_node_shutdown();
}
70
# Queue a command for the CRM master (delegated to the plugin).
sub queue_crm_commands {
    my $self = shift;
    my ($command) = @_;

    return $self->{plug}->queue_crm_commands($command);
}
77
# Fetch the queued CRM commands (delegated to the plugin).
sub read_crm_commands {
    my $self = shift;

    my $plug = $self->{plug};
    return $plug->read_crm_commands();
}
83
# Read the HA service (resource) configuration (delegated to the plugin).
sub read_service_config {
    my $self = shift;

    my $plug = $self->{plug};
    return $plug->read_service_config();
}
89
# Read the fencing device configuration (delegated to the plugin).
sub read_fence_config {
    my $self = shift;

    my $plug = $self->{plug};
    return $plug->read_fence_config();
}
95
# Return the configured fencing mode (delegated to the plugin).
sub fencing_mode {
    my $self = shift;

    my $plug = $self->{plug};
    return $plug->fencing_mode();
}
101
# Execute a fence agent against $target_node with the given extra
# parameters (delegated to the plugin).
sub exec_fence_agent {
    my $self = shift;
    my ($fence_agent, $target_node, @agent_args) = @_;

    return $self->{plug}->exec_fence_agent($fence_agent, $target_node, @agent_args);
}
107
# Move a service record from one node to another.
# Normally only the master is allowed to do this, and only to recover
# a _fenced_ service. Delegated to the plugin.
sub steal_service {
    my $self = shift;
    my ($sid, $from_node, $to_node) = @_;

    return $self->{plug}->steal_service($sid, $from_node, $to_node);
}
114
# Read the HA group configuration (delegated to the plugin).
sub read_group_config {
    my $self = shift;

    my $plug = $self->{plug};
    return $plug->read_group_config();
}
120
# Return a hash with information about cluster membership:
# which nodes are members and which are online. Delegated to the plugin.
sub get_node_info {
    my $self = shift;

    my $plug = $self->{plug};
    return $plug->get_node_info();
}
128
# Log a message at the given syslog level (delegated to the plugin).
sub log {
    my $self = shift;
    my ($loglevel, @message) = @_;

    return $self->{plug}->log($loglevel, @message);
}
134
# Try to acquire the cluster-wide manager lock (delegated to the plugin).
sub get_ha_manager_lock {
    my $self = shift;

    my $plug = $self->{plug};
    return $plug->get_ha_manager_lock();
}
141
# Release the cluster-wide manager lock (delegated to the plugin).
# Once released, another CRM may step up and acquire the lock, so this
# should only be called when shutting down/deactivating the current
# master.
sub release_ha_manager_lock {
    my $self = shift;

    my $plug = $self->{plug};
    return $plug->release_ha_manager_lock();
}
150
# Try to acquire the cluster-wide agent lock for $agent_node
# (delegated to the plugin).
sub get_ha_agent_lock {
    my $self = shift;
    my ($agent_node) = @_;

    return $self->{plug}->get_ha_agent_lock($agent_node);
}
157
# Release the local node's agent lock (delegated to the plugin).
# This should only be called when the node's LRM gracefully shuts down
# with all services already cleanly stopped!
sub release_ha_agent_lock {
    my $self = shift;

    my $plug = $self->{plug};
    return $plug->release_ha_agent_lock();
}
166
# Return true when the cluster is quorate (delegated to the plugin).
sub quorate {
    my $self = shift;

    my $plug = $self->{plug};
    return $plug->quorate();
}
173
# Return the current time (delegated to the plugin).
# Implementations may override this to simulate time, e.g. in tests.
sub get_time {
    my $self = shift;

    my $plug = $self->{plug};
    return $plug->get_time();
}
181
# Sleep for $seconds (delegated to the plugin, which may simulate it).
sub sleep {
    my $self = shift;
    my ($seconds) = @_;

    return $self->{plug}->sleep($seconds);
}
187
# Sleep until the absolute time $deadline is reached (delegated to the
# plugin, which may simulate it).
sub sleep_until {
    my $self = shift;
    my ($deadline) = @_;

    return $self->{plug}->sleep_until($deadline);
}
193
# Hook invoked at the start of each main loop iteration (delegated to
# the plugin, forwarding all arguments).
sub loop_start_hook {
    my $self = shift;

    return $self->{plug}->loop_start_hook(@_);
}
199
# Hook invoked at the end of each main loop iteration (delegated to
# the plugin, forwarding all arguments).
sub loop_end_hook {
    my $self = shift;

    return $self->{plug}->loop_end_hook(@_);
}
205
# Open the watchdog device and return its handle (delegated to the
# plugin).
#
# Note: when using /dev/watchdog, make sure perl does not close
# the handle automatically at exit!!
sub watchdog_open {
    my $self = shift;

    my $plug = $self->{plug};
    return $plug->watchdog_open();
}
214
# Keep the watchdog identified by $watchdog_fh alive (delegated to the
# plugin).
sub watchdog_update {
    my $self = shift;
    my ($watchdog_fh) = @_;

    return $self->{plug}->watchdog_update($watchdog_fh);
}
220
# Close the watchdog identified by $watchdog_fh (delegated to the
# plugin).
sub watchdog_close {
    my $self = shift;
    my ($watchdog_fh) = @_;

    return $self->{plug}->watchdog_close($watchdog_fh);
}
226
# Hook called in a child process right after forking (delegated to the
# plugin).
sub after_fork {
    my $self = shift;

    my $plug = $self->{plug};
    return $plug->after_fork();
}
232
# Return the maximal number of workers to fork (delegated to the
# plugin). Implementations may return 0 as a hack to support
# regression tests.
sub get_max_workers {
    my $self = shift;

    my $plug = $self->{plug};
    return $plug->get_max_workers();
}
240
241 1;