package PVE::HA::Env;

use strict;
use warnings;

use PVE::SafeSyslog;
use PVE::Tools;

# abstract out the cluster environment for a single node

sub new {
    my ($this, $baseclass, $node, @args) = @_;

    my $class = ref($this) || $this;

    my $plug = $baseclass->new($node, @args);

    my $self = bless { plug => $plug }, $class;

    return $self;
}
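
# Illustrative only (not part of the original comments): the constructor above
# takes a concrete backend class as $baseclass and delegates everything to it.
# A caller might plug in the production backend shipped in this repository,
# for example:
#
#   my $haenv = PVE::HA::Env->new('PVE::HA::Env::PVE2', $nodename);
#   print $haenv->nodename(), "\n";   # delegated to the backend plugin
#
# Test setups can pass a simulator backend class instead; the exact backend
# constructor arguments are an assumption here and depend on the plugin used.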

sub nodename {
    my ($self) = @_;

    return $self->{plug}->nodename();
}

sub hardware {
    my ($self) = @_;

    return $self->{plug}->hardware();
}

# manager status is stored on cluster, protected by ha_manager_lock
sub read_manager_status {
    my ($self) = @_;

    return $self->{plug}->read_manager_status();
}

sub write_manager_status {
    my ($self, $status_obj) = @_;

    return $self->{plug}->write_manager_status($status_obj);
}

# lrm status is written by LRM, protected by ha_agent_lock,
# but can be read by any node (CRM)

sub read_lrm_status {
    my ($self, $node) = @_;

    return $self->{plug}->read_lrm_status($node);
}

sub write_lrm_status {
    my ($self, $status_obj) = @_;

    return $self->{plug}->write_lrm_status($status_obj);
}

# check if we are doing a node shutdown
# we use this to decide if services should be stopped or frozen
sub is_node_shutdown {
    my ($self) = @_;

    return $self->{plug}->is_node_shutdown();
}

# implement a way to send commands to the CRM master
sub queue_crm_commands {
    my ($self, $cmd) = @_;

    return $self->{plug}->queue_crm_commands($cmd);
}

sub read_crm_commands {
    my ($self) = @_;

    return $self->{plug}->read_crm_commands();
}

sub read_service_config {
    my ($self) = @_;

    return $self->{plug}->read_service_config();
}

sub update_service_config {
    my ($self, $sid, $param) = @_;

    return $self->{plug}->update_service_config($sid, $param);
}

sub parse_sid {
    my ($self, $sid) = @_;

    return $self->{plug}->parse_sid($sid);
}

sub read_fence_config {
    my ($self) = @_;

    return $self->{plug}->read_fence_config();
}

sub fencing_mode {
    my ($self) = @_;

    return $self->{plug}->fencing_mode();
}

sub exec_fence_agent {
    my ($self, $agent, $node, @param) = @_;

    return $self->{plug}->exec_fence_agent($agent, $node, @param);
}

# this is normally only allowed by the master to recover a _fenced_ service
sub steal_service {
    my ($self, $sid, $current_node, $new_node) = @_;

    return $self->{plug}->steal_service($sid, $current_node, $new_node);
}

sub read_group_config {
    my ($self) = @_;

    return $self->{plug}->read_group_config();
}

# this should return a hash containing info about
# which nodes are members and online.
sub get_node_info {
    my ($self) = @_;

    return $self->{plug}->get_node_info();
}

sub log {
    my ($self, $level, @args) = @_;

    return $self->{plug}->log($level, @args);
}

sub sendmail {
    my ($self, $subject, $text) = @_;

    return $self->{plug}->sendmail($subject, $text);
}

# acquire a cluster wide manager lock
sub get_ha_manager_lock {
    my ($self) = @_;

    return $self->{plug}->get_ha_manager_lock();
}

# release the cluster wide manager lock.
# when released, another CRM may step up and get the lock, thus this should
# only get called when shutting down/deactivating the current master
sub release_ha_manager_lock {
    my ($self) = @_;

    return $self->{plug}->release_ha_manager_lock();
}

# acquire a cluster wide node agent lock
sub get_ha_agent_lock {
    my ($self, $node) = @_;

    return $self->{plug}->get_ha_agent_lock($node);
}

# release the respective node agent lock.
# this should only get called if the node's LRM gracefully shuts down with
# all services already cleanly stopped!
sub release_ha_agent_lock {
    my ($self) = @_;

    return $self->{plug}->release_ha_agent_lock();
}

# return true when cluster is quorate
sub quorate {
    my ($self) = @_;

    return $self->{plug}->quorate();
}

# return the current time
# override this if you want to simulate time
sub get_time {
    my ($self) = @_;

    return $self->{plug}->get_time();
}

sub sleep {
    my ($self, $delay) = @_;

    return $self->{plug}->sleep($delay);
}

sub sleep_until {
    my ($self, $end_time) = @_;

    return $self->{plug}->sleep_until($end_time);
}
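
# Illustrative pattern (an assumption, not something this module mandates):
# since get_time() and sleep_until() both go through the plugin, a caller can
# build timed loops that also work against a simulated clock, e.g.
#
#   my $next_round = $haenv->get_time() + 10;
#   # ... do one round of work ...
#   $haenv->sleep_until($next_round);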

sub loop_start_hook {
    my ($self, @args) = @_;

    return $self->{plug}->loop_start_hook(@args);
}

sub loop_end_hook {
    my ($self, @args) = @_;

    return $self->{plug}->loop_end_hook(@args);
}

sub cluster_state_update {
    my ($self) = @_;

    return $self->{plug}->cluster_state_update();
}

sub watchdog_open {
    my ($self) = @_;

    # Note: when using /dev/watchdog, make sure perl does not close
    # the handle automatically at exit!!

    return $self->{plug}->watchdog_open();
}

sub watchdog_update {
    my ($self, $wfh) = @_;

    return $self->{plug}->watchdog_update($wfh);
}

sub watchdog_close {
    my ($self, $wfh) = @_;

    return $self->{plug}->watchdog_close($wfh);
}
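
# Illustrative lifecycle sketch (an assumption based on the signatures above,
# not a definitive description of how the daemons use it): watchdog_open()
# returns a handle that is then passed to the update and close calls, e.g.
#
#   my $wfh = $haenv->watchdog_open();
#   $haenv->watchdog_update($wfh);   # repeated from the main loop
#   $haenv->watchdog_close($wfh);    # only on a clean shutdown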

sub after_fork {
    my ($self) = @_;

    return $self->{plug}->after_fork();
}

# maximum number of workers to fork;
# returns 0 as a hack to support regression tests
sub get_max_workers {
    my ($self) = @_;

    return $self->{plug}->get_max_workers();
}

# return cluster wide enforced HA settings
sub get_datacenter_settings {
    my ($self) = @_;

    return $self->{plug}->get_datacenter_settings();
}

sub get_static_node_stats {
    my ($self) = @_;

    return $self->{plug}->get_static_node_stats();
}

1;