]> git.proxmox.com Git - pve-ha-manager.git/blob - src/PVE/HA/Sim/Env.pm
56b8bda5e92639919eb3f6c6d7745557716a575e
[pve-ha-manager.git] / src / PVE / HA / Sim / Env.pm
1 package PVE::HA::Sim::Env;
2
3 use strict;
4 use warnings;
5 use POSIX qw(strftime EINTR);
6 use Data::Dumper;
7 use JSON;
8 use IO::File;
9 use Fcntl qw(:DEFAULT :flock);
10
11 use PVE::HA::Tools;
12 use PVE::HA::Env;
13
# Create a new simulation environment for one cluster node.
# $this     - class name (or object, for ref($this) semantics)
# $nodename - name of the simulated node (required)
# $hardware - simulation hardware backend; must provide statusdir() (required)
# $log_id   - identifier used as prefix in log output (required)
# Returns the blessed environment object.
sub new {
    my ($this, $nodename, $hardware, $log_id) = @_;

    die "missing nodename" if !$nodename;
    # previously unchecked - a missing hardware object only failed later
    # with an obscure "Can't call method statusdir" error
    die "missing hardware" if !$hardware;
    die "missing log_id" if !$log_id;

    my $class = ref($this) || $this;

    my $self = bless {}, $class;

    $self->{statusdir} = $hardware->statusdir();
    $self->{nodename} = $nodename;

    $self->{hardware} = $hardware;
    $self->{lock_timeout} = 120; # seconds a cluster lock stays valid without renewal

    $self->{log_id} = $log_id;

    return $self;
}
34
# Return the name of the simulated node this environment represents.
sub nodename {
    my $self = shift;

    return $self->{nodename};
}
40
# Acquire, renew or release a named cluster-wide lock (simulates pmxcfs
# lock semantics). A lock is owned by one node and expires after
# $self->{lock_timeout} seconds unless renewed. State is kept in a shared
# JSON file; the read-modify-write cycle runs under the hardware's global
# lock to keep it atomic. Returns a true value on success, false otherwise.
sub sim_get_lock {
    my ($self, $lock_name, $unlock) = @_;

    # locks can only be taken or released while the node is quorate
    return 0 if !$self->quorate();

    my $filename = "$self->{statusdir}/cluster_locks";

    my $code = sub {

        my $data = PVE::HA::Tools::read_json_from_file($filename, {});

        my $res;

        my $nodename = $self->nodename();
        my $ctime = $self->get_time();

        if ($unlock) {

            if (my $d = $data->{$lock_name}) {
                my $tdiff = $ctime - $d->{time};

                if ($tdiff > $self->{lock_timeout}) {
                    # lock already expired - report success, but the stale
                    # entry is left in place (any node may take it over later)
                    $res = 1;
                } elsif (($tdiff <= $self->{lock_timeout}) && ($d->{node} eq $nodename)) {
                    # we own the still-valid lock - actually remove it
                    delete $data->{$lock_name};
                    $res = 1;
                } else {
                    # valid lock held by another node - refuse to unlock
                    $res = 0;
                }
            }
            # NOTE(review): unlocking a lock that does not exist leaves
            # $res undefined, which callers see as a falsy result

        } else {

            if (my $d = $data->{$lock_name}) {

                my $tdiff = $ctime - $d->{time};

                if ($tdiff <= $self->{lock_timeout}) {
                    if ($d->{node} eq $nodename) {
                        # we already hold the lock - renew its timestamp
                        $d->{time} = $ctime;
                        $res = 1;
                    } else {
                        # held by another node and not yet expired
                        $res = 0;
                    }
                } else {
                    # lock expired - take it over for this node
                    $self->log('info', "got lock '$lock_name'");
                    $d->{node} = $nodename;
                    $d->{time} = $ctime;
                    $res = 1;
                }

            } else {
                # no such lock yet - create and acquire it
                $data->{$lock_name} = {
                    time => $ctime,
                    node => $nodename,
                };
                $self->log('info', "got lock '$lock_name'");
                $res = 1;
            }
        }

        PVE::HA::Tools::write_json_to_file($filename, $data);

        return $res;
    };

    return $self->{hardware}->global_lock($code);
}
109
# Load the global CRM manager status object from the status directory;
# yields an empty hash if the file does not exist yet.
sub read_manager_status {
    my $self = shift;

    my $path = "$self->{statusdir}/manager_status";

    return PVE::HA::Tools::read_json_from_file($path, {});
}
117
# Persist the global CRM manager status object to the status directory.
sub write_manager_status {
    my ($self, $status_obj) = @_;

    my $path = "$self->{statusdir}/manager_status";

    PVE::HA::Tools::write_json_to_file($path, $status_obj);
}
125
# Read the LRM status of $node; defaults to the local node when no
# node name is given.
sub read_lrm_status {
    my ($self, $node) = @_;

    $node = $self->{nodename} unless defined $node;

    return $self->{hardware}->read_lrm_status($node);
}
133
# Write the local node's LRM status object via the hardware backend.
sub write_lrm_status {
    my ($self, $status_obj) = @_;

    return $self->{hardware}->write_lrm_status($self->{nodename}, $status_obj);
}
141
# Report whether the local node is shutting down. The simulation base
# class always answers "no", which makes the LRM freeze services;
# subclasses override this to simulate shutdown/reboot.
sub is_node_shutdown {
    my $self = shift;

    return 0;
}
147
# The simulated cluster always has a service configuration available.
sub service_config_exists {
    my $self = shift;

    return 1;
}
153
# Fetch the HA service configuration from the simulated hardware.
sub read_service_config {
    my $self = shift;

    return $self->{hardware}->read_service_config();
}
159
# Fetch the HA group configuration from the simulated hardware.
sub read_group_config {
    my $self = shift;

    return $self->{hardware}->read_group_config();
}
165
# Record that service $sid moved from $current_node to $new_node in the
# simulated cluster configuration.
sub change_service_location {
    my ($self, $sid, $current_node, $new_node) = @_;

    my $hardware = $self->{hardware};

    return $hardware->change_service_location($sid, $current_node, $new_node);
}
171
# Append a CRM command to the simulated command queue.
sub queue_crm_commands {
    my ($self, $cmd) = @_;

    return $self->{hardware}->queue_crm_commands($cmd);
}
177
# Drain and return the pending CRM commands from the simulated queue.
sub read_crm_commands {
    my $self = shift;

    return $self->{hardware}->read_crm_commands();
}
183
# Print one log line of the form "<level> <time> <node/log_id>: <msg>"
# to the currently selected output handle.
sub log {
    my ($self, $level, $msg) = @_;

    chomp $msg;

    my $time = $self->get_time();

    # pass $msg as a printf *argument* instead of interpolating it into
    # the format string - otherwise a literal '%' inside a log message is
    # misparsed as a format directive and garbles the output
    printf("%-5s %5d %12s: %s\n", $level, $time, "$self->{nodename}/$self->{log_id}", $msg);
}
193
# Abstract: return the current simulation time in seconds.
# Concrete environment subclasses must override this.
sub get_time {
    my $self = shift;

    die "implement in subclass";
}
199
# Abstract: advance the simulation by $delay seconds.
# Concrete environment subclasses must override this.
sub sleep {
    my ($self, $delay) = @_;

    die "implement in subclass";
}
205
# Abstract: advance the simulation until the given absolute time.
# Concrete environment subclasses must override this.
sub sleep_until {
    my ($self, $end_time) = @_;

    die "implement in subclass";
}
211
# Try to acquire (or renew) the cluster-wide HA manager lock.
sub get_ha_manager_lock {
    my $self = shift;

    my $lock = 'ha_manager_lock';

    return $self->sim_get_lock($lock);
}
217
# Build the per-node HA agent lock name; defaults to the local node
# when $node is not given (or false).
sub get_ha_agent_lock_name {
    my ($self, $node) = @_;

    $node ||= $self->nodename();

    return "ha_agent_${node}_lock";
}
225
# Try to acquire (or renew) the HA agent lock for $node (default: local node).
sub get_ha_agent_lock {
    my ($self, $node) = @_;

    return $self->sim_get_lock($self->get_ha_agent_lock_name($node));
}
232
# Return true when the cluster is quorate AND the local node is online;
# an offline node never counts as quorate, regardless of cluster state.
sub quorate {
    my $self = shift;

    my ($node_info, $quorate) = $self->{hardware}->get_node_info();

    my $online = $node_info->{ $self->nodename() }->{online};

    return $online ? $quorate : 0;
}
242
# Return the hardware backend's node info: ($node_info_hash, $quorate).
sub get_node_info {
    my $self = shift;

    return $self->{hardware}->get_node_info();
}
248
# Hook invoked before each CRM/LRM work loop iteration; deliberately a
# no-op in the simulation base class, subclasses may override it.
sub loop_start_hook {
    my ($self, $starttime) = @_;

    # intentionally empty
}
254
# Hook invoked after each CRM/LRM work loop iteration; deliberately a
# no-op in the simulation base class, subclasses may override it.
sub loop_end_hook {
    my ($self) = @_;

    # intentionally empty
}
260
# Open a simulated watchdog for the local node; returns the backend's
# watchdog handle.
sub watchdog_open {
    my $self = shift;

    return $self->{hardware}->watchdog_open($self->nodename());
}
268
# Feed/renew the simulated watchdog identified by handle $wfh.
sub watchdog_update {
    my ($self, $wfh) = @_;

    return $self->{hardware}->watchdog_update($wfh);
}
274
# Close the simulated watchdog identified by handle $wfh.
sub watchdog_close {
    my ($self, $wfh) = @_;

    return $self->{hardware}->watchdog_close($wfh);
}
280
# The simulated environment is allowed to fork worker processes.
sub can_fork {
    my $self = shift;

    return 1;
}
286
# Simulate execution of a resource agent command for service $sid.
# $cd is the service's config entry (used for its 'node' field), $cmd is
# one of 'started', 'request_stop'/'stopped', 'migrate'/'relocate', and
# @params carries command arguments (the target node for migrate/relocate).
# Returns 0 on success; errors currently die (see fixme notes below).
sub exec_resource_agent {
    my ($self, $sid, $cd, $cmd, @params) = @_;

    my $hardware = $self->{hardware};

    my $nodename = $self->{nodename};

    # fixme: return valid_exit code (instead of using die)

    # per-node running-state map: $ss->{$sid} is true if the service runs here
    my $ss = $hardware->read_service_status($nodename);

    if ($cmd eq 'started') {

        # fixme: return valid_exit code
        die "service '$sid' not on this node" if $cd->{node} ne $nodename;

        if ($ss->{$sid}) {
            # already running - nothing to do
            return 0;
        }
        $self->log("info", "starting service $sid");

        $self->sleep(2); # simulated startup time

        $ss->{$sid} = 1;
        $hardware->write_service_status($nodename, $ss);

        $self->log("info", "service status $sid started");

        return 0;

    } elsif ($cmd eq 'request_stop' || $cmd eq 'stopped') {

        # fixme: return valid_exit code
        die "service '$sid' not on this node" if $cd->{node} ne $nodename;

        if (!$ss->{$sid}) {
            # already stopped - nothing to do
            return 0;
        }
        $self->log("info", "stopping service $sid");

        $self->sleep(2); # simulated shutdown time

        $ss->{$sid} = 0;
        $hardware->write_service_status($nodename, $ss);

        $self->log("info", "service status $sid stopped");

        return 0;

    } elsif ($cmd eq 'migrate' || $cmd eq 'relocate') {

        my $target = $params[0];
        die "$cmd '$sid' failed - missing target\n" if !defined($target);

        if ($cd->{node} eq $target) {
            # already migrated - nothing to do
            return 0;
        } elsif ($cd->{node} eq $nodename) {

            $self->log("info", "service $sid - start $cmd to node '$target'");

            if ($cmd eq 'relocate') {

                # relocate stops the service on the source node first
                if ($ss->{$sid}) {
                    $self->log("info", "stopping service $sid (relocate)");
                    $self->sleep(1); # time to stop service
                    $ss->{$sid} = 0;
                    $hardware->write_service_status($nodename, $ss);
                }

                # NOTE(review): logged even when the service was not running
                $self->log("info", "service status $sid stopped");

            } else {
                $self->sleep(2); # (live) migration time
            }

            $self->change_service_location($sid, $nodename, $target);
            $self->log("info", "service $sid - end $cmd to node '$target'");
            # ensure that the old node doesn't have the service anymore
            $ss->{$sid} = 0;
            $hardware->write_service_status($nodename, $ss);

            return 0;

        } else {
            die "migrate '$sid' failed - service is not on this node\n";
        }


    }

    # unknown resource agent command
    die "implement me (cmd '$cmd')";
}
380
381 1;