]> git.proxmox.com Git - pve-ha-manager.git/blob - src/PVE/HA/Sim/Env.pm
implement change_service_location
[pve-ha-manager.git] / src / PVE / HA / Sim / Env.pm
1 package PVE::HA::Sim::Env;
2
3 use strict;
4 use warnings;
5 use POSIX qw(strftime EINTR);
6 use Data::Dumper;
7 use JSON;
8 use IO::File;
9 use Fcntl qw(:DEFAULT :flock);
10
11 use PVE::HA::Tools;
12 use PVE::HA::Env;
13
# Create a simulated HA environment for one cluster node.
#
# $nodename - name of the simulated node this environment represents
# $hardware - simulation hardware backend (provides statusdir() etc.)
# $log_id   - identifier (e.g. daemon name) used to tag log output
sub new {
    my ($this, $nodename, $hardware, $log_id) = @_;

    die "missing nodename" if !$nodename;
    die "missing log_id" if !$log_id;

    my $class = ref($this) || $this;

    return bless {
        statusdir => $hardware->statusdir(),
        nodename => $nodename,
        hardware => $hardware,
        lock_timeout => 120, # seconds until a cluster lock expires
        log_id => $log_id,
    }, $class;
}
34
# Return the name of the simulated node this environment belongs to.
sub nodename {
    my $self = shift;

    return $self->{nodename};
}
40
# Acquire ($unlock false) or release ($unlock true) the named cluster-wide
# lock. Locks are simulated as entries in the shared 'cluster_locks' JSON
# file, and the whole read-modify-write runs under the hardware global lock.
#
# Returns 1 on success, 0 on failure.
# NOTE(review): releasing a lock that has no entry at all leaves $res unset
# and returns undef (falsy) - confirm callers treat that as failure before
# changing it.
sub sim_get_lock {
    my ($self, $lock_name, $unlock) = @_;

    # no lock operations without quorum
    return 0 if !$self->quorate();

    my $filename = "$self->{statusdir}/cluster_locks";

    my $code = sub {

        # current lock table; empty hash if the file does not exist yet
        my $data = PVE::HA::Tools::read_json_from_file($filename, {});

        my $res;

        my $nodename = $self->nodename();
        my $ctime = $self->get_time();

        if ($unlock) {

            if (my $d = $data->{$lock_name}) {
                my $tdiff = $ctime - $d->{time};

                if ($tdiff > $self->{lock_timeout}) {
                    # lock already expired - releasing succeeds trivially
                    # (the stale entry is kept until someone re-acquires it)
                    $res = 1;
                } elsif (($tdiff <= $self->{lock_timeout}) && ($d->{node} eq $nodename)) {
                    # we hold a still-valid lock - drop the entry
                    delete $data->{$lock_name};
                    $res = 1;
                } else {
                    # another node holds a valid lock - refuse
                    $res = 0;
                }
            }

        } else {

            if (my $d = $data->{$lock_name}) {

                my $tdiff = $ctime - $d->{time};

                if ($tdiff <= $self->{lock_timeout}) {
                    if ($d->{node} eq $nodename) {
                        # we already own the lock - just refresh its timestamp
                        $d->{time} = $ctime;
                        $res = 1;
                    } else {
                        # valid lock owned by another node
                        $res = 0;
                    }
                } else {
                    # lock expired - steal it
                    $self->log('info', "got lock '$lock_name'");
                    $d->{node} = $nodename;
                    $d->{time} = $ctime;
                    $res = 1;
                }

            } else {
                # no entry yet - create the lock for ourselves
                $data->{$lock_name} = {
                    time => $ctime,
                    node => $nodename,
                };
                $self->log('info', "got lock '$lock_name'");
                $res = 1;
            }
        }

        # always write back, even on failure (timestamp refresh etc.)
        PVE::HA::Tools::write_json_to_file($filename, $data);

        return $res;
    };

    # serialize against other simulated nodes via the hardware global lock
    return $self->{hardware}->global_lock($code);
}
109
# Load the CRM manager status (JSON) from the shared status directory.
# Yields an empty hash when nothing has been written yet.
sub read_manager_status {
    my $self = shift;

    return PVE::HA::Tools::read_json_from_file("$self->{statusdir}/manager_status", {});
}
117
# Persist the CRM manager status object as JSON in the status directory.
sub write_manager_status {
    my ($self, $status_obj) = @_;

    PVE::HA::Tools::write_json_to_file("$self->{statusdir}/manager_status", $status_obj);
}
125
# Read the LRM status of $node; defaults to the local node when no
# node name is given.
sub read_lrm_status {
    my ($self, $node) = @_;

    $node = $self->{nodename} unless defined $node;

    return $self->{hardware}->read_lrm_status($node);
}
133
# Write the LRM status object for the local node via the hardware layer.
sub write_lrm_status {
    my ($self, $status_obj) = @_;

    return $self->{hardware}->write_lrm_status($self->{nodename}, $status_obj);
}
141
# Report whether an HA service configuration exists at all.
sub service_config_exists {
    my $self = shift;

    return 1; # assume ha is always enabled here
}
147
# Fetch the simulated service configuration from the hardware layer.
sub read_service_config {
    my $self = shift;

    return $self->{hardware}->read_service_config();
}
153
# Fetch the simulated HA group configuration from the hardware layer.
sub read_group_config {
    my $self = shift;

    return $self->{hardware}->read_group_config();
}
159
# Move service $sid from $current_node to $new_node in the simulated
# cluster configuration; delegated to the hardware layer.
sub change_service_location {
    my ($self, $sid, $current_node, $new_node) = @_;

    my $hardware = $self->{hardware};

    return $hardware->change_service_location($sid, $current_node, $new_node);
}
165
# Append a CRM command to the simulated command queue.
sub queue_crm_commands {
    my ($self, $cmd) = @_;

    my $hardware = $self->{hardware};

    return $hardware->queue_crm_commands($cmd);
}
171
# Drain and return queued CRM commands from the hardware layer.
sub read_crm_commands {
    my $self = shift;

    return $self->{hardware}->read_crm_commands();
}
177
# Print one timestamped log line for this simulated daemon instance.
#
# $level - log priority string (e.g. 'info', 'err')
# $msg   - message text; any trailing newline is stripped
sub log {
    my ($self, $level, $msg) = @_;

    chomp $msg;

    my $time = $self->get_time();

    # Pass $msg as a plain %s argument instead of interpolating it into
    # the format string - otherwise '%' sequences inside the message are
    # treated as printf conversions and corrupt the output.
    printf("%-5s %5d %12s: %s\n", $level, $time, "$self->{nodename}/$self->{log_id}", $msg);
}
187
# Abstract: return the current (simulated) time in seconds.
# Subclasses must override this.
sub get_time {
    my $self = shift;

    die "implement in subclass";
}
193
# Abstract: pause/advance the simulation for $delay seconds.
# Subclasses must override this.
sub sleep {
    my $self = shift;
    my $delay = shift;

    die "implement in subclass";
}
199
# Abstract: pause/advance the simulation until $end_time is reached.
# Subclasses must override this.
sub sleep_until {
    my $self = shift;
    my $end_time = shift;

    die "implement in subclass";
}
205
# Try to acquire the cluster-wide HA manager lock.
sub get_ha_manager_lock {
    my $self = shift;

    return $self->sim_get_lock('ha_manager_lock');
}
211
# Build the per-node HA agent lock name; $node defaults to the local node.
sub get_ha_agent_lock_name {
    my ($self, $node) = @_;

    $node ||= $self->nodename();

    return "ha_agent_${node}_lock";
}
219
# Try to acquire the HA agent lock for $node (default: local node).
sub get_ha_agent_lock {
    my ($self, $node) = @_;

    my $lock_name = $self->get_ha_agent_lock_name($node);

    return $self->sim_get_lock($lock_name);
}
226
# Return true when the cluster is quorate *and* the local node is online.
sub quorate {
    my $self = shift;

    my ($node_info, $quorate) = $self->{hardware}->get_node_info();

    # an offline node can never be part of the quorate partition
    my $me = $self->nodename();
    return 0 if !$node_info->{$me}->{online};

    return $quorate;
}
236
# Return the per-node info hash and the cluster quorum flag, as provided
# by the hardware layer.
sub get_node_info {
    my $self = shift;

    return $self->{hardware}->get_node_info();
}
242
# Hook invoked at the start of each main-loop iteration with the loop
# start time. Intentionally a no-op here; subclasses may override it.
sub loop_start_hook {
    my ($self, $starttime) = @_;

    # do nothing, overwrite in subclass
}
248
# Hook invoked at the end of each main-loop iteration.
# Intentionally a no-op here; subclasses may override it.
sub loop_end_hook {
    my ($self) = @_;

    # do nothing, overwrite in subclass
}
254
# Open the simulated watchdog for the local node; returns the handle
# provided by the hardware layer.
sub watchdog_open {
    my $self = shift;

    return $self->{hardware}->watchdog_open($self->nodename());
}
262
# Refresh the simulated watchdog identified by handle $wfh.
sub watchdog_update {
    my ($self, $wfh) = @_;

    my $hardware = $self->{hardware};

    return $hardware->watchdog_update($wfh);
}
268
# Close the simulated watchdog identified by handle $wfh.
sub watchdog_close {
    my ($self, $wfh) = @_;

    my $hardware = $self->{hardware};

    return $hardware->watchdog_close($wfh);
}
274
# Tell callers that spawning worker processes is allowed in this
# environment.
sub can_fork {
    my $self = shift;

    return 1;
}
280
# Simulate the execution of a resource agent command for service $sid.
#
# $cd     - service configuration data (hash with at least a 'node' entry)
# $cmd    - 'started', 'request_stop', 'stopped', 'migrate' or 'relocate'
# @params - extra parameters; for migrate/relocate: the target node name
#
# Returns 0 on success; dies on errors.
sub exec_resource_agent {
    my ($self, $sid, $cd, $cmd, @params) = @_;

    my $hardware = $self->{hardware};

    my $nodename = $self->{nodename};

    # fixme: return valid_exit code (instead of using die)

    my $ss = $hardware->read_service_status($nodename);

    if ($cmd eq 'started') {

        # fixme: return valid_exit code
        die "service '$sid' not on this node" if $cd->{node} ne $nodename;

        # already running - nothing to do
        if ($ss->{$sid}) {
            return 0;
        }

        $self->log("info", "starting service $sid");

        $self->sleep(2); # simulate start-up time

        $ss->{$sid} = 1;
        $hardware->write_service_status($nodename, $ss);

        $self->log("info", "service status $sid started");

        return 0;

    } elsif ($cmd eq 'request_stop' || $cmd eq 'stopped') {

        # fixme: return valid_exit code
        die "service '$sid' not on this node" if $cd->{node} ne $nodename;

        # already stopped - nothing to do
        if (!$ss->{$sid}) {
            return 0;
        }

        $self->log("info", "stopping service $sid");

        $self->sleep(2); # simulate shutdown time

        $ss->{$sid} = 0;
        $hardware->write_service_status($nodename, $ss);

        $self->log("info", "service status $sid stopped");

        return 0;

    } elsif ($cmd eq 'migrate' || $cmd eq 'relocate') {

        my $target = $params[0];
        die "$cmd '$sid' failed - missing target\n" if !defined($target);

        if ($cd->{node} eq $target) {
            # already migrate
            return 0;
        } elsif ($cd->{node} eq $nodename) {

            $self->log("info", "service $sid - start $cmd to node '$target'");

            # a relocate stops the service before moving it
            if ($cmd eq 'relocate' && $ss->{$sid}) {
                $self->log("info", "stopping service $sid (relocate)");
                $self->sleep(1);
                $ss->{$sid} = 0;
                $hardware->write_service_status($nodename, $ss);
                $self->log("info", "service status $sid stopped");
            }

            $self->sleep(2); # simulate migration time

            # bugfix: change_service_location() expects
            # ($sid, $current_node, $new_node) - the current node was
            # missing, so the target ended up in the wrong parameter slot
            $self->change_service_location($sid, $nodename, $target);

            $self->log("info", "service $sid - end $cmd to node '$target'");

            return 0;

        } else {
            die "migrate '$sid' failed - service is not on this node\n";
        }
    }

    die "implement me (cmd '$cmd')";
}
365
366 1;