]>
Commit | Line | Data |
---|---|---|
87b82b15 DM |
1 | package PVE::HA::Sim::Env; |
2 | ||
3 | use strict; | |
4 | use warnings; | |
5 | use POSIX qw(strftime EINTR); | |
6 | use Data::Dumper; | |
f5c29173 | 7 | use JSON; |
87b82b15 DM |
8 | use IO::File; |
9 | use Fcntl qw(:DEFAULT :flock); | |
10 | ||
11 | use PVE::HA::Tools; | |
12 | use PVE::HA::Env; | |
13 | ||
# Create a new simulation environment instance.
#
# Parameters:
#   $nodename - name of the simulated cluster node (required)
#   $hardware - simulated hardware backend; must provide statusdir() (required)
#   $log_id   - identifier appended to log lines, e.g. 'crm' or 'lrm' (required)
#
# Returns a blessed PVE::HA::Sim::Env (or subclass) object.
sub new {
    my ($this, $nodename, $hardware, $log_id) = @_;

    die "missing nodename" if !$nodename;
    die "missing hardware" if !$hardware; # used unconditionally below
    die "missing log_id" if !$log_id;

    my $class = ref($this) || $this;

    my $self = bless {}, $class;

    $self->{statusdir} = $hardware->statusdir();
    $self->{nodename} = $nodename;

    $self->{hardware} = $hardware;
    # locks older than this (seconds) are considered expired (see sim_get_lock)
    $self->{lock_timeout} = 120;

    $self->{log_id} = $log_id;

    return $self;
}
34 | ||
# Accessor: name of the node this environment simulates.
sub nodename {
    my $self = shift;

    return $self->{nodename};
}
40 | ||
# Acquire, renew or release a named cluster-wide lock in the simulation.
#
# Parameters:
#   $lock_name - name of the lock (e.g. 'ha_manager_lock')
#   $unlock    - if true, release the lock instead of acquiring it
#
# Returns 1 on success, 0 on failure (node not quorate, or lock held by
# another node and not yet expired).
#
# Locks are stored as JSON in "$statusdir/cluster_locks"; an entry is
# considered expired once it is older than $self->{lock_timeout} seconds.
sub sim_get_lock {
    my ($self, $lock_name, $unlock) = @_;

    # lock operations require quorum
    return 0 if !$self->quorate();

    my $filename = "$self->{statusdir}/cluster_locks";

    my $code = sub {

        my $data = PVE::HA::Tools::read_json_from_file($filename, {});

        my $res;

        my $nodename = $self->nodename();
        my $ctime = $self->get_time();

        if ($unlock) {

            if (my $d = $data->{$lock_name}) {
                my $tdiff = $ctime - $d->{time};

                if ($tdiff > $self->{lock_timeout}) {
                    # lock already expired - treat release as successful
                    $res = 1;
                } elsif (($tdiff <= $self->{lock_timeout}) && ($d->{node} eq $nodename)) {
                    # we hold a valid lock - drop the entry
                    delete $data->{$lock_name};
                    $res = 1;
                } else {
                    # valid lock owned by another node - cannot release
                    $res = 0;
                }
            }
            # NOTE(review): if no entry exists, $res stays undef (falsy) -
            # presumably intentional "nothing to unlock" result; confirm.

        } else {

            if (my $d = $data->{$lock_name}) {

                my $tdiff = $ctime - $d->{time};

                if ($tdiff <= $self->{lock_timeout}) {
                    if ($d->{node} eq $nodename) {
                        # we already own the lock - renew the timestamp
                        $d->{time} = $ctime;
                        $res = 1;
                    } else {
                        # lock still valid and owned by another node
                        $res = 0;
                    }
                } else {
                    # lock expired - take it over
                    $self->log('info', "got lock '$lock_name'");
                    $d->{node} = $nodename;
                    $d->{time} = $ctime;
                    $res = 1;
                }

            } else {
                # no lock entry yet - create one owned by us
                $data->{$lock_name} = {
                    time => $ctime,
                    node => $nodename,
                };
                $self->log('info', "got lock '$lock_name'");
                $res = 1;
            }
        }

        # persist any changes before releasing the global lock
        PVE::HA::Tools::write_json_to_file($filename, $data);

        return $res;
    };

    # serialize all lock-file access through the hardware's global lock
    return $self->{hardware}->global_lock($code);
}
109 | ||
# Read the CRM manager status from the status directory.
# Returns a hash ref; an empty hash ref if no status was written yet.
sub read_manager_status {
    my $self = shift;

    return PVE::HA::Tools::read_json_from_file("$self->{statusdir}/manager_status", {});
}
117 | ||
# Persist $status_obj as the CRM manager status (JSON file in statusdir).
sub write_manager_status {
    my ($self, $status_obj) = @_;

    my $path = "$self->{statusdir}/manager_status";

    PVE::HA::Tools::write_json_to_file($path, $status_obj);
}
125 | ||
c4a221bc DM |
# Read the LRM status of $node; defaults to the local node when
# no node name is given.
sub read_lrm_status {
    my ($self, $node) = @_;

    $node = $self->{nodename} unless defined($node);

    return $self->{hardware}->read_lrm_status($node);
}
133 | ||
# Write $status_obj as the LRM status of the local node.
sub write_lrm_status {
    my ($self, $status_obj) = @_;

    return $self->{hardware}->write_lrm_status($self->{nodename}, $status_obj);
}
141 | ||
87b82b15 DM |
# Return the cluster-wide service (resource) configuration.
sub read_service_config {
    my $self = shift;

    return $self->{hardware}->read_service_config();
}
147 | ||
abc920b4 DM |
# Return the HA group configuration.
sub read_group_config {
    my $self = shift;

    return $self->{hardware}->read_group_config();
}
153 | ||
# Record that service $sid moved from $current_node to $new_node.
sub change_service_location {
    my ($self, $sid, $current_node, $new_node) = @_;

    my $hardware = $self->{hardware};

    return $hardware->change_service_location($sid, $current_node, $new_node);
}
159 | ||
3b996922 DM |
# Append command $cmd to the CRM command queue.
sub queue_crm_commands {
    my $self = shift;
    my ($cmd) = @_;

    return $self->{hardware}->queue_crm_commands($cmd);
}
165 | ||
# Fetch (and consume) queued CRM commands from the hardware backend.
sub read_crm_commands {
    my $self = shift;

    return $self->{hardware}->read_crm_commands();
}
171 | ||
87b82b15 DM |
# Print a timestamped log line to stdout.
#
# Parameters:
#   $level - log level string (e.g. 'info', 'err')
#   $msg   - message text; a trailing newline is stripped
#
# Fix: $msg is now passed as a printf argument instead of being
# interpolated into the format string, so '%' characters in the message
# no longer act as (broken) conversion specifiers.
sub log {
    my ($self, $level, $msg) = @_;

    chomp $msg;

    my $time = $self->get_time();

    printf("%-5s %5d %12s: %s\n", $level, $time, "$self->{nodename}/$self->{log_id}", $msg);
}
181 | ||
# Abstract: return the current (simulated) time; subclasses must override.
sub get_time {
    my $self = shift;

    die "implement in subclass";
}
187 | ||
# Abstract: advance simulated time by $delay seconds; subclasses must override.
sub sleep {
    my $self = shift;
    my ($delay) = @_;

    die "implement in subclass";
}
193 | ||
# Abstract: sleep until simulated time reaches $end_time; subclasses must override.
sub sleep_until {
    my $self = shift;
    my ($end_time) = @_;

    die "implement in subclass";
}
199 | ||
# Try to acquire the cluster-wide HA manager lock.
# Returns the result of sim_get_lock() (1 on success, 0 otherwise).
sub get_ha_manager_lock {
    my $self = shift;

    return $self->sim_get_lock('ha_manager_lock');
}
205 | ||
# Build the per-node HA agent lock name for $node
# (defaults to the local node).
sub get_ha_agent_lock_name {
    my ($self, $node) = @_;

    $node ||= $self->nodename();

    return "ha_agent_${node}_lock";
}
213 | ||
# Try to acquire the HA agent lock for $node (default: local node).
sub get_ha_agent_lock {
    my ($self, $node) = @_;

    return $self->sim_get_lock($self->get_ha_agent_lock_name($node));
}
220 | ||
# Return true when the cluster is quorate and the local node is online.
sub quorate {
    my $self = shift;

    my ($node_info, $quorate) = $self->{hardware}->get_node_info();

    # an offline node can never take part in the quorum
    my $node = $self->nodename();
    return 0 if !$node_info->{$node}->{online};

    return $quorate;
}
230 | ||
# Return (node_info_hash, quorate_flag) from the simulated hardware.
sub get_node_info {
    my $self = shift;

    return $self->{hardware}->get_node_info();
}
236 | ||
# Hook invoked at the beginning of each work-loop iteration.
# Intentionally a no-op here; subclasses overwrite it (e.g. to record
# the loop start time given in $starttime).
sub loop_start_hook {
    my ($self, $starttime) = @_;

    # do nothing, overwrite in subclass
}
242 | ||
# Hook invoked at the end of each work-loop iteration.
# Intentionally a no-op here; subclasses overwrite it.
sub loop_end_hook {
    my ($self) = @_;

    # do nothing, overwrite in subclass
}
248 | ||
# Open the simulated watchdog for the local node; returns a watchdog handle.
sub watchdog_open {
    my $self = shift;

    return $self->{hardware}->watchdog_open($self->nodename());
}
256 | ||
# Refresh the watchdog identified by handle $wfh.
sub watchdog_update {
    my $self = shift;
    my ($wfh) = @_;

    return $self->{hardware}->watchdog_update($wfh);
}
262 | ||
# Close (disarm) the watchdog identified by handle $wfh.
sub watchdog_close {
    my $self = shift;
    my ($wfh) = @_;

    return $self->{hardware}->watchdog_close($wfh);
}
268 | ||
1b3ee441 DM |
# The simulation environment always supports forking workers.
sub can_fork {
    my $self = shift;

    return 1;
}
274 | ||
# Execute a resource agent command for service $sid in the simulation.
#
# Parameters:
#   $sid    - service id
#   $cd     - service configuration hash (uses $cd->{node})
#   $cmd    - 'started', 'request_stop', 'stopped', 'migrate' or 'relocate'
#   @params - extra arguments; for migrate/relocate: the target node name
#
# Returns 0 on success; dies on errors (fixme: return exit codes instead).
#
# Fix: change_service_location() expects ($sid, $current_node, $new_node);
# the old call passed only ($sid, $target), leaving the new node undefined.
sub exec_resource_agent {
    my ($self, $sid, $cd, $cmd, @params) = @_;

    my $hardware = $self->{hardware};

    my $nodename = $self->{nodename};

    # fixme: return valid_exit code (instead of using die)

    my $ss = $hardware->read_service_status($nodename);

    if ($cmd eq 'started') {

        # fixme: return valid_exit code
        die "service '$sid' not on this node" if $cd->{node} ne $nodename;

        # already running - nothing to do
        if ($ss->{$sid}) {
            return 0;
        }

        $self->log("info", "starting service $sid");

        $self->sleep(2); # simulate startup time

        $ss->{$sid} = 1;
        $hardware->write_service_status($nodename, $ss);

        $self->log("info", "service status $sid started");

        return 0;

    } elsif ($cmd eq 'request_stop' || $cmd eq 'stopped') {

        # fixme: return valid_exit code
        die "service '$sid' not on this node" if $cd->{node} ne $nodename;

        # already stopped - nothing to do
        if (!$ss->{$sid}) {
            return 0;
        }

        $self->log("info", "stopping service $sid");

        $self->sleep(2); # simulate shutdown time

        $ss->{$sid} = 0;
        $hardware->write_service_status($nodename, $ss);

        $self->log("info", "service status $sid stopped");

        return 0;

    } elsif ($cmd eq 'migrate' || $cmd eq 'relocate') {

        my $target = $params[0];
        die "$cmd '$sid' failed - missing target\n" if !defined($target);

        if ($cd->{node} eq $target) {
            # already migrated
            return 0;
        } elsif ($cd->{node} eq $nodename) {

            $self->log("info", "service $sid - start $cmd to node '$target'");

            # relocation requires the service to be stopped first
            if ($cmd eq 'relocate' && $ss->{$sid}) {
                $self->log("info", "stopping service $sid (relocate)");
                $self->sleep(1);
                $ss->{$sid} = 0;
                $hardware->write_service_status($nodename, $ss);
                $self->log("info", "service status $sid stopped");
            }

            $self->sleep(2); # simulate migration time

            # pass the current node as well - see change_service_location()
            $self->change_service_location($sid, $cd->{node}, $target);
            $self->log("info", "service $sid - end $cmd to node '$target'");

            return 0;

        } else {
            die "migrate '$sid' failed - service is not on this node\n";
        }

    }

    die "implement me (cmd '$cmd')";
}
359 | ||
87b82b15 | 360 | 1; |