]>
Commit | Line | Data |
---|---|---|
3f7cacff DM |
1 | package PVE::ReplicationState; |
2 | ||
3 | use warnings; | |
4 | use strict; | |
5 | use JSON; | |
6 | ||
c292c8e9 | 7 | use PVE::INotify; |
c17dcb3e | 8 | use PVE::ProcFSTools; |
3f7cacff | 9 | use PVE::Tools; |
c292c8e9 DM |
10 | use PVE::CalendarEvent; |
11 | use PVE::Cluster; | |
90c07bf7 | 12 | use PVE::GuestHelpers; |
3f7cacff DM |
13 | use PVE::ReplicationConfig; |
14 | ||
3f7cacff DM |
15 | # Note: regression tests can overwrite $state_path for testing |
16 | our $state_path = "/var/lib/pve-manager/pve-replication-state.json"; | |
17 | our $state_lock = "/var/lib/pve-manager/pve-replication-state.lck"; | |
c292c8e9 DM |
18 | our $replicate_logdir = "/var/log/pve/replicate"; |
19 | ||
# Regression tests should overwrite this helper.
# Returns the path of the per-job replication log file.
sub job_logfile_name {
    my ($jobid) = @_;

    return join('/', $replicate_logdir, $jobid);
}
3f7cacff DM |
26 | |
# Note: We use PVE::Tools::file_set_contents to write the state file atomically,
# so read_state() always returns a consistent copy (even when not locked).

# Read the complete replication state object from $state_path.
# Returns a hash ref { $vmid => { $tid => $job_state } }, or an empty hash
# ref if the state file is missing or empty.
# Dies if the file content is not a single JSON object.
sub read_state {

    return {} if ! -e $state_path;

    my $raw = PVE::Tools::file_get_contents($state_path);

    return {} if $raw eq '';

    # untaint $raw - use /s so '.' also matches newlines, otherwise valid
    # multi-line (e.g. pretty-printed) JSON would be rejected as invalid
    if ($raw =~ m/^({.*})$/s) {
        return decode_json($1);
    }

    die "invalid json data in '$state_path'\n";
}
45 | ||
# Look up the state entry of a single job config inside $stateobj.
# A missing entry yields a fresh hash; the counters and timestamps below
# are always initialized, so callers can rely on them being defined.
sub extract_job_state {
    my ($stateobj, $jobcfg) = @_;

    my $plugin = PVE::ReplicationConfig->lookup($jobcfg->{type});
    my $tid = $plugin->get_unique_target_id($jobcfg);

    my $state = $stateobj->{$jobcfg->{guest}}->{$tid} // {};

    $state->{last_iteration} //= 0;
    $state->{last_try} //= 0; # last sync start time
    $state->{last_sync} //= 0; # last successful sync start time
    $state->{fail_count} //= 0;

    return $state;
}
64 | ||
210a5f79 DM |
# Move the state entry of $vmid from $old_target to $new_target and return
# the (possibly adjusted) per-vmid state hash, or {} if there is none.
# NOTE: the misspelling 'tranfer' in the sub name is kept on purpose -
# renaming would break existing callers.
sub extract_vmid_tranfer_state {
    my ($stateobj, $vmid, $old_target, $new_target) = @_;

    my $oldid = PVE::ReplicationConfig::Cluster->get_unique_target_id({ target => $old_target });
    my $newid = PVE::ReplicationConfig::Cluster->get_unique_target_id({ target => $new_target });

    my $vmstate = $stateobj->{$vmid};
    return {} if !defined($vmstate);

    if (defined($vmstate->{$oldid})) {
        $vmstate->{$newid} = delete $vmstate->{$oldid};
    }

    return $vmstate;
}
78 | ||
3f7cacff DM |
# Convenience wrapper: read the state file and extract the entry for $jobcfg.
sub read_job_state {
    my ($jobcfg) = @_;

    return extract_job_state(read_state(), $jobcfg);
}
85 | ||
# Update the state entry for a single job - the tuple ($vmid, $tid) is unique.
# Pass $state = undef to delete the job state completely.
# Lock order: guest_migration_lock first, then $state_lock.
sub write_job_state {
    my ($jobcfg, $state) = @_;

    my $plugin = PVE::ReplicationConfig->lookup($jobcfg->{type});
    my $vmid = $jobcfg->{guest};
    my $tid = $plugin->get_unique_target_id($jobcfg);

    my $do_update = sub {
        my $stateobj = read_state();

        if (!defined($state)) {
            delete $stateobj->{$vmid}->{$tid};
            # drop the per-guest hash once its last entry is gone
            delete $stateobj->{$vmid} if !%{$stateobj->{$vmid}};
        } else {
            $stateobj->{$vmid}->{$tid} = $state;
        }

        PVE::Tools::file_set_contents($state_path, encode_json($stateobj));
    };

    my $locked_update = sub {
        PVE::Tools::lock_file($state_lock, 10, $do_update);
        die $@ if $@;
    };

    # make sure we hold the guest_migration_lock during the update
    PVE::GuestHelpers::guest_migration_lock($vmid, undef, $locked_update);
}
117 | ||
55222f37 DM |
# Replace all job states belonging to a specific $vmid at once.
sub write_vmid_job_states {
    my ($vmid_state, $vmid) = @_;

    my $do_update = sub {
        my $stateobj = read_state();
        $stateobj->{$vmid} = $vmid_state;
        PVE::Tools::file_set_contents($state_path, encode_json($stateobj));
    };

    my $locked_update = sub {
        PVE::Tools::lock_file($state_lock, 10, $do_update);
        die $@ if $@;
    };

    # make sure we hold the guest_migration_lock during the update
    PVE::GuestHelpers::guest_migration_lock($vmid, undef, $locked_update);
}
136 | ||
c17dcb3e DM |
# Mark a replication job as started: remember the worker pid and its process
# start time (so stale entries can be detected later), then persist the state.
sub record_job_start {
    my ($jobcfg, $state, $start_time, $iteration) = @_;

    $state->{last_node} = PVE::INotify::nodename();
    $state->{last_iteration} = $iteration;
    $state->{last_try} = $start_time;
    $state->{storeid_list} //= [];

    my $pid = $$;
    $state->{pid} = $pid;
    $state->{ptime} = PVE::ProcFSTools::read_proc_starttime($pid);

    write_job_state($jobcfg, $state);
}
149 | ||
14849765 WL |
# Remove all replication state entries of a guest (e.g. after the guest
# itself was deleted).
sub delete_guest_states {
    my ($vmid) = @_;

    my $code = sub {
        my $stateobj = read_state();
        delete $stateobj->{$vmid};
        PVE::Tools::file_set_contents($state_path, encode_json($stateobj));
    };

    PVE::Tools::lock_file($state_lock, 10, $code);
    # state pruning is best effort, so do not die like the other writers do,
    # but a lock/update failure must not stay completely silent either
    warn $@ if $@;
}
161 | ||
c17dcb3e DM |
# Record the outcome of a finished replication run and persist it.
# On error: bump fail_count and store the message. On success: reset the
# error tracking, or drop the whole state entry if the job is flagged
# for removal.
sub record_job_end {
    my ($jobcfg, $state, $start_time, $duration, $err) = @_;

    $state->{duration} = $duration;
    delete $state->{pid};
    delete $state->{ptime};

    if (!$err) {
        if ($jobcfg->{remove_job}) {
            # job got removed - wipe its state entry completely
            write_job_state($jobcfg, undef);
            return;
        }
        $state->{last_sync} = $start_time;
        $state->{fail_count} = 0;
        delete $state->{error};
        write_job_state($jobcfg, $state);
        return;
    }

    chomp $err;
    $state->{fail_count}++;
    $state->{error} = "$err";
    write_job_state($jobcfg, $state);
}
185 | ||
52dcecfc DM |
# Build the snapshot name used by replication job $jobid for the sync that
# started at $last_sync. In list context the per-job prefix is returned as
# well (useful for matching and cleaning up old snapshots).
sub replication_snapshot_name {
    my ($jobid, $last_sync) = @_;

    my $prefix = "__replicate_${jobid}_";

    return wantarray ? ($prefix, "${prefix}${last_sync}__") : "${prefix}${last_sync}__";
}
194 | ||
44972014 DM |
# Drop state entries whose ($vmid, $tid) tuple is no longer referenced by
# the replication config, or whose guest vanished from the cluster.
sub purge_old_states {

    # NOTE: kept for the (currently disabled) local-node filter below
    my $local_node = PVE::INotify::nodename();

    my $cfg = PVE::ReplicationConfig->new();
    my $vms = PVE::Cluster::get_vmlist();

    # collect all ($vmid, $tid) tuples that are still in use
    my $used_tids = {};
    for my $jobid (sort keys %{$cfg->{ids}}) {
        my $jobcfg = $cfg->{ids}->{$jobid};
        my $plugin = PVE::ReplicationConfig->lookup($jobcfg->{type});
        my $tid = $plugin->get_unique_target_id($jobcfg);
        my $vmid = $jobcfg->{guest};
        $used_tids->{$vmid}->{$tid} = 1
            if defined($vms->{ids}->{$vmid}); # && $vms->{ids}->{$vmid}->{node} eq $local_node;
    }

    my $purge_state = sub {
        my $stateobj = read_state();
        my $filtered = {};

        for my $vmid (keys %$stateobj) {
            my $keep = $used_tids->{$vmid} // {};
            for my $tid (keys %{$stateobj->{$vmid}}) {
                next if !$keep->{$tid};
                $filtered->{$vmid}->{$tid} = $stateobj->{$vmid}->{$tid};
            }
        }

        PVE::Tools::file_set_contents($state_path, encode_json($filtered));
    };

    PVE::Tools::lock_file($state_lock, 10, $purge_state);
    die $@ if $@;
}
228 | ||
c292c8e9 DM |
# Compute the status of all replication jobs for guests on the local node.
# Returns a hash ref { $jobid => $jobcfg }, where each job config is
# augmented with 'state', 'id', 'vmtype' and the computed 'next_sync'
# timestamp (0 means "no sync scheduled").
sub job_status {

    my $local_node = PVE::INotify::nodename();

    my $jobs = {};

    my $stateobj = read_state();

    my $cfg = PVE::ReplicationConfig->new();

    my $vms = PVE::Cluster::get_vmlist();

    foreach my $jobid (sort keys %{$cfg->{ids}}) {
        my $jobcfg = $cfg->{ids}->{$jobid};
        my $vmid = $jobcfg->{guest};

        die "internal error - not implemented" if $jobcfg->{type} ne 'local';

        # skip non existing vms
        next if !$vms->{ids}->{$vmid};

        # only consider guests on the local node
        next if $vms->{ids}->{$vmid}->{node} ne $local_node;

        my $target = $jobcfg->{target};
        if (!$jobcfg->{remove_job}) {
            # never sync to local node
            next if $target eq $local_node;

            next if $jobcfg->{disable};
        }

        my $state = extract_job_state($stateobj, $jobcfg);
        $jobcfg->{state} = $state;
        $jobcfg->{id} = $jobid;
        $jobcfg->{vmtype} = $vms->{ids}->{$vmid}->{type};

        my $next_sync = 0;

        if ($jobcfg->{remove_job}) {
            $next_sync = 1; # lowest possible value
            # todo: consider fail_count? How many retries?
        } elsif (my $fail_count = $state->{fail_count}) {
            # job failed before: retry with a back-off, but only while the
            # target node is online. Note: $fail_count is necessarily truthy
            # here, so there is no need to re-check it in the condition.
            my $members = PVE::Cluster::get_members();
            if ($members->{$target} && $members->{$target}->{online}) {
                $next_sync = $state->{last_try} + 60*($fail_count < 3 ? 5*$fail_count : 30);
            }
        } else {
            # no previous failure: derive the next run from the job schedule
            my $schedule = $jobcfg->{schedule} || '*/15';
            my $calspec = PVE::CalendarEvent::parse_calendar_event($schedule);
            $next_sync = PVE::CalendarEvent::compute_next_event($calspec, $state->{last_try}) // 0;
        }

        $jobcfg->{next_sync} = $next_sync;

        $jobs->{$jobid} = $jobcfg;
    }

    return $jobs;
}
291 | ||
# Pick the replication job that should run next: among all runnable jobs,
# prefer the oldest last_iteration, then the earliest next_sync, then the
# lowest guest id. Jobs already handled in the current $iteration, and jobs
# not yet due at $start_time, are skipped. Returns undef if nothing is due.
sub get_next_job {
    my ($iteration, $start_time) = @_;

    my $jobs = job_status();

    my $by_priority = sub {
        my ($ja, $jb) = ($jobs->{$a}, $jobs->{$b});

        return $ja->{state}->{last_iteration} <=> $jb->{state}->{last_iteration}
            || $ja->{next_sync} <=> $jb->{next_sync}
            || $ja->{guest} <=> $jb->{guest};
    };

    for my $jobid (sort $by_priority keys %$jobs) {
        my $jobcfg = $jobs->{$jobid};
        next if $jobcfg->{state}->{last_iteration} >= $iteration;
        return $jobcfg if $jobcfg->{next_sync} && $start_time >= $jobcfg->{next_sync};
    }

    return undef;
}
319 | ||
3f7cacff | 320 | 1; |