DESTDIR=
-SUBDIRS = aplinfo PVE bin www services configs network-hooks
+SUBDIRS = aplinfo PVE bin www services configs network-hooks test
ARCH:=$(shell dpkg-architecture -qDEB_BUILD_ARCH)
GITVERSION:=$(shell git rev-parse HEAD)
all: ${SUBDIRS}
check:
- ${MAKE} -C bin/test check
+ ${MAKE} -C test check
.PHONY: dinstall
dinstall: ${DEB}
export NOVIEW=1
include /usr/share/pve-doc-generator/pve-doc-generator.mk
-export PERLLIB=..
-
-SUBDIRS = test
-
SERVICES = pvestatd pveproxy pvedaemon spiceproxy
CLITOOLS = vzdump pvesubscription pveceph pveam pvesr pvenode
install -m 0644 ${SERVICE_MANS} ${MAN8DIR}
for i in ${CLITOOLS}; do install -m 0644 -D $$i.bash-completion ${BASHCOMPLDIR}/$$i; done
for i in ${SERVICES}; do install -m 0644 -D $$i.service-bash-completion ${BASHCOMPLDIR}/$$i; done
- set -e && for i in ${SUBDIRS}; do ${MAKE} -C $$i $@; done
.PHONY: clean
clean:
make cleanup-docgen
rm -rf *~ *.tmp ${CLI_MANS} ${SERVICE_MANS} *.1.pod *.8.pod pvemailforward *.bash-completion *.service-bash-completion
- set -e && for i in ${SUBDIRS}; do ${MAKE} -C $$i $@; done
+++ /dev/null
-include ../../defines.mk
-
-all:
-
-check:
- ./balloontest.pl
- ./replication_test1.pl
- ./replication_test2.pl
- ./replication_test3.pl
- ./replication_test4.pl
- ./replication_test5.pl
- ./replication_test6.pl
-
-
-.PHONY: install
-install:
- # do nothing
-
-.PHONY: clean
-clean:
- rm -rf *~ .mocked_* *.tmp
+++ /dev/null
-package ReplicationTestEnv;
-
-use strict;
-use warnings;
-use JSON;
-use Clone 'clone';
-use File::Basename;
-
-use lib ('.', '../..');
-
-use Data::Dumper;
-
-use PVE::INotify;
-use PVE::Cluster;
-use PVE::Storage;
-use PVE::ReplicationConfig;
-use PVE::ReplicationState;
-use PVE::API2::Replication;
-use PVE::Replication;
-use PVE::QemuConfig;
-use PVE::LXC::Config;
-
-
-use Test::MockModule;
-
-our $mocked_nodename = 'node1';
-
-our $mocked_replication_jobs = {};
-
-my $pve_replication_config_module = Test::MockModule->new('PVE::ReplicationConfig');
-my $pve_replication_state_module = Test::MockModule->new('PVE::ReplicationState');
-
-our $mocked_vm_configs = {};
-
-our $mocked_ct_configs = {};
-
-my $mocked_get_members = sub {
- return {
- node1 => { online => 1 },
- node2 => { online => 1 },
- node3 => { online => 1 },
- };
-};
-
-my $mocked_vmlist = sub {
- my $res = {};
-
- foreach my $id (keys %$mocked_ct_configs) {
- my $d = $mocked_ct_configs->{$id};
- $res->{$id} = { 'type' => 'lxc', 'node' => $d->{node}, 'version' => 1 };
- }
- foreach my $id (keys %$mocked_vm_configs) {
- my $d = $mocked_vm_configs->{$id};
- $res->{$id} = { 'type' => 'qemu', 'node' => $d->{node}, 'version' => 1 };
- }
-
- return { 'ids' => $res };
-};
-
-my $mocked_get_ssh_info = sub {
- my ($node, $network_cidr) = @_;
-
- return { node => $node };
-};
-
-my $mocked_ssh_info_to_command = sub {
- my ($info, @extra_options) = @_;
-
- return ['fake_ssh', $info->{name}, @extra_options];
-};
-
-my $statefile = ".mocked_repl_state";
-
-unlink $statefile;
-$PVE::ReplicationState::state_path = $statefile;
-$PVE::ReplicationState::state_lock = ".mocked_repl_state_lock";
-$PVE::API2::Replication::pvesr_lock_path = ".mocked_pvesr_lock";
-$PVE::GuestHelpers::lockdir = ".mocked_pve-manager_lock";
-
-if (!mkdir($PVE::GuestHelpers::lockdir) && !$!{EEXIST}) {
- # If we cannot create the guest helper lockdir we'll loop endlessly, so die
- # if it fails.
- die "mkdir($PVE::GuestHelpers::lockdir): $!\n";
-}
-
-my $pve_cluster_module = Test::MockModule->new('PVE::Cluster');
-
-my $pve_inotify_module = Test::MockModule->new('PVE::INotify');
-
-my $mocked_qemu_load_conf = sub {
- my ($class, $vmid, $node) = @_;
-
- $node = $mocked_nodename if !$node;
-
- my $conf = $mocked_vm_configs->{$vmid};
-
- die "no such vm '$vmid'" if !defined($conf);
- die "vm '$vmid' on wrong node" if $conf->{node} ne $node;
-
- return $conf;
-};
-
-my $pve_qemuserver_module = Test::MockModule->new('PVE::QemuServer');
-
-my $pve_qemuconfig_module = Test::MockModule->new('PVE::QemuConfig');
-
-my $mocked_lxc_load_conf = sub {
- my ($class, $vmid, $node) = @_;
-
- $node = $mocked_nodename if !$node;
-
- my $conf = $mocked_ct_configs->{$vmid};
-
- die "no such ct '$vmid'" if !defined($conf);
- die "ct '$vmid' on wrong node" if $conf->{node} ne $node;
-
- return $conf;
-};
-
-my $pve_lxc_config_module = Test::MockModule->new('PVE::LXC::Config');
-
-my $mocked_replication_config_new = sub {
-
- my $res = clone($mocked_replication_jobs);
-
- return bless { ids => $res }, 'PVE::ReplicationConfig';
-};
-
-my $mocked_storage_config = {
- ids => {
- local => {
- type => 'dir',
- shared => 0,
- content => {
- 'iso' => 1,
- 'backup' => 1,
- },
- path => "/var/lib/vz",
- },
- 'local-zfs' => {
- type => 'zfspool',
- pool => 'nonexistent-testpool',
- shared => 0,
- content => {
- 'images' => 1,
- 'rootdir' => 1
- },
- },
- },
-};
-
-my $pve_storage_module = Test::MockModule->new('PVE::Storage');
-
-my $mocked_storage_content = {};
-
-sub register_mocked_volid {
- my ($volid, $snapname) = @_;
-
- my ($storeid, $volname) = PVE::Storage::parse_volume_id($volid);
- my $scfg = $mocked_storage_config->{ids}->{$storeid} ||
- die "no such storage '$storeid'\n";
-
- my $d = $mocked_storage_content->{$storeid}->{$volname} //= {};
-
- $d->{$snapname} = 1 if $snapname;
-}
-
-my $mocked_volume_snapshot_list = sub {
- my ($cfg, $volid, $prefix) = @_;
-
- my ($storeid, $volname) = PVE::Storage::parse_volume_id($volid);
- my $snaps = [];
-
- if (my $d = $mocked_storage_content->{$storeid}->{$volname}) {
- $snaps = [keys %$d];
- }
-
- return $snaps;
-};
-
-my $mocked_volume_snapshot = sub {
- my ($cfg, $volid, $snap) = @_;
-
- my ($storeid, $volname) = PVE::Storage::parse_volume_id($volid);
-
- my $d = $mocked_storage_content->{$storeid}->{$volname};
- die "no such volid '$volid'\n" if !$d;
- $d->{$snap} = 1;
-};
-
-my $mocked_volume_snapshot_delete = sub {
- my ($cfg, $volid, $snap, $running) = @_;
-
- my ($storeid, $volname) = PVE::Storage::parse_volume_id($volid);
- my $d = $mocked_storage_content->{$storeid}->{$volname};
- die "no such volid '$volid'\n" if !$d;
- delete $d->{$snap} || die "no such snapshot '$snap' on '$volid'\n";
-};
-
-my $pve_replication_module = Test::MockModule->new('PVE::Replication');
-
-my $mocked_job_logfile_name = sub {
- my ($jobid) = @_;
-
- return ".mocked_replication_log_$jobid";
-};
-
-my $mocked_log_time = 0;
-
-my $mocked_get_log_time = sub {
- return $mocked_log_time;
-};
-
-my $locks = {};
-
-my $mocked_cfs_lock_file = sub {
- my ($filename, $timeout, $code, @param) = @_;
-
- die "$filename already locked\n" if ($locks->{$filename});
-
- $locks->{$filename} = 1;
-
- my $res = $code->(@param);
-
- delete $locks->{$filename};
-
- return $res;
-};
-
-my $mocked_cfs_write_file = sub {
- my ($filename, $cfg) = @_;
-
- die "wrong file - $filename\n" if $filename ne 'replication.cfg';
-
- $cfg->write_config(); # checks but no actual write to pmxcfs
-};
-
-sub setup {
- $pve_replication_state_module->mock(job_logfile_name => $mocked_job_logfile_name);
- $pve_replication_module->mock(get_log_time => $mocked_get_log_time);
-
- $pve_storage_module->mock(config => sub { return $mocked_storage_config; });
- $pve_storage_module->mock(volume_snapshot_list => $mocked_volume_snapshot_list);
- $pve_storage_module->mock(volume_snapshot => $mocked_volume_snapshot);
- $pve_storage_module->mock(volume_snapshot_delete => $mocked_volume_snapshot_delete);
-
- $pve_replication_config_module->mock(
- new => $mocked_replication_config_new,
- lock => sub { $mocked_cfs_lock_file->('replication.cfg', undef, $_[0]); },
- write => sub { $mocked_cfs_write_file->('replication.cfg', $_[0]); },
- );
- $pve_qemuserver_module->mock(check_running => sub { return 0; });
- $pve_qemuconfig_module->mock(load_config => $mocked_qemu_load_conf);
-
- $pve_lxc_config_module->mock(load_config => $mocked_lxc_load_conf);
-
- $pve_cluster_module->mock(
- get_ssh_info => $mocked_get_ssh_info,
- ssh_info_to_command => $mocked_ssh_info_to_command,
- get_vmlist => sub { return $mocked_vmlist->(); },
- get_members => $mocked_get_members,
- cfs_update => sub {},
- cfs_lock_file => $mocked_cfs_lock_file,
- cfs_write_file => $mocked_cfs_write_file,
- );
- $pve_inotify_module->mock('nodename' => sub { return $mocked_nodename; });
-};
-
-# code to generate/conpare test logs
-
-my $logname;
-my $logfh;
-
-sub openlog {
- my ($filename) = @_;
-
- if (!$filename) {
- # compute from $0
- $filename = basename($0);
- if ($filename =~ m/^(\S+)\.pl$/) {
- $filename = "$1.log";
- } else {
- die "unable to compute log name for $0";
- }
- }
-
- die "log already open" if defined($logname);
-
- open (my $fh, ">", "$filename.tmp") ||
- die "unable to open log - $!";
-
- $logname = $filename;
- $logfh = $fh;
-}
-
-sub commit_log {
-
- close($logfh);
-
- if (-f $logname) {
- my $diff = `diff -u '$logname' '$logname.tmp'`;
- if ($diff) {
- warn "got unexpeted output\n";
- print "# diff -u '$logname' '$logname.tmp'\n";
- print $diff;
- exit(-1);
- }
- } else {
- rename("$logname.tmp", $logname) || die "rename log failed - $!";
- }
-}
-
-my $status;
-
-# helper to track job status
-sub track_jobs {
- my ($ctime) = @_;
-
- $mocked_log_time = $ctime;
-
- my $logmsg = sub {
- my ($msg) = @_;
-
- print "$msg\n";
- print $logfh "$msg\n";
- };
-
- if (!$status) {
- $status = PVE::ReplicationState::job_status();
- foreach my $jobid (sort keys %$status) {
- my $jobcfg = $status->{$jobid};
- $logmsg->("$ctime $jobid: new job next_sync => $jobcfg->{next_sync}");
- }
- }
-
- PVE::API2::Replication::run_jobs($ctime, $logmsg, 1);
-
- my $new = PVE::ReplicationState::job_status();
-
- # detect removed jobs
- foreach my $jobid (sort keys %$status) {
- if (!$new->{$jobid}) {
- $logmsg->("$ctime $jobid: vanished job");
- }
- }
-
- foreach my $jobid (sort keys %$new) {
- my $jobcfg = $new->{$jobid};
- my $oldcfg = $status->{$jobid};
- if (!$oldcfg) {
- $logmsg->("$ctime $jobid: new job next_sync => $jobcfg->{next_sync}");
- next; # no old state to compare
- } else {
- foreach my $k (qw(target guest vmtype next_sync)) {
- my $changes = '';
- if ($oldcfg->{$k} ne $jobcfg->{$k}) {
- $changes .= ', ' if $changes;
- $changes .= "$k => $jobcfg->{$k}";
- }
- $logmsg->("$ctime $jobid: changed config $changes") if $changes;
- }
- }
-
- my $oldstate = $oldcfg->{state};
-
- my $state = $jobcfg->{state};
-
- my $changes = '';
- foreach my $k (qw(last_node last_try last_sync fail_count error)) {
- if (($oldstate->{$k} // '') ne ($state->{$k} // '')) {
- my $value = $state->{$k} // '';
- chomp $value;
- $changes .= ', ' if $changes;
- $changes .= "$k => $value";
- }
- }
- $logmsg->("$ctime $jobid: changed state $changes") if $changes;
-
- my $old_storeid_list = $oldstate->{storeid_list};
- my $storeid_list = $state->{storeid_list};
-
- my $storeid_list_changes = 0;
- foreach my $storeid (@$storeid_list) {
- next if grep { $_ eq $storeid } @$old_storeid_list;
- $storeid_list_changes = 1;
- }
-
- foreach my $storeid (@$old_storeid_list) {
- next if grep { $_ eq $storeid } @$storeid_list;
- $storeid_list_changes = 1;
- }
-
- $logmsg->("$ctime $jobid: changed storeid list " . join(',', @$storeid_list))
- if $storeid_list_changes;
- }
- $status = $new;
-}
-
-
-1;
+++ /dev/null
-#!/usr/bin/perl -w
-
-use lib qw(../../);
-use strict;
-use Storable qw(dclone);
-use Data::Dumper;
-use PVE::AutoBalloon;
-
-my $debug = 0;
-
-my $test_status1 = {
- 100 => {
- maxmem => GB(2),
- shares => 2000,
- balloon => GB(1),
- balloon_min => GB(1),
- freemem => MB(0),
- },
- 101 => {
- maxmem => GB(2),
- shares => 1000,
- balloon => GB(1),
- balloon_min => GB(1),
- freemem => MB(0),
- },
-};
-
-abtest($test_status1, 0);
-abtest($test_status1, MB(90), 100 => MB(1060), 101 => MB(1030));
-abtest($test_status1, MB(150), 100 => MB(1100), 101 => MB(1050));
-abtest($test_status1, MB(270), 100 => MB(1100), 101 => MB(1090));
-absim($test_status1, MB(180), 100 => MB(1120), 101 => MB(1060));
-absim($test_status1, MB(270), 100 => MB(1180), 101 => MB(1090));
-absim($test_status1, MB(600), 100 => MB(1300), 101 => MB(1300));
-absim($test_status1, MB(900), 100 => MB(1600), 101 => MB(1300));
-
-my $test_status2 = {
- 100 => {
- maxmem => GB(2),
- shares => 2000,
- balloon => GB(2),
- balloon_min => GB(2),
- freemem => MB(0),
- },
- 101 => {
- maxmem => GB(2),
- shares => 1000,
- balloon => GB(1),
- balloon_min => GB(1),
- freemem => MB(0),
- },
-};
-
-abtest($test_status2, 0);
-abtest($test_status2, MB(18), 101 => MB(1018));
-abtest($test_status2, MB(500), 101 => MB(1100));
-
-my $test_status3 = {
- 100 => {
- maxmem => GB(2),
- shares => 2000,
- balloon => GB(2),
- balloon_min => GB(2),
- freemem => MB(0),
- },
- 101 => {
- maxmem => GB(2),
- shares => 1000,
- balloon => GB(1)+MB(7),
- balloon_min => GB(1),
- freemem => MB(0),
- },
- 102 => {
- maxmem => GB(2),
- shares => 1000,
- balloon => GB(1),
- balloon_min => GB(1),
- freemem => MB(512),
- },
-};
-
-abtest($test_status3, 0);
-abtest($test_status3, MB(11), 101 => MB(1018));
-abtest($test_status3, MB(80), 101 => MB(1087));
-abtest($test_status3, MB(200), 101 => MB(1107));
-
-my $status = absim($test_status3, MB(593), 101 => MB(1300), 102 => MB(1300));
-absim($status, -MB(200), 101 => MB(1200), 102 => MB(1200));
-absim($status, -MB(400), 101 => MB(1200), 102 => GB(1));
-absim($status, -MB(593), 101 => MB(1007), 102 => GB(1));
-exit (0);
-
-sub abapply {
- my ($vmstatus, $res, $sum) = @_;
-
- my $changes = 0;
- my $abschanges = 0;
- foreach my $vmid (keys %$res) {
- my $diff = $res->{$vmid} - $vmstatus->{$vmid}->{balloon};
- if ($diff != 0) {
- # fixme: adjust freemem ?
- $vmstatus->{$vmid}->{freemem} += $diff;
- $vmstatus->{$vmid}->{freemem} = 0 if $vmstatus->{$vmid}->{freemem} < 0;
- $vmstatus->{$vmid}->{balloon} = $res->{$vmid};
- $sum->{$vmid} = $res->{$vmid};
- $changes += $diff;
- $abschanges += $diff > 0 ? $diff : -$diff;
- }
- }
-
- return ($changes, $abschanges);
-}
-
-my $tcount = 0;
-sub absim {
- my ($vmstatus, $goal, %expect) = @_;
-
- $tcount++;
-
- print "BALLOON SIM $tcount\n" if $debug;
-
- $vmstatus = dclone($vmstatus); # do not change original
-
- my $changes = 0;
- my $abschanges = 0;
- my $sum = {};
- do {
- my $res = PVE::AutoBalloon::compute_alg1($vmstatus, $goal, MB(100), $debug);
- print Dumper($res) if $debug;
- ($changes, $abschanges) = abapply($vmstatus, $res, $sum);
- $goal -= $changes;
- } while ($abschanges);
-
- abcheck($sum, %expect);
-
- print "BALLOON SIM END\n" if $debug;
- print Dumper($vmstatus) if $debug;
-
- return $vmstatus;
-}
-
-sub abcheck {
- my ($res, %expect) = @_;
-
- foreach my $vmid (keys %expect) {
- my $ev = $expect{$vmid};
- if (defined ($res->{$vmid})) {
- die "T$tcount: wrong value for VM $vmid ($ev != $res->{$vmid})\n"
- if $ev != $res->{$vmid};
- } else {
- die "T$tcount: missing value for VM $vmid (extected $ev)\n";
- }
- }
-
- foreach my $vmid (keys %$res) {
- die "T$tcount: got unexpected result for $vmid\n"
- if (defined($res->{$vmid}) &&
- !defined($expect{$vmid}));
- }
-}
-
-sub abtest {
- my ($vmstatus, $goal, %expect) = @_;
-
- $tcount++;
-
- print "BALLOON TEST $tcount\n" if $debug;
- my $res = PVE::AutoBalloon::compute_alg1($vmstatus, $goal, MB(100), $debug);
- print Dumper($res) if $debug;
-
- abcheck($res, %expect);
-
- print "\n\n" if $debug;
-
- return $res;
-}
-
-sub MB {
- my $mb = shift;
- return $mb*1000*1000;
-};
-sub GB {
- my $gb = shift;
- return $gb*1000*1000*1000;
-};
+++ /dev/null
-#!/usr/bin/perl
-
-use lib '../../';
-use strict;
-use warnings;
-use Time::HiRes qw( usleep ualarm gettimeofday tv_interval );
-use PVE::INotify;
-use PVE::AccessControl;
-
-my $hostname = PVE::INotify::read_file("hostname");
-
-# normally you use username/password,
-# but we can simply create a ticket if we are root
-my $ticket = PVE::AccessControl::assemble_ticket('root@pam');
-
-my $cmd = "ab -c 10 -n 1000 -k -C 'PVEAuthCookie=$ticket' https://$hostname:8006/api2/json";
-print "$cmd\n";
-system($cmd) == 0 || die "command failed - $!\n";
+++ /dev/null
-#!/usr/bin/perl
-
-use lib '../../';
-use strict;
-use warnings;
-use Time::HiRes qw( usleep ualarm gettimeofday tv_interval );
-use PVE::INotify;
-use PVE::AccessControl;
-use Net::SSLeay qw(get_https post_https sslcat make_headers make_form);
-
-use Data::Dumper;
-
-my $hostname = PVE::INotify::read_file("hostname");
-
-# normally you use username/password,
-# but we can simply create a ticket if we are root
-my $ticket = PVE::AccessControl::assemble_ticket('root@pam');
-
-my $wcount = 10;
-my $qcount = 100;
-
-sub test_rpc {
- my ($host) = @_;
-
- for (my $i = 0; $i < $qcount; $i++) {
- eval {
- my ($page, $response, %reply_headers)
- = get_https($host, 8006, '/api2/json',
- make_headers(Cookie => "PVEAuthCookie=$ticket"));
- die "$response\n" if $response !~ m/200 OK/;
- };
-
- my $err = $@;
-
- if ($err) {
-
- print "ERROR: $err\n";
- last;
- }
- }
-}
-
-sub run_tests {
- my ($host) = @_;
-
- my $workers;
-
- my $starttime = [gettimeofday];
-
- for (my $i = 0; $i < $wcount; $i++) {
- if (my $pid = fork ()) {
- $workers->{$pid} = 1;
- } else {
- test_rpc ($host);
- exit (0);
- }
- }
-
- # wait for children
- 1 while (wait > 0);
-
- my $elapsed = int(tv_interval ($starttime) * 1000);
-
- my $tpq = $elapsed / ($wcount*$qcount);
-
- print "$host: $tpq ms per query\n";
-}
-
-run_tests($hostname); # test 'pveproxy'
+++ /dev/null
-#!/usr/bin/perl
-
-# Note: Test if mockup from ReplicationTestEnv works
-
-use strict;
-use warnings;
-use JSON;
-
-use lib ('.', '../..');
-
-use Data::Dumper;
-
-use ReplicationTestEnv;
-use Test::More tests => 3;
-
-$ReplicationTestEnv::mocked_nodename = 'node1';
-
-my $testjob = {
- 'type' => 'local',
- 'target' => 'node1',
- 'guest' => 900,
-};
-
-$ReplicationTestEnv::mocked_replication_jobs = {
- job_900_to_node1 => $testjob,
-};
-
-$ReplicationTestEnv::mocked_vm_configs = {
- 900 => {
- node => 'node1',
- snapshots => {},
- ide0 => 'local-lvm:vm-900-disk-1,size=4G',
- memory => 512,
- ide2 => 'none,media=cdrom',
- },
-};
-
-ReplicationTestEnv::setup();
-
-ok(PVE::INotify::nodename() eq 'node1');
-
-my $list = PVE::Cluster::get_vmlist();
-is_deeply($list, { ids => {900 => { node => 'node1', type => 'qemu', version => 1}}});
-my $cfg = PVE::ReplicationConfig->new();
-is_deeply($cfg, { ids => { job_900_to_node1 => $testjob }});
-
-exit(0);
+++ /dev/null
-#!/usr/bin/perl
-
-# Note: Test replication scheduler
-
-use strict;
-use warnings;
-use JSON;
-
-use lib ('.', '../..');
-
-use Data::Dumper;
-
-use Test::MockModule;
-use ReplicationTestEnv;
-use Test::More tests => 1;
-
-use PVE::API2::Replication;
-
-$ReplicationTestEnv::mocked_nodename = 'node1';
-
-my $schedule = [];
-
-my $mocked_replicate = sub {
- my ($guest_class, $jobcfg, $state, $start_time, $logfunc) = @_;
-
- push @$schedule, {
- id => $jobcfg->{id},
- guest => $jobcfg->{guest},
- vmtype => $jobcfg->{vmtype},
- guest_class => $guest_class,
- last_sync => $state->{last_sync},
- start => $start_time,
- };
-};
-
-my $pve_replication_module = Test::MockModule->new('PVE::Replication');
-$pve_replication_module->mock(replicate => $mocked_replicate);
-
-$ReplicationTestEnv::mocked_replication_jobs = {
- '900-1_to_node2' => {
- 'type' => 'local',
- 'target' => 'node2',
- 'guest' => 900,
- },
- '900-2_to_node1' => {
- 'type' => 'local',
- 'target' => 'node1', # local node, job should be skipped
- 'guest' => 900,
- },
-};
-
-$ReplicationTestEnv::mocked_vm_configs = {
- 900 => {
- node => 'node1',
- snapshots => {},
- ide0 => 'local-lvm:vm-900-disk-1,size=4G',
- memory => 512,
- ide2 => 'none,media=cdrom',
- },
-};
-
-ReplicationTestEnv::setup();
-
-for (my $i = 0; $i < 61; $i++) {
- PVE::API2::Replication::run_jobs($i*60);
-}
-
-#print Dumper($schedule);
-
-my $exptected_schedule = [
- {
- last_sync => 0,
- start => 900,
- vmtype => 'qemu',
- guest_class => 'PVE::QemuConfig',
- id => '900-1_to_node2',
- guest => 900
- },
- {
- last_sync => 900,
- start => 1800,
- vmtype => 'qemu',
- guest_class => 'PVE::QemuConfig',
- id => '900-1_to_node2',
- guest => 900,
- },
- {
- last_sync => 1800,
- start => 2700,
- vmtype => 'qemu',
- guest_class => 'PVE::QemuConfig',
- id => '900-1_to_node2',
- guest => 900
- },
- {
- last_sync => 2700,
- start => 3600,
- vmtype => 'qemu',
- guest_class => 'PVE::QemuConfig',
- id => '900-1_to_node2',
- guest => 900
- }
-];
-
-is_deeply($schedule, $exptected_schedule);
-
-exit(0);
+++ /dev/null
-#!/usr/bin/perl
-
-# Note: Try to run replication job to same node (should fail)
-
-use strict;
-use warnings;
-use JSON;
-
-use lib ('.', '../..');
-
-use Data::Dumper;
-
-use Test::MockModule;
-use ReplicationTestEnv;
-use PVE::API2::Replication;
-
-use Test::More;
-
-$ReplicationTestEnv::mocked_nodename = 'node1';
-
-my $testjob = {
- 'type' => 'local',
- 'target' => 'node1',
- 'guest' => 900,
-};
-
-$ReplicationTestEnv::mocked_replication_jobs = {
- job_900_to_node1 => {
- 'type' => 'local',
- 'target' => 'node1', # local node, job should be skipped
- 'guest' => 900,
- },
-};
-
-$ReplicationTestEnv::mocked_vm_configs = {
- 900 => {
- node => 'node1',
- snapshots => {},
- ide0 => 'local-lvm:vm-900-disk-1,size=4G',
- memory => 512,
- ide2 => 'none,media=cdrom',
- },
-};
-
-ReplicationTestEnv::setup();
-
-eval { PVE::API2::Replication::run_single_job('job_900_to_node1', 1000); };
-my $err = $@;
-
-is($err, "unable to sync to local node\n", "test error message");
-
-done_testing();
+++ /dev/null
-1000 job_900_to_node2: new job next_sync => 900
-1000 job_900_to_node2: start replication job
-1000 job_900_to_node2: end replication job with error: faked replication error
-1000 job_900_to_node2: changed config next_sync => 1300
-1000 job_900_to_node2: changed state last_node => node1, last_try => 1000, fail_count => 1, error => faked replication error
-1300 job_900_to_node2: start replication job
-1300 job_900_to_node2: end replication job with error: faked replication error
-1300 job_900_to_node2: changed config next_sync => 1900
-1300 job_900_to_node2: changed state last_try => 1300, fail_count => 2
-1900 job_900_to_node2: start replication job
-1900 job_900_to_node2: end replication job with error: faked replication error
-1900 job_900_to_node2: changed config next_sync => 3700
-1900 job_900_to_node2: changed state last_try => 1900, fail_count => 3
-3700 job_900_to_node2: start replication job
-3700 job_900_to_node2: end replication job with error: faked replication error
-3700 job_900_to_node2: changed config next_sync => 5500
-3700 job_900_to_node2: changed state last_try => 3700, fail_count => 4
-5500 job_900_to_node2: start replication job
-5500 job_900_to_node2: end replication job with error: faked replication error
-5500 job_900_to_node2: changed config next_sync => 7300
-5500 job_900_to_node2: changed state last_try => 5500, fail_count => 5
-7300 job_900_to_node2: start replication job
-7300 job_900_to_node2: end replication job with error: faked replication error
-7300 job_900_to_node2: changed config next_sync => 9100
-7300 job_900_to_node2: changed state last_try => 7300, fail_count => 6
+++ /dev/null
-#!/usr/bin/perl
-
-# Note: Test replication job failure
-
-use strict;
-use warnings;
-use JSON;
-
-use lib ('.', '../..');
-
-use Data::Dumper;
-
-use Test::MockModule;
-use ReplicationTestEnv;
-
-use PVE::Tools;
-
-$ReplicationTestEnv::mocked_nodename = 'node1';
-
-my $pve_replication_module = Test::MockModule->new('PVE::Replication');
-$pve_replication_module->mock(
- replicate => sub { die "faked replication error\n"; });
-
-my $testjob = {
- 'type' => 'local',
- 'target' => 'node1',
- 'guest' => 900,
-};
-
-$ReplicationTestEnv::mocked_replication_jobs = {
- job_900_to_node2 => {
- 'type' => 'local',
- 'target' => 'node2',
- 'guest' => 900,
- },
- job_900_to_node1 => {
- 'type' => 'local',
- 'target' => 'node1', # local node, job should be skipped
- 'guest' => 900,
- },
-};
-
-$ReplicationTestEnv::mocked_vm_configs = {
- 900 => {
- node => 'node1',
- snapshots => {},
- ide0 => 'local-lvm:vm-900-disk-1,size=4G',
- memory => 512,
- ide2 => 'none,media=cdrom',
- },
-};
-
-ReplicationTestEnv::setup();
-
-my $ctime = 1000;
-
-my $status;
-
-ReplicationTestEnv::openlog();
-
-for (my $i = 0; $i < 120; $i++) {
- ReplicationTestEnv::track_jobs($ctime);
- $ctime += 60;
-}
-
-ReplicationTestEnv::commit_log();
-
-exit(0);
+++ /dev/null
-1000 job_900_to_node2: new job next_sync => 900
-1000 job_900_to_node2: start replication job
-1000 job_900_to_node2: guest => VM 900, running => 0
-1000 job_900_to_node2: volumes => local-zfs:vm-900-disk-1
-1000 job_900_to_node2: create snapshot '__replicate_job_900_to_node2_1000__' on local-zfs:vm-900-disk-1
-1000 job_900_to_node2: full sync 'local-zfs:vm-900-disk-1' (__replicate_job_900_to_node2_1000__)
-1000 job_900_to_node2: end replication job
-1000 job_900_to_node2: changed config next_sync => 1800
-1000 job_900_to_node2: changed state last_node => node1, last_try => 1000, last_sync => 1000
-1000 job_900_to_node2: changed storeid list local-zfs
-1840 job_900_to_node2: start replication job
-1840 job_900_to_node2: guest => VM 900, running => 0
-1840 job_900_to_node2: volumes => local-zfs:vm-900-disk-1
-1840 job_900_to_node2: create snapshot '__replicate_job_900_to_node2_1840__' on local-zfs:vm-900-disk-1
-1840 job_900_to_node2: incremental sync 'local-zfs:vm-900-disk-1' (__replicate_job_900_to_node2_1000__ => __replicate_job_900_to_node2_1840__)
-1840 job_900_to_node2: delete previous replication snapshot '__replicate_job_900_to_node2_1000__' on local-zfs:vm-900-disk-1
-1840 job_900_to_node2: end replication job
-1840 job_900_to_node2: changed config next_sync => 2700
-1840 job_900_to_node2: changed state last_try => 1840, last_sync => 1840
-2740 job_900_to_node2: start replication job
-2740 job_900_to_node2: guest => VM 900, running => 0
-2740 job_900_to_node2: volumes => local-zfs:vm-900-disk-1,local-zfs:vm-900-disk-2
-2740 job_900_to_node2: create snapshot '__replicate_job_900_to_node2_2740__' on local-zfs:vm-900-disk-1
-2740 job_900_to_node2: create snapshot '__replicate_job_900_to_node2_2740__' on local-zfs:vm-900-disk-2
-2740 job_900_to_node2: delete previous replication snapshot '__replicate_job_900_to_node2_2740__' on local-zfs:vm-900-disk-1
-2740 job_900_to_node2: end replication job with error: no such volid 'local-zfs:vm-900-disk-2'
-2740 job_900_to_node2: changed config next_sync => 3040
-2740 job_900_to_node2: changed state last_try => 2740, fail_count => 1, error => no such volid 'local-zfs:vm-900-disk-2'
-3040 job_900_to_node2: start replication job
-3040 job_900_to_node2: guest => VM 900, running => 0
-3040 job_900_to_node2: volumes => local-zfs:vm-900-disk-1,local-zfs:vm-900-disk-2
-3040 job_900_to_node2: create snapshot '__replicate_job_900_to_node2_3040__' on local-zfs:vm-900-disk-1
-3040 job_900_to_node2: create snapshot '__replicate_job_900_to_node2_3040__' on local-zfs:vm-900-disk-2
-3040 job_900_to_node2: incremental sync 'local-zfs:vm-900-disk-1' (__replicate_job_900_to_node2_1840__ => __replicate_job_900_to_node2_3040__)
-3040 job_900_to_node2: full sync 'local-zfs:vm-900-disk-2' (__replicate_job_900_to_node2_3040__)
-3040 job_900_to_node2: delete previous replication snapshot '__replicate_job_900_to_node2_1840__' on local-zfs:vm-900-disk-1
-3040 job_900_to_node2: end replication job
-3040 job_900_to_node2: changed config next_sync => 3600
-3040 job_900_to_node2: changed state last_try => 3040, last_sync => 3040, fail_count => 0, error =>
-3640 job_900_to_node2: start replication job
-3640 job_900_to_node2: guest => VM 900, running => 0
-3640 job_900_to_node2: volumes => local-zfs:vm-900-disk-1,local-zfs:vm-900-disk-2
-3640 job_900_to_node2: create snapshot '__replicate_job_900_to_node2_3640__' on local-zfs:vm-900-disk-1
-3640 job_900_to_node2: create snapshot '__replicate_job_900_to_node2_3640__' on local-zfs:vm-900-disk-2
-3640 job_900_to_node2: incremental sync 'local-zfs:vm-900-disk-1' (__replicate_job_900_to_node2_3040__ => __replicate_job_900_to_node2_3640__)
-3640 job_900_to_node2: incremental sync 'local-zfs:vm-900-disk-2' (__replicate_job_900_to_node2_3040__ => __replicate_job_900_to_node2_3640__)
-3640 job_900_to_node2: delete previous replication snapshot '__replicate_job_900_to_node2_3040__' on local-zfs:vm-900-disk-1
-3640 job_900_to_node2: delete previous replication snapshot '__replicate_job_900_to_node2_3040__' on local-zfs:vm-900-disk-2
-3640 job_900_to_node2: end replication job
-3640 job_900_to_node2: changed config next_sync => 4500
-3640 job_900_to_node2: changed state last_try => 3640, last_sync => 3640
-3700 job_900_to_node2: start replication job
-3700 job_900_to_node2: guest => VM 900, running => 0
-3700 job_900_to_node2: volumes => local-zfs:vm-900-disk-1,local-zfs:vm-900-disk-2
-3700 job_900_to_node2: start job removal - mode 'full'
-3700 job_900_to_node2: delete stale replication snapshot '__replicate_job_900_to_node2_3640__' on local-zfs:vm-900-disk-1
-3700 job_900_to_node2: delete stale replication snapshot '__replicate_job_900_to_node2_3640__' on local-zfs:vm-900-disk-2
-3700 job_900_to_node2: job removed
-3700 job_900_to_node2: end replication job
-3700 job_900_to_node2: vanished job
+++ /dev/null
-#!/usr/bin/perl
-
-# Note:
-# 1.) Start replication job with single disk
-# 2.) add non-existent disk (replication fails)
-# 3.) create disk (replication continues).
-# 4.) remove job
-
-use strict;
-use warnings;
-use JSON;
-
-use lib ('.', '../..');
-
-use Data::Dumper;
-
-use Test::MockModule;
-use ReplicationTestEnv;
-
-use PVE::Tools;
-
-$ReplicationTestEnv::mocked_nodename = 'node1';
-
-use PVE::INotify;
-use PVE::Cluster;
-use PVE::QemuConfig;
-use PVE::QemuServer;
-use PVE::LXC::Config;
-use PVE::LXC;
-use PVE::Storage;
-
-my $replicated_volume_status = {};
-
-my $mocked_remote_prepare_local_job = sub {
- my ($ssh_info, $jobid, $vmid, $volumes, $storeid_list, $last_sync, $parent_snapname, $force) = @_;
-
- my $target = $ssh_info->{node};
-
- my $last_snapshots = {};
-
- return $last_snapshots if !defined($replicated_volume_status->{$target});
-
- my $last_sync_snapname = PVE::ReplicationState::replication_snapshot_name($jobid, $last_sync);
-
- foreach my $volid (keys %{$replicated_volume_status->{$target}}) {
- if (!grep { $_ eq $volid } @$volumes) {
- delete $replicated_volume_status->{$target}->{$volid};
- next;
- }
- my $snapname = $replicated_volume_status->{$target}->{$volid};
-
- $last_snapshots->{$volid}->{$snapname} = 1 if $last_sync_snapname eq $snapname;
- }
-
- return $last_snapshots;
-};
-
-my $mocked_remote_finalize_local_job = sub {
- my ($ssh_info, $jobid, $vmid, $volumes, $last_sync) = @_;
-
- # do nothing
-};
-
-my $mocked_replicate_volume = sub {
- my ($ssh_info, $storecfg, $volid, $base_snapshot, $sync_snapname) = @_;
-
- my $target = $ssh_info->{node};
-
- $replicated_volume_status->{$target}->{$volid} = $sync_snapname;
-};
-
-my $mocked_delete_job = sub {
- my ($jobid) = @_;
-
- delete $ReplicationTestEnv::mocked_replication_jobs->{$jobid};
-};
-
-my $pve_replication_config_module = Test::MockModule->new('PVE::ReplicationConfig');
-$pve_replication_config_module->mock(delete_job => $mocked_delete_job);
-
-my $pve_replication_module = Test::MockModule->new('PVE::Replication');
-$pve_replication_module->mock(
- remote_prepare_local_job => $mocked_remote_prepare_local_job,
- remote_finalize_local_job => $mocked_remote_finalize_local_job,
- replicate_volume => $mocked_replicate_volume);
-
-my $testjob = {
- 'type' => 'local',
- 'target' => 'node1',
- 'guest' => 900,
-};
-
-$ReplicationTestEnv::mocked_replication_jobs = {
- job_900_to_node2 => {
- 'type' => 'local',
- 'target' => 'node2',
- 'guest' => 900,
- },
-};
-
-$ReplicationTestEnv::mocked_vm_configs = {
- 900 => {
- node => 'node1',
- snapshots => {},
- ide0 => 'local-zfs:vm-900-disk-1,size=4G',
- memory => 512,
- ide2 => 'none,media=cdrom',
- },
-};
-
-ReplicationTestEnv::setup();
-
-ReplicationTestEnv::register_mocked_volid('local-zfs:vm-900-disk-1');
-
-my $ctime = 1000;
-
-my $status;
-
-ReplicationTestEnv::openlog();
-
-for (my $i = 0; $i < 15; $i++) {
- ReplicationTestEnv::track_jobs($ctime);
- $ctime += 60;
-}
-
-# add a new, disk (but disk does not exist, so replication fails)
-$ReplicationTestEnv::mocked_vm_configs->{900}->{ide1} = 'local-zfs:vm-900-disk-2,size=4G';
-for (my $i = 0; $i < 15; $i++) {
- ReplicationTestEnv::track_jobs($ctime);
- $ctime += 60;
-}
-
-# register disk, so replication should succeed
-ReplicationTestEnv::register_mocked_volid('local-zfs:vm-900-disk-2');
-for (my $i = 0; $i < 15; $i++) {
- ReplicationTestEnv::track_jobs($ctime);
- $ctime += 60;
-}
-
-# mark job for removal
-$ReplicationTestEnv::mocked_replication_jobs->{job_900_to_node2}->{remove_job} = 'full';
-for (my $i = 0; $i < 15; $i++) {
- ReplicationTestEnv::track_jobs($ctime);
- $ctime += 60;
-}
-
-
-
-ReplicationTestEnv::commit_log();
-
-exit(0);
+++ /dev/null
-1000 job_900_to_node1: new job next_sync => 1
-1000 job_900_to_node1: start replication job
-1000 job_900_to_node1: guest => VM 900, running => 0
-1000 job_900_to_node1: volumes => local-zfs:vm-900-disk-1
-1000 job_900_to_node1: start job removal - mode 'full'
-1000 job_900_to_node1: job removed
-1000 job_900_to_node1: end replication job
-1000 job_900_to_node1: vanished job
+++ /dev/null
-#!/usr/bin/perl
-
-# Note: Try to delete replication job with target on same node
-
-use strict;
-use warnings;
-use JSON;
-
-use lib ('.', '../..');
-
-use Data::Dumper;
-
-use Test::MockModule;
-use ReplicationTestEnv;
-
-$ReplicationTestEnv::mocked_nodename = 'node1';
-
-my $mocked_delete_job = sub {
- my ($jobid) = @_;
-
- delete $ReplicationTestEnv::mocked_replication_jobs->{$jobid};
-};
-
-my $pve_replication_config_module = Test::MockModule->new('PVE::ReplicationConfig');
-$pve_replication_config_module->mock(
- delete_job => $mocked_delete_job);
-
-my $testjob = {
- 'type' => 'local',
- 'target' => 'node1',
- 'guest' => 900,
-};
-
-$ReplicationTestEnv::mocked_replication_jobs = {
- job_900_to_node1 => {
- remove_job => 'full',
- type => 'local',
- target => 'node1', # local node, job should be skipped
- guest => 900,
- },
-};
-
-$ReplicationTestEnv::mocked_vm_configs = {
- 900 => {
- node => 'node1',
- snapshots => {},
- ide0 => 'local-zfs:vm-900-disk-1,size=4G',
- memory => 512,
- ide2 => 'none,media=cdrom',
- },
-};
-
-ReplicationTestEnv::setup();
-
-ReplicationTestEnv::openlog();
-
-my $ctime = 1000;
-for (my $i = 0; $i < 15; $i++) {
- ReplicationTestEnv::track_jobs($ctime);
- $ctime += 60;
-}
-
-ReplicationTestEnv::commit_log();
-
-exit(0);
--- /dev/null
+include ../defines.mk
+
+all:
+
+export PERLLIB=..
+
+check:
+ ./balloontest.pl
+ ./replication_test1.pl
+ ./replication_test2.pl
+ ./replication_test3.pl
+ ./replication_test4.pl
+ ./replication_test5.pl
+ ./replication_test6.pl
+
+.PHONY: install
+install:
+
+.PHONY: clean
+clean:
+ rm -rf *~ .mocked_* *.tmp
--- /dev/null
+package ReplicationTestEnv;
+
+use strict;
+use warnings;
+use JSON;
+use Clone 'clone';
+use File::Basename;
+
+use lib ('.', '../..');
+
+use Data::Dumper;
+
+use PVE::INotify;
+use PVE::Cluster;
+use PVE::Storage;
+use PVE::ReplicationConfig;
+use PVE::ReplicationState;
+use PVE::API2::Replication;
+use PVE::Replication;
+use PVE::QemuConfig;
+use PVE::LXC::Config;
+
+
+use Test::MockModule;
+
+our $mocked_nodename = 'node1';
+
+our $mocked_replication_jobs = {};
+
+my $pve_replication_config_module = Test::MockModule->new('PVE::ReplicationConfig');
+my $pve_replication_state_module = Test::MockModule->new('PVE::ReplicationState');
+
+our $mocked_vm_configs = {};
+
+our $mocked_ct_configs = {};
+
+my $mocked_get_members = sub {
+ return {
+ node1 => { online => 1 },
+ node2 => { online => 1 },
+ node3 => { online => 1 },
+ };
+};
+
+my $mocked_vmlist = sub {
+ my $res = {};
+
+ foreach my $id (keys %$mocked_ct_configs) {
+ my $d = $mocked_ct_configs->{$id};
+ $res->{$id} = { 'type' => 'lxc', 'node' => $d->{node}, 'version' => 1 };
+ }
+ foreach my $id (keys %$mocked_vm_configs) {
+ my $d = $mocked_vm_configs->{$id};
+ $res->{$id} = { 'type' => 'qemu', 'node' => $d->{node}, 'version' => 1 };
+ }
+
+ return { 'ids' => $res };
+};
+
+my $mocked_get_ssh_info = sub {
+ my ($node, $network_cidr) = @_;
+
+ return { node => $node };
+};
+
+my $mocked_ssh_info_to_command = sub {
+ my ($info, @extra_options) = @_;
+
+ return ['fake_ssh', $info->{name}, @extra_options];
+};
+
+my $statefile = ".mocked_repl_state";
+
+unlink $statefile;
+$PVE::ReplicationState::state_path = $statefile;
+$PVE::ReplicationState::state_lock = ".mocked_repl_state_lock";
+$PVE::API2::Replication::pvesr_lock_path = ".mocked_pvesr_lock";
+$PVE::GuestHelpers::lockdir = ".mocked_pve-manager_lock";
+
+if (!mkdir($PVE::GuestHelpers::lockdir) && !$!{EEXIST}) {
+ # If we cannot create the guest helper lockdir we'll loop endlessly, so die
+ # if it fails.
+ die "mkdir($PVE::GuestHelpers::lockdir): $!\n";
+}
+
+my $pve_cluster_module = Test::MockModule->new('PVE::Cluster');
+
+my $pve_inotify_module = Test::MockModule->new('PVE::INotify');
+
+my $mocked_qemu_load_conf = sub {
+ my ($class, $vmid, $node) = @_;
+
+ $node = $mocked_nodename if !$node;
+
+ my $conf = $mocked_vm_configs->{$vmid};
+
+ die "no such vm '$vmid'" if !defined($conf);
+ die "vm '$vmid' on wrong node" if $conf->{node} ne $node;
+
+ return $conf;
+};
+
+my $pve_qemuserver_module = Test::MockModule->new('PVE::QemuServer');
+
+my $pve_qemuconfig_module = Test::MockModule->new('PVE::QemuConfig');
+
+my $mocked_lxc_load_conf = sub {
+ my ($class, $vmid, $node) = @_;
+
+ $node = $mocked_nodename if !$node;
+
+ my $conf = $mocked_ct_configs->{$vmid};
+
+ die "no such ct '$vmid'" if !defined($conf);
+ die "ct '$vmid' on wrong node" if $conf->{node} ne $node;
+
+ return $conf;
+};
+
+my $pve_lxc_config_module = Test::MockModule->new('PVE::LXC::Config');
+
+my $mocked_replication_config_new = sub {
+
+ my $res = clone($mocked_replication_jobs);
+
+ return bless { ids => $res }, 'PVE::ReplicationConfig';
+};
+
+my $mocked_storage_config = {
+ ids => {
+ local => {
+ type => 'dir',
+ shared => 0,
+ content => {
+ 'iso' => 1,
+ 'backup' => 1,
+ },
+ path => "/var/lib/vz",
+ },
+ 'local-zfs' => {
+ type => 'zfspool',
+ pool => 'nonexistent-testpool',
+ shared => 0,
+ content => {
+ 'images' => 1,
+ 'rootdir' => 1
+ },
+ },
+ },
+};
+
+my $pve_storage_module = Test::MockModule->new('PVE::Storage');
+
+my $mocked_storage_content = {};
+
+sub register_mocked_volid {
+ my ($volid, $snapname) = @_;
+
+ my ($storeid, $volname) = PVE::Storage::parse_volume_id($volid);
+ my $scfg = $mocked_storage_config->{ids}->{$storeid} ||
+ die "no such storage '$storeid'\n";
+
+ my $d = $mocked_storage_content->{$storeid}->{$volname} //= {};
+
+ $d->{$snapname} = 1 if $snapname;
+}
+
+my $mocked_volume_snapshot_list = sub {
+ my ($cfg, $volid, $prefix) = @_;
+
+ my ($storeid, $volname) = PVE::Storage::parse_volume_id($volid);
+ my $snaps = [];
+
+ if (my $d = $mocked_storage_content->{$storeid}->{$volname}) {
+ $snaps = [keys %$d];
+ }
+
+ return $snaps;
+};
+
+my $mocked_volume_snapshot = sub {
+ my ($cfg, $volid, $snap) = @_;
+
+ my ($storeid, $volname) = PVE::Storage::parse_volume_id($volid);
+
+ my $d = $mocked_storage_content->{$storeid}->{$volname};
+ die "no such volid '$volid'\n" if !$d;
+ $d->{$snap} = 1;
+};
+
+my $mocked_volume_snapshot_delete = sub {
+ my ($cfg, $volid, $snap, $running) = @_;
+
+ my ($storeid, $volname) = PVE::Storage::parse_volume_id($volid);
+ my $d = $mocked_storage_content->{$storeid}->{$volname};
+ die "no such volid '$volid'\n" if !$d;
+ delete $d->{$snap} || die "no such snapshot '$snap' on '$volid'\n";
+};
+
+my $pve_replication_module = Test::MockModule->new('PVE::Replication');
+
+my $mocked_job_logfile_name = sub {
+ my ($jobid) = @_;
+
+ return ".mocked_replication_log_$jobid";
+};
+
+my $mocked_log_time = 0;
+
+my $mocked_get_log_time = sub {
+ return $mocked_log_time;
+};
+
+my $locks = {};
+
+my $mocked_cfs_lock_file = sub {
+ my ($filename, $timeout, $code, @param) = @_;
+
+ die "$filename already locked\n" if ($locks->{$filename});
+
+ $locks->{$filename} = 1;
+
+ my $res = $code->(@param);
+
+ delete $locks->{$filename};
+
+ return $res;
+};
+
+my $mocked_cfs_write_file = sub {
+ my ($filename, $cfg) = @_;
+
+ die "wrong file - $filename\n" if $filename ne 'replication.cfg';
+
+ $cfg->write_config(); # checks but no actual write to pmxcfs
+};
+
+sub setup {
+ $pve_replication_state_module->mock(job_logfile_name => $mocked_job_logfile_name);
+ $pve_replication_module->mock(get_log_time => $mocked_get_log_time);
+
+ $pve_storage_module->mock(config => sub { return $mocked_storage_config; });
+ $pve_storage_module->mock(volume_snapshot_list => $mocked_volume_snapshot_list);
+ $pve_storage_module->mock(volume_snapshot => $mocked_volume_snapshot);
+ $pve_storage_module->mock(volume_snapshot_delete => $mocked_volume_snapshot_delete);
+
+ $pve_replication_config_module->mock(
+ new => $mocked_replication_config_new,
+ lock => sub { $mocked_cfs_lock_file->('replication.cfg', undef, $_[0]); },
+ write => sub { $mocked_cfs_write_file->('replication.cfg', $_[0]); },
+ );
+ $pve_qemuserver_module->mock(check_running => sub { return 0; });
+ $pve_qemuconfig_module->mock(load_config => $mocked_qemu_load_conf);
+
+ $pve_lxc_config_module->mock(load_config => $mocked_lxc_load_conf);
+
+ $pve_cluster_module->mock(
+ get_ssh_info => $mocked_get_ssh_info,
+ ssh_info_to_command => $mocked_ssh_info_to_command,
+ get_vmlist => sub { return $mocked_vmlist->(); },
+ get_members => $mocked_get_members,
+ cfs_update => sub {},
+ cfs_lock_file => $mocked_cfs_lock_file,
+ cfs_write_file => $mocked_cfs_write_file,
+ );
+ $pve_inotify_module->mock('nodename' => sub { return $mocked_nodename; });
+};
+
+# code to generate/compare test logs
+
+my $logname;
+my $logfh;
+
+sub openlog {
+ my ($filename) = @_;
+
+ if (!$filename) {
+ # compute from $0
+ $filename = basename($0);
+ if ($filename =~ m/^(\S+)\.pl$/) {
+ $filename = "$1.log";
+ } else {
+ die "unable to compute log name for $0";
+ }
+ }
+
+ die "log already open" if defined($logname);
+
+ open (my $fh, ">", "$filename.tmp") ||
+ die "unable to open log - $!";
+
+ $logname = $filename;
+ $logfh = $fh;
+}
+
+sub commit_log {
+
+ close($logfh);
+
+ if (-f $logname) {
+ my $diff = `diff -u '$logname' '$logname.tmp'`;
+ if ($diff) {
+	    warn "got unexpected output\n";
+ print "# diff -u '$logname' '$logname.tmp'\n";
+ print $diff;
+ exit(-1);
+ }
+ } else {
+ rename("$logname.tmp", $logname) || die "rename log failed - $!";
+ }
+}
+
+my $status;
+
+# helper to track job status
+sub track_jobs {
+ my ($ctime) = @_;
+
+ $mocked_log_time = $ctime;
+
+ my $logmsg = sub {
+ my ($msg) = @_;
+
+ print "$msg\n";
+ print $logfh "$msg\n";
+ };
+
+ if (!$status) {
+ $status = PVE::ReplicationState::job_status();
+ foreach my $jobid (sort keys %$status) {
+ my $jobcfg = $status->{$jobid};
+ $logmsg->("$ctime $jobid: new job next_sync => $jobcfg->{next_sync}");
+ }
+ }
+
+ PVE::API2::Replication::run_jobs($ctime, $logmsg, 1);
+
+ my $new = PVE::ReplicationState::job_status();
+
+ # detect removed jobs
+ foreach my $jobid (sort keys %$status) {
+ if (!$new->{$jobid}) {
+ $logmsg->("$ctime $jobid: vanished job");
+ }
+ }
+
+ foreach my $jobid (sort keys %$new) {
+ my $jobcfg = $new->{$jobid};
+ my $oldcfg = $status->{$jobid};
+ if (!$oldcfg) {
+ $logmsg->("$ctime $jobid: new job next_sync => $jobcfg->{next_sync}");
+ next; # no old state to compare
+ } else {
+ foreach my $k (qw(target guest vmtype next_sync)) {
+ my $changes = '';
+ if ($oldcfg->{$k} ne $jobcfg->{$k}) {
+ $changes .= ', ' if $changes;
+ $changes .= "$k => $jobcfg->{$k}";
+ }
+ $logmsg->("$ctime $jobid: changed config $changes") if $changes;
+ }
+ }
+
+ my $oldstate = $oldcfg->{state};
+
+ my $state = $jobcfg->{state};
+
+ my $changes = '';
+ foreach my $k (qw(last_node last_try last_sync fail_count error)) {
+ if (($oldstate->{$k} // '') ne ($state->{$k} // '')) {
+ my $value = $state->{$k} // '';
+ chomp $value;
+ $changes .= ', ' if $changes;
+ $changes .= "$k => $value";
+ }
+ }
+ $logmsg->("$ctime $jobid: changed state $changes") if $changes;
+
+ my $old_storeid_list = $oldstate->{storeid_list};
+ my $storeid_list = $state->{storeid_list};
+
+ my $storeid_list_changes = 0;
+ foreach my $storeid (@$storeid_list) {
+ next if grep { $_ eq $storeid } @$old_storeid_list;
+ $storeid_list_changes = 1;
+ }
+
+ foreach my $storeid (@$old_storeid_list) {
+ next if grep { $_ eq $storeid } @$storeid_list;
+ $storeid_list_changes = 1;
+ }
+
+ $logmsg->("$ctime $jobid: changed storeid list " . join(',', @$storeid_list))
+ if $storeid_list_changes;
+ }
+ $status = $new;
+}
+
+
+1;
--- /dev/null
+#!/usr/bin/perl -w
+
+use lib qw(../../);
+use strict;
+use Storable qw(dclone);
+use Data::Dumper;
+use PVE::AutoBalloon;
+
+my $debug = 0;
+
+my $test_status1 = {
+ 100 => {
+ maxmem => GB(2),
+ shares => 2000,
+ balloon => GB(1),
+ balloon_min => GB(1),
+ freemem => MB(0),
+ },
+ 101 => {
+ maxmem => GB(2),
+ shares => 1000,
+ balloon => GB(1),
+ balloon_min => GB(1),
+ freemem => MB(0),
+ },
+};
+
+abtest($test_status1, 0);
+abtest($test_status1, MB(90), 100 => MB(1060), 101 => MB(1030));
+abtest($test_status1, MB(150), 100 => MB(1100), 101 => MB(1050));
+abtest($test_status1, MB(270), 100 => MB(1100), 101 => MB(1090));
+absim($test_status1, MB(180), 100 => MB(1120), 101 => MB(1060));
+absim($test_status1, MB(270), 100 => MB(1180), 101 => MB(1090));
+absim($test_status1, MB(600), 100 => MB(1300), 101 => MB(1300));
+absim($test_status1, MB(900), 100 => MB(1600), 101 => MB(1300));
+
+my $test_status2 = {
+ 100 => {
+ maxmem => GB(2),
+ shares => 2000,
+ balloon => GB(2),
+ balloon_min => GB(2),
+ freemem => MB(0),
+ },
+ 101 => {
+ maxmem => GB(2),
+ shares => 1000,
+ balloon => GB(1),
+ balloon_min => GB(1),
+ freemem => MB(0),
+ },
+};
+
+abtest($test_status2, 0);
+abtest($test_status2, MB(18), 101 => MB(1018));
+abtest($test_status2, MB(500), 101 => MB(1100));
+
+my $test_status3 = {
+ 100 => {
+ maxmem => GB(2),
+ shares => 2000,
+ balloon => GB(2),
+ balloon_min => GB(2),
+ freemem => MB(0),
+ },
+ 101 => {
+ maxmem => GB(2),
+ shares => 1000,
+ balloon => GB(1)+MB(7),
+ balloon_min => GB(1),
+ freemem => MB(0),
+ },
+ 102 => {
+ maxmem => GB(2),
+ shares => 1000,
+ balloon => GB(1),
+ balloon_min => GB(1),
+ freemem => MB(512),
+ },
+};
+
+abtest($test_status3, 0);
+abtest($test_status3, MB(11), 101 => MB(1018));
+abtest($test_status3, MB(80), 101 => MB(1087));
+abtest($test_status3, MB(200), 101 => MB(1107));
+
+my $status = absim($test_status3, MB(593), 101 => MB(1300), 102 => MB(1300));
+absim($status, -MB(200), 101 => MB(1200), 102 => MB(1200));
+absim($status, -MB(400), 101 => MB(1200), 102 => GB(1));
+absim($status, -MB(593), 101 => MB(1007), 102 => GB(1));
+exit (0);
+
+sub abapply {
+ my ($vmstatus, $res, $sum) = @_;
+
+ my $changes = 0;
+ my $abschanges = 0;
+ foreach my $vmid (keys %$res) {
+ my $diff = $res->{$vmid} - $vmstatus->{$vmid}->{balloon};
+ if ($diff != 0) {
+ # fixme: adjust freemem ?
+ $vmstatus->{$vmid}->{freemem} += $diff;
+ $vmstatus->{$vmid}->{freemem} = 0 if $vmstatus->{$vmid}->{freemem} < 0;
+ $vmstatus->{$vmid}->{balloon} = $res->{$vmid};
+ $sum->{$vmid} = $res->{$vmid};
+ $changes += $diff;
+ $abschanges += $diff > 0 ? $diff : -$diff;
+ }
+ }
+
+ return ($changes, $abschanges);
+}
+
+my $tcount = 0;
+sub absim {
+ my ($vmstatus, $goal, %expect) = @_;
+
+ $tcount++;
+
+ print "BALLOON SIM $tcount\n" if $debug;
+
+ $vmstatus = dclone($vmstatus); # do not change original
+
+ my $changes = 0;
+ my $abschanges = 0;
+ my $sum = {};
+ do {
+ my $res = PVE::AutoBalloon::compute_alg1($vmstatus, $goal, MB(100), $debug);
+ print Dumper($res) if $debug;
+ ($changes, $abschanges) = abapply($vmstatus, $res, $sum);
+ $goal -= $changes;
+ } while ($abschanges);
+
+ abcheck($sum, %expect);
+
+ print "BALLOON SIM END\n" if $debug;
+ print Dumper($vmstatus) if $debug;
+
+ return $vmstatus;
+}
+
+sub abcheck {
+ my ($res, %expect) = @_;
+
+ foreach my $vmid (keys %expect) {
+ my $ev = $expect{$vmid};
+ if (defined ($res->{$vmid})) {
+ die "T$tcount: wrong value for VM $vmid ($ev != $res->{$vmid})\n"
+ if $ev != $res->{$vmid};
+ } else {
+	    die "T$tcount: missing value for VM $vmid (expected $ev)\n";
+ }
+ }
+
+ foreach my $vmid (keys %$res) {
+ die "T$tcount: got unexpected result for $vmid\n"
+ if (defined($res->{$vmid}) &&
+ !defined($expect{$vmid}));
+ }
+}
+
+sub abtest {
+ my ($vmstatus, $goal, %expect) = @_;
+
+ $tcount++;
+
+ print "BALLOON TEST $tcount\n" if $debug;
+ my $res = PVE::AutoBalloon::compute_alg1($vmstatus, $goal, MB(100), $debug);
+ print Dumper($res) if $debug;
+
+ abcheck($res, %expect);
+
+ print "\n\n" if $debug;
+
+ return $res;
+}
+
+sub MB {
+ my $mb = shift;
+ return $mb*1000*1000;
+};
+sub GB {
+ my $gb = shift;
+ return $gb*1000*1000*1000;
+};
--- /dev/null
+#!/usr/bin/perl
+
+use lib '../../';
+use strict;
+use warnings;
+use Time::HiRes qw( usleep ualarm gettimeofday tv_interval );
+use PVE::INotify;
+use PVE::AccessControl;
+
+my $hostname = PVE::INotify::read_file("hostname");
+
+# normally you use username/password,
+# but we can simply create a ticket if we are root
+my $ticket = PVE::AccessControl::assemble_ticket('root@pam');
+
+my $cmd = "ab -c 10 -n 1000 -k -C 'PVEAuthCookie=$ticket' https://$hostname:8006/api2/json";
+print "$cmd\n";
+system($cmd) == 0 || die "command failed - $!\n";
--- /dev/null
+#!/usr/bin/perl
+
+use lib '../../';
+use strict;
+use warnings;
+use Time::HiRes qw( usleep ualarm gettimeofday tv_interval );
+use PVE::INotify;
+use PVE::AccessControl;
+use Net::SSLeay qw(get_https post_https sslcat make_headers make_form);
+
+use Data::Dumper;
+
+my $hostname = PVE::INotify::read_file("hostname");
+
+# normally you use username/password,
+# but we can simply create a ticket if we are root
+my $ticket = PVE::AccessControl::assemble_ticket('root@pam');
+
+my $wcount = 10;
+my $qcount = 100;
+
+sub test_rpc {
+ my ($host) = @_;
+
+ for (my $i = 0; $i < $qcount; $i++) {
+ eval {
+ my ($page, $response, %reply_headers)
+ = get_https($host, 8006, '/api2/json',
+ make_headers(Cookie => "PVEAuthCookie=$ticket"));
+ die "$response\n" if $response !~ m/200 OK/;
+ };
+
+ my $err = $@;
+
+ if ($err) {
+
+ print "ERROR: $err\n";
+ last;
+ }
+ }
+}
+
+sub run_tests {
+ my ($host) = @_;
+
+ my $workers;
+
+ my $starttime = [gettimeofday];
+
+ for (my $i = 0; $i < $wcount; $i++) {
+ if (my $pid = fork ()) {
+ $workers->{$pid} = 1;
+ } else {
+ test_rpc ($host);
+ exit (0);
+ }
+ }
+
+ # wait for children
+ 1 while (wait > 0);
+
+ my $elapsed = int(tv_interval ($starttime) * 1000);
+
+ my $tpq = $elapsed / ($wcount*$qcount);
+
+ print "$host: $tpq ms per query\n";
+}
+
+run_tests($hostname); # test 'pveproxy'
--- /dev/null
+#!/usr/bin/perl
+
+# Note: Test if mockup from ReplicationTestEnv works
+
+use strict;
+use warnings;
+use JSON;
+
+use lib ('.', '../..');
+
+use Data::Dumper;
+
+use ReplicationTestEnv;
+use Test::More tests => 3;
+
+$ReplicationTestEnv::mocked_nodename = 'node1';
+
+my $testjob = {
+ 'type' => 'local',
+ 'target' => 'node1',
+ 'guest' => 900,
+};
+
+$ReplicationTestEnv::mocked_replication_jobs = {
+ job_900_to_node1 => $testjob,
+};
+
+$ReplicationTestEnv::mocked_vm_configs = {
+ 900 => {
+ node => 'node1',
+ snapshots => {},
+ ide0 => 'local-lvm:vm-900-disk-1,size=4G',
+ memory => 512,
+ ide2 => 'none,media=cdrom',
+ },
+};
+
+ReplicationTestEnv::setup();
+
+ok(PVE::INotify::nodename() eq 'node1');
+
+my $list = PVE::Cluster::get_vmlist();
+is_deeply($list, { ids => {900 => { node => 'node1', type => 'qemu', version => 1}}});
+my $cfg = PVE::ReplicationConfig->new();
+is_deeply($cfg, { ids => { job_900_to_node1 => $testjob }});
+
+exit(0);
--- /dev/null
+#!/usr/bin/perl
+
+# Note: Test replication scheduler
+
+use strict;
+use warnings;
+use JSON;
+
+use lib ('.', '../..');
+
+use Data::Dumper;
+
+use Test::MockModule;
+use ReplicationTestEnv;
+use Test::More tests => 1;
+
+use PVE::API2::Replication;
+
+$ReplicationTestEnv::mocked_nodename = 'node1';
+
+my $schedule = [];
+
+my $mocked_replicate = sub {
+ my ($guest_class, $jobcfg, $state, $start_time, $logfunc) = @_;
+
+ push @$schedule, {
+ id => $jobcfg->{id},
+ guest => $jobcfg->{guest},
+ vmtype => $jobcfg->{vmtype},
+ guest_class => $guest_class,
+ last_sync => $state->{last_sync},
+ start => $start_time,
+ };
+};
+
+my $pve_replication_module = Test::MockModule->new('PVE::Replication');
+$pve_replication_module->mock(replicate => $mocked_replicate);
+
+$ReplicationTestEnv::mocked_replication_jobs = {
+ '900-1_to_node2' => {
+ 'type' => 'local',
+ 'target' => 'node2',
+ 'guest' => 900,
+ },
+ '900-2_to_node1' => {
+ 'type' => 'local',
+ 'target' => 'node1', # local node, job should be skipped
+ 'guest' => 900,
+ },
+};
+
+$ReplicationTestEnv::mocked_vm_configs = {
+ 900 => {
+ node => 'node1',
+ snapshots => {},
+ ide0 => 'local-lvm:vm-900-disk-1,size=4G',
+ memory => 512,
+ ide2 => 'none,media=cdrom',
+ },
+};
+
+ReplicationTestEnv::setup();
+
+for (my $i = 0; $i < 61; $i++) {
+ PVE::API2::Replication::run_jobs($i*60);
+}
+
+#print Dumper($schedule);
+
+my $exptected_schedule = [
+ {
+ last_sync => 0,
+ start => 900,
+ vmtype => 'qemu',
+ guest_class => 'PVE::QemuConfig',
+ id => '900-1_to_node2',
+ guest => 900
+ },
+ {
+ last_sync => 900,
+ start => 1800,
+ vmtype => 'qemu',
+ guest_class => 'PVE::QemuConfig',
+ id => '900-1_to_node2',
+ guest => 900,
+ },
+ {
+ last_sync => 1800,
+ start => 2700,
+ vmtype => 'qemu',
+ guest_class => 'PVE::QemuConfig',
+ id => '900-1_to_node2',
+ guest => 900
+ },
+ {
+ last_sync => 2700,
+ start => 3600,
+ vmtype => 'qemu',
+ guest_class => 'PVE::QemuConfig',
+ id => '900-1_to_node2',
+ guest => 900
+ }
+];
+
+is_deeply($schedule, $exptected_schedule);
+
+exit(0);
--- /dev/null
+#!/usr/bin/perl
+
+# Note: Try to run replication job to same node (should fail)
+
+use strict;
+use warnings;
+use JSON;
+
+use lib ('.', '../..');
+
+use Data::Dumper;
+
+use Test::MockModule;
+use ReplicationTestEnv;
+use PVE::API2::Replication;
+
+use Test::More;
+
+$ReplicationTestEnv::mocked_nodename = 'node1';
+
+my $testjob = {
+ 'type' => 'local',
+ 'target' => 'node1',
+ 'guest' => 900,
+};
+
+$ReplicationTestEnv::mocked_replication_jobs = {
+ job_900_to_node1 => {
+ 'type' => 'local',
+ 'target' => 'node1', # local node, job should be skipped
+ 'guest' => 900,
+ },
+};
+
+$ReplicationTestEnv::mocked_vm_configs = {
+ 900 => {
+ node => 'node1',
+ snapshots => {},
+ ide0 => 'local-lvm:vm-900-disk-1,size=4G',
+ memory => 512,
+ ide2 => 'none,media=cdrom',
+ },
+};
+
+ReplicationTestEnv::setup();
+
+eval { PVE::API2::Replication::run_single_job('job_900_to_node1', 1000); };
+my $err = $@;
+
+is($err, "unable to sync to local node\n", "test error message");
+
+done_testing();
--- /dev/null
+1000 job_900_to_node2: new job next_sync => 900
+1000 job_900_to_node2: start replication job
+1000 job_900_to_node2: end replication job with error: faked replication error
+1000 job_900_to_node2: changed config next_sync => 1300
+1000 job_900_to_node2: changed state last_node => node1, last_try => 1000, fail_count => 1, error => faked replication error
+1300 job_900_to_node2: start replication job
+1300 job_900_to_node2: end replication job with error: faked replication error
+1300 job_900_to_node2: changed config next_sync => 1900
+1300 job_900_to_node2: changed state last_try => 1300, fail_count => 2
+1900 job_900_to_node2: start replication job
+1900 job_900_to_node2: end replication job with error: faked replication error
+1900 job_900_to_node2: changed config next_sync => 3700
+1900 job_900_to_node2: changed state last_try => 1900, fail_count => 3
+3700 job_900_to_node2: start replication job
+3700 job_900_to_node2: end replication job with error: faked replication error
+3700 job_900_to_node2: changed config next_sync => 5500
+3700 job_900_to_node2: changed state last_try => 3700, fail_count => 4
+5500 job_900_to_node2: start replication job
+5500 job_900_to_node2: end replication job with error: faked replication error
+5500 job_900_to_node2: changed config next_sync => 7300
+5500 job_900_to_node2: changed state last_try => 5500, fail_count => 5
+7300 job_900_to_node2: start replication job
+7300 job_900_to_node2: end replication job with error: faked replication error
+7300 job_900_to_node2: changed config next_sync => 9100
+7300 job_900_to_node2: changed state last_try => 7300, fail_count => 6
--- /dev/null
+#!/usr/bin/perl
+
+# Note: Test replication job failure
+
+use strict;
+use warnings;
+use JSON;
+
+use lib ('.', '../..');
+
+use Data::Dumper;
+
+use Test::MockModule;
+use ReplicationTestEnv;
+
+use PVE::Tools;
+
+$ReplicationTestEnv::mocked_nodename = 'node1';
+
+my $pve_replication_module = Test::MockModule->new('PVE::Replication');
+$pve_replication_module->mock(
+ replicate => sub { die "faked replication error\n"; });
+
+my $testjob = {
+ 'type' => 'local',
+ 'target' => 'node1',
+ 'guest' => 900,
+};
+
+$ReplicationTestEnv::mocked_replication_jobs = {
+ job_900_to_node2 => {
+ 'type' => 'local',
+ 'target' => 'node2',
+ 'guest' => 900,
+ },
+ job_900_to_node1 => {
+ 'type' => 'local',
+ 'target' => 'node1', # local node, job should be skipped
+ 'guest' => 900,
+ },
+};
+
+$ReplicationTestEnv::mocked_vm_configs = {
+ 900 => {
+ node => 'node1',
+ snapshots => {},
+ ide0 => 'local-lvm:vm-900-disk-1,size=4G',
+ memory => 512,
+ ide2 => 'none,media=cdrom',
+ },
+};
+
+ReplicationTestEnv::setup();
+
+my $ctime = 1000;
+
+my $status;
+
+ReplicationTestEnv::openlog();
+
+for (my $i = 0; $i < 120; $i++) {
+ ReplicationTestEnv::track_jobs($ctime);
+ $ctime += 60;
+}
+
+ReplicationTestEnv::commit_log();
+
+exit(0);
--- /dev/null
+1000 job_900_to_node2: new job next_sync => 900
+1000 job_900_to_node2: start replication job
+1000 job_900_to_node2: guest => VM 900, running => 0
+1000 job_900_to_node2: volumes => local-zfs:vm-900-disk-1
+1000 job_900_to_node2: create snapshot '__replicate_job_900_to_node2_1000__' on local-zfs:vm-900-disk-1
+1000 job_900_to_node2: full sync 'local-zfs:vm-900-disk-1' (__replicate_job_900_to_node2_1000__)
+1000 job_900_to_node2: end replication job
+1000 job_900_to_node2: changed config next_sync => 1800
+1000 job_900_to_node2: changed state last_node => node1, last_try => 1000, last_sync => 1000
+1000 job_900_to_node2: changed storeid list local-zfs
+1840 job_900_to_node2: start replication job
+1840 job_900_to_node2: guest => VM 900, running => 0
+1840 job_900_to_node2: volumes => local-zfs:vm-900-disk-1
+1840 job_900_to_node2: create snapshot '__replicate_job_900_to_node2_1840__' on local-zfs:vm-900-disk-1
+1840 job_900_to_node2: incremental sync 'local-zfs:vm-900-disk-1' (__replicate_job_900_to_node2_1000__ => __replicate_job_900_to_node2_1840__)
+1840 job_900_to_node2: delete previous replication snapshot '__replicate_job_900_to_node2_1000__' on local-zfs:vm-900-disk-1
+1840 job_900_to_node2: end replication job
+1840 job_900_to_node2: changed config next_sync => 2700
+1840 job_900_to_node2: changed state last_try => 1840, last_sync => 1840
+2740 job_900_to_node2: start replication job
+2740 job_900_to_node2: guest => VM 900, running => 0
+2740 job_900_to_node2: volumes => local-zfs:vm-900-disk-1,local-zfs:vm-900-disk-2
+2740 job_900_to_node2: create snapshot '__replicate_job_900_to_node2_2740__' on local-zfs:vm-900-disk-1
+2740 job_900_to_node2: create snapshot '__replicate_job_900_to_node2_2740__' on local-zfs:vm-900-disk-2
+2740 job_900_to_node2: delete previous replication snapshot '__replicate_job_900_to_node2_2740__' on local-zfs:vm-900-disk-1
+2740 job_900_to_node2: end replication job with error: no such volid 'local-zfs:vm-900-disk-2'
+2740 job_900_to_node2: changed config next_sync => 3040
+2740 job_900_to_node2: changed state last_try => 2740, fail_count => 1, error => no such volid 'local-zfs:vm-900-disk-2'
+3040 job_900_to_node2: start replication job
+3040 job_900_to_node2: guest => VM 900, running => 0
+3040 job_900_to_node2: volumes => local-zfs:vm-900-disk-1,local-zfs:vm-900-disk-2
+3040 job_900_to_node2: create snapshot '__replicate_job_900_to_node2_3040__' on local-zfs:vm-900-disk-1
+3040 job_900_to_node2: create snapshot '__replicate_job_900_to_node2_3040__' on local-zfs:vm-900-disk-2
+3040 job_900_to_node2: incremental sync 'local-zfs:vm-900-disk-1' (__replicate_job_900_to_node2_1840__ => __replicate_job_900_to_node2_3040__)
+3040 job_900_to_node2: full sync 'local-zfs:vm-900-disk-2' (__replicate_job_900_to_node2_3040__)
+3040 job_900_to_node2: delete previous replication snapshot '__replicate_job_900_to_node2_1840__' on local-zfs:vm-900-disk-1
+3040 job_900_to_node2: end replication job
+3040 job_900_to_node2: changed config next_sync => 3600
+3040 job_900_to_node2: changed state last_try => 3040, last_sync => 3040, fail_count => 0, error =>
+3640 job_900_to_node2: start replication job
+3640 job_900_to_node2: guest => VM 900, running => 0
+3640 job_900_to_node2: volumes => local-zfs:vm-900-disk-1,local-zfs:vm-900-disk-2
+3640 job_900_to_node2: create snapshot '__replicate_job_900_to_node2_3640__' on local-zfs:vm-900-disk-1
+3640 job_900_to_node2: create snapshot '__replicate_job_900_to_node2_3640__' on local-zfs:vm-900-disk-2
+3640 job_900_to_node2: incremental sync 'local-zfs:vm-900-disk-1' (__replicate_job_900_to_node2_3040__ => __replicate_job_900_to_node2_3640__)
+3640 job_900_to_node2: incremental sync 'local-zfs:vm-900-disk-2' (__replicate_job_900_to_node2_3040__ => __replicate_job_900_to_node2_3640__)
+3640 job_900_to_node2: delete previous replication snapshot '__replicate_job_900_to_node2_3040__' on local-zfs:vm-900-disk-1
+3640 job_900_to_node2: delete previous replication snapshot '__replicate_job_900_to_node2_3040__' on local-zfs:vm-900-disk-2
+3640 job_900_to_node2: end replication job
+3640 job_900_to_node2: changed config next_sync => 4500
+3640 job_900_to_node2: changed state last_try => 3640, last_sync => 3640
+3700 job_900_to_node2: start replication job
+3700 job_900_to_node2: guest => VM 900, running => 0
+3700 job_900_to_node2: volumes => local-zfs:vm-900-disk-1,local-zfs:vm-900-disk-2
+3700 job_900_to_node2: start job removal - mode 'full'
+3700 job_900_to_node2: delete stale replication snapshot '__replicate_job_900_to_node2_3640__' on local-zfs:vm-900-disk-1
+3700 job_900_to_node2: delete stale replication snapshot '__replicate_job_900_to_node2_3640__' on local-zfs:vm-900-disk-2
+3700 job_900_to_node2: job removed
+3700 job_900_to_node2: end replication job
+3700 job_900_to_node2: vanished job
--- /dev/null
+#!/usr/bin/perl
+
+# Note:
+# 1.) Start replication job with single disk
+# 2.) add non-existent disk (replication fails)
+# 3.) create disk (replication continues).
+# 4.) remove job
+
+use strict;
+use warnings;
+use JSON;
+
+use lib ('.', '../..');
+
+use Data::Dumper;
+
+use Test::MockModule;
+use ReplicationTestEnv;
+
+use PVE::Tools;
+
+# run this test as cluster node 'node1'
+$ReplicationTestEnv::mocked_nodename = 'node1';
+
+use PVE::INotify;
+use PVE::Cluster;
+use PVE::QemuConfig;
+use PVE::QemuServer;
+use PVE::LXC::Config;
+use PVE::LXC;
+use PVE::Storage;
+
+# per-target replication state, maintained by the mocks below:
+# $replicated_volume_status->{$target}->{$volid} = <last sync snapshot name>
+my $replicated_volume_status = {};
+
+# mock for PVE::Replication::remote_prepare_local_job - instead of talking to
+# the remote node, answer from $replicated_volume_status: report snapshots that
+# match the expected last-sync snapshot name, and drop state for volumes that
+# are no longer part of the job
+my $mocked_remote_prepare_local_job = sub {
+ my ($ssh_info, $jobid, $vmid, $volumes, $storeid_list, $last_sync, $parent_snapname, $force) = @_;
+
+ my $target = $ssh_info->{node};
+
+ my $last_snapshots = {};
+
+ return $last_snapshots if !defined($replicated_volume_status->{$target});
+
+ my $last_sync_snapname = PVE::ReplicationState::replication_snapshot_name($jobid, $last_sync);
+
+ foreach my $volid (keys %{$replicated_volume_status->{$target}}) {
+ if (!grep { $_ eq $volid } @$volumes) {
+ delete $replicated_volume_status->{$target}->{$volid};
+ next;
+ }
+ my $snapname = $replicated_volume_status->{$target}->{$volid};
+
+ $last_snapshots->{$volid}->{$snapname} = 1 if $last_sync_snapname eq $snapname;
+ }
+
+ return $last_snapshots;
+};
+
+# mock for PVE::Replication::remote_finalize_local_job - no remote side exists,
+# so there is nothing to clean up
+my $mocked_remote_finalize_local_job = sub {
+ my ($ssh_info, $jobid, $vmid, $volumes, $last_sync) = @_;
+
+ # do nothing
+};
+
+# mock for PVE::Replication::replicate_volume - record the sync snapshot as the
+# replicated state for this volume on the target node
+my $mocked_replicate_volume = sub {
+ my ($ssh_info, $storecfg, $volid, $base_snapshot, $sync_snapname) = @_;
+
+ my $target = $ssh_info->{node};
+
+ $replicated_volume_status->{$target}->{$volid} = $sync_snapname;
+};
+
+# mock for PVE::ReplicationConfig::delete_job - remove the job from the mocked
+# job table instead of touching the real cluster config
+my $mocked_delete_job = sub {
+ my ($jobid) = @_;
+
+ delete $ReplicationTestEnv::mocked_replication_jobs->{$jobid};
+};
+
+my $pve_replication_config_module = Test::MockModule->new('PVE::ReplicationConfig');
+$pve_replication_config_module->mock(delete_job => $mocked_delete_job);
+
+my $pve_replication_module = Test::MockModule->new('PVE::Replication');
+$pve_replication_module->mock(
+ remote_prepare_local_job => $mocked_remote_prepare_local_job,
+ remote_finalize_local_job => $mocked_remote_finalize_local_job,
+ replicate_volume => $mocked_replicate_volume);
+
+# note: not referenced below
+my $testjob = {
+ 'type' => 'local',
+ 'target' => 'node1',
+ 'guest' => 900,
+};
+
+# single replication job: guest 900 from node1 to node2
+$ReplicationTestEnv::mocked_replication_jobs = {
+ job_900_to_node2 => {
+ 'type' => 'local',
+ 'target' => 'node2',
+ 'guest' => 900,
+ },
+};
+
+# VM 900 starts out with a single replicatable disk (ide0)
+$ReplicationTestEnv::mocked_vm_configs = {
+ 900 => {
+ node => 'node1',
+ snapshots => {},
+ ide0 => 'local-zfs:vm-900-disk-1,size=4G',
+ memory => 512,
+ ide2 => 'none,media=cdrom',
+ },
+};
+
+ReplicationTestEnv::setup();
+
+ReplicationTestEnv::register_mocked_volid('local-zfs:vm-900-disk-1');
+
+my $ctime = 1000;
+
+my $status;
+
+ReplicationTestEnv::openlog();
+
+# phase 1: run the scheduler for 15 minutes - single-disk replication succeeds
+for (my $i = 0; $i < 15; $i++) {
+ ReplicationTestEnv::track_jobs($ctime);
+ $ctime += 60;
+}
+
+# phase 2: add a new disk (but the disk does not exist, so replication fails)
+$ReplicationTestEnv::mocked_vm_configs->{900}->{ide1} = 'local-zfs:vm-900-disk-2,size=4G';
+for (my $i = 0; $i < 15; $i++) {
+ ReplicationTestEnv::track_jobs($ctime);
+ $ctime += 60;
+}
+
+# phase 3: register disk, so replication should succeed
+ReplicationTestEnv::register_mocked_volid('local-zfs:vm-900-disk-2');
+for (my $i = 0; $i < 15; $i++) {
+ ReplicationTestEnv::track_jobs($ctime);
+ $ctime += 60;
+}
+
+# phase 4: mark job for removal
+$ReplicationTestEnv::mocked_replication_jobs->{job_900_to_node2}->{remove_job} = 'full';
+for (my $i = 0; $i < 15; $i++) {
+ ReplicationTestEnv::track_jobs($ctime);
+ $ctime += 60;
+}
+
+
+
+ReplicationTestEnv::commit_log();
+
+exit(0);
--- /dev/null
+1000 job_900_to_node1: new job next_sync => 1
+1000 job_900_to_node1: start replication job
+1000 job_900_to_node1: guest => VM 900, running => 0
+1000 job_900_to_node1: volumes => local-zfs:vm-900-disk-1
+1000 job_900_to_node1: start job removal - mode 'full'
+1000 job_900_to_node1: job removed
+1000 job_900_to_node1: end replication job
+1000 job_900_to_node1: vanished job
--- /dev/null
+#!/usr/bin/perl
+
+# Note: Try to delete replication job with target on same node
+
+use strict;
+use warnings;
+use JSON;
+
+use lib ('.', '../..');
+
+use Data::Dumper;
+
+use Test::MockModule;
+use ReplicationTestEnv;
+
+# run this test as cluster node 'node1'
+$ReplicationTestEnv::mocked_nodename = 'node1';
+
+# mock for PVE::ReplicationConfig::delete_job - remove the job from the mocked
+# job table instead of touching the real cluster config
+my $mocked_delete_job = sub {
+ my ($jobid) = @_;
+
+ delete $ReplicationTestEnv::mocked_replication_jobs->{$jobid};
+};
+
+my $pve_replication_config_module = Test::MockModule->new('PVE::ReplicationConfig');
+$pve_replication_config_module->mock(
+ delete_job => $mocked_delete_job);
+
+# note: not referenced below
+my $testjob = {
+ 'type' => 'local',
+ 'target' => 'node1',
+ 'guest' => 900,
+};
+
+# the job is already marked for (full) removal and targets the local node
+$ReplicationTestEnv::mocked_replication_jobs = {
+ job_900_to_node1 => {
+ remove_job => 'full',
+ type => 'local',
+ target => 'node1', # local node, job should be skipped
+ guest => 900,
+ },
+};
+
+# VM 900 with a single replicatable disk (ide0)
+$ReplicationTestEnv::mocked_vm_configs = {
+ 900 => {
+ node => 'node1',
+ snapshots => {},
+ ide0 => 'local-zfs:vm-900-disk-1,size=4G',
+ memory => 512,
+ ide2 => 'none,media=cdrom',
+ },
+};
+
+ReplicationTestEnv::setup();
+
+ReplicationTestEnv::openlog();
+
+# run the scheduler for 15 minutes - the job should be removed on the first run
+my $ctime = 1000;
+for (my $i = 0; $i < 15; $i++) {
+ ReplicationTestEnv::track_jobs($ctime);
+ $ctime += 60;
+}
+
+ReplicationTestEnv::commit_log();
+
+exit(0);