my $DISK_KEY_RE = qr/^(?:(?:(?:virtio|ide|scsi|sata|efidisk|mp)\d+)|rootfs): /;
+my $INSTANCE_ID = get_instance_id($$);
+
my $command = $ARGV[0];
if (defined($command) && $command ne 'help' && $command ne 'printpod') {
die "unable to find command '$bin'\n";
}
+# Read $filename and return its content: the first line as a scalar when
+# $one_line_only is true, or an array-ref holding all lines otherwise.
+# Dies when the file cannot be opened.
+sub read_file {
+    my ($filename, $one_line_only) = @_;
+
+    my $fh = IO::File->new($filename, "r")
+	or die "Could not open file $filename: $!\n";
+
+    my $text = $one_line_only ? <$fh> : [ <$fh> ];
+
+    close($fh);
+
+    return $text;
+}
+
sub cut_target_width {
my ($path, $maxlen) = @_;
$path =~ s@/+@/@g;
return undef;
}
-sub check_pool_exists {
- my ($target, $user) = @_;
+sub check_dataset_exists {
+ my ($dataset, $ip, $user) = @_;
my $cmd = [];
- if ($target->{ip}) {
- push @$cmd, 'ssh', "$user\@$target->{ip}", '--';
+ if ($ip) {
+ push @$cmd, 'ssh', "$user\@$ip", '--';
}
- push @$cmd, 'zfs', 'list', '-H', '--', $target->{all};
+ push @$cmd, 'zfs', 'list', '-H', '--', $dataset;
eval {
run_cmd($cmd);
};
return 1;
}
+# Create the ZFS file system (dataset) named $file_system, via ssh as
+# $user when a remote $ip is given, locally otherwise.
+# Dies (through run_cmd) if the 'zfs create' command fails.
+sub create_file_system {
+    my ($file_system, $ip, $user) = @_;
+
+    my $cmd = [];
+
+    # Run the command remotely over ssh when the target has an IP address.
+    if ($ip) {
+	push @$cmd, 'ssh', "$user\@$ip", '--';
+    }
+    push @$cmd, 'zfs', 'create', $file_system;
+
+    run_cmd($cmd);
+}
+
sub parse_target {
my ($text) = @_;
return undef;
}
- my $fh = IO::File->new("< $CRONJOBS");
- die "Could not open file $CRONJOBS: $!\n" if !$fh;
-
- my @text = <$fh>;
-
- close($fh);
+ my $text = read_file($CRONJOBS, 0);
- return encode_cron(@text);
+ return encode_cron(@{$text});
}
sub parse_argv {
method => undef,
source_user => undef,
dest_user => undef,
+ prepend_storage_id => undef,
properties => undef,
dest_config_path => undef,
};
'method=s' => \$param->{method},
'source-user=s' => \$param->{source_user},
'dest-user=s' => \$param->{dest_user},
+ 'prepend-storage-id' => \$param->{prepend_storage_id},
'properties' => \$param->{properties},
'dest-config-path=s' => \$param->{dest_config_path},
);
$job->{state} = $state->{state};
$job->{lsync} = $state->{lsync};
$job->{vm_type} = $state->{vm_type};
+ $job->{instance_id} = $state->{instance_id};
for (my $i = 0; $state->{"snap$i"}; $i++) {
$job->{"snap$i"} = $state->{"snap$i"};
my $job = {};
my $source = parse_target($param->{source});
- my $dest = parse_target($param->{dest}) if $param->{dest};
+ my $dest;
+ $dest = parse_target($param->{dest}) if $param->{dest};
$job->{name} = !$param->{name} ? "default" : $param->{name};
$job->{dest} = $param->{dest} if $param->{dest};
$job->{method} = "local" if !$dest->{ip} && !$source->{ip};
$job->{method} = "ssh" if !$job->{method};
$job->{limit} = $param->{limit};
- $job->{maxsnap} = $param->{maxsnap} if $param->{maxsnap};
+ $job->{maxsnap} = $param->{maxsnap};
$job->{source} = $param->{source};
$job->{source_user} = $param->{source_user};
$job->{dest_user} = $param->{dest_user};
+ $job->{prepend_storage_id} = !!$param->{prepend_storage_id};
$job->{properties} = !!$param->{properties};
$job->{dest_config_path} = $param->{dest_config_path} if $param->{dest_config_path};
return undef;
}
- my $fh = IO::File->new("< $STATE");
- die "Could not open file $STATE: $!\n" if !$fh;
-
- my $text = <$fh>;
- my $states = decode_json($text);
-
- close($fh);
-
- return $states;
+ my $text = read_file($STATE, 1);
+ return decode_json($text);
}
sub update_state {
my ($job) = @_;
- my $text;
- my $in_fh;
-
- eval {
- $in_fh = IO::File->new("< $STATE");
- die "Could not open file $STATE: $!\n" if !$in_fh;
- $text = <$in_fh>;
- };
+ my $text = eval { read_file($STATE, 1); };
my $out_fh = IO::File->new("> $STATE.new");
die "Could not open file ${STATE}.new: $!\n" if !$out_fh;
if ($job->{state} ne "del") {
$state->{state} = $job->{state};
$state->{lsync} = $job->{lsync};
+ $state->{instance_id} = $job->{instance_id};
$state->{vm_type} = $job->{vm_type};
for (my $i = 0; $job->{"snap$i"} ; $i++) {
close($out_fh);
rename "$STATE.new", $STATE;
- eval {
- close($in_fh);
- };
return $states;
}
my $header = "SHELL=/bin/sh\n";
$header .= "PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin\n\n";
- my $fh = IO::File->new("< $CRONJOBS");
- die "Could not open file $CRONJOBS: $!\n" if !$fh;
-
- my @test = <$fh>;
+ my $current = read_file($CRONJOBS, 0);
- while (my $line = shift(@test)) {
+ foreach my $line (@{$current}) {
chomp($line);
if ($line =~ m/source $job->{source} .*name $job->{name} /) {
$updated = 1;
my $new_fh = IO::File->new("> ${CRONJOBS}.new");
die "Could not open file ${CRONJOBS}.new: $!\n" if !$new_fh;
- die "can't write to $CRONJOBS.new\n" if !print($new_fh $text);
+ print $new_fh $text or die "can't write to $CRONJOBS.new: $!\n";
close ($new_fh);
- die "can't move $CRONJOBS.new: $!\n" if !rename "${CRONJOBS}.new", $CRONJOBS;
- close ($fh);
+ rename "${CRONJOBS}.new", $CRONJOBS or die "can't move $CRONJOBS.new: $!\n";
}
sub format_job {
$text .= " --verbose" if $job->{verbose};
$text .= " --source-user $job->{source_user}";
$text .= " --dest-user $job->{dest_user}";
+ $text .= " --prepend-storage-id" if $job->{prepend_storage_id};
$text .= " --properties" if $job->{properties};
$text .= " --dest-config-path $job->{dest_config_path}" if $job->{dest_config_path};
$text .= "\n";
run_cmd(['ssh-copy-id', '-i', '/root/.ssh/id_rsa.pub', "$param->{source_user}\@$ip"]);
}
- die "Pool $dest->{all} does not exists\n" if !check_pool_exists($dest, $param->{dest_user});
+ die "Pool $dest->{all} does not exist\n"
+ if !check_dataset_exists($dest->{all}, $dest->{ip}, $param->{dest_user});
if (!defined($source->{vmid})) {
- die "Pool $source->{all} does not exists\n" if !check_pool_exists($source, $param->{source_user});
+ die "Pool $source->{all} does not exist\n"
+ if !check_dataset_exists($source->{all}, $source->{ip}, $param->{source_user});
}
my $vm_type = vm_exists($source, $param->{source_user});
});
}
+# Build an identifier for a process instance: "<pid>:<starttime>:<boot_id>".
+# Combining the process start time (from /proc/<pid>/stat) with the kernel's
+# random boot ID makes the ID distinguish PID reuse and survive across reboots.
+sub get_instance_id {
+    my ($pid) = @_;
+
+    my $stat = read_file("/proc/$pid/stat", 1)
+	or die "unable to read process stats\n";
+    my $boot_id = read_file("/proc/sys/kernel/random/boot_id", 1)
+	or die "unable to read boot ID\n";
+
+    # Field 22 (0-based index 21) of /proc/<pid>/stat is the start time.
+    # NOTE(review): splitting on whitespace assumes the comm field (field 2,
+    # in parentheses) contains no spaces - TODO confirm that is acceptable here.
+    my $stats = [ split(/\s+/, $stat) ];
+    my $starttime = $stats->[21];
+    chomp($boot_id);
+
+    return "${pid}:${starttime}:${boot_id}";
+}
+
+# Check whether the process instance identified by $instance_id (as produced
+# by get_instance_id()) is still alive on this boot of this host.
+sub instance_exists {
+    my ($instance_id) = @_;
+
+    if (defined($instance_id) && $instance_id =~ m/^([1-9][0-9]*):/) {
+	my $pid = $1;
+	# Recompute the ID for that PID; if the process is gone (the eval
+	# catches the die from get_instance_id) or the PID was reused by a
+	# different process, the IDs will not match.
+	my $actual_id = eval { get_instance_id($pid); };
+	return defined($actual_id) && $actual_id eq $instance_id;
+    }
+
+    return 0;
+}
+
sub sync {
my ($param) = @_;
eval { $job = get_job($param) };
if ($job) {
- if (defined($job->{state}) && ($job->{state} eq "syncing" || $job->{state} eq "waiting")) {
+ my $state = $job->{state} // 'ok';
+ $state = 'ok' if !instance_exists($job->{instance_id});
+
+ if ($state eq "syncing" || $state eq "waiting") {
die "Job --source $param->{source} --name $param->{name} is already scheduled to sync\n";
}
$job->{state} = "waiting";
+ $job->{instance_id} = $INSTANCE_ID;
+
update_state($job);
}
});
my $sync_path = sub {
my ($source, $dest, $job, $param, $date) = @_;
- ($source->{old_snap}, $source->{last_snap}) = snapshot_get($source, $dest, $param->{maxsnap}, $param->{name}, $param->{source_user});
+ ($dest->{old_snap}, $dest->{last_snap}) = snapshot_get($source, $dest, $param->{maxsnap}, $param->{name}, $param->{dest_user});
+
+ prepare_prepended_target($source, $dest, $param->{dest_user}) if defined($dest->{prepend});
snapshot_add($source, $dest, $param->{name}, $date, $param->{source_user}, $param->{dest_user});
send_image($source, $dest, $param);
- snapshot_destroy($source, $dest, $param->{method}, $source->{old_snap}, $param->{source_user}, $param->{dest_user}) if ($source->{destroy} && $source->{old_snap});
+ snapshot_destroy($source, $dest, $param->{method}, $dest->{old_snap}, $param->{source_user}, $param->{dest_user}) if ($source->{destroy} && $dest->{old_snap});
};
$source->{pool} = $disks->{$disk}->{pool};
$source->{path} = $disks->{$disk}->{path} if $disks->{$disk}->{path};
$source->{last_part} = $disks->{$disk}->{last_part};
+
+ $dest->{prepend} = $disks->{$disk}->{storage_id}
+ if $param->{prepend_storage_id};
+
&$sync_path($source, $dest, $job, $param, $date);
}
if ($param->{method} eq "ssh" && ($source->{ip} || $dest->{ip})) {
eval { $job = get_job($param); };
if ($job) {
$job->{state} = "error";
+ delete $job->{instance_id};
update_state($job);
}
});
$job->{state} = "ok";
}
$job->{lsync} = $date;
+ delete $job->{instance_id};
update_state($job);
}
});
}
sub snapshot_get{
- my ($source, $dest, $max_snap, $name, $source_user) = @_;
+ my ($source, $dest, $max_snap, $name, $dest_user) = @_;
my $cmd = [];
- push @$cmd, 'ssh', "$source_user\@$source->{ip}", '--', if $source->{ip};
+ push @$cmd, 'ssh', "$dest_user\@$dest->{ip}", '--', if $dest->{ip};
push @$cmd, 'zfs', 'list', '-r', '-t', 'snapshot', '-Ho', 'name', '-S', 'creation';
- push @$cmd, $source->{all};
- my $raw = run_cmd($cmd);
+ my $path = target_dataset($source, $dest);
+ push @$cmd, $path;
+
+ my $raw;
+ eval {$raw = run_cmd($cmd)};
+ if (my $erro =$@) { #this means the volume doesn't exist on dest yet
+ return undef;
+ }
+
my $index = 0;
my $line = "";
my $last_snap = undef;
while ($raw && $raw =~ s/^(.*?)(\n|$)//) {
$line = $1;
- if ($line =~ m/(rep_\Q${name}\E_\d{4}-\d{2}-\d{2}_\d{2}:\d{2}:\d{2})$/) {
-
+ if ($line =~ m/@(.*)$/) {
$last_snap = $1 if (!$last_snap);
+ }
+ if ($line =~ m/(rep_\Q${name}\E_\d{4}-\d{2}-\d{2}_\d{2}:\d{2}:\d{2})$/) {
$old_snap = $1;
$index++;
if ($index == $max_snap) {
}
}
-sub write_cron {
- my ($cfg) = @_;
-
- my $text = "SHELL=/bin/sh\n";
- $text .= "PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin\n";
-
- my $fh = IO::File->new("> $CRONJOBS");
- die "Could not open file: $!\n" if !$fh;
-
- foreach my $source (sort keys%{$cfg}) {
- foreach my $sync_name (sort keys%{$cfg->{$source}}) {
- next if $cfg->{$source}->{$sync_name}->{status} ne 'ok';
- $text .= "$PROG_PATH sync";
- $text .= " -source ";
- if ($cfg->{$source}->{$sync_name}->{vmid}) {
- $text .= "$cfg->{$source}->{$sync_name}->{source_ip}:" if $cfg->{$source}->{$sync_name}->{source_ip};
- $text .= "$cfg->{$source}->{$sync_name}->{vmid} ";
- } else {
- $text .= "$cfg->{$source}->{$sync_name}->{source_ip}:" if $cfg->{$source}->{$sync_name}->{source_ip};
- $text .= "$cfg->{$source}->{$sync_name}->{source_pool}";
- $text .= "$cfg->{$source}->{$sync_name}->{source_path}" if $cfg->{$source}->{$sync_name}->{source_path};
- }
- $text .= " -dest ";
- $text .= "$cfg->{$source}->{$sync_name}->{dest_ip}:" if $cfg->{$source}->{$sync_name}->{dest_ip};
- $text .= "$cfg->{$source}->{$sync_name}->{dest_pool}";
- $text .= "$cfg->{$source}->{$sync_name}->{dest_path}" if $cfg->{$source}->{$sync_name}->{dest_path};
- $text .= " -name $sync_name ";
- $text .= " -limit $cfg->{$source}->{$sync_name}->{limit}" if $cfg->{$source}->{$sync_name}->{limit};
- $text .= " -maxsnap $cfg->{$source}->{$sync_name}->{maxsnap}" if $cfg->{$source}->{$sync_name}->{maxsnap};
- $text .= "\n";
- }
- }
- die "Can't write to cron\n" if (!print($fh $text));
- close($fh);
-}
-
sub get_disks {
my ($target, $user) = @_;
my @parameter = split(/,/,$1);
foreach my $opt (@parameter) {
- if ($opt =~ m/^(?:file=|volume=)?([^:]+:)([A-Za-z0-9\-]+)$/){
+ if ($opt =~ m/^(?:file=|volume=)?([^:]+):([A-Za-z0-9\-]+)$/){
$disk = $2;
$stor = $1;
last;
my $cmd = [];
push @$cmd, 'ssh', "$user\@$ip", '--' if $ip;
- push @$cmd, 'pvesm', 'path', "$stor$disk";
+ push @$cmd, 'pvesm', 'path', "$stor:$disk";
my $path = run_cmd($cmd);
- die "Get no path from pvesm path $stor$disk\n" if !$path;
-
+ die "Get no path from pvesm path $stor:$disk\n" if !$path;
+
+ $disks->{$num}->{storage_id} = $stor;
+
if ($vm_type eq 'qemu' && $path =~ m/^\/dev\/zvol\/(\w+.*)(\/$disk)$/) {
my @array = split('/', $1);
return $disks;
}
+# how the corresponding dataset is named on the target
+# Returns "<dest pool/path>[/<storage id>][/<source last part>]" with
+# duplicate slashes collapsed.
+sub target_dataset {
+    my ($source, $dest) = @_;
+
+    my $target = "$dest->{all}";
+    $target .= "/$dest->{prepend}" if defined($dest->{prepend});
+    $target .= "/$source->{last_part}" if $source->{last_part};
+    $target =~ s!/+!/!g;  # collapse duplicate slashes
+
+    return $target;
+}
+
+# create the parent dataset for the actual target
+# With --prepend-storage-id active, ensure the "<dest>/<storage id>" parent
+# dataset exists on the destination, creating it when missing.
+sub prepare_prepended_target {
+    my ($source, $dest, $dest_user) = @_;
+
+    die "internal error - not a prepended target\n" if !defined($dest->{prepend});
+
+    # The parent dataset shouldn't be the actual target.
+    die "internal error - no last_part for source\n" if !$source->{last_part};
+
+    my $target = "$dest->{all}/$dest->{prepend}";
+    $target =~ s!/+!/!g;  # collapse duplicate slashes
+
+    # Nothing to do if the parent dataset is already present.
+    return if check_dataset_exists($target, $dest->{ip}, $dest_user);
+
+    create_file_system($target, $dest->{ip}, $dest_user);
+}
+
sub snapshot_destroy {
my ($source, $dest, $method, $snap, $source_user, $dest_user) = @_;
if ($dest) {
my @ssh = $dest->{ip} ? ('ssh', "$dest_user\@$dest->{ip}", '--') : ();
- my $path = "$dest->{all}";
- $path .= "/$source->{last_part}" if $source->{last_part};
+ my $path = target_dataset($source, $dest);
eval {
run_cmd([@ssh, @zfscmd, "$path\@$snap"]);
}
}
+# check if snapshot for incremental sync exist on source side
sub snapshot_exist {
- my ($source , $dest, $method, $dest_user) = @_;
+ my ($source , $dest, $method, $source_user) = @_;
my $cmd = [];
- push @$cmd, 'ssh', "$dest_user\@$dest->{ip}", '--' if $dest->{ip};
+ push @$cmd, 'ssh', "$source_user\@$source->{ip}", '--' if $source->{ip};
push @$cmd, 'zfs', 'list', '-rt', 'snapshot', '-Ho', 'name';
- my $path = $dest->{all};
- $path .= "/$source->{last_part}" if $source->{last_part};
- $path .= "\@$source->{old_snap}";
+ my $path = $source->{all};
+ $path .= "\@$dest->{last_snap}";
push @$cmd, $path;
-
- my $text = "";
- eval {$text =run_cmd($cmd);};
+ eval {run_cmd($cmd)};
if (my $erro =$@) {
warn "WARN: $erro";
return undef;
}
-
- while ($text && $text =~ s/^(.*?)(\n|$)//) {
- my $line =$1;
- return 1 if $line =~ m/^.*$source->{old_snap}$/;
- }
+ return 1;
}
sub send_image {
push @$cmd, '-p', if $param->{properties};
push @$cmd, '-v' if $param->{verbose};
- if($source->{last_snap} && snapshot_exist($source , $dest, $param->{method}, $param->{dest_user})) {
- push @$cmd, '-i', "$source->{all}\@$source->{last_snap}";
+ if($dest->{last_snap} && snapshot_exist($source , $dest, $param->{method}, $param->{source_user})) {
+ push @$cmd, '-i', "$source->{all}\@$dest->{last_snap}";
}
push @$cmd, '--', "$source->{all}\@$source->{new_snap}";
my $bwl = $param->{limit}*1024;
push @$cmd, \'|', 'cstream', '-t', $bwl;
}
- my $target = "$dest->{all}";
- $target .= "/$source->{last_part}" if $source->{last_part};
- $target =~ s!/+!/!g;
+ my $target = target_dataset($source, $dest);
push @$cmd, \'|';
push @$cmd, 'ssh', '-o', 'BatchMode=yes', "$param->{dest_user}\@$dest->{ip}", '--' if $dest->{ip};
}
if ($source->{destroy}){
- my $dest_target_old ="${config_dir}/$source->{vmid}.conf.$source->{vm_type}.$source->{old_snap}";
+ my $dest_target_old ="${config_dir}/$source->{vmid}.conf.$source->{vm_type}.$dest->{old_snap}";
if($dest->{ip}){
run_cmd(['ssh', "$dest_user\@$dest->{ip}", '--', 'rm', '-f', '--', $dest_target_old]);
} else {
my $cmd_help = {
destroy => qq{
-$PROGNAME destroy -source <string> [OPTIONS]
+$PROGNAME destroy --source <string> [OPTIONS]
- remove a sync Job from the scheduler
+ Remove a sync Job from the scheduler
- -name string
+ --name string
+ The name of the sync job, if not set 'default' is used.
- name of the sync job, if not set it is default
-
- -source string
-
- the source can be an <VMID> or [IP:]<ZFSPool>[/Path]
+ --source string
+ The source can be an <VMID> or [IP:]<ZFSPool>[/Path]
},
create => qq{
-$PROGNAME create -dest <string> -source <string> [OPTIONS]
-
- Create a sync Job
-
- -dest string
-
- the destination target is like [IP]:<Pool>[/Path]
-
- -dest-user string
-
- name of the user on the destination target, root by default
-
- -limit integer
+$PROGNAME create --dest <string> --source <string> [OPTIONS]
- max sync speed in kBytes/s, default unlimited
+ Create a new sync-job
- -maxsnap string
+ --dest string
+ The destination target is like [IP]:<Pool>[/Path]
- how much snapshots will be kept before get erased, default 1
+ --dest-user string
+ The name of the user on the destination target, root by default
- -name string
+ --limit integer
+ Maximal sync speed in kBytes/s, default is unlimited
- name of the sync job, if not set it is default
-
- -skip boolean
-
- if this flag is set it will skip the first sync
-
- -source string
+ --maxsnap integer
+ The number of snapshots to keep until older ones are erased.
+ The default is 1, use 0 for unlimited.
- the source can be an <VMID> or [IP:]<ZFSPool>[/Path]
+ --name string
+ The name of the sync job, if not set it is default
- -source-user string
+ --prepend-storage-id
+ If specified, prepend the storage ID to the destination's path(s).
- name of the user on the source target, root by default
+ --skip
+ If specified, skip the first sync.
- -properties boolean
+ --source string
+ The source can be an <VMID> or [IP:]<ZFSPool>[/Path]
- Include the dataset's properties in the stream.
+ --source-user string
+ The (ssh) user-name on the source target, root by default
- -dest-config-path string
+ --properties
+ If specified, include the dataset's properties in the stream.
- specify a custom config path on the destination target. default is /var/lib/pve-zsync
+ --dest-config-path string
+ Specifies a custom config path on the destination target.
+ The default is /var/lib/pve-zsync
},
sync => qq{
-$PROGNAME sync -dest <string> -source <string> [OPTIONS]\n
-
- will sync one time
-
- -dest string
-
- the destination target is like [IP:]<Pool>[/Path]
-
- -dest-user string
+$PROGNAME sync --dest <string> --source <string> [OPTIONS]\n
- name of the user on the destination target, root by default
+ Trigger one sync.
- -limit integer
+ --dest string
+ The destination target is like [IP:]<Pool>[/Path]
- max sync speed in kBytes/s, default unlimited
+ --dest-user string
+ The (ssh) user-name on the destination target, root by default
- -maxsnap integer
+ --limit integer
+ The maximal sync speed in kBytes/s, default is unlimited
- how much snapshots will be kept before get erased, default 1
+ --maxsnap integer
+ The number of snapshots to keep until older ones are erased.
+ The default is 1, use 0 for unlimited.
- -name string
-
- name of the sync job, if not set it is default.
+ --name string
+ The name of the sync job, if not set it is 'default'.
It is only necessary if scheduler allready contains this source.
- -source string
-
- the source can be an <VMID> or [IP:]<ZFSPool>[/Path]
-
- -source-user string
-
- name of the user on the source target, root by default
+ --prepend-storage-id
+ If specified, prepend the storage ID to the destination's path(s).
- -verbose boolean
+ --source string
+ The source can either be an <VMID> or [IP:]<ZFSPool>[/Path]
- print out the sync progress.
+ --source-user string
+ The name of the user on the source target, root by default
- -properties boolean
+ --verbose
+ If specified, print out the sync progress.
- Include the dataset's properties in the stream.
+ --properties
+ If specified, include the dataset's properties in the stream.
- -dest-config-path string
-
- specify a custom config path on the destination target. default is /var/lib/pve-zsync
+ --dest-config-path string
+ Specifies a custom config path on the destination target.
+ The default is /var/lib/pve-zsync
},
list => qq{
$PROGNAME list
help => qq{
$PROGNAME help <cmd> [OPTIONS]
- Get help about specified command.
-
- <cmd> string
+ Get help about specified command.
- Command name
-
- -verbose boolean
+ <cmd> string
+ Command name to get help about.
+ --verbose
Verbose output format.
},
enable => qq{
-$PROGNAME enable -source <string> [OPTIONS]
-
- enable a syncjob and reset error
+$PROGNAME enable --source <string> [OPTIONS]
- -name string
+ Enable a sync-job and reset all job-errors, if any.
+ --name string
name of the sync job, if not set it is default
- -source string
-
- the source can be an <VMID> or [IP:]<ZFSPool>[/Path]
+ --source string
+	The source can be an <VMID> or [IP:]<ZFSPool>[/Path]
},
disable => qq{
-$PROGNAME disable -source <string> [OPTIONS]
-
- pause a sync job
+$PROGNAME disable --source <string> [OPTIONS]
- -name string
-
- name of the sync job, if not set it is default
+ Disables (pauses) a sync-job
- -source string
+ --name string
+	The name of the sync-job, if not set it is 'default'
- the source can be an <VMID> or [IP:]<ZFSPool>[/Path]
+ --source string
+	The source can be an <VMID> or [IP:]<ZFSPool>[/Path]
},
- printpod => 'internal command',
+ printpod => "$PROGNAME printpod\n\n\tinternal command",
};
print("ERROR:\tno command specified\n") if !$help;
print("USAGE:\t$PROGNAME <COMMAND> [ARGS] [OPTIONS]\n");
print("\t$PROGNAME help [<cmd>] [OPTIONS]\n\n");
- print("\t$PROGNAME create -dest <string> -source <string> [OPTIONS]\n");
- print("\t$PROGNAME destroy -source <string> [OPTIONS]\n");
- print("\t$PROGNAME disable -source <string> [OPTIONS]\n");
- print("\t$PROGNAME enable -source <string> [OPTIONS]\n");
+ print("\t$PROGNAME create --dest <string> --source <string> [OPTIONS]\n");
+ print("\t$PROGNAME destroy --source <string> [OPTIONS]\n");
+ print("\t$PROGNAME disable --source <string> [OPTIONS]\n");
+ print("\t$PROGNAME enable --source <string> [OPTIONS]\n");
print("\t$PROGNAME list\n");
print("\t$PROGNAME status\n");
- print("\t$PROGNAME sync -dest <string> -source <string> [OPTIONS]\n");
+ print("\t$PROGNAME sync --dest <string> --source <string> [OPTIONS]\n");
}
sub check_target {
sub print_pod {
my $synopsis = join("\n", sort values %$cmd_help);
+ my $commands = join(", ", sort keys %$cmd_help);
print <<EOF;
=head1 NAME
-pve-zsync - PVE ZFS Replication Manager
+pve-zsync - PVE ZFS Storage Sync Tool
=head1 SYNOPSIS
pve-zsync <COMMAND> [ARGS] [OPTIONS]
-$synopsis
+Where <COMMAND> can be one of: $commands
=head1 DESCRIPTION
-This Tool helps you to sync your VM or directory which stored on ZFS between 2 servers.
-This tool also has the capability to add jobs to cron so the sync will be automatically done.
-The default syncing interval is set to 15 min, if you want to change this value you can do this in /etc/cron.d/pve-zsync.
-To config cron see man crontab.
+The pve-zsync tool can help you to sync your VMs or directories stored on ZFS
+between multiple servers.
+
+pve-zsync is able to automatically configure CRON jobs, so that a periodic sync
+will be automatically triggered.
+The default sync interval is 15 min, if you want to change this value you can
+do this in F</etc/cron.d/pve-zsync>. If you need help to configure CRON tabs, see
+man crontab.
-=head2 PVE ZFS Storage sync Tool
+=head1 COMMANDS AND OPTIONS
-This Tool can get remote pool on other PVE or send Pool to others ZFS machines
+$synopsis
=head1 EXAMPLES
-add sync job from local VM to remote ZFS Server
-pve-zsync create -source=100 -dest=192.168.1.2:zfspool
+Adds a job for syncing the local VM 100 to a remote server's ZFS pool named "tank":
+    pve-zsync create --source=100 --dest=192.168.1.2:tank
=head1 IMPORTANT FILES
-Cron jobs and config are stored at /etc/cron.d/pve-zsync
+Cron jobs and config are stored in F</etc/cron.d/pve-zsync>
-The VM config get copied on the destination machine to /var/lib/pve-zsync/
+The VM configuration itself gets copied to the destination machines
+F</var/lib/pve-zsync/> path.
=head1 COPYRIGHT AND DISCLAIMER
-Copyright (C) 2007-2015 Proxmox Server Solutions GmbH
+Copyright (C) 2007-2021 Proxmox Server Solutions GmbH
-This program is free software: you can redistribute it and/or modify it
-under the terms of the GNU Affero General Public License as published
-by the Free Software Foundation, either version 3 of the License, or
-(at your option) any later version.
+This program is free software: you can redistribute it and/or modify it under
+the terms of the GNU Affero General Public License as published by the Free
+Software Foundation, either version 3 of the License, or (at your option) any
+later version.
-This program is distributed in the hope that it will be useful, but
-WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-Affero General Public License for more details.
+This program is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+PARTICULAR PURPOSE. See the GNU Affero General Public License for more
+details.
-You should have received a copy of the GNU Affero General Public
-License along with this program. If not, see
-<http://www.gnu.org/licenses/>.
+You should have received a copy of the GNU Affero General Public License along
+with this program. If not, see <http://www.gnu.org/licenses/>.
EOF
}