use PVE::INotify;
use PVE::JSONSchema qw(get_standard_option);
use PVE::Tools;
+use PVE::ReplicationTools;
use base qw(PVE::AbstractConfig);
},
ro => {
type => 'boolean',
- description => 'Read-only mountpoint',
+ description => 'Read-only mount point',
optional => 1,
},
quota => {
description => 'Enable user quotas inside the container (not supported with zfs subvolumes)',
optional => 1,
},
+ replica => {
+ type => 'boolean',
+ description => 'Will include this volume to a storage replica job.',
+ optional => 1,
+ default => 1,
+ },
+ shared => {
+ type => 'boolean',
+ description => 'Mark this non-volume mount point as available on multiple nodes (see \'nodes\')',
+ verbose_description => "Mark this non-volume mount point as available on all nodes.\n\nWARNING: This option does not share the mount point automatically, it assumes it is shared already!",
+ optional => 1,
+ default => 0,
+ },
};
PVE::JSONSchema::register_standard_option('pve-ct-rootfs', {
maximum => 6,
default => 2,
},
+ cores => {
+ optional => 1,
+ type => 'integer',
+ description => "The number of cores assigned to the container. A container can use all available cores by default.",
+ minimum => 1,
+ maximum => 128,
+ },
cpulimit => {
optional => 1,
type => 'number',
type => 'integer',
minimum => 0,
},
+ replica => {
+ optional => 1,
+ description => "Storage replica for local storage.",
+ type => 'boolean',
+ default => 0,
+ },
+ replica_rate_limit => {
+ optional => 1,
+ description => "Storage replica rate limit in KBytes/s.",
+ type => 'integer',
+ minimum => 1,
+ },
+ replica_target => {
+ optional => 1,
+ description => "Storage replica target node.",
+ type => 'string',
+ },
+ replica_interval => {
+ optional => 1,
+ description => "Storage replica sync interval.",
+ type => 'integer',
+ minimum => 1,
+ maximum => 1440,
+ default => 15,
+ },
cmode => {
optional => 1,
description => "Console mode. By default, the console command tries to open a connection to one of the available tty devices. By setting cmode to 'console' it tries to attach to /dev/console instead. If you set cmode to 'shell', it simply invokes a shell inside the container (no login).",
'lxc.rootfs' => 'lxc.rootfs is auto generated from rootfs',
'lxc.rootfs.mount' => 1,
'lxc.rootfs.options' => 'lxc.rootfs.options is not supported' .
- ', please use mountpoint options in the "rootfs" key',
+ ', please use mount point options in the "rootfs" key',
# lxc.cgroup.*
+ # lxc.limit.*
'lxc.cap.drop' => 1,
'lxc.cap.keep' => 1,
'lxc.aa_profile' => 1,
%$rootfs_desc,
backup => {
type => 'boolean',
- description => 'Whether to include the mountpoint in backups.',
- verbose_description => 'Whether to include the mountpoint in backups '.
- '(only used for volume mountpoints).',
+ description => 'Whether to include the mount point in backups.',
+ verbose_description => 'Whether to include the mount point in backups '.
+ '(only used for volume mount points).',
optional => 1,
},
mp => {
type => 'string',
format => 'pve-lxc-mp-string',
format_description => 'Path',
- description => 'Path to the mountpoint as seen from inside the container '.
+ description => 'Path to the mount point as seen from inside the container '.
'(must not contain symlinks).',
- verbose_description => "Path to the mountpoint as seen from inside the container.\n\n".
+ verbose_description => "Path to the mount point as seen from inside the container.\n\n".
"NOTE: Must not contain any symlinks for security reasons."
},
};
my $key = $1;
my $value = $3;
my $validity = $valid_lxc_conf_keys->{$key} || 0;
- if ($validity eq 1 || $key =~ m/^lxc\.cgroup\./) {
+ if ($validity eq 1 || $key =~ m/^lxc\.(?:cgroup|limit)\./) {
push @{$conf->{lxc}}, [$key, $value];
} elsif (my $errmsg = $validity) {
warn "vm $vmid - $key: $errmsg\n";
if (defined($delete)) {
foreach my $opt (@$delete) {
if (!exists($conf->{$opt})) {
- warn "no such option: $opt\n";
+ # silently ignore
next;
}
$opt eq 'tty' || $opt eq 'console' || $opt eq 'cmode') {
next if $hotplug_error->($opt);
delete $conf->{$opt};
+ } elsif ($opt eq 'cores') {
+ delete $conf->{$opt}; # rest is handled by pvestatd
} elsif ($opt eq 'cpulimit') {
PVE::LXC::write_cgroup_value("cpu", $vmid, "cpu.cfs_quota_us", -1);
delete $conf->{$opt};
}
} elsif ($opt eq 'unprivileged') {
die "unable to delete read-only option: '$opt'\n";
+ } elsif ($opt eq "replica" || $opt eq "replica_target") {
+ delete $conf->{$opt};
+ delete $conf->{replica} if $opt eq "replica_target";
+
+ # job_remove requires an updated LXC config
+ PVE::ReplicationTools::job_remove($vmid);
+ } elsif ($opt eq "replica_interval" || $opt eq "replica_rate_limit") {
+ delete $conf->{$opt};
+ PVE::ReplicationTools::update_conf($vmid, $opt, $param->{$opt});
} else {
die "implement me (delete: $opt)"
}
foreach my $opt (keys %$param) {
my $value = $param->{$opt};
my $check_protection_msg = "can't update CT $vmid drive '$opt'";
+ my $update;
if ($opt eq 'hostname' || $opt eq 'arch') {
$conf->{$opt} = $value;
} elsif ($opt eq 'onboot') {
next if $hotplug_error->($opt);
my $list = PVE::LXC::verify_searchdomain_list($value);
$conf->{$opt} = $list;
+ } elsif ($opt eq 'cores') {
+ $conf->{$opt} = $value;# rest is handled by pvestatd
} elsif ($opt eq 'cpulimit') {
if ($value == 0) {
PVE::LXC::write_cgroup_value("cpu", $vmid, "cpu.cfs_quota_us", -1);
} elsif ($opt eq 'ostype') {
next if $hotplug_error->($opt);
$conf->{$opt} = $value;
+ } elsif ($opt eq "replica") {
+ die "Not all volumes are syncable, please check your config\n"
+ if !PVE::ReplicationTools::check_guest_volumes_syncable($conf, 'lxc');
+ $conf->{$opt} = $param->{$opt};
+ die "replica_target is required\n" if !$conf->{replica_target}
+ && !$param->{replica_target};
+ if ($param->{replica}) {
+ PVE::ReplicationTools::job_enable($vmid);
+ } else {
+ PVE::ReplicationTools::job_disable($vmid);
+ }
+ $update = 1;
+ } elsif ($opt eq "replica_interval" || $opt eq "replica_rate_limit") {
+ $conf->{$opt} = $param->{$opt};
+ PVE::ReplicationTools::update_conf($vmid, $opt, $param->{$opt});
+ $update = 1;
+ } elsif ($opt eq "replica_target") {
+ die "Node: $param->{$opt} does not exists in Cluster.\n"
+ if !PVE::Cluster::check_node_exists($param->{$opt});
+ $update = 1;
+ PVE::ReplicationTools::update_conf($vmid, $opt, $param->{$opt})
+ if defined($conf->{$opt});
+ $conf->{$opt} = $param->{$opt};
} else {
die "implement me: $opt";
}
- PVE::LXC::Config->write_config($vmid, $conf) if $running;
+ PVE::LXC::Config->write_config($vmid, $conf) if $running || $update;
}
# Apply deletions and creations of new volumes
return !(defined($conf->{console}) && !$conf->{console});
}
+# Check whether the raw lxc config section contains a given key.
+#
+# $conf->{lxc} is a list of [key, value] pairs (populated when parsing
+# pass-through 'lxc.*' lines); returns 1 if any entry's key equals
+# $keyname, 0 otherwise (including when no lxc section exists at all).
+sub has_lxc_entry {
+    my ($class, $conf, $keyname) = @_;
+
+    if (my $lxcconf = $conf->{lxc}) {
+	foreach my $entry (@$lxcconf) {
+	    # only the key matters here; the value is ignored
+	    my ($key, undef) = @$entry;
+	    return 1 if $key eq $keyname;
+	}
+    }
+
+    return 0;
+}
+
sub get_tty_count {
my ($class, $conf) = @_;