use LWP::UserAgent;
+use Proxmox::RS::APT::Repositories;
+
use PVE::pvecfg;
use PVE::Tools qw(extract_param);
use PVE::Cluster;
use PVE::Exception;
use PVE::RESTHandler;
use PVE::RPCEnvironment;
-use PVE::RS::APT::Repositories;
use PVE::API2Tools;
use JSON;
my $pw;
if ($pkgfile->{Origin} eq 'Proxmox' && $pkgfile->{Component} eq 'pve-enterprise') {
- my $info = PVE::INotify::read_file('subscription');
- if ($info->{status} eq 'Active') {
+ my $info = PVE::API2::Subscription::read_etc_subscription();
+ if ($info->{status} eq 'active') {
$username = $info->{key};
$pw = PVE::API2Tools::get_hwaddress();
$ua->credentials("enterprise.proxmox.com:443", 'pve-enterprise-repository', $username, $pw);
code => sub {
my ($param) = @_;
- return PVE::RS::APT::Repositories::repositories();
+ return Proxmox::RS::APT::Repositories::repositories("pve");
}});
__PACKAGE__->register_method({
code => sub {
my ($param) = @_;
- PVE::RS::APT::Repositories::add_repository($param->{handle}, $param->{digest});
+ Proxmox::RS::APT::Repositories::add_repository($param->{handle}, "pve", $param->{digest});
}});
__PACKAGE__->register_method({
my $enabled = $param->{enabled};
$options->{enabled} = int($enabled) if defined($enabled);
- PVE::RS::APT::Repositories::change_repository(
+ Proxmox::RS::APT::Repositories::change_repository(
$param->{path},
int($param->{index}),
$options,
ksmtuned
libpve-apiclient-perl
libpve-network-perl
- proxmox-backup-file-restore
openvswitch-switch
+ proxmox-backup-file-restore
+ proxmox-offline-mirror-helper
pve-zsync
zfsutils-linux
);
$data->{ids}->{$id} = $opts;
- PVE::Jobs::create_job($id, 'vzdump');
+ PVE::Jobs::create_job($id, 'vzdump', $opts);
cfs_write_file('jobs.cfg', $data);
});
die "no such vzdump job\n" if !$job || $job->{type} ne 'vzdump';
}
- my $old_enabled = $job->{enabled} // 1;
-
my $deletable = {
comment => 1,
'repeat-missed' => 1,
delete $job->{$k};
}
- my $need_run_time_update = 0;
- if (defined($param->{schedule}) && $param->{schedule} ne $job->{schedule}) {
- $need_run_time_update = 1;
- }
-
foreach my $k (keys %$param) {
$job->{$k} = $param->{$k};
}
- my $new_enabled = $job->{enabled} // 1;
-
- if ($new_enabled && !$old_enabled) {
- $need_run_time_update = 1;
- }
-
$job->{all} = 1 if (defined($job->{exclude}) && !defined($job->{pool}));
if (defined($param->{vmid})) {
PVE::VZDump::verify_vzdump_parameters($job, 1);
- if ($need_run_time_update) {
- PVE::Jobs::update_last_runtime($id, 'vzdump');
- }
-
if (defined($idx)) {
cfs_write_file('vzdump.cron', $data);
}
cfs_write_file('jobs.cfg', $jobs_data);
+
+ PVE::Jobs::detect_changed_runtime_props($id, 'vzdump', $job);
+
return;
};
cfs_lock_file('vzdump.cron', undef, sub {
foreach my $storeid (keys %$storages) {
my $curr = $storages->{$storeid};
next if $curr->{type} ne 'rbd';
+ $curr->{pool} = 'rbd' if !defined $curr->{pool}; # set default
if (
$pool eq $curr->{pool} ||
(defined $curr->{'data-pool'} && $pool eq $curr->{'data-pool'})
description => "CPU utilization (when type in node,qemu,lxc).",
type => 'number',
optional => 1,
+ minimum => 0,
renderer => 'fraction_as_percentage',
},
maxcpu => {
description => "Number of available CPUs (when type in node,qemu,lxc).",
type => 'number',
optional => 1,
+ minimum => 0,
},
mem => {
description => "Used memory in bytes (when type in node,qemu,lxc).",
- type => 'string',
+ type => 'integer',
optional => 1,
renderer => 'bytes',
+ minimum => 0,
},
maxmem => {
description => "Number of available memory in bytes (when type in node,qemu,lxc).",
},
disk => {
	    description => "Used disk space in bytes (when type in storage), used root image space for VMs (type in qemu,lxc).",
- type => 'string',
+ type => 'integer',
optional => 1,
renderer => 'bytes',
+ minimum => 0,
},
maxdisk => {
description => "Storage size in bytes (when type in storage), root image size for VMs (type in qemu,lxc).",
type => 'integer',
optional => 1,
renderer => 'bytes',
+ minimum => 0,
},
content => {
description => "Allowed storage content types (when type == storage).",
type => 'string',
optional => 1,
},
+ vmid => {
+ description => "The numerical vmid (when type in qemu,lxc).",
+ type => 'integer',
+ optional => 1,
+ minimum => 1,
+ },
},
},
},
# fake entry for local node if no cluster defined
my $pmxcfs = ($clinfo && $clinfo->{version}) ? 1 : 0; # pmxcfs online ?
- my $subinfo = PVE::INotify::read_file('subscription');
+ my $subinfo = PVE::API2::Subscription::read_etc_subscription();
my $sublevel = $subinfo->{level} || '';
return [{
use base qw(PVE::RESTHandler);
+my $verify_command_item_desc = {
+ description => "An array of objects describing endpoints, methods and arguments.",
+ type => "array",
+ items => {
+ type => "object",
+ properties => {
+ path => {
+ description => "A relative path to an API endpoint on this node.",
+ type => "string",
+ optional => 0,
+ },
+ method => {
+ description => "A method related to the API endpoint (GET, POST etc.).",
+ type => "string",
+ pattern => "(GET|POST|PUT|DELETE)",
+ optional => 0,
+ },
+ args => {
+ description => "A set of parameter names and their values.",
+ type => "object",
+ optional => 1,
+ },
+ },
+ }
+};
+
+PVE::JSONSchema::register_format('pve-command-batch', \&verify_command_batch);
+sub verify_command_batch {
+ my ($value, $noerr) = @_;
+ my $commands = eval { decode_json($value); };
+
+ return if $noerr && $@;
+ die "commands param did not contain valid JSON: $@" if $@;
+
+ eval { PVE::JSONSchema::validate($commands, $verify_command_item_desc) };
+
+ return $commands if !$@;
+
+ return if $noerr;
+ die "commands is not a valid array of commands: $@";
+}
+
__PACKAGE__->register_method ({
subclass => "PVE::API2::Qemu",
path => 'qemu',
commands => {
description => "JSON encoded array of commands.",
type => "string",
+ verbose_description => "JSON encoded array of commands, where each command is an object with the following properties:\n"
+ . PVE::RESTHandler::dump_properties($verify_command_item_desc->{items}->{properties}, 'full'),
+ format => "pve-command-batch",
}
},
},
returns => {
type => 'array',
- properties => {
-
+ items => {
+ type => "object",
+ properties => {},
},
},
code => sub {
my $rpcenv = PVE::RPCEnvironment::get();
my $user = $rpcenv->get_user();
-
+ # just parse the json again, it should already be validated
my $commands = eval { decode_json($param->{commands}); };
- die "commands param did not contain valid JSON: $@" if $@;
- die "commands is not an array" if ref($commands) ne "ARRAY";
-
foreach my $cmd (@$commands) {
eval {
- die "$cmd is not a valid command" if (ref($cmd) ne "HASH" || !$cmd->{path} || !$cmd->{method});
-
$cmd->{args} //= {};
my $path = "nodes/$param->{node}/$cmd->{path}";
system ("(sleep 2;/sbin/poweroff)&");
}
- return undef;
+ return;
}});
__PACKAGE__->register_method({
PVE::INotify::update_file('resolvconf', $param);
- return undef;
+ return;
}});
__PACKAGE__->register_method({
PVE::INotify::write_file('timezone', $param->{timezone});
- return undef;
+ return;
}});
__PACKAGE__->register_method({
my $vmlist = PVE::Cluster::get_vmlist();
- my $vms_allowed = {};
+ my $vms_allowed;
if (defined($vmfilter)) {
- foreach my $vmid (PVE::Tools::split_list($vmfilter)) {
- $vms_allowed->{$vmid} = 1;
- }
+ $vms_allowed = { map { $_ => 1 } PVE::Tools::split_list($vmfilter) };
}
my $res = {};
foreach my $vmid (keys %{$vmlist->{ids}}) {
- next if %$vms_allowed && !$vms_allowed->{$vmid};
+ next if defined($vms_allowed) && !$vms_allowed->{$vmid};
my $d = $vmlist->{ids}->{$vmid};
next if $nodename && $d->{node} ne $nodename;
} elsif ($d->{type} eq 'qemu') {
$class = 'PVE::QemuConfig';
} else {
- die "unknown VM type '$d->{type}'\n";
+ die "unknown virtual guest type '$d->{type}'\n";
}
my $conf = $class->load_config($vmid);
# do not skip HA vms on force or if a specific VMID set is wanted
my $include_ha_managed = defined($vmfilter) ? 1 : 0;
- my $vmlist = &$get_filtered_vmlist($nodename, $vmfilter, undef, $include_ha_managed);
+ my $vmlist = $get_filtered_vmlist->($nodename, $vmfilter, undef, $include_ha_managed);
my $resList = {};
foreach my $vmid (keys %$vmlist) {
my $conf = $vmlist->{$vmid}->{conf};
-
next if $autostart && !$conf->{onboot};
- my $startup = {};
- if ($conf->{startup}) {
- $startup = PVE::JSONSchema::pve_parse_startup_order($conf->{startup});
- }
-
- $startup->{order} = LONG_MAX if !defined($startup->{order});
+ my $startup = $conf->{startup} ? PVE::JSONSchema::pve_parse_startup_order($conf->{startup}) : {};
+ my $order = $startup->{order} = $startup->{order} // LONG_MAX;
- $resList->{$startup->{order}}->{$vmid} = $startup;
- $resList->{$startup->{order}}->{$vmid}->{type} = $vmlist->{$vmid}->{type};
+ $resList->{$order}->{$vmid} = $startup;
+ $resList->{$order}->{$vmid}->{type} = $vmlist->{$vmid}->{type};
}
return $resList;
my $create_stop_worker = sub {
my ($nodename, $type, $vmid, $down_timeout) = @_;
- my $upid;
if ($type eq 'lxc') {
return if !PVE::LXC::check_running($vmid);
- my $timeout = defined($down_timeout) ? int($down_timeout) : 60;
+ my $timeout = int($down_timeout // 60);
print STDERR "Stopping CT $vmid (timeout = $timeout seconds)\n";
- $upid = PVE::API2::LXC::Status->vm_shutdown({node => $nodename, vmid => $vmid,
- timeout => $timeout, forceStop => 1 });
+ return PVE::API2::LXC::Status->vm_shutdown(
+ { node => $nodename, vmid => $vmid, timeout => $timeout, forceStop => 1 }
+ );
} elsif ($type eq 'qemu') {
return if !PVE::QemuServer::check_running($vmid, 1);
- my $timeout = defined($down_timeout) ? int($down_timeout) : 60*3;
+ my $timeout = int($down_timeout // 3 * 60);
print STDERR "Stopping VM $vmid (timeout = $timeout seconds)\n";
- $upid = PVE::API2::Qemu->vm_shutdown({node => $nodename, vmid => $vmid,
- timeout => $timeout, forceStop => 1 });
+ return PVE::API2::Qemu->vm_shutdown(
+ { node => $nodename, vmid => $vmid, timeout => $timeout, forceStop => 1 }
+ );
} else {
die "unknown VM type '$type'\n";
}
-
- return $upid;
};
__PACKAGE__->register_method ({
foreach my $vmid (sort {$b <=> $a} keys %$vmlist) {
my $d = $vmlist->{$vmid};
- my $upid;
- eval { $upid = &$create_stop_worker($nodename, $d->{type}, $vmid, $d->{down}); };
+ my $upid = eval { $create_stop_worker->($nodename, $d->{type}, $vmid, $d->{down}) };
warn $@ if $@;
next if !$upid;
code => sub {
my ($param) = @_;
- PVE::Tools::lock_file('/var/lock/pve-etchosts.lck', undef, sub{
+ PVE::Tools::lock_file('/var/lock/pve-etchosts.lck', undef, sub {
if ($param->{digest}) {
my $hosts = PVE::INotify::read_file('etchosts');
PVE::Tools::assert_if_modified($hosts->{digest}, $param->{digest});
});
die $@ if $@;
- return undef;
+ return;
}});
# bash completion helper
use LWP::UserAgent;
use JSON;
+use Proxmox::RS::Subscription;
+
use PVE::Tools;
use PVE::ProcFSTools;
use PVE::Exception qw(raise_param_exc);
use PVE::JSONSchema qw(get_standard_option);
use PVE::SafeSyslog;
-use PVE::Subscription;
use PVE::API2Tools;
use PVE::RESTHandler;
use base qw(PVE::RESTHandler);
-PVE::INotify::register_file('subscription', "/etc/subscription",
- \&read_etc_pve_subscription,
- \&write_etc_pve_subscription);
-
my $subscription_pattern = 'pve([1248])([cbsp])-[0-9a-f]{10}';
+my $filename = "/etc/subscription";
sub get_sockets {
my $info = PVE::ProcFSTools::read_cpuinfo();
return ($sockets, $level);
}
-sub read_etc_pve_subscription {
- my ($filename, $fh) = @_;
-
+sub read_etc_subscription {
my $req_sockets = get_sockets();
my $server_id = PVE::API2Tools::get_hwaddress();
- my $info = PVE::Subscription::read_subscription($server_id, $filename, $fh);
+ my $info = Proxmox::RS::Subscription::read_subscription($filename);
- return $info if $info->{status} ne 'Active';
+ return $info if !$info || $info->{status} ne 'active';
my ($sockets, $level);
eval { ($sockets, $level) = check_key($info->{key}, $req_sockets); };
if (my $err = $@) {
chomp $err;
- $info->{status} = 'Invalid';
+ $info->{status} = 'invalid';
$info->{message} = $err;
} else {
$info->{level} = $level;
return $info;
}
-sub write_etc_pve_subscription {
- my ($filename, $fh, $info) = @_;
+sub write_etc_subscription {
+ my ($info) = @_;
my $server_id = PVE::API2Tools::get_hwaddress();
- PVE::Subscription::write_subscription($server_id, $filename, $fh, $info);
+ mkdir "/etc/apt/auth.conf.d";
+ Proxmox::RS::Subscription::write_subscription($filename, "/etc/apt/auth.conf.d/pve.conf", "enterprise.proxmox.com/debian/pve", $info);
}
__PACKAGE__->register_method ({
my $server_id = PVE::API2Tools::get_hwaddress();
my $url = "https://www.proxmox.com/proxmox-ve/pricing";
- my $info = PVE::INotify::read_file('subscription');
+ my $info = read_etc_subscription();
if (!$info) {
my $no_subscription_info = {
- status => "NotFound",
+ status => "notfound",
message => "There is no subscription key",
url => $url,
};
code => sub {
my ($param) = @_;
- my $info = PVE::INotify::read_file('subscription');
+ my $info = read_etc_subscription();
return undef if !$info;
my $server_id = PVE::API2Tools::get_hwaddress();
my $key = $info->{key};
- if ($key) {
- PVE::Subscription::update_apt_auth($key, $server_id);
- }
+ die "Updating offline key not possible - please remove and re-add subscription key to switch to online key.\n"
+ if $info->{signature};
- if (!$param->{force} && $info->{status} eq 'Active') {
- my $age = time() - $info->{checktime};
- return undef if $age < $PVE::Subscription::localkeydays*60*60*24;
- }
+ # key has been recently checked
+ return undef
+ if !$param->{force}
+ && $info->{status} eq 'active'
+ && Proxmox::RS::Subscription::check_age($info, 1)->{status} eq 'active';
my $req_sockets = get_sockets();
check_key($key, $req_sockets);
my $dccfg = PVE::Cluster::cfs_read_file('datacenter.cfg');
my $proxy = $dccfg->{http_proxy};
- $info = PVE::Subscription::check_subscription($key, $server_id, $proxy);
+ $info = Proxmox::RS::Subscription::check_subscription($key, $server_id, "", "Proxmox VE", $proxy);
- PVE::INotify::write_file('subscription', $info);
+ write_etc_subscription($info);
return undef;
}});
check_key($key, $req_sockets);
- PVE::INotify::write_file('subscription', $info);
+ write_etc_subscription($info);
my $dccfg = PVE::Cluster::cfs_read_file('datacenter.cfg');
my $proxy = $dccfg->{http_proxy};
- $info = PVE::Subscription::check_subscription($key, $server_id, $proxy);
+ $info = Proxmox::RS::Subscription::check_subscription($key, $server_id, "", "Proxmox VE", $proxy);
- PVE::INotify::write_file('subscription', $info);
+ write_etc_subscription($info);
return undef;
}});
method => 'POST',
description => "Create backup.",
permissions => {
- description => "The user needs 'VM.Backup' permissions on any VM, and 'Datastore.AllocateSpace'"
- ." on the backup storage. The 'maxfiles', 'prune-backups', 'tmpdir', 'dumpdir', 'script',"
- ." 'bwlimit' and 'ionice' parameters are restricted to the 'root\@pam' user.",
+ description => "The user needs 'VM.Backup' permissions on any VM, and "
+ ."'Datastore.AllocateSpace' on the backup storage. The 'maxfiles', 'prune-backups', "
+ ."'tmpdir', 'dumpdir', 'script', 'bwlimit', 'performance' and 'ionice' parameters are "
+ ."restricted to the 'root\@pam' user.",
user => 'all',
},
protected => 1,
if $param->{stdout};
}
- foreach my $key (qw(maxfiles prune-backups tmpdir dumpdir script bwlimit ionice)) {
+ for my $key (qw(maxfiles prune-backups tmpdir dumpdir script bwlimit performance ionice)) {
raise_param_exc({ $key => "Only root may set this option."})
if defined($param->{$key}) && ($user ne 'root@pam');
}
my @ceph_packages = qw(
ceph
ceph-common
- ceph-volume
ceph-mds
ceph-fuse
gdisk
nvme-cli
);
+    # got split out with quincy and is required by PVE tooling; conditionally exclude it for older releases
+ # FIXME: remove condition with PVE 8.0, i.e., once we only support quincy+ new installations
+ if ($cephver ne 'octopus' and $cephver ne 'pacific') {
+ push @ceph_packages, 'ceph-volume';
+ }
+
print "start installation\n";
    # this flag helps to determine when apt is actually done installing (vs. partial extracting)
my $images = $plugin->list_images($storeid, $scfg, $vmid, undef, $cache);
push @$volids, map { $_->{volid} } @$images;
}
- my ($last_snapshots, $cleaned_replicated_volumes) = PVE::Replication::prepare($storecfg, $volids, $jobid, $last_sync, $parent_snapname, $logfunc);
- foreach my $volid (keys %$cleaned_replicated_volumes) {
- if (!$wanted_volids->{$volid}) {
+ my ($local_snapshots, $cleaned_replicated_volumes) = PVE::Replication::prepare($storecfg, $volids, $jobid, $last_sync, $parent_snapname, $logfunc);
+ for my $volid ($volids->@*) {
+ next if $wanted_volids->{$volid};
+
+ my $stale = $cleaned_replicated_volumes->{$volid};
+ # prepare() will not remove the last_sync snapshot, but if the volume was used by the
+ # job and is not wanted anymore, it is stale too. And not removing it now might cause
+ # it to be missed later, because the relevant storage might not get scanned anymore.
+ $stale ||= grep {
+ PVE::Replication::is_replication_snapshot($_, $jobid)
+ } keys %{$local_snapshots->{$volid} // {}};
+
+ if ($stale) {
$logfunc->("$jobid: delete stale volume '$volid'");
PVE::Storage::vdisk_free($storecfg, $volid);
- delete $last_snapshots->{$volid};
+ delete $local_snapshots->{$volid};
}
}
- print to_json($last_snapshots) . "\n";
+ print to_json($local_snapshots) . "\n";
return undef;
}});
print STDERR "$msg\n";
};
- my $last_snapshots = PVE::Replication::prepare(
- $storecfg, $volids, $jobid, $last_sync, undef, $logfunc);
+ PVE::Replication::prepare($storecfg, $volids, $jobid, $last_sync, undef, $logfunc);
return undef;
}});
use strict;
use warnings;
+use MIME::Base64;
+use JSON qw(decode_json);
+
use PVE::Tools;
use PVE::SafeSyslog;
use PVE::INotify;
PVE::RPCEnvironment->setup_default_cli_env();
}
+__PACKAGE__->register_method({
+ name => 'set_offline_key',
+ path => 'set_offline_key',
+ method => 'POST',
+ description => "(Internal use only!) Set a signed subscription info blob as offline key",
+ parameters => {
+ additionalProperties => 0,
+ properties => {
+ data => {
+ type => "string",
+ },
+ },
+ },
+ returns => { type => 'null' },
+ code => sub {
+ my ($param) = @_;
+
+ my $info = decode_json(decode_base64($param->{data}));
+
+ $info = Proxmox::RS::Subscription::check_signature($info);
+ $info = Proxmox::RS::Subscription::check_server_id($info);
+ $info = Proxmox::RS::Subscription::check_age($info, 0);
+
+ PVE::API2::Subscription::check_key($info->{key}, PVE::API2::Subscription::get_sockets());
+
+ PVE::API2::Subscription::write_etc_subscription($info);
+}});
+
our $cmddef = {
update => [ 'PVE::API2::Subscription', 'update', undef, { node => $nodename } ],
get => [ 'PVE::API2::Subscription', 'get', undef, { node => $nodename },
}
}],
set => [ 'PVE::API2::Subscription', 'set', ['key'], { node => $nodename } ],
+ "set-offline-key" => [ __PACKAGE__, 'set_offline_key', ['data'] ],
delete => [ 'PVE::API2::Subscription', 'delete', undef, { node => $nodename } ],
};
time => 0,
};
+my $saved_config_props = [qw(enabled schedule)];
+
+# saves some properties of the jobcfg into the jobstate so we can track
+# them on different nodes (where the update was not done)
+# and update the last runtime when they change
+sub detect_changed_runtime_props {
+ my ($jobid, $type, $cfg) = @_;
+
+ lock_job_state($jobid, $type, sub {
+ my $old_state = read_job_state($jobid, $type) // $default_state;
+
+ my $updated = 0;
+ for my $prop (@$saved_config_props) {
+ my $old_prop = $old_state->{config}->{$prop} // '';
+ my $new_prop = $cfg->{$prop} // '';
+ next if "$old_prop" eq "$new_prop";
+
+ if (defined($cfg->{$prop})) {
+ $old_state->{config}->{$prop} = $cfg->{$prop};
+ } else {
+ delete $old_state->{config}->{$prop};
+ }
+
+ $updated = 1;
+ }
+
+ return if !$updated;
+ $old_state->{updated} = time();
+
+ my $path = $get_state_file->($jobid, $type);
+ PVE::Tools::file_set_contents($path, encode_json($old_state));
+ });
+}
+
# lockless, since we use file_get_contents, which is atomic
sub read_job_state {
my ($jobid, $type) = @_;
state => 'stopped',
msg => $get_job_task_status->($state) // 'internal error',
upid => $state->{upid},
+ config => $state->{config},
};
if ($state->{updated}) { # save updated time stamp
# must be called when the job is first created
sub create_job {
- my ($jobid, $type) = @_;
+ my ($jobid, $type, $cfg) = @_;
lock_job_state($jobid, $type, sub {
my $state = read_job_state($jobid, $type) // $default_state;
}
$state->{time} = time();
+ for my $prop (@$saved_config_props) {
+ if (defined($cfg->{$prop})) {
+ $state->{config}->{$prop} = $cfg->{$prop};
+ }
+ }
my $path = $get_state_file->($jobid, $type);
PVE::Tools::file_set_contents($path, encode_json($state));
my $new_state = {
state => 'starting',
time => time(),
+ config => $state->{config},
};
my $path = $get_state_file->($jobid, $type);
upid => $upid,
};
}
+ $new_state->{config} = $state->{config};
my $path = $get_state_file->($jobid, $type);
PVE::Tools::file_set_contents($path, encode_json($new_state));
my $plugin = PVE::Jobs::Plugin->lookup($type);
if (starting_job($id, $type)) {
- my $upid = eval { $plugin->run($cfg) };
+ my $upid = eval { $plugin->run($cfg, $id, $schedule) };
if (my $err = $@) {
warn $@ if $@;
started_job($id, $type, undef, $err);
for my $id (keys $data->{ids}->%*) {
my $job = $data->{ids}->{$id};
my $type = $job->{type};
- my $jobstate = read_job_state($id, $type);
- create_job($id, $type) if !defined($jobstate);
+
+ my $path = $get_state_file->($id, $type);
+ if (-e $path) {
+ detect_changed_runtime_props($id, $type, $job);
+ } else {
+ create_job($id, $type, $job);
+ }
}
PVE::Tools::dir_glob_foreach($state_dir, '(.*?)-(.*).json', sub {
propertyList => {
type => { description => "Section type." },
id => {
- description => "The ID of the VZDump job.",
+ description => "The ID of the job.",
type => 'string',
format => 'pve-configid',
maxLength => 64,
sub decode_value {
my ($class, $type, $key, $value) = @_;
- if ($key eq 'prune-backups' && !ref($value)) {
- $value = PVE::JSONSchema::parse_property_string(
- 'prune-backups',
- $value,
- );
+ if ((my $format = $PVE::VZDump::Common::PROPERTY_STRINGS->{$key}) && !ref($value)) {
+ $value = PVE::JSONSchema::parse_property_string($format, $value);
}
return $value;
sub encode_value {
my ($class, $type, $key, $value) = @_;
- if ($key eq 'prune-backups' && ref($value) eq 'HASH') {
- $value = PVE::JSONSchema::print_property_string(
- $value,
- 'prune-backups',
- );
+ if ((my $format = $PVE::VZDump::Common::PROPERTY_STRINGS->{$key}) && ref($value) eq 'HASH') {
+ $value = PVE::JSONSchema::print_property_string($value, $format);
}
return $value;
delete $conf->{$opt} if !defined($props->{$opt});
}
- my $retention = $conf->{'prune-backups'};
- if ($retention && ref($retention) eq 'HASH') { # fixup, its required as string parameter
- $conf->{'prune-backups'} = PVE::JSONSchema::print_property_string($retention, 'prune-backups');
+ # Required as string parameters
+ for my $key (keys $PVE::VZDump::Common::PROPERTY_STRINGS->%*) {
+ if ($conf->{$key} && ref($conf->{$key}) eq 'HASH') {
+ my $format = $PVE::VZDump::Common::PROPERTY_STRINGS->{$key};
+ $conf->{$key} = PVE::JSONSchema::print_property_string($conf->{$key}, $format);
+ }
}
$conf->{quiet} = 1; # do not write to stdout/stderr
'cat /etc/fstab',
'findmnt --ascii',
'df --human -T',
+ 'proxmox-boot-tool status',
],
},
'virtual guests' => {
],
},
network => {
- order => 40,
+ order => 45,
cmds => [
'ip -details -statistics address',
'ip -details -4 route show',
'zpool status',
'zpool list -v',
'zfs list',
+ 'arcstat',
;
}
update_supported_cpuflags();
- my $subinfo = PVE::INotify::read_file('subscription');
+ my $subinfo = PVE::API2::Subscription::read_etc_subscription();
my $sublevel = $subinfo->{level} || '';
my $netdev = PVE::ProcFSTools::read_proc_net_dev();
return $notes_template;
};
+my sub parse_performance {
+ my ($param) = @_;
+
+ if (defined(my $perf = $param->{performance})) {
+ return if ref($perf) eq 'HASH'; # already parsed
+ $param->{performance} = PVE::JSONSchema::parse_property_string('backup-performance', $perf);
+ }
+}
+
my $parse_prune_backups_maxfiles = sub {
my ($param, $kind) = @_;
} keys %$confdesc_for_defaults
};
$parse_prune_backups_maxfiles->($defaults, "defaults in VZDump schema");
+ parse_performance($defaults);
my $raw;
eval { $raw = PVE::Tools::file_get_contents($fn); };
$res->{mailto} = [ @mailto ];
}
$parse_prune_backups_maxfiles->($res, "options in '$fn'");
+ parse_performance($res);
foreach my $key (keys %$defaults) {
$res->{$key} = $defaults->{$key} if !defined($res->{$key});
if defined($param->{'prune-backups'}) && defined($param->{maxfiles});
$parse_prune_backups_maxfiles->($param, 'CLI parameters');
+ parse_performance($param);
if (my $template = $param->{'notes-template'}) {
eval { $verify_notes_template->($template); };
Description: News displayed on the admin interface
<b>Welcome</b> to the Proxmox Virtual Environment!<br><br>
For more information please visit our homepage at
- <a href='http://www.proxmox.com' target='_blank'>www.proxmox.com</a>
+ <a href='https://www.proxmox.com' target='_blank'>www.proxmox.com</a>
Package: almalinux-8-default
Version: 20210928
Infopage: https://linuxcontainers.org
Description: LXC default image for almalinux 8 (20210928)
-Package: alpine-3.13-default
-Version: 20210419
+Package: almalinux-9-default
+Version: 20221108
Type: lxc
-OS: alpine
+OS: almalinux
Section: system
Maintainer: Proxmox Support Team <support@proxmox.com>
Architecture: amd64
-Location: system/alpine-3.13-default_20210419_amd64.tar.xz
-md5sum: 0292afc6483109c6e4770c9cbecb649e
-sha512sum: b799553c595e30f82ca7820794914d881cbe7372bdd6e2c9cdd9d90d41698dfd5cbb3a8fec2a30393fbd28665bb42e889e53553ff54eda410d0e852b606110b5
+Location: system/almalinux-9-default_20221108_amd64.tar.xz
+md5sum: 03e6d335c14b96501bc39e7852da9772
+sha512sum: 9b4561fad0de45943c0c46d9a075796533b0941c442cb70a5f9a323a601aba41f2e5e86d5e1600e5538b1470cf29f0cfd9f7f7c54ca9529cb9cbe234f2c6f440
Infopage: https://linuxcontainers.org
-Description: LXC default image for alpine 3.13 (20210419)
+Description: LXC default image for almalinux 9 (20221108)
Package: alpine-3.14-default
Version: 20210623
Infopage: https://linuxcontainers.org
Description: LXC default image for centos 8-stream (20220327)
+Package: centos-9-stream-default
+Version: 20221109
+Type: lxc
+OS: centos
+Section: system
+Maintainer: Proxmox Support Team <support@proxmox.com>
+Architecture: amd64
+Location: system/centos-9-stream-default_20221109_amd64.tar.xz
+md5sum: 13fccdcc2358b795ee613501eb88c850
+sha512sum: 04bb902992f74edf2333d215837e9bb21258dfcdb7bf23bd659176641f6538aeb25bc44286c9caffb10ceb87288ce93668c9410f4a69b8a3b316e09032ead3a8
+Infopage: https://linuxcontainers.org
+Description: LXC default image for centos 9-stream (20221109)
+
Package: debian-10-standard
Version: 10.7-1
Type: lxc
Infopage: https://linuxcontainers.org
Description: LXC default image for opensuse 15.3 (20210925)
+Package: opensuse-15.4-default
+Version: 20221109
+Type: lxc
+OS: opensuse
+Section: system
+Maintainer: Proxmox Support Team <support@proxmox.com>
+Architecture: amd64
+Location: system/opensuse-15.4-default_20221109_amd64.tar.xz
+md5sum: 1c66c3549b0684e788c17aa94c384262
+sha512sum: 8089309652a0db23ddff826d1e343e79c6eccb7b615fb309e0a6f6f1983ea697aa94044a795f3cbe35156b1a1b2f60489eb20ecb54c786cec23c9fd89e0f29c5
+Infopage: https://linuxcontainers.org
+Description: LXC default image for opensuse 15.4 (20221109)
+
Package: proxmox-mailgateway-6.4-standard
Version: 6.4-1
Type: lxc
Infopage: https://linuxcontainers.org
Description: LXC default image for rockylinux 8 (20210929)
+Package: rockylinux-9-default
+Version: 20221109
+Type: lxc
+OS: rockylinux
+Section: system
+Maintainer: Proxmox Support Team <support@proxmox.com>
+Architecture: amd64
+Location: system/rockylinux-9-default_20221109_amd64.tar.xz
+md5sum: e6aa40bb6a4e01c61fd27eb3da5446d1
+sha512sum: ddc2a29ee66598d4c3a4224a0fa9868882e80bbabb7a20ae9f53431bb0ff73e73d4bd48b86bb0e9d1330e0af2c500f461ea5dc3c500ef722b472257acdc4ab41
+Infopage: https://linuxcontainers.org
+Description: LXC default image for rockylinux 9 (20221109)
+
Package: ubuntu-18.04-standard
Version: 18.04.1-1
Type: lxc
Description: Ubuntu Focal (standard)
A small Ubuntu 20.04 Focal Fossa system including all standard packages.
-Package: ubuntu-21.10-standard
-Version: 21.10-1
-Type: lxc
-OS: ubuntu-21.10
-Section: system
-Maintainer: Proxmox Support Team <support@proxmox.com>
-Architecture: amd64
-Location: system/ubuntu-21.10-standard_21.10-1_amd64.tar.zst
-md5sum: ef2f94efb1839fede31bf389623aa723
-sha512sum: 69d07a1d31a5a56a9a06677f7b160c737a68fb21e6324449556f8bf9d72abd423bdc2fb2658974c7791d6152211aa98b4b3d834f4fdb43f2c7859caaba702656
-Infopage: http://pve.proxmox.com/wiki/Ubuntu_Impish_Standard
-Description: Ubuntu 21.10 Impish (standard)
- A small Ubuntu 21.10 Impish Indri system including all standard packages.
-
Package: ubuntu-22.04-standard
Version: 22.04-1
Type: lxc
Infopage: https://pve.proxmox.com/wiki/Linux_Container#pct_supported_distributions
Description: Ubuntu 22.04 Jammy (standard)
A small Ubuntu 22.04 Jammy Jellyfish system including all standard packages.
+
+Package: ubuntu-22.10-standard
+Version: 22.10-1
+Type: lxc
+OS: ubuntu-22.10
+Section: system
+Maintainer: Proxmox Support Team <support@proxmox.com>
+Architecture: amd64
+Location: system/ubuntu-22.10-standard_22.10-1_amd64.tar.zst
+md5sum: 9d9c20132f479905398921638a285584
+sha512sum: be2a5f3e749e8958fa8487f47aa67638d18c29d26c218cea289afc44923eb3efb0ef11572c78752ef3d707db1c5fe123aef7ca3f33e3b10d9feed1a34eb5362a
+Infopage: https://pve.proxmox.com/wiki/Linux_Container#pct_supported_distributions
+Description: Ubuntu 22.10 Kinetic (standard)
+ A small Ubuntu 22.10 Kinetic Kudu system including all standard packages.
use File::Find;
use File::stat;
+use PVE::APLInfo;
use PVE::CertHelpers;
use PVE::Certificate;
-use PVE::NodeConfig;
-use PVE::INotify;
-use PVE::Cluster;
use PVE::Cluster::Setup;
+use PVE::Cluster;
use PVE::DataCenterConfig;
-use PVE::APLInfo;
-use PVE::SafeSyslog;
+use PVE::INotify;
+use PVE::NodeConfig;
use PVE::RPCEnvironment;
+use PVE::SafeSyslog;
use PVE::Tools;
-use PVE::API2::Subscription;
-use PVE::API2::APT;
+
use PVE::API2::ACME;
+use PVE::API2::APT;
+use PVE::API2::Subscription;
initlog ('pveupdate', 'daemon');
syslog ('err', "update appliance info failed - see /var/log/pveam.log for details");
}
-my $info = PVE::INotify::read_file('subscription');
-# We assume that users with subscriptions want informations
-# about new packages.
-my $notify = ($info && $info->{status} eq 'Active') ? 1 : 0;
+my $info = eval { PVE::API2::Subscription::read_etc_subscription() };
+# Assume that users with subscriptions want information about new packages.
+my $notify = ($info && $info->{status} eq 'active') ? 1 : 0;
eval { PVE::API2::APT->update_database({ node => $nodename, notify => $notify, quiet => 1 }); };
if (my $err = $@) {
syslog ('err', "update apt database failed: $err");
#storage: STORAGE_ID
#mode: snapshot|suspend|stop
#bwlimit: KBPS
+#performance: max-workers=N
#ionice: PRI
#lockwait: MINUTES
#stopwait: MINUTES
+pve-manager (7.2-11) bullseye; urgency=medium
+
+ * d/control: recommend proxmox-offline-mirror-helper
+
+ * report: add proxmox-boot-tool status output
+
+ * report: stabilize order of guests and network
+
+ * postinst: migrate/update APT auth config
+
+ -- Proxmox Support Team <support@proxmox.com> Wed, 14 Sep 2022 13:15:22 +0200
+
+pve-manager (7.2-10) bullseye; urgency=medium
+
+ * subscription: remove leftover key reference
+
+ -- Proxmox Support Team <support@proxmox.com> Mon, 12 Sep 2022 09:15:56 +0200
+
+pve-manager (7.2-9) bullseye; urgency=medium
+
+ * subscription: handle missing subscription info
+
+ * api: nodes: code/style rework start/stop list generation
+
+ * ui: CephInstallWizard: make first monitor node static and always use
+ localhost to avoid using one where ceph isn't yet installed
+
+ -- Proxmox Support Team <support@proxmox.com> Thu, 08 Sep 2022 14:56:33 +0200
+
+pve-manager (7.2-8) bullseye; urgency=medium
+
+ * ui: restore: improve warning for restoring container with same ID
+
+ * ui: ct restore: put mountpoint-erasure note on new line
+
+ * api: apt: switch to common Proxmox::RS::APT::Repositories package
+
+ * ceph-after-pve-cluster: enable for ceph-volume, disable for ceph-disk
+
+ * ui: datacenter: rework webauthn settings UX
+
+ * pvesr: remove stale replicated volumes immediately
+
+ * api2: add return type to nodes/{node}/execute endpoint
+
+ * api2: use JSONSchema to validate commands for "nodes/{node}/execute"
+
+ * Jobs: fix scheduling after updating job from a different node
+
+ * ui: improve form/MDevSelector
+
+ * subscription: switch to Proxmox::RS::Subscription
+
+ * pvesubscription: add 'set-offline-key' command
+
+ * www: subscription: add Signed/Offline status
+
+ -- Proxmox Support Team <support@proxmox.com> Tue, 6 Sep 2022 10:38:26 +0200
+
+pve-manager (7.2-7) bullseye; urgency=medium
+
+ * pveceph: fix regression with new split-out ceph-volume package and older
+ ceph releases (pacific, octopus)
+
+ -- Proxmox Support Team <support@proxmox.com> Tue, 05 Jul 2022 08:52:46 +0200
+
pve-manager (7.2-6) bullseye; urgency=medium
* update shipped aplinfo
libhttp-daemon-perl,
libpod-parser-perl,
libproxmox-acme-perl,
- libproxmox-rs-perl,
+ libproxmox-rs-perl (>= 0.2.0),
libpve-access-control (>= 7.0-2),
libpve-cluster-api-perl,
libpve-cluster-perl (>= 6.1-6),
libpve-common-perl (>= 7.1-4),
- libpve-guest-common-perl (>= 4.1-2),
+ libpve-guest-common-perl (>= 4.1-4),
libpve-http-server-perl (>= 2.0-12),
- libpve-rs-perl (>= 0.2.2),
+ libpve-rs-perl (>= 0.7.1),
libpve-storage-perl (>= 6.3-2),
libtemplate-perl,
libtest-mockmodule-perl,
libnet-dns-perl,
libproxmox-acme-perl,
libproxmox-acme-plugins,
- libproxmox-rs-perl,
+ libproxmox-rs-perl (>= 0.2.0),
libpve-access-control (>= 7.0-7),
libpve-cluster-api-perl (>= 7.0-5),
libpve-cluster-perl (>= 6.1-6),
libpve-common-perl (>= 7.1-4),
libpve-guest-common-perl (>= 4.0-2),
libpve-http-server-perl (>= 4.1-1),
- libpve-rs-perl (>= 0.5.0),
+ libpve-rs-perl (>= 0.7.1),
libpve-storage-perl (>= 7.2-3),
librados2-perl,
libtemplate-perl,
${misc:Depends},
${perl:Depends},
${shlibs:Depends},
+Recommends: proxmox-offline-mirror-helper
Suggests: libpve-network-perl (>= 0.5-1)
Conflicts: vlan,
vzdump,
#!/bin/sh
-# Abort if any command returns an error value
+# Abort if any command returns an error value
set -e
-# This script is called as the last step of the installation of the
-# package. All the package's files are in place, dpkg has already
-# done its automatic conffile handling, and all the packages we depend
-# of are already fully installed and configured.
+# This script is called as the last step of the installation of the package.
+# All the package's files are in place, dpkg has already done its automatic
+# conffile handling, and all the packages we depend of are already fully
+# installed and configured.
set_lvm_conf() {
LVM_CONF_MARKER="# added by pve-manager to avoid scanning"
- # only do these changes once
# keep user changes afterwards provided marker is still there..
- if ! grep -qLF "$LVM_CONF_MARKER" /etc/lvm/lvm.conf; then
- OLD_VALUE="$(lvmconfig --typeconfig full devices/global_filter)"
- NEW_VALUE='global_filter=["r|/dev/zd.*|"]'
-
- export LVM_SUPPRESS_FD_WARNINGS=1
-
- # check global_filter
- # keep previous setting from our custom packaging if it is still there
- if echo "$OLD_VALUE" | grep -qvF 'r|/dev/zd.*|'; then
- SET_FILTER=1
- BACKUP=1
- fi
- # should be the default since bullseye
- if lvmconfig --typeconfig full devices/scan_lvs | grep -qv 'scan_lvs=0'; then
- SET_SCAN_LVS=1
- BACKUP=1
- fi
- if test -n "$BACKUP"; then
- echo "Backing up lvm.conf before setting pve-manager specific settings.."
- cp -vb /etc/lvm/lvm.conf /etc/lvm/lvm.conf.bak
- fi
- if test -n "$SET_FILTER"; then
- echo "Setting 'global_filter' in /etc/lvm/lvm.conf to prevent zvols from being scanned:"
- echo "$OLD_VALUE => $NEW_VALUE"
- # comment out existing setting
- sed -i -e 's/^\([[:space:]]*global_filter[[:space:]]*=\)/#\1/' /etc/lvm/lvm.conf
- # add new section with our setting
- cat >> /etc/lvm/lvm.conf <<EOF
+ if grep -qLF "$LVM_CONF_MARKER" /etc/lvm/lvm.conf; then
+ return 0 # only do these changes once
+ fi
+
+ OLD_VALUE="$(lvmconfig --typeconfig full devices/global_filter)"
+ NEW_VALUE='global_filter=["r|/dev/zd.*|"]'
+
+ export LVM_SUPPRESS_FD_WARNINGS=1
+
+ # check global_filter
+ # keep previous setting from our custom packaging if it is still there
+ if echo "$OLD_VALUE" | grep -qvF 'r|/dev/zd.*|'; then
+ SET_FILTER=1
+ BACKUP=1
+ fi
+ # should be the default since bullseye
+ if lvmconfig --typeconfig full devices/scan_lvs | grep -qv 'scan_lvs=0'; then
+ SET_SCAN_LVS=1
+ BACKUP=1
+ fi
+ if test -n "$BACKUP"; then
+ echo "Backing up lvm.conf before setting pve-manager specific settings.."
+ cp -vb /etc/lvm/lvm.conf /etc/lvm/lvm.conf.bak
+ fi
+ if test -n "$SET_FILTER"; then
+ echo "Setting 'global_filter' in /etc/lvm/lvm.conf to prevent zvols from being scanned:"
+ echo "$OLD_VALUE => $NEW_VALUE"
+ # comment out existing setting
+ sed -i -e 's/^\([[:space:]]*global_filter[[:space:]]*=\)/#\1/' /etc/lvm/lvm.conf
+ # add new section with our setting
+ cat >> /etc/lvm/lvm.conf <<EOF
devices {
- $LVM_CONF_MARKER ZFS zvols
- $NEW_VALUE
-}
+ $LVM_CONF_MARKER ZFS zvols
+ $NEW_VALUE
+ }
EOF
- fi
- if test -n "$SET_SCAN_LVS"; then
- echo "Adding scan_lvs=0 setting to /etc/lvm/lvm.conf to prevent LVs from being scanned."
- # comment out existing setting
- sed -i -e 's/^\([[:space:]]*scan_lvs[[:space:]]*=\)/#\1/' /etc/lvm/lvm.conf
- # add new section with our setting
- cat >> /etc/lvm/lvm.conf <<EOF
+ fi
+ if test -n "$SET_SCAN_LVS"; then
+ echo "Adding scan_lvs=0 setting to /etc/lvm/lvm.conf to prevent LVs from being scanned."
+ # comment out existing setting
+ sed -i -e 's/^\([[:space:]]*scan_lvs[[:space:]]*=\)/#\1/' /etc/lvm/lvm.conf
+ # add new section with our setting
+ cat >> /etc/lvm/lvm.conf <<EOF
devices {
- $LVM_CONF_MARKER LVM volumes
- scan_lvs=0
-}
+ $LVM_CONF_MARKER LVM volumes
+ scan_lvs=0
+ }
EOF
- fi
+ fi
+}
+
+migrate_apt_auth_conf() {
+ output=""
+ removed=""
+ match=0
+
+ while read -r l; do
+ if echo "$l" | grep -q "^machine enterprise.proxmox.com/debian/pve"; then
+ match=1
+ elif echo "$l" | grep -q "machine"; then
+ match=0
+ fi
+
+ if test "$match" = "1"; then
+ removed="$removed\n$l"
+ else
+ output="$output\n$l"
+ fi
+ done < /etc/apt/auth.conf
+
+ if test -n "$removed"; then
+ if test ! -e /etc/apt/auth.conf.d/pve.conf; then
+ echo "Migrating APT auth config for enterprise.proxmox.com to /etc/apt/auth.conf.d/pve.conf .."
+ echo "$removed" > /etc/apt/auth.conf.d/pve.conf
+ else
+ echo "Removing stale APT auth config from /etc/apt/auth.conf"
+ fi
+ echo "$output" > /etc/apt/auth.conf
fi
}
mkdir /etc/pve 2>/dev/null || true
if test ! -e /var/lib/pve-manager/apl-info/download.proxmox.com; then
- mkdir -p /var/lib/pve-manager/apl-info
- cp /usr/share/doc/pve-manager/aplinfo.dat /var/lib/pve-manager/apl-info/download.proxmox.com
- pveam update || true
+ mkdir -p /var/lib/pve-manager/apl-info
+ cp /usr/share/doc/pve-manager/aplinfo.dat /var/lib/pve-manager/apl-info/download.proxmox.com
+ pveam update || true
fi
if ! test -f /root/.forward || ! grep -q '|/usr/bin/pvemailforward' /root/.forward; then
- echo '|/usr/bin/pvemailforward' >>/root/.forward
+ echo '|/usr/bin/pvemailforward' >>/root/.forward
fi
systemctl --system daemon-reload >/dev/null || true
NO_RESTART_UNITS="pvenetcommit.service pve-guests.service"
for unit in ${UNITS} ${NO_RESTART_UNITS}; do
- deb-systemd-helper unmask "$unit" >/dev/null || true
-
- # was-enabled defaults to true, so new installations run enable.
- if deb-systemd-helper --quiet was-enabled "$unit"; then
- # Enables the unit on first installation, creates new
- # symlinks on upgrades if the unit file has changed.
- deb-systemd-helper enable "$unit" >/dev/null || true
- else
- # Update the statefile to add new symlinks (if any), which need to be
- # cleaned up on purge. Also remove old symlinks.
- deb-systemd-helper update-state "$unit" >/dev/null || true
- fi
+ deb-systemd-helper unmask "$unit" >/dev/null || true
+
+ # was-enabled defaults to true, so new installations run enable.
+ if deb-systemd-helper --quiet was-enabled "$unit"; then
+ # Enables the unit on first installation, creates new
+ # symlinks on upgrades if the unit file has changed.
+ deb-systemd-helper enable "$unit" >/dev/null || true
+ else
+ # Update the statefile to add new symlinks (if any), which need to be
+ # cleaned up on purge. Also remove old symlinks.
+ deb-systemd-helper update-state "$unit" >/dev/null || true
+ fi
done
# FIXME: remove after beta is over and add hunk to actively remove the repo
BETA_SOURCES="/etc/apt/sources.list.d/pvetest-for-beta.list"
if test -f "$BETA_SOURCES" && dpkg --compare-versions "$2" 'lt' '7.0-9~' && dpkg --compare-versions "$2" 'gt' '7.0~'; then
- echo "Removing the during beta added pvetest repository file again"
- rm -v "$BETA_SOURCES" || true
+ echo "Removing the during beta added pvetest repository file again"
+ rm -v "$BETA_SOURCES" || true
fi
# FIXME: remove in PVE 8.0
if test ! -e /proxmox_install_mode && test -n "$2" && dpkg --compare-versions "$2" 'lt' '7.0-6~'; then
- # PVE 4.0 beta to 5.4 ISO had a bug and did not generated a unique machine-id. below is a
- # very relaxed machine-id list from all ISOs (released, tests & internal) possibly affected
- if grep -q \
- -e a0ee88c29b764c46a579dd89c86c2d84 \
- -e ecbf104295bd4f8b90bb82dc2fa5e9e5 \
- -e c8fa51cd0c254ea08b0e37c1e37afbb9 \
- -e 2ec24eda629a4c8d8c1f8dac50a9ee5f \
- -e ef8db290720047159b426bd322839d70 \
- -e bd94244c0da6419a82a383e62dc03b51 \
- -e 45d4e7046c3d4c26af8acd589f358ac6 \
- -e 8c445f96b3064ff79f825ea78a3eefde \
- -e 6f9fae0f0a794fd4b89b3abecfd7f182 \
- -e 6f9fae0f0a794fd4b89b3abecfd7f182 \
- -e 285de85759894b3f9ad9844a89045af6 \
- -e 89971dede7b04c98b2b0bc8845f53320 \
- -e 4e3b6e9550f24d638bc26211a7b37df5 \
- -e bc2f684e31ee4daf95e45c62410a95b1 \
- -e 8cc7bc883fd048b78a4af7433c48e341 \
- -e 9b46d99712854566bb02a656a3ff9191 \
- -e e7fc055af47048ee884dcb88a7474336 \
- -e 13d879f75e6447a69ed85179bd93759a \
- -e 5b59e448c3e74029af2ac91f572d68a7 \
- -e 5a2bd0d11a6c41f9a33fd527751224ea \
- -e 516afc72013c4b9da85b309aad987df2 \
- -e b0ce8d24684845e8ac337c588a7715cb \
- -e e0af064c16e9463e9fa980eac66427c1 \
- -e 6e925d11b497446e8e7f2ff38e7cf891 \
- -e eec280213051474d8bfe7e089a86744a \
- -e 708ded6ee82a46c08b77fecda2284c6c \
- -e 615cb2b78b2240289fef74da610c146f \
- -e b965b329a7e246d5be66a8d367f5760d \
- -e 5472a49c6436426fbebd7881f7b7f13b \
- /etc/machine-id
- then
- echo "found static machine-id bug from Proxmox VE ISO installer <= 5.4, regenerating machine-id"
- systemd-id128 new | tee /etc/machine-id.new /var/lib/dbus/machine-id.new
- # atomically replace
- mv /etc/machine-id.new /etc/machine-id
- mv /var/lib/dbus/machine-id.new /var/lib/dbus/machine-id
- echo "new machine-id generated, a reboot is recommended"
- else
- echo "machine-id check OK"
- fi
+    # PVE 4.0 beta to 5.4 ISO had a bug and did not generate a unique machine-id. Below is a
+ # very relaxed machine-id list from all ISOs (released, tests & internal) possibly affected
+ if grep -q \
+ -e a0ee88c29b764c46a579dd89c86c2d84 \
+ -e ecbf104295bd4f8b90bb82dc2fa5e9e5 \
+ -e c8fa51cd0c254ea08b0e37c1e37afbb9 \
+ -e 2ec24eda629a4c8d8c1f8dac50a9ee5f \
+ -e ef8db290720047159b426bd322839d70 \
+ -e bd94244c0da6419a82a383e62dc03b51 \
+ -e 45d4e7046c3d4c26af8acd589f358ac6 \
+ -e 8c445f96b3064ff79f825ea78a3eefde \
+ -e 6f9fae0f0a794fd4b89b3abecfd7f182 \
+ -e 6f9fae0f0a794fd4b89b3abecfd7f182 \
+ -e 285de85759894b3f9ad9844a89045af6 \
+ -e 89971dede7b04c98b2b0bc8845f53320 \
+ -e 4e3b6e9550f24d638bc26211a7b37df5 \
+ -e bc2f684e31ee4daf95e45c62410a95b1 \
+ -e 8cc7bc883fd048b78a4af7433c48e341 \
+ -e 9b46d99712854566bb02a656a3ff9191 \
+ -e e7fc055af47048ee884dcb88a7474336 \
+ -e 13d879f75e6447a69ed85179bd93759a \
+ -e 5b59e448c3e74029af2ac91f572d68a7 \
+ -e 5a2bd0d11a6c41f9a33fd527751224ea \
+ -e 516afc72013c4b9da85b309aad987df2 \
+ -e b0ce8d24684845e8ac337c588a7715cb \
+ -e e0af064c16e9463e9fa980eac66427c1 \
+ -e 6e925d11b497446e8e7f2ff38e7cf891 \
+ -e eec280213051474d8bfe7e089a86744a \
+ -e 708ded6ee82a46c08b77fecda2284c6c \
+ -e 615cb2b78b2240289fef74da610c146f \
+ -e b965b329a7e246d5be66a8d367f5760d \
+ -e 5472a49c6436426fbebd7881f7b7f13b \
+ /etc/machine-id
+ then
+ echo "found static machine-id bug from Proxmox VE ISO installer <= 5.4, regenerating machine-id"
+ systemd-id128 new | tee /etc/machine-id.new /var/lib/dbus/machine-id.new
+ # atomically replace
+ mv /etc/machine-id.new /etc/machine-id
+ mv /var/lib/dbus/machine-id.new /var/lib/dbus/machine-id
+ echo "new machine-id generated, a reboot is recommended"
+ else
+ echo "machine-id check OK"
+ fi
fi
set_lvm_conf
if test ! -e /proxmox_install_mode; then
- # modeled after code generated by dh_start
- for unit in ${UNITS}; do
- if test -n "$2"; then
- dh_action="reload-or-restart";
- else
- dh_action="start"
- fi
- if systemctl -q is-enabled "$unit"; then
- deb-systemd-invoke $dh_action "$unit"
- fi
- done
+ # modeled after code generated by dh_start
+ for unit in ${UNITS}; do
+ if test -n "$2"; then
+ dh_action="reload-or-restart";
+ else
+ dh_action="start"
+ fi
+ if systemctl -q is-enabled "$unit"; then
+ deb-systemd-invoke $dh_action "$unit"
+ fi
+ done
+ fi
+
+    if test ! -e /proxmox_install_mode && test -n "$2" && dpkg --compare-versions "$2" 'lt' '7.2-11~'; then
+ if test -e /etc/apt/auth.conf ; then
+ migrate_apt_auth_conf
+ fi
fi
;;
install -m 0644 ceph-after-pve-cluster.conf ${SERVICEDIR}/ceph-mgr@.service.d
install -d ${SERVICEDIR}/ceph-osd@.service.d
install -m 0644 ceph-after-pve-cluster.conf ${SERVICEDIR}/ceph-osd@.service.d
- install -d ${SERVICEDIR}/ceph-disk@.service.d
- install -m 0644 ceph-after-pve-cluster.conf ${SERVICEDIR}/ceph-disk@.service.d
+ install -d ${SERVICEDIR}/ceph-volume@.service.d
+ install -m 0644 ceph-after-pve-cluster.conf ${SERVICEDIR}/ceph-volume@.service.d
install -d ${SERVICEDIR}/ceph-mds@.service.d
install -m 0644 ceph-after-pve-cluster.conf ${SERVICEDIR}/ceph-mds@.service.d
install -d ${DESTDIR}/usr/share/doc/${PACKAGE}/examples/
cat OnlineHelpInfo.js ${JSSRC} >$@.tmp
mv $@.tmp $@
-OnlineHelpInfo.js: /usr/bin/asciidoc-pve
+OnlineHelpInfo.js: /usr/bin/asciidoc-pve ${JSSRC}
/usr/bin/asciidoc-pve scan-extjs ${JSSRC} >$@.tmp
mv $@.tmp $@
root: 'PVE.dc.Config',
node: 'PVE.node.Config',
qemu: 'PVE.qemu.Config',
- lxc: 'PVE.lxc.Config',
+ lxc: 'pveLXCConfig',
storage: 'PVE.storage.Browser',
sdn: 'PVE.sdn.Browser',
pool: 'pvePoolConfig',
viewModel: {
data: {
nodename: '',
- cephRelease: 'pacific',
+ cephRelease: 'quincy',
configuration: true,
isInstalled: false,
},
value: gettext('First Ceph monitor') + ':',
},
{
- xtype: 'pveNodeSelector',
+ xtype: 'displayfield',
fieldLabel: gettext('Monitor node'),
- name: 'mon-node',
- selectCurNode: true,
- allowBlank: false,
+ cbind: {
+ value: '{nodename}',
+ },
},
{
xtype: 'displayfield',
var wizard = me.up('window');
var kv = wizard.getValues();
delete kv.delete;
- var monNode = kv['mon-node'];
- delete kv['mon-node'];
var nodename = me.nodename;
delete kv.nodename;
Proxmox.Utils.API2Request({
params: kv,
success: function() {
Proxmox.Utils.API2Request({
- url: `/nodes/${monNode}/ceph/mon/${monNode}`,
+ url: `/nodes/${nodename}/ceph/mon/${nodename}`,
waitMsgTarget: wizard,
method: 'POST',
success: function() {
delete job['repeat-missed'];
job.all = job.all === true ? 1 : 0;
- if (job['prune-backups']) {
- job['prune-backups'] = PVE.Parser.printPropertyString(job['prune-backups']);
- }
+ ['performance', 'prune-backups'].forEach(key => {
+ if (job[key]) {
+ job[key] = PVE.Parser.printPropertyString(job[key]);
+ }
+ });
let allNodes = PVE.data.ResourceStore.getNodes();
let nodes = allNodes.filter(node => node.status === 'online').map(node => node.node);
renderer: v => !v ? Proxmox.Utils.NoneText : PVE.Parser.printPropertyString(v),
width: 450,
url: "/api2/extjs/cluster/options",
- //onlineHelp: 'pveum_configure_webauthn',
+ onlineHelp: 'pveum_configure_webauthn',
items: [{
xtype: 'textfield',
- fieldLabel: gettext('Relying Party'),
- name: 'rp',
+ fieldLabel: gettext('Name'),
+ name: 'rp', // NOTE: relying party consists of name and id, this is the name
allowBlank: false,
- listeners: {
- dirtychange: (f, isDirty) =>
- f.up('panel').down('box[id=rpChangeWarning]').setHidden(!f.originalValue || !isDirty),
- },
},
{
xtype: 'textfield',
fieldLabel: gettext('Origin'),
+ emptyText: Ext.String.format(gettext("Domain Lockdown (e.g., {0})"), document.location.origin),
name: 'origin',
- allowBlank: false,
+ allowBlank: true,
},
{
xtype: 'textfield',
fieldLabel: 'ID',
name: 'id',
allowBlank: false,
+ listeners: {
+ dirtychange: (f, isDirty) =>
+ f.up('panel').down('box[id=idChangeWarning]').setHidden(!f.originalValue || !isDirty),
+ },
},
{
xtype: 'container',
iconCls: 'fa fa-fw fa-pencil-square-o',
handler: function(button, ev) {
let panel = this.up('panel');
- panel.down('field[name=rp]').setValue(document.location.hostname);
- panel.down('field[name=origin]').setValue(document.location.origin);
- panel.down('field[name=id]').setValue(document.location.hostname);
+ let fqdn = document.location.hostname;
+
+ panel.down('field[name=rp]').setValue(fqdn);
+
+ let idField = panel.down('field[name=id]');
+ let currentID = idField.getValue();
+ if (!currentID || currentID.length === 0) {
+ idField.setValue(fqdn);
+ }
},
},
],
},
{
xtype: 'box',
- id: 'rpChangeWarning',
+ id: 'idChangeWarning',
hidden: true,
padding: '5 0 0 0',
html: '<i class="fa fa-exclamation-triangle warning"></i> '
- + gettext('Changing the Relying Party may break existing webAuthn TFA entries.'),
+ + gettext('Changing the ID breaks existing WebAuthn TFA entries.'),
}],
});
me.add_inputpanel_row('bwlimit', gettext('Bandwidth Limits'), {
alias: 'widget.pveSecurityGroups',
title: 'Security Groups',
+ onlineHelp: 'pve_firewall_security_groups',
layout: 'border',
},
success: function(response, opts) {
let data = response.result.data;
- if (data.status === 'Active') {
+ if (data?.status.toLowerCase() === 'active') {
if (data.level === 'c') {
me.updateCommunity(data);
} else {
let freeId = PVE.Utils.nextFreeDisk(controllers, me.vmconfig);
if (freeId !== undefined) {
- busField.setValue(freeId.controller);
+ busField?.setValue(freeId.controller);
deviceIDField.setValue(freeId.id);
}
},
},
initComponent: function() {
- var me = this;
+ let me = this;
Ext.apply(me, {
fieldLabel: gettext('Bus/Device'),
return;
}
let field = me.down('field[name=deviceid]');
+ me.setToFree([value], undefined, field);
field.setMaxValue(PVE.Utils.diskControllerMaxIDs[value] - 1);
field.validate();
},
valueField: 'type',
displayField: 'type',
listConfig: {
+ width: 550,
columns: [
{
header: gettext('Type'),
dataIndex: 'type',
+ renderer: function(value, md, rec) {
+ if (rec.data.name !== undefined) {
+ return `${rec.data.name} (${value})`;
+ }
+ return value;
+ },
flex: 1,
},
{
- header: gettext('Available'),
+ header: gettext('Avail'),
dataIndex: 'available',
- width: 80,
+ width: 60,
},
{
header: gettext('Description'),
dataIndex: 'description',
flex: 1,
+ cellWrap: true,
renderer: function(value) {
if (!value) {
return '';
Ext.define('PVE.lxc.Config', {
extend: 'PVE.panel.Config',
- alias: 'widget.PVE.lxc.Config',
+ alias: 'widget.pveLXCConfig',
onlineHelp: 'chapter_pct',
onGetValues: function(values) {
var me = this;
- PVE.Utils.delete_if_default(values, 'cores', '', me.insideWizard);
- // cpu{limit,unit} aren't in the wizard so create is always false
- PVE.Utils.delete_if_default(values, 'cpulimit', '0', 0);
- PVE.Utils.delete_if_default(values, 'cpuunits', '1024', 0);
+ PVE.Utils.delete_if_default(values, 'cpulimit', '0', me.insideWizard);
+ PVE.Utils.delete_if_default(values, 'cpuunits', '1024', me.insideWizard);
return values;
},
xtype: 'proxmoxintegerfield',
name: 'cpuunits',
fieldLabel: gettext('CPU units'),
- value: 1024,
+ value: '',
minValue: 8,
maxValue: 500000,
+ emptyText: '1024',
labelWidth: labelWidth,
- allowBlank: false,
+ deleteEmpty: true,
+ allowBlank: true,
},
],
onlineHelp: 'sysadmin_certificate_management',
mixins: ['Proxmox.Mixin.CBind'],
+ scrollable: 'y',
items: [
{
labelWidth: 120,
},
width: 800,
- resizable: true,
- items: [
- {
- xtype: 'displayfield',
- fieldLabel: gettext('Name'),
- name: 'filename',
- },
- {
- xtype: 'displayfield',
- fieldLabel: gettext('Fingerprint'),
- name: 'fingerprint',
- },
- {
- xtype: 'displayfield',
- fieldLabel: gettext('Issuer'),
- name: 'issuer',
- },
- {
- xtype: 'displayfield',
- fieldLabel: gettext('Subject'),
- name: 'subject',
- },
- {
- xtype: 'displayfield',
- fieldLabel: gettext('Public Key Type'),
- name: 'public-key-type',
- },
- {
- xtype: 'displayfield',
- fieldLabel: gettext('Public Key Size'),
- name: 'public-key-bits',
- },
- {
- xtype: 'displayfield',
- fieldLabel: gettext('Valid Since'),
- renderer: Proxmox.Utils.render_timestamp,
- name: 'notbefore',
- },
- {
- xtype: 'displayfield',
- fieldLabel: gettext('Expires'),
- renderer: Proxmox.Utils.render_timestamp,
- name: 'notafter',
- },
- {
- xtype: 'displayfield',
- fieldLabel: gettext('Subject Alternative Names'),
- name: 'san',
- renderer: PVE.Utils.render_san,
- },
- {
- xtype: 'textarea',
- editable: false,
- grow: true,
- growMax: 200,
- fieldLabel: gettext('Certificate'),
- name: 'pem',
- },
- ],
+ items: {
+ xtype: 'inputpanel',
+ maxHeight: 900,
+ scrollable: 'y',
+ columnT: [
+ {
+ xtype: 'displayfield',
+ fieldLabel: gettext('Name'),
+ name: 'filename',
+ },
+ {
+ xtype: 'displayfield',
+ fieldLabel: gettext('Fingerprint'),
+ name: 'fingerprint',
+ },
+ {
+ xtype: 'displayfield',
+ fieldLabel: gettext('Issuer'),
+ name: 'issuer',
+ },
+ {
+ xtype: 'displayfield',
+ fieldLabel: gettext('Subject'),
+ name: 'subject',
+ },
+ ],
+ column1: [
+ {
+ xtype: 'displayfield',
+ fieldLabel: gettext('Public Key Type'),
+ name: 'public-key-type',
+ },
+ {
+ xtype: 'displayfield',
+ fieldLabel: gettext('Public Key Size'),
+ name: 'public-key-bits',
+ },
+ ],
+ column2: [
+ {
+ xtype: 'displayfield',
+ fieldLabel: gettext('Valid Since'),
+ renderer: Proxmox.Utils.render_timestamp,
+ name: 'notbefore',
+ },
+ {
+ xtype: 'displayfield',
+ fieldLabel: gettext('Expires'),
+ renderer: Proxmox.Utils.render_timestamp,
+ name: 'notafter',
+ },
+ ],
+ columnB: [
+ {
+ xtype: 'displayfield',
+ fieldLabel: gettext('Subject Alternative Names'),
+ name: 'san',
+ renderer: PVE.Utils.render_san,
+ },
+ {
+ xtype: 'fieldset',
+ title: gettext('Raw Certificate'),
+ collapsible: true,
+ collapsed: true,
+ items: [{
+ xtype: 'textarea',
+ name: 'pem',
+ editable: false,
+ grow: true,
+ growMax: 350,
+ fieldStyle: {
+ 'white-space': 'pre-wrap',
+ 'font-family': 'monospace',
+ },
+ }],
+ },
+ ],
+ },
initComponent: function() {
let me = this;
nextduedate: {
header: gettext('Next due date'),
},
+ signature: {
+ header: gettext('Signed/Offline'),
+ renderer: (value) => {
+ if (value) {
+ return gettext('Yes');
+ } else {
+ return gettext('No');
+ }
+ },
+ },
};
Ext.apply(me, {
title: gettext('CPU usage'),
fields: ['cpu', 'iowait'],
fieldTitles: [gettext('CPU usage'), gettext('IO delay')],
+ unit: 'percent',
store: rrdstore,
},
{
-Ext.define('PVE.qemu.Summary', {
+Ext.define('PVE.guest.Summary', {
extend: 'Ext.panel.Panel',
xtype: 'pveGuestSummary',
pveSelNode: me.pveSelNode,
fields: ['cpu'],
fieldTitles: [gettext('CPU usage')],
+ unit: 'percent',
store: rrdstore,
},
{
data: {
isSCSI: false,
isVirtIO: false,
+ isSCSISingle: false,
},
},
'field[name=deviceid]': {
change: 'fireIdChange',
},
- 'field[name=iothread]': {
+ 'field[name=scsiController]': {
change: function(f, value) {
- if (!this.getView().insideWizard) {
- return;
- }
- var vmScsiType = value ? 'virtio-scsi-single': 'virtio-scsi-pci';
- this.lookupReference('scsiController').setValue(vmScsiType);
+ let vm = this.getViewModel();
+ vm.set('isSCSISingle', value === 'virtio-scsi-single');
},
},
},
me.scsiController = Ext.create('Ext.form.field.Display', {
fieldLabel: gettext('SCSI Controller'),
reference: 'scsiController',
+ name: 'scsiController',
bind: me.insideWizard ? {
value: '{current.scsihw}',
visible: '{isSCSI}',
reference: 'discard',
name: 'discard',
},
- );
-
- advancedColumn1.push(
{
xtype: 'proxmoxcheckbox',
- fieldLabel: gettext('SSD emulation'),
- name: 'ssd',
+ name: 'iothread',
+ fieldLabel: 'IO thread',
clearOnDisable: true,
bind: {
- disabled: '{isVirtIO}',
+ disabled: '{!isVirtIO && !isSCSI}',
+ // Checkbox.setValue handles Arrays in a different way, therefore cast to bool
+ value: '{!!isVirtIO || (isSCSI && isSCSISingle)}',
},
},
+ );
+
+ advancedColumn1.push(
{
xtype: 'proxmoxcheckbox',
- name: 'iothread',
- fieldLabel: 'IO thread',
+ fieldLabel: gettext('SSD emulation'),
+ name: 'ssd',
clearOnDisable: true,
bind: {
- disabled: '{!isVirtIO && !isSCSI}',
+ disabled: '{isVirtIO}',
},
},
{
scsi: 2,
virtio: 1,
},
- scsihw: 'virtio-scsi-pci',
+ scsihw: 'virtio-scsi-single',
};
// virtio-net is in kernel since 2.6.25
delete values.affinity;
}
- PVE.Utils.delete_if_default(values, 'cpulimit', '0', 0);
- PVE.Utils.delete_if_default(values, 'cpuunits', '1024', 0);
+ PVE.Utils.delete_if_default(values, 'cpulimit', '0', me.insideWizard);
+ PVE.Utils.delete_if_default(values, 'cpuunits', '1024', me.insideWizard);
// build the cpu options:
me.cpu.cputype = values.cputype;
// FIXME: change to [1, 1000] once cgroup v1 support gets removed (PVE 8 ?)
minValue: 2,
maxValue: 262144,
- value: '1024',
+ value: '',
+ emptyText: '1024',
deleteEmpty: true,
allowBlank: true,
},
} catch (e) {
return "Failed to parse key - " + e;
}
- if (typeof key.data === undefined) {
+ if (key.data === undefined) {
return "Does not seems like a valid Proxmox Backup key!";
}
}
},
windowText: function(get) {
if (get('isInstalled')) {
- return '<p class="install-mask">' +
- Ext.String.format(gettext('{0} is not initialized.'), 'Ceph') + ' '+
- gettext('You need to create a initial config once.') + '</p>';
+ return `<p class="install-mask">
+ ${Ext.String.format(gettext('{0} is not initialized.'), 'Ceph')}
+ ${gettext('You need to create an initial config once.')}</p>`;
} else {
return '<p class="install-mask">' +
Ext.String.format(gettext('{0} is not installed on this node.'), 'Ceph') + '<br>' +
};
if (view.vmid) {
- confirmMsg += '. ' + gettext('This will permanently erase current VM data.');
+ confirmMsg += `. ${Ext.String.format(
+ gettext('This will permanently erase current {0} data.'),
+ view.vmtype === 'lxc' ? 'CT' : 'VM',
+ )}`;
+ if (view.vmtype === 'lxc') {
+ confirmMsg += `<br>${gettext('Mount point volumes are also erased.')}`;
+ }
Ext.Msg.confirm(gettext('Confirm'), confirmMsg, function(btn) {
if (btn === 'yes') {
executeRestore();