From 0a13e08ec2549d9f05377b53635c1f973b2afac2 Mon Sep 17 00:00:00 2001 From: Stefan Reiter Date: Tue, 19 Nov 2019 12:23:47 +0100 Subject: [PATCH] refactor: create QemuServer::Monitor for high-level QMP access QMP and monitor helpers are moved from QemuServer.pm. By using only vm_running_locally instead of check_running, a cyclic dependency to QemuConfig is avoided. This also means that the $nocheck parameter serves no more purpose, and has thus been removed along with vm_mon_cmd_nocheck. Care has been taken to avoid errors resulting from this, and occasionally a manual check for a VM's existence inserted on the callsite. Methods have been renamed to avoid redundant naming: * vm_qmp_command -> qmp_cmd * vm_mon_cmd -> mon_cmd * vm_human_monitor_command -> hmp_cmd mon_cmd is exported since it has many users. This patch also changes all non-package users of vm_qmp_command to use the mon_cmd helper. Includes mocking for tests. Signed-off-by: Stefan Reiter --- PVE/API2/Qemu.pm | 15 ++-- PVE/API2/Qemu/Agent.pm | 7 +- PVE/CLI/qm.pm | 13 +-- PVE/QemuConfig.pm | 15 ++-- PVE/QemuMigrate.pm | 21 ++--- PVE/QemuServer.pm | 184 +++++++++++++------------------------- PVE/QemuServer/Agent.pm | 3 +- PVE/QemuServer/Makefile | 1 + PVE/QemuServer/Memory.pm | 9 +- PVE/QemuServer/Monitor.pm | 62 +++++++++++++ PVE/VZDump/QemuServer.pm | 14 +-- test/snapshot-test.pm | 19 ++-- 12 files changed, 193 insertions(+), 170 deletions(-) create mode 100644 PVE/QemuServer/Monitor.pm diff --git a/PVE/API2/Qemu.pm b/PVE/API2/Qemu.pm index 8fcd3ab0..74104dfb 100644 --- a/PVE/API2/Qemu.pm +++ b/PVE/API2/Qemu.pm @@ -20,6 +20,7 @@ use PVE::ReplicationConfig; use PVE::GuestHelpers; use PVE::QemuConfig; use PVE::QemuServer; +use PVE::QemuServer::Monitor qw(mon_cmd); use PVE::QemuMigrate; use PVE::RPCEnvironment; use PVE::AccessControl; @@ -1829,8 +1830,8 @@ __PACKAGE__->register_method({ my ($ticket, undef, $remote_viewer_config) = PVE::AccessControl::remote_viewer_config($authuser, $vmid, $node, $proxy, 
$title, $port); - PVE::QemuServer::vm_mon_cmd($vmid, "set_password", protocol => 'spice', password => $ticket); - PVE::QemuServer::vm_mon_cmd($vmid, "expire_password", protocol => 'spice', time => "+30"); + mon_cmd($vmid, "set_password", protocol => 'spice', password => $ticket); + mon_cmd($vmid, "expire_password", protocol => 'spice', time => "+30"); return $remote_viewer_config; }}); @@ -2255,7 +2256,8 @@ __PACKAGE__->register_method({ # checking the qmp status here to get feedback to the gui/cli/api # and the status query should not take too long my $qmpstatus = eval { - PVE::QemuServer::vm_qmp_command($vmid, { execute => "query-status" }, 0); + PVE::QemuConfig::assert_config_exists_on_node($vmid); + mon_cmd($vmid, "query-status"); }; my $err = $@ if $@; @@ -2336,7 +2338,8 @@ __PACKAGE__->register_method({ my $vmid = extract_param($param, 'vmid'); my $qmpstatus = eval { - PVE::QemuServer::vm_qmp_command($vmid, { execute => "query-status" }, 0); + PVE::QemuConfig::assert_config_exists_on_node($vmid); + mon_cmd($vmid, "query-status"); }; my $err = $@ if $@; @@ -3088,7 +3091,7 @@ __PACKAGE__->register_method({ PVE::QemuConfig->write_config($vmid, $conf); if ($running && PVE::QemuServer::parse_guest_agent($conf)->{fstrim_cloned_disks} && PVE::QemuServer::qga_check_running($vmid)) { - eval { PVE::QemuServer::vm_mon_cmd($vmid, "guest-fstrim"); }; + eval { mon_cmd($vmid, "guest-fstrim"); }; } eval { @@ -3444,7 +3447,7 @@ __PACKAGE__->register_method({ my $res = ''; eval { - $res = PVE::QemuServer::vm_human_monitor_command($vmid, $param->{command}); + $res = PVE::QemuServer::Monitor::hmp_cmd($vmid, $param->{command}); }; $res = "ERROR: $@" if $@; diff --git a/PVE/API2/Qemu/Agent.pm b/PVE/API2/Qemu/Agent.pm index 51fb0d8e..1bb4dd60 100644 --- a/PVE/API2/Qemu/Agent.pm +++ b/PVE/API2/Qemu/Agent.pm @@ -7,6 +7,7 @@ use PVE::RESTHandler; use PVE::JSONSchema qw(get_standard_option); use PVE::QemuServer; use PVE::QemuServer::Agent qw(agent_available agent_cmd 
check_agent_error); +use PVE::QemuServer::Monitor qw(mon_cmd); use MIME::Base64 qw(encode_base64 decode_base64); use JSON; @@ -190,7 +191,7 @@ sub register_command { agent_available($vmid, $conf); my $cmd = $param->{command} // $command; - my $res = PVE::QemuServer::vm_mon_cmd($vmid, "guest-$cmd"); + my $res = mon_cmd($vmid, "guest-$cmd"); return { result => $res }; }}); @@ -415,7 +416,7 @@ __PACKAGE__->register_method({ my $content = ""; while ($bytes_left > 0 && !$eof) { - my $read = PVE::QemuServer::vm_mon_cmd($vmid, "guest-file-read", handle => $qgafh, count => int($read_size)); + my $read = mon_cmd($vmid, "guest-file-read", handle => $qgafh, count => int($read_size)); check_agent_error($read, "can't read from file"); $content .= decode_base64($read->{'buf-b64'}); @@ -423,7 +424,7 @@ __PACKAGE__->register_method({ $eof = $read->{eof} // 0; } - my $res = PVE::QemuServer::vm_mon_cmd($vmid, "guest-file-close", handle => $qgafh); + my $res = mon_cmd($vmid, "guest-file-close", handle => $qgafh); check_agent_error($res, "can't close file", 1); my $result = { diff --git a/PVE/CLI/qm.pm b/PVE/CLI/qm.pm index 44be39d7..87f5d84b 100755 --- a/PVE/CLI/qm.pm +++ b/PVE/CLI/qm.pm @@ -27,9 +27,11 @@ use PVE::Tools qw(extract_param); use PVE::API2::Qemu::Agent; use PVE::API2::Qemu; +use PVE::QemuConfig; use PVE::QemuServer::Helpers; use PVE::QemuServer::Agent qw(agent_available); use PVE::QemuServer::ImportDisk; +use PVE::QemuServer::Monitor qw(mon_cmd); use PVE::QemuServer::OVF; use PVE::QemuServer; @@ -210,15 +212,16 @@ __PACKAGE__->register_method ({ my ($param) = @_; my $vmid = $param->{vmid}; + PVE::QemuConfig::assert_config_exists_on_node($vmid); my $vnc_socket = PVE::QemuServer::Helpers::vnc_socket($vmid); if (my $ticket = $ENV{LC_PVE_TICKET}) { # NOTE: ssh on debian only pass LC_* variables - PVE::QemuServer::vm_mon_cmd($vmid, "change", device => 'vnc', target => "unix:$vnc_socket,password"); - PVE::QemuServer::vm_mon_cmd($vmid, "set_password", protocol => 'vnc', 
password => $ticket); - PVE::QemuServer::vm_mon_cmd($vmid, "expire_password", protocol => 'vnc', time => "+30"); + mon_cmd($vmid, "change", device => 'vnc', target => "unix:$vnc_socket,password"); + mon_cmd($vmid, "set_password", protocol => 'vnc', password => $ticket); + mon_cmd($vmid, "expire_password", protocol => 'vnc', time => "+30"); } else { # FIXME: remove or allow to add tls-creds object, as x509 vnc param is removed with qemu 4?? - PVE::QemuServer::vm_mon_cmd($vmid, "change", device => 'vnc', target => "unix:$vnc_socket,password"); + mon_cmd($vmid, "change", device => 'vnc', target => "unix:$vnc_socket,password"); } run_vnc_proxy($vnc_socket); @@ -398,7 +401,7 @@ __PACKAGE__->register_method ({ last if $input =~ m/^\s*q(uit)?\s*$/; eval { - print PVE::QemuServer::vm_human_monitor_command ($vmid, $input); + print PVE::QemuServer::Monitor::hmp_cmd($vmid, $input); }; print "ERROR: $@" if $@; } diff --git a/PVE/QemuConfig.pm b/PVE/QemuConfig.pm index c42091ca..c3b9ac5a 100644 --- a/PVE/QemuConfig.pm +++ b/PVE/QemuConfig.pm @@ -6,6 +6,7 @@ use warnings; use PVE::AbstractConfig; use PVE::INotify; use PVE::QemuServer::Helpers; +use PVE::QemuServer::Monitor qw(mon_cmd); use PVE::QemuServer; use PVE::Storage; use PVE::Tools; @@ -198,10 +199,10 @@ sub __snapshot_freeze { my ($class, $vmid, $unfreeze) = @_; if ($unfreeze) { - eval { PVE::QemuServer::vm_mon_cmd($vmid, "guest-fsfreeze-thaw"); }; + eval { mon_cmd($vmid, "guest-fsfreeze-thaw"); }; warn "guest-fsfreeze-thaw problems - $@" if $@; } else { - eval { PVE::QemuServer::vm_mon_cmd($vmid, "guest-fsfreeze-freeze"); }; + eval { mon_cmd($vmid, "guest-fsfreeze-freeze"); }; warn "guest-fsfreeze-freeze problems - $@" if $@; } } @@ -217,9 +218,9 @@ sub __snapshot_create_vol_snapshots_hook { my $path = PVE::Storage::path($storecfg, $snap->{vmstate}); PVE::Storage::activate_volumes($storecfg, [$snap->{vmstate}]); - PVE::QemuServer::vm_mon_cmd($vmid, "savevm-start", statefile => $path); + mon_cmd($vmid, "savevm-start", 
statefile => $path); for(;;) { - my $stat = PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "query-savevm"); + my $stat = mon_cmd($vmid, "query-savevm"); if (!$stat->{status}) { die "savevm not active\n"; } elsif ($stat->{status} eq 'active') { @@ -232,18 +233,18 @@ sub __snapshot_create_vol_snapshots_hook { } } } else { - PVE::QemuServer::vm_mon_cmd($vmid, "savevm-start"); + mon_cmd($vmid, "savevm-start"); } } elsif ($hook eq "after") { eval { - PVE::QemuServer::vm_mon_cmd($vmid, "savevm-end"); + mon_cmd($vmid, "savevm-end"); PVE::Storage::deactivate_volumes($storecfg, [$snap->{vmstate}]) if $snap->{vmstate}; }; warn $@ if $@; } elsif ($hook eq "after-freeze") { # savevm-end is async, we need to wait for (;;) { - my $stat = PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "query-savevm"); + my $stat = mon_cmd($vmid, "query-savevm"); if (!$stat->{bytes}) { last; } else { diff --git a/PVE/QemuMigrate.pm b/PVE/QemuMigrate.pm index ffb781a4..02515610 100644 --- a/PVE/QemuMigrate.pm +++ b/PVE/QemuMigrate.pm @@ -11,6 +11,7 @@ use PVE::Tools; use PVE::Cluster; use PVE::Storage; use PVE::QemuServer; +use PVE::QemuServer::Monitor qw(mon_cmd); use Time::HiRes qw( usleep ); use PVE::RPCEnvironment; use PVE::ReplicationConfig; @@ -548,7 +549,7 @@ sub phase2 { my $spice_ticket; if (PVE::QemuServer::vga_conf_has_spice($conf->{vga})) { - my $res = PVE::QemuServer::vm_mon_cmd($vmid, 'query-spice'); + my $res = mon_cmd($vmid, 'query-spice'); $spice_ticket = $res->{ticket}; } @@ -703,7 +704,7 @@ sub phase2 { $migrate_speed *= 1024; $self->log('info', "migrate_set_speed: $migrate_speed"); eval { - PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate_set_speed", value => int($migrate_speed)); + mon_cmd($vmid, "migrate_set_speed", value => int($migrate_speed)); }; $self->log('info', "migrate_set_speed error: $@") if $@; @@ -712,7 +713,7 @@ sub phase2 { if (defined($migrate_downtime)) { $self->log('info', "migrate_set_downtime: $migrate_downtime"); eval { - 
PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate_set_downtime", value => int($migrate_downtime*100)/100); + mon_cmd($vmid, "migrate_set_downtime", value => int($migrate_downtime*100)/100); }; $self->log('info', "migrate_set_downtime error: $@") if $@; } @@ -730,7 +731,7 @@ sub phase2 { $self->log('info', "set cachesize: $cachesize"); eval { - PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate-set-cache-size", value => int($cachesize)); + mon_cmd($vmid, "migrate-set-cache-size", value => int($cachesize)); }; $self->log('info', "migrate-set-cache-size error: $@") if $@; @@ -746,7 +747,7 @@ sub phase2 { $self->log('info', "spice client_migrate_info"); eval { - PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "client_migrate_info", protocol => 'spice', + mon_cmd($vmid, "client_migrate_info", protocol => 'spice', hostname => $proxyticket, 'port' => 0, 'tls-port' => $spice_port, 'cert-subject' => $subject); }; @@ -756,7 +757,7 @@ sub phase2 { $self->log('info', "start migrate command to $ruri"); eval { - PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate", uri => $ruri); + mon_cmd($vmid, "migrate", uri => $ruri); }; my $merr = $@; $self->log('info', "migrate uri => $ruri failed: $merr") if $merr; @@ -774,7 +775,7 @@ sub phase2 { usleep($usleep); my $stat; eval { - $stat = PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "query-migrate"); + $stat = mon_cmd($vmid, "query-migrate"); }; if (my $err = $@) { $err_count++; @@ -843,7 +844,7 @@ sub phase2 { $migrate_downtime *= 2; $self->log('info', "migrate_set_downtime: $migrate_downtime"); eval { - PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate_set_downtime", value => int($migrate_downtime*100)/100); + mon_cmd($vmid, "migrate_set_downtime", value => int($migrate_downtime*100)/100); }; $self->log('info', "migrate_set_downtime error: $@") if $@; } @@ -870,7 +871,7 @@ sub phase2_cleanup { $self->log('info', "migrate_cancel"); eval { - PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate_cancel"); + mon_cmd($vmid, "migrate_cancel"); 
}; $self->log('info', "migrate_cancel error: $@") if $@; @@ -1021,7 +1022,7 @@ sub phase3_cleanup { if (PVE::QemuServer::vga_conf_has_spice($conf->{vga}) && $self->{running}) { $self->log('info', "Waiting for spice server migration"); while (1) { - my $res = PVE::QemuServer::vm_mon_cmd_nocheck($vmid, 'query-spice'); + my $res = mon_cmd($vmid, 'query-spice'); last if int($res->{'migrated'}) == 1; last if $timer > 50; $timer ++; diff --git a/PVE/QemuServer.pm b/PVE/QemuServer.pm index 4e457593..324be804 100644 --- a/PVE/QemuServer.pm +++ b/PVE/QemuServer.pm @@ -34,7 +34,6 @@ use PVE::INotify; use PVE::JSONSchema qw(get_standard_option); use PVE::ProcFSTools; use PVE::RPCEnvironment; -use PVE::SafeSyslog; use PVE::Storage; use PVE::SysFSTools; use PVE::Systemd; @@ -45,6 +44,7 @@ use PVE::QemuConfig; use PVE::QemuServer::Helpers; use PVE::QemuServer::Cloudinit; use PVE::QemuServer::Memory; +use PVE::QemuServer::Monitor qw(mon_cmd); use PVE::QemuServer::PCI qw(print_pci_addr print_pcie_addr print_pcie_root_port); use PVE::QemuServer::USB qw(parse_usb_device); @@ -4051,7 +4051,7 @@ sub config_to_command { sub spice_port { my ($vmid) = @_; - my $res = vm_mon_cmd($vmid, 'query-spice'); + my $res = mon_cmd($vmid, 'query-spice'); return $res->{'tls-port'} || $res->{'port'} || die "no spice port\n"; } @@ -4059,7 +4059,7 @@ sub spice_port { sub vm_devices_list { my ($vmid) = @_; - my $res = vm_mon_cmd($vmid, 'query-pci'); + my $res = mon_cmd($vmid, 'query-pci'); my $devices_to_check = []; my $devices = {}; foreach my $pcibus (@$res) { @@ -4078,14 +4078,14 @@ sub vm_devices_list { $devices_to_check = $to_check; } - my $resblock = vm_mon_cmd($vmid, 'query-block'); + my $resblock = mon_cmd($vmid, 'query-block'); foreach my $block (@$resblock) { if($block->{device} =~ m/^drive-(\S+)/){ $devices->{$1} = 1; } } - my $resmice = vm_mon_cmd($vmid, 'query-mice'); + my $resmice = mon_cmd($vmid, 'query-mice'); foreach my $mice (@$resmice) { if ($mice->{name} eq 'QEMU HID Tablet') { 
$devices->{tablet} = 1; @@ -4096,7 +4096,7 @@ sub vm_devices_list { # for usb devices there is no query-usb # but we can iterate over the entries in # qom-list path=/machine/peripheral - my $resperipheral = vm_mon_cmd($vmid, 'qom-list', path => '/machine/peripheral'); + my $resperipheral = mon_cmd($vmid, 'qom-list', path => '/machine/peripheral'); foreach my $per (@$resperipheral) { if ($per->{name} =~ m/^usb\d+$/) { $devices->{$per->{name}} = 1; @@ -4277,13 +4277,13 @@ sub qemu_deviceadd { $devicefull = "driver=".$devicefull; my %options = split(/[=,]/, $devicefull); - vm_mon_cmd($vmid, "device_add" , %options); + mon_cmd($vmid, "device_add" , %options); } sub qemu_devicedel { my ($vmid, $deviceid) = @_; - my $ret = vm_mon_cmd($vmid, "device_del", id => $deviceid); + my $ret = mon_cmd($vmid, "device_del", id => $deviceid); } sub qemu_iothread_add { @@ -4312,7 +4312,7 @@ sub qemu_iothread_del { sub qemu_objectadd { my($vmid, $objectid, $qomtype) = @_; - vm_mon_cmd($vmid, "object-add", id => $objectid, "qom-type" => $qomtype); + mon_cmd($vmid, "object-add", id => $objectid, "qom-type" => $qomtype); return 1; } @@ -4320,7 +4320,7 @@ sub qemu_objectadd { sub qemu_objectdel { my($vmid, $objectid) = @_; - vm_mon_cmd($vmid, "object-del", id => $objectid); + mon_cmd($vmid, "object-del", id => $objectid); return 1; } @@ -4330,7 +4330,7 @@ sub qemu_driveadd { my $drive = print_drive_full($storecfg, $vmid, $device); $drive =~ s/\\/\\\\/g; - my $ret = vm_human_monitor_command($vmid, "drive_add auto \"$drive\""); + my $ret = PVE::QemuServer::Monitor::hmp_cmd($vmid, "drive_add auto \"$drive\""); # If the command succeeds qemu prints: "OK" return 1 if $ret =~ m/OK/s; @@ -4341,7 +4341,7 @@ sub qemu_driveadd { sub qemu_drivedel { my($vmid, $deviceid) = @_; - my $ret = vm_human_monitor_command($vmid, "drive_del drive-$deviceid"); + my $ret = PVE::QemuServer::Monitor::hmp_cmd($vmid, "drive_del drive-$deviceid"); $ret =~ s/^\s+//; return 1 if $ret eq ""; @@ -4451,7 +4451,7 @@ sub 
qemu_add_pci_bridge { sub qemu_set_link_status { my ($vmid, $device, $up) = @_; - vm_mon_cmd($vmid, "set_link", name => $device, + mon_cmd($vmid, "set_link", name => $device, up => $up ? JSON::true : JSON::false); } @@ -4461,14 +4461,14 @@ sub qemu_netdevadd { my $netdev = print_netdev_full($vmid, $conf, $arch, $device, $deviceid, 1); my %options = split(/[=,]/, $netdev); - vm_mon_cmd($vmid, "netdev_add", %options); + mon_cmd($vmid, "netdev_add", %options); return 1; } sub qemu_netdevdel { my ($vmid, $deviceid) = @_; - vm_mon_cmd($vmid, "netdev_del", id => $deviceid); + mon_cmd($vmid, "netdev_del", id => $deviceid); } sub qemu_usb_hotplug { @@ -4523,7 +4523,7 @@ sub qemu_cpu_hotplug { my $retry = 0; my $currentrunningvcpus = undef; while (1) { - $currentrunningvcpus = vm_mon_cmd($vmid, "query-cpus"); + $currentrunningvcpus = mon_cmd($vmid, "query-cpus"); last if scalar(@{$currentrunningvcpus}) == $i-1; raise_param_exc({ vcpus => "error unplugging cpu$i" }) if $retry > 5; $retry++; @@ -4540,7 +4540,7 @@ sub qemu_cpu_hotplug { return; } - my $currentrunningvcpus = vm_mon_cmd($vmid, "query-cpus"); + my $currentrunningvcpus = mon_cmd($vmid, "query-cpus"); die "vcpus in running vm does not match its configuration\n" if scalar(@{$currentrunningvcpus}) != $currentvcpus; @@ -4553,7 +4553,7 @@ sub qemu_cpu_hotplug { my $retry = 0; my $currentrunningvcpus = undef; while (1) { - $currentrunningvcpus = vm_mon_cmd($vmid, "query-cpus"); + $currentrunningvcpus = mon_cmd($vmid, "query-cpus"); last if scalar(@{$currentrunningvcpus}) == $i; raise_param_exc({ vcpus => "error hotplugging cpu$i" }) if $retry > 10; sleep 1; @@ -4566,7 +4566,7 @@ sub qemu_cpu_hotplug { } else { for (my $i = $currentvcpus; $i < $vcpus; $i++) { - vm_mon_cmd($vmid, "cpu-add", id => int($i)); + mon_cmd($vmid, "cpu-add", id => int($i)); } } } @@ -4580,7 +4580,7 @@ sub qemu_block_set_io_throttle { return if !check_running($vmid) ; - vm_mon_cmd($vmid, "block_set_io_throttle", device => $deviceid, + 
mon_cmd($vmid, "block_set_io_throttle", device => $deviceid, bps => int($bps), bps_rd => int($bps_rd), bps_wr => int($bps_wr), @@ -4645,7 +4645,7 @@ sub qemu_block_resize { return if !$running; - vm_mon_cmd($vmid, "block_resize", device => $deviceid, size => int($size)); + mon_cmd($vmid, "block_resize", device => $deviceid, size => int($size)); } @@ -4655,7 +4655,7 @@ sub qemu_volume_snapshot { my $running = check_running($vmid); if ($running && do_snapshots_with_qemu($storecfg, $volid)){ - vm_mon_cmd($vmid, 'blockdev-snapshot-internal-sync', device => $deviceid, name => $snap); + mon_cmd($vmid, 'blockdev-snapshot-internal-sync', device => $deviceid, name => $snap); } else { PVE::Storage::volume_snapshot($storecfg, $volid, $snap); } @@ -4677,7 +4677,7 @@ sub qemu_volume_snapshot_delete { } if ($running && do_snapshots_with_qemu($storecfg, $volid)){ - vm_mon_cmd($vmid, 'blockdev-snapshot-delete-internal-sync', device => $deviceid, name => $snap); + mon_cmd($vmid, 'blockdev-snapshot-delete-internal-sync', device => $deviceid, name => $snap); } else { PVE::Storage::volume_snapshot_delete($storecfg, $volid, $snap, $running); } @@ -4696,7 +4696,7 @@ sub set_migration_caps { "compress" => 0 }; - my $supported_capabilities = vm_mon_cmd_nocheck($vmid, "query-migrate-capabilities"); + my $supported_capabilities = mon_cmd($vmid, "query-migrate-capabilities"); for my $supported_capability (@$supported_capabilities) { push @$cap_ref, { @@ -4705,7 +4705,7 @@ sub set_migration_caps { }; } - vm_mon_cmd_nocheck($vmid, "migrate-set-capabilities", capabilities => $cap_ref); + mon_cmd($vmid, "migrate-set-capabilities", capabilities => $cap_ref); } my $fast_plug_option = { @@ -4786,7 +4786,7 @@ sub vmconfig_hotplug_pending { die "skip\n" if defined($conf->{balloon}) && $conf->{balloon} == 0; # here we reset the ballooning value to memory my $balloon = $conf->{memory} || $defaults->{memory}; - vm_mon_cmd($vmid, "balloon", value => $balloon*1024*1024); + mon_cmd($vmid, "balloon", value 
=> $balloon*1024*1024); } elsif ($fast_plug_option->{$opt}) { # do nothing } elsif ($opt =~ m/^net(\d+)$/) { @@ -4872,7 +4872,7 @@ sub vmconfig_hotplug_pending { # allow manual ballooning if shares is set to zero if ((defined($conf->{shares}) && ($conf->{shares} == 0))) { my $balloon = $conf->{pending}->{balloon} || $conf->{memory} || $defaults->{memory}; - vm_mon_cmd($vmid, "balloon", value => $balloon*1024*1024); + mon_cmd($vmid, "balloon", value => $balloon*1024*1024); } } elsif ($opt =~ m/^net(\d+)$/) { # some changes can be done without hotplug @@ -5148,14 +5148,14 @@ sub vmconfig_update_disk { } else { # cdrom if ($drive->{file} eq 'none') { - vm_mon_cmd($vmid, "eject",force => JSON::true,device => "drive-$opt"); + mon_cmd($vmid, "eject",force => JSON::true,device => "drive-$opt"); if (drive_is_cloudinit($old_drive)) { vmconfig_register_unused_drive($storecfg, $vmid, $conf, $old_drive); } } else { my $path = get_iso_path($storecfg, $vmid, $drive->{file}); - vm_mon_cmd($vmid, "eject", force => JSON::true,device => "drive-$opt"); # force eject if locked - vm_mon_cmd($vmid, "change", device => "drive-$opt",target => "$path") if $path; + mon_cmd($vmid, "eject", force => JSON::true,device => "drive-$opt"); # force eject if locked + mon_cmd($vmid, "change", device => "drive-$opt",target => "$path") if $path; } return 1; @@ -5427,7 +5427,7 @@ sub vm_start { print "migration listens on $migrate_uri\n" if $migrate_uri; if ($statefile && $statefile ne 'tcp' && $statefile ne 'unix') { - eval { vm_mon_cmd_nocheck($vmid, "cont"); }; + eval { mon_cmd($vmid, "cont"); }; warn $@ if $@; } @@ -5438,13 +5438,13 @@ sub vm_start { my $pfamily = PVE::Tools::get_host_address_family($nodename); my $storage_migrate_port = PVE::Tools::next_migrate_port($pfamily); - vm_mon_cmd_nocheck($vmid, "nbd-server-start", addr => { type => 'inet', data => { host => "${localip}", port => "${storage_migrate_port}" } } ); + mon_cmd($vmid, "nbd-server-start", addr => { type => 'inet', data => { host 
=> "${localip}", port => "${storage_migrate_port}" } } ); $localip = "[$localip]" if Net::IP::ip_is_ipv6($localip); foreach my $opt (sort keys %$local_volumes) { my $volid = $local_volumes->{$opt}; - vm_mon_cmd_nocheck($vmid, "nbd-server-add", device => "drive-$opt", writable => JSON::true ); + mon_cmd($vmid, "nbd-server-add", device => "drive-$opt", writable => JSON::true ); my $migrate_storage_uri = "nbd:${localip}:${storage_migrate_port}:exportname=drive-$opt"; print "storage migration listens on $migrate_storage_uri volume:$volid\n"; } @@ -5459,13 +5459,13 @@ sub vm_start { if ($spice_port) { print "spice listens on port $spice_port\n"; if ($spice_ticket) { - vm_mon_cmd_nocheck($vmid, "set_password", protocol => 'spice', password => $spice_ticket); - vm_mon_cmd_nocheck($vmid, "expire_password", protocol => 'spice', time => "+30"); + mon_cmd($vmid, "set_password", protocol => 'spice', password => $spice_ticket); + mon_cmd($vmid, "expire_password", protocol => 'spice', time => "+30"); } } } else { - vm_mon_cmd_nocheck($vmid, "balloon", value => $conf->{balloon}*1024*1024) + mon_cmd($vmid, "balloon", value => $conf->{balloon}*1024*1024) if !$statefile && $conf->{balloon}; foreach my $opt (keys %$conf) { @@ -5475,7 +5475,7 @@ sub vm_start { } } - vm_mon_cmd_nocheck($vmid, 'qom-set', + mon_cmd($vmid, 'qom-set', path => "machine/peripheral/balloon0", property => "guest-stats-polling-interval", value => 2) if (!defined($conf->{balloon}) || $conf->{balloon}); @@ -5492,60 +5492,6 @@ sub vm_start { }); } -sub vm_mon_cmd { - my ($vmid, $execute, %params) = @_; - - my $cmd = { execute => $execute, arguments => \%params }; - vm_qmp_command($vmid, $cmd); -} - -sub vm_mon_cmd_nocheck { - my ($vmid, $execute, %params) = @_; - - my $cmd = { execute => $execute, arguments => \%params }; - vm_qmp_command($vmid, $cmd, 1); -} - -sub vm_qmp_command { - my ($vmid, $cmd, $nocheck) = @_; - - my $res; - - my $timeout; - if ($cmd->{arguments}) { - $timeout = delete 
$cmd->{arguments}->{timeout}; - } - - eval { - die "VM $vmid not running\n" if !check_running($vmid, $nocheck); - my $sname = PVE::QemuServer::Helpers::qmp_socket($vmid); - if (-e $sname) { # test if VM is reasonambe new and supports qmp/qga - my $qmpclient = PVE::QMPClient->new(); - - $res = $qmpclient->cmd($vmid, $cmd, $timeout); - } else { - die "unable to open monitor socket\n"; - } - }; - if (my $err = $@) { - syslog("err", "VM $vmid qmp command failed - $err"); - die $err; - } - - return $res; -} - -sub vm_human_monitor_command { - my ($vmid, $cmdline) = @_; - - my $cmd = { - execute => 'human-monitor-command', - arguments => { 'command-line' => $cmdline}, - }; - - return vm_qmp_command($vmid, $cmd); -} - sub vm_commandline { my ($storecfg, $vmid, $snapname) = @_; @@ -5580,7 +5526,7 @@ sub vm_reset { PVE::QemuConfig->check_lock($conf) if !$skiplock; - vm_mon_cmd($vmid, "system_reset"); + mon_cmd($vmid, "system_reset"); }); } @@ -5663,15 +5609,12 @@ sub _do_vm_stop { eval { if ($shutdown) { if (defined($conf) && parse_guest_agent($conf)->{enabled}) { - vm_qmp_command($vmid, { - execute => "guest-shutdown", - arguments => { timeout => $timeout } - }, $nocheck); + mon_cmd($vmid, "guest-shutdown", timeout => $timeout); } else { - vm_qmp_command($vmid, { execute => "system_powerdown" }, $nocheck); + mon_cmd($vmid, "system_powerdown"); } } else { - vm_qmp_command($vmid, { execute => "quit" }, $nocheck); + mon_cmd($vmid, "quit"); } }; my $err = $@; @@ -5794,7 +5737,7 @@ sub vm_suspend { $path = PVE::Storage::path($storecfg, $vmstate); PVE::QemuConfig->write_config($vmid, $conf); } else { - vm_mon_cmd($vmid, "stop"); + mon_cmd($vmid, "stop"); } }); @@ -5803,9 +5746,9 @@ sub vm_suspend { PVE::Storage::activate_volumes($storecfg, [$vmstate]); eval { - vm_mon_cmd($vmid, "savevm-start", statefile => $path); + mon_cmd($vmid, "savevm-start", statefile => $path); for(;;) { - my $state = vm_mon_cmd_nocheck($vmid, "query-savevm"); + my $state = mon_cmd($vmid, "query-savevm"); 
if (!$state->{status}) { die "savevm not active\n"; } elsif ($state->{status} eq 'active') { @@ -5828,7 +5771,7 @@ sub vm_suspend { if ($err) { # cleanup, but leave suspending lock, to indicate something went wrong eval { - vm_mon_cmd($vmid, "savevm-end"); + mon_cmd($vmid, "savevm-end"); PVE::Storage::deactivate_volumes($storecfg, [$vmstate]); PVE::Storage::vdisk_free($storecfg, $vmstate); delete $conf->@{qw(vmstate runningmachine)}; @@ -5841,7 +5784,7 @@ sub vm_suspend { die "lock changed unexpectedly\n" if !PVE::QemuConfig->has_lock($conf, 'suspending'); - vm_qmp_command($vmid, { execute => "quit" }); + mon_cmd($vmid, "quit"); $conf->{lock} = 'suspended'; PVE::QemuConfig->write_config($vmid, $conf); }); @@ -5852,8 +5795,7 @@ sub vm_resume { my ($vmid, $skiplock, $nocheck) = @_; PVE::QemuConfig->lock_config($vmid, sub { - my $vm_mon_cmd = $nocheck ? \&vm_mon_cmd_nocheck : \&vm_mon_cmd; - my $res = $vm_mon_cmd->($vmid, 'query-status'); + my $res = mon_cmd($vmid, 'query-status'); my $resume_cmd = 'cont'; if ($res->{status} && $res->{status} eq 'suspended') { @@ -5868,7 +5810,7 @@ sub vm_resume { if !($skiplock || PVE::QemuConfig->has_lock($conf, 'backup')); } - $vm_mon_cmd->($vmid, $resume_cmd); + mon_cmd($vmid, $resume_cmd); }); } @@ -5880,7 +5822,7 @@ sub vm_sendkey { my $conf = PVE::QemuConfig->load_config($vmid); # there is no qmp command, so we use the human monitor command - my $res = vm_human_monitor_command($vmid, "sendkey $key"); + my $res = PVE::QemuServer::Monitor::hmp_cmd($vmid, "sendkey $key"); die $res if $res ne ''; }); } @@ -6691,7 +6633,7 @@ sub do_snapshots_with_qemu { sub qga_check_running { my ($vmid, $nowarn) = @_; - eval { vm_mon_cmd($vmid, "guest-ping", timeout => 3); }; + eval { mon_cmd($vmid, "guest-ping", timeout => 3); }; if ($@) { warn "Qemu Guest Agent is not running - $@" if !$nowarn; return 0; @@ -6863,7 +6805,7 @@ sub qemu_drive_mirror { } # if a job already runs for this device we get an error, catch it for cleanup - eval { 
vm_mon_cmd($vmid, "drive-mirror", %$opts); }; + eval { mon_cmd($vmid, "drive-mirror", %$opts); }; if (my $err = $@) { eval { PVE::QemuServer::qemu_blockjobs_cancel($vmid, $jobs) }; warn "$@\n" if $@; @@ -6882,7 +6824,7 @@ sub qemu_drive_mirror_monitor { while (1) { die "storage migration timed out\n" if $err_complete > 300; - my $stats = vm_mon_cmd($vmid, "query-block-jobs"); + my $stats = mon_cmd($vmid, "query-block-jobs"); my $running_mirror_jobs = {}; foreach my $stat (@$stats) { @@ -6925,7 +6867,7 @@ sub qemu_drive_mirror_monitor { my $agent_running = $qga && qga_check_running($vmid); if ($agent_running) { print "freeze filesystem\n"; - eval { PVE::QemuServer::vm_mon_cmd($vmid, "guest-fsfreeze-freeze"); }; + eval { mon_cmd($vmid, "guest-fsfreeze-freeze"); }; } else { print "suspend vm\n"; eval { PVE::QemuServer::vm_suspend($vmid, 1); }; @@ -6936,7 +6878,7 @@ sub qemu_drive_mirror_monitor { if ($agent_running) { print "unfreeze filesystem\n"; - eval { PVE::QemuServer::vm_mon_cmd($vmid, "guest-fsfreeze-thaw"); }; + eval { mon_cmd($vmid, "guest-fsfreeze-thaw"); }; } else { print "resume vm\n"; eval { PVE::QemuServer::vm_resume($vmid, 1, 1); }; @@ -6949,7 +6891,7 @@ sub qemu_drive_mirror_monitor { # try to switch the disk if source and destination are on the same guest print "$job: Completing block job...\n"; - eval { vm_mon_cmd($vmid, "block-job-complete", device => $job) }; + eval { mon_cmd($vmid, "block-job-complete", device => $job) }; if ($@ =~ m/cannot be completed/) { print "$job: Block job cannot be completed, try again.\n"; $err_complete++; @@ -6977,12 +6919,12 @@ sub qemu_blockjobs_cancel { foreach my $job (keys %$jobs) { print "$job: Cancelling block job\n"; - eval { vm_mon_cmd($vmid, "block-job-cancel", device => $job); }; + eval { mon_cmd($vmid, "block-job-cancel", device => $job); }; $jobs->{$job}->{cancel} = 1; } while (1) { - my $stats = vm_mon_cmd($vmid, "query-block-jobs"); + my $stats = mon_cmd($vmid, "query-block-jobs"); my $running_jobs = {}; 
foreach my $stat (@$stats) { @@ -7069,8 +7011,7 @@ no_data_clone: sub get_current_qemu_machine { my ($vmid) = @_; - my $cmd = { execute => 'query-machines', arguments => {} }; - my $res = vm_qmp_command($vmid, $cmd); + my $res = mon_cmd($vmid, "query-machines"); my ($current, $default); foreach my $e (@$res) { @@ -7084,8 +7025,7 @@ sub get_current_qemu_machine { sub get_running_qemu_version { my ($vmid) = @_; - my $cmd = { execute => 'query-version', arguments => {} }; - my $res = vm_qmp_command($vmid, $cmd); + my $res = mon_cmd($vmid, "query-version"); return "$res->{qemu}->{major}.$res->{qemu}->{minor}"; } @@ -7136,7 +7076,7 @@ sub version_cmp { sub runs_at_least_qemu_version { my ($vmid, $major, $minor, $extra) = @_; - my $v = vm_qmp_command($vmid, { execute => 'query-version' }); + my $v = mon_cmd($vmid, "query-version"); die "could not query currently running version for VM $vmid\n" if !defined($v); $v = $v->{qemu}; @@ -7196,7 +7136,7 @@ sub create_efidisk($$$$$) { sub vm_iothreads_list { my ($vmid) = @_; - my $res = vm_mon_cmd($vmid, 'query-iothreads'); + my $res = mon_cmd($vmid, 'query-iothreads'); my $iothreads = {}; foreach my $iothread (@$res) { @@ -7329,7 +7269,7 @@ sub generate_smbios1_uuid { sub nbd_stop { my ($vmid) = @_; - vm_mon_cmd($vmid, 'nbd-server-stop'); + mon_cmd($vmid, 'nbd-server-stop'); } sub create_reboot_request { diff --git a/PVE/QemuServer/Agent.pm b/PVE/QemuServer/Agent.pm index 586ac3a0..9fec4fbd 100644 --- a/PVE/QemuServer/Agent.pm +++ b/PVE/QemuServer/Agent.pm @@ -4,6 +4,7 @@ use strict; use warnings; use PVE::QemuServer; +use PVE::QemuServer::Monitor; use MIME::Base64 qw(decode_base64); use JSON; use base 'Exporter'; @@ -59,7 +60,7 @@ sub agent_cmd { my $conf = PVE::QemuConfig->load_config($vmid); # also checks if VM exists agent_available($vmid, $conf, $noerr); - my $res = PVE::QemuServer::vm_mon_cmd($vmid, "guest-$cmd", %$params); + my $res = PVE::QemuServer::Monitor::mon_cmd($vmid, "guest-$cmd", %$params); 
check_agent_error($res, $errormsg, $noerr); return $res; diff --git a/PVE/QemuServer/Makefile b/PVE/QemuServer/Makefile index 56d14933..02b02095 100644 --- a/PVE/QemuServer/Makefile +++ b/PVE/QemuServer/Makefile @@ -6,6 +6,7 @@ SOURCES=PCI.pm \ Cloudinit.pm \ Agent.pm \ Helpers.pm \ + Monitor.pm \ .PHONY: install install: ${SOURCES} diff --git a/PVE/QemuServer/Memory.pm b/PVE/QemuServer/Memory.pm index f52f1d52..d500b3b5 100644 --- a/PVE/QemuServer/Memory.pm +++ b/PVE/QemuServer/Memory.pm @@ -7,6 +7,7 @@ use PVE::Tools qw(run_command lock_file lock_file_full file_read_firstline dir_g use PVE::Exception qw(raise raise_param_exc); use PVE::QemuServer; +use PVE::QemuServer::Monitor qw(mon_cmd); my $MAX_NUMA = 8; my $MAX_MEM = 4194304; @@ -141,7 +142,7 @@ sub qemu_memory_hotplug { my $hugepages_host_topology = hugepages_host_topology(); hugepages_allocate($hugepages_topology, $hugepages_host_topology); - eval { PVE::QemuServer::vm_mon_cmd($vmid, "object-add", 'qom-type' => "memory-backend-file", id => "mem-$name", props => { + eval { mon_cmd($vmid, "object-add", 'qom-type' => "memory-backend-file", id => "mem-$name", props => { size => int($dimm_size*1024*1024), 'mem-path' => $path, share => JSON::true, prealloc => JSON::true } ); }; if (my $err = $@) { hugepages_reset($hugepages_host_topology); @@ -153,7 +154,7 @@ sub qemu_memory_hotplug { eval { hugepages_update_locked($code); }; } else { - eval { PVE::QemuServer::vm_mon_cmd($vmid, "object-add", 'qom-type' => "memory-backend-ram", id => "mem-$name", props => { size => int($dimm_size*1024*1024) } ) }; + eval { mon_cmd($vmid, "object-add", 'qom-type' => "memory-backend-ram", id => "mem-$name", props => { size => int($dimm_size*1024*1024) } ) }; } if (my $err = $@) { @@ -161,7 +162,7 @@ sub qemu_memory_hotplug { die $err; } - eval { PVE::QemuServer::vm_mon_cmd($vmid, "device_add", driver => "pc-dimm", id => "$name", memdev => "mem-$name", node => $numanode) }; + eval { mon_cmd($vmid, "device_add", driver => "pc-dimm", 
id => "$name", memdev => "mem-$name", node => $numanode) }; if (my $err = $@) { eval { PVE::QemuServer::qemu_objectdel($vmid, "mem-$name"); }; die $err; @@ -201,7 +202,7 @@ sub qemu_memory_hotplug { sub qemu_dimm_list { my ($vmid) = @_; - my $dimmarray = PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "query-memory-devices"); + my $dimmarray = mon_cmd($vmid, "query-memory-devices"); my $dimms = {}; foreach my $dimm (@$dimmarray) { diff --git a/PVE/QemuServer/Monitor.pm b/PVE/QemuServer/Monitor.pm new file mode 100644 index 00000000..b69fef75 --- /dev/null +++ b/PVE/QemuServer/Monitor.pm @@ -0,0 +1,62 @@ +package PVE::QemuServer::Monitor; + +use strict; +use warnings; + +use PVE::SafeSyslog; +use PVE::QemuServer::Helpers; +use PVE::QMPClient; + +use base 'Exporter'; +our @EXPORT_OK = qw( +mon_cmd +); + +sub qmp_cmd { + my ($vmid, $cmd) = @_; + + my $res; + + my $timeout; + if ($cmd->{arguments}) { + $timeout = delete $cmd->{arguments}->{timeout}; + } + + eval { + die "VM $vmid not running\n" if !PVE::QemuServer::Helpers::vm_running_locally($vmid); + my $sname = PVE::QemuServer::Helpers::qmp_socket($vmid); + if (-e $sname) { # test if VM is reasonably new and supports qmp/qga + my $qmpclient = PVE::QMPClient->new(); + + $res = $qmpclient->cmd($vmid, $cmd, $timeout); + } else { + die "unable to open monitor socket\n"; + } + }; + if (my $err = $@) { + syslog("err", "VM $vmid qmp command failed - $err"); + die $err; + } + + return $res; +} + +sub mon_cmd { + my ($vmid, $execute, %params) = @_; + + my $cmd = { execute => $execute, arguments => \%params }; + qmp_cmd($vmid, $cmd); +} + +sub hmp_cmd { + my ($vmid, $cmdline) = @_; + + my $cmd = { + execute => 'human-monitor-command', + arguments => { 'command-line' => $cmdline }, + }; + + return qmp_cmd($vmid, $cmd); +} + +1; diff --git a/PVE/VZDump/QemuServer.pm b/PVE/VZDump/QemuServer.pm index e02a0698..ca932589 100644 --- a/PVE/VZDump/QemuServer.pm +++ b/PVE/VZDump/QemuServer.pm @@ -12,12 +12,14 @@ use PVE::Cluster 
qw(cfs_read_file); use PVE::INotify; use PVE::IPCC; use PVE::JSONSchema; +use PVE::QMPClient; use PVE::Storage::Plugin; use PVE::Storage; use PVE::Tools; use PVE::VZDump; use PVE::QemuServer; +use PVE::QemuServer::Monitor qw(mon_cmd); use base qw (PVE::VZDump::Plugin); @@ -417,7 +419,7 @@ sub archive { } if ($agent_running){ - eval { PVE::QemuServer::vm_mon_cmd($vmid, "guest-fsfreeze-freeze"); }; + eval { mon_cmd($vmid, "guest-fsfreeze-freeze"); }; if (my $err = $@) { $self->logerr($err); } @@ -427,7 +429,7 @@ sub archive { my $qmperr = $@; if ($agent_running){ - eval { PVE::QemuServer::vm_mon_cmd($vmid, "guest-fsfreeze-thaw"); }; + eval { mon_cmd($vmid, "guest-fsfreeze-thaw"); }; if (my $err = $@) { $self->logerr($err); } @@ -452,7 +454,7 @@ sub archive { } else { $self->loginfo("resuming VM again"); } - PVE::QemuServer::vm_mon_cmd($vmid, 'cont'); + mon_cmd($vmid, 'cont'); } my $status; @@ -465,7 +467,7 @@ sub archive { my $transferred; while(1) { - $status = PVE::QemuServer::vm_mon_cmd($vmid, 'query-backup'); + $status = mon_cmd($vmid, 'query-backup'); my $total = $status->{total} || 0; $transferred = $status->{transferred} || 0; my $per = $total ? int(($transferred * 100)/$total) : 0; @@ -524,7 +526,7 @@ sub archive { if ($err) { $self->logerr($err); $self->loginfo("aborting backup job"); - eval { PVE::QemuServer::vm_mon_cmd($vmid, 'backup-cancel'); }; + eval { mon_cmd($vmid, 'backup-cancel'); }; if (my $err1 = $@) { $self->logerr($err1); } @@ -533,7 +535,7 @@ sub archive { if ($stop_after_backup) { # stop if not running eval { - my $resp = PVE::QemuServer::vm_mon_cmd($vmid, 'query-status'); + my $resp = mon_cmd($vmid, 'query-status'); my $status = $resp && $resp->{status} ? 
$resp->{status} : 'unknown'; if ($status eq 'prelaunch') { $self->loginfo("stopping kvm after backup task"); diff --git a/test/snapshot-test.pm b/test/snapshot-test.pm index 09d3e74d..1a2b4c4d 100644 --- a/test/snapshot-test.pm +++ b/test/snapshot-test.pm @@ -312,13 +312,9 @@ sub vm_running_locally { # END mocked PVE::QemuServer::Helpers methods -# BEGIN redefine PVE::QemuServer methods +# BEGIN mocked PVE::QemuServer::Monitor methods -sub do_snapshots_with_qemu { - return 0; -} - -sub vm_qmp_command { +sub qmp_cmd { my ($vmid, $cmd, $nocheck) = @_; my $exec = $cmd->{execute}; @@ -351,6 +347,14 @@ sub vm_qmp_command { die "unexpected vm_qmp_command!\n"; } +# END mocked PVE::QemuServer::Monitor methods + +# BEGIN redefine PVE::QemuServer methods + +sub do_snapshots_with_qemu { + return 0; +} + sub vm_start { my ($storecfg, $vmid, $statefile, $skiplock, $migratedfrom, $paused, $forcemachine) = @_; @@ -380,6 +384,9 @@ PVE::Tools::run_command("cp -a snapshot-input snapshot-working"); my $qemu_helpers_module = new Test::MockModule('PVE::QemuServer::Helpers'); $qemu_helpers_module->mock('vm_running_locally', \&vm_running_locally); +my $qemu_monitor_module = new Test::MockModule('PVE::QemuServer::Monitor'); +$qemu_monitor_module->mock('qmp_cmd', \&qmp_cmd); + my $qemu_config_module = new Test::MockModule('PVE::QemuConfig'); $qemu_config_module->mock('config_file_lock', \&config_file_lock); $qemu_config_module->mock('cfs_config_path', \&cfs_config_path); -- 2.39.5