By using:
export LIBGUESTFS_BACKEND_SETTINGS=force_kvm
you can force the backend to use KVM and never fall back to
TCG (software emulation).
---
lib/guestfs.pod | 9 +++++++++
lib/launch-direct.c | 26 +++++++++++++++++++++++---
lib/launch-libvirt.c | 15 ++++++++++++++-
3 files changed, 46 insertions(+), 4 deletions(-)
diff --git a/lib/guestfs.pod b/lib/guestfs.pod
index d746a41b1..a17e2e689 100644
--- a/lib/guestfs.pod
+++ b/lib/guestfs.pod
@@ -1529,6 +1529,15 @@ Using:
will force the direct and libvirt backends to use TCG (software
emulation) instead of KVM (hardware accelerated virtualization).
+=head3 force_kvm
+
+Using:
+
+ export LIBGUESTFS_BACKEND_SETTINGS=force_kvm
+
+will force the direct and libvirt backends to use KVM (hardware
+accelerated virtualization) instead of TCG (software emulation).
+
=head3 gdb
The direct backend supports:
diff --git a/lib/launch-direct.c b/lib/launch-direct.c
index b6ed9766f..aae5b8f4b 100644
--- a/lib/launch-direct.c
+++ b/lib/launch-direct.c
@@ -385,6 +385,8 @@ launch_direct (guestfs_h *g, void *datav, const char *arg)
struct hv_param *hp;
bool has_kvm;
int force_tcg;
+ int force_kvm;
+ const char *accel_val = "kvm:tcg";
const char *cpu_model;
CLEANUP_FREE char *append = NULL;
CLEANUP_FREE_STRING_LIST char **argv = NULL;
@@ -433,9 +435,27 @@ launch_direct (guestfs_h *g, void *datav, const char *arg)
force_tcg = guestfs_int_get_backend_setting_bool (g, "force_tcg");
if (force_tcg == -1)
return -1;
+ else if (force_tcg)
+ accel_val = "tcg";
- if (!has_kvm && !force_tcg)
- debian_kvm_warning (g);
+ force_kvm = guestfs_int_get_backend_setting_bool (g, "force_kvm");
+ if (force_kvm == -1)
+ return -1;
+ else if (force_kvm)
+ accel_val = "kvm";
+
+ if (force_kvm && force_tcg) {
+ error (g, "Both force_kvm and force_tcg backend settings supplied.");
+ return -1;
+ }
+ if (!has_kvm) {
+ if (!force_tcg)
+ debian_kvm_warning (g);
+ if (force_kvm) {
+ error (g, "force_kvm supplied but kvm not available.");
+ return -1;
+ }
+ }
/* Using virtio-serial, we need to create a local Unix domain socket
* for qemu to connect to.
@@ -530,7 +550,7 @@ launch_direct (guestfs_h *g, void *datav, const char *arg)
if (has_kvm && !force_tcg)
append_list ("gic-version=host");
#endif
- append_list_format ("accel=%s", !force_tcg ? "kvm:tcg" : "tcg");
+ append_list_format ("accel=%s", accel_val);
} end_list ();
cpu_model = guestfs_int_get_cpu_model (has_kvm && !force_tcg);
diff --git a/lib/launch-libvirt.c b/lib/launch-libvirt.c
index 6c0cfe937..a52f6ec4f 100644
--- a/lib/launch-libvirt.c
+++ b/lib/launch-libvirt.c
@@ -773,6 +773,7 @@ parse_capabilities (guestfs_h *g, const char *capabilities_xml,
xmlAttrPtr attr;
size_t seen_qemu, seen_kvm;
int force_tcg;
+ int force_kvm;
doc = xmlReadMemory (capabilities_xml, strlen (capabilities_xml),
NULL, NULL, XML_PARSE_NONET);
@@ -820,11 +821,15 @@ parse_capabilities (guestfs_h *g, const char *capabilities_xml,
}
}
+ force_kvm = guestfs_int_get_backend_setting_bool (g, "force_kvm");
+ if (force_kvm == -1)
+ return -1;
+
/* This was RHBZ#886915: in that case the default libvirt URI
* pointed to a Xen hypervisor, and so could not create the
* appliance VM.
*/
- if (!seen_qemu && !seen_kvm) {
+ if ((!seen_qemu || force_kvm) && !seen_kvm) {
CLEANUP_FREE char *backend = guestfs_get_backend (g);
error (g,
@@ -846,6 +851,14 @@ parse_capabilities (guestfs_h *g, const char *capabilities_xml,
if (force_tcg == -1)
return -1;
+ if (force_kvm && force_tcg) {
+ error (g, "Both force_kvm and force_tcg backend settings supplied.");
+ return -1;
+ }
+
+ /* if force_kvm then seen_kvm */
+ assert (!force_kvm || seen_kvm);
+
if (!force_tcg)
data->is_kvm = seen_kvm;
else
--
2.31.0.rc2.261.g7f71774620-goog