Hi Richard,
I added both v2 (a slightly enhanced v1) and v3 (which uses the enum).
I have to say that I prefer v2 a lot more than v3 - the enum just
makes it worse in my opinion (maybe I just "implemented it wrong") -
tell me what you think.
Thanks!
On Tue, Mar 16, 2021 at 12:36 PM Sam Eiderman <sameid@google.com> wrote:
By using:
export LIBGUESTFS_BACKEND_SETTINGS=force_kvm
you can force the backend to use KVM and never fall back to
TCG (software emulation).
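(Not part of the patch - just a rough sketch: the same setting can
also be applied programmatically through the C API, assuming a
libguestfs recent enough to provide guestfs_set_backend_settings.
Something like:)

  #include <stdlib.h>
  #include <guestfs.h>

  int
  main (void)
  {
    guestfs_h *g;
    /* NULL-terminated list of backend settings, equivalent to
     * export LIBGUESTFS_BACKEND_SETTINGS=force_kvm
     */
    char *settings[] = { (char *) "force_kvm", NULL };

    g = guestfs_create ();
    if (g == NULL)
      exit (EXIT_FAILURE);

    if (guestfs_set_backend_settings (g, settings) == -1)
      exit (EXIT_FAILURE);

    /* With force_kvm, launching should fail on a host without KVM
     * rather than silently falling back to TCG.
     */
    if (guestfs_launch (g) == -1)
      exit (EXIT_FAILURE);

    guestfs_close (g);
    exit (EXIT_SUCCESS);
  }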
---
lib/guestfs-internal.h |  3 +++
lib/guestfs.pod        |  9 +++++++++
lib/launch-direct.c    | 42 ++++++++++++++++++++++++++++++++++++------
lib/launch-libvirt.c   | 29 ++++++++++++++++++++++++-----
4 files changed, 72 insertions(+), 11 deletions(-)
diff --git a/lib/guestfs-internal.h b/lib/guestfs-internal.h
index 43509c6db..97dacc2b8 100644
--- a/lib/guestfs-internal.h
+++ b/lib/guestfs-internal.h
@@ -157,6 +157,9 @@ guestfs_int_cleanup_gl_recursive_lock_unlock (void *ptr)
#define VIRTIO_DEVICE_NAME(type) type "-pci"
#endif
+/* Guestfs accel options. */
+enum accel { BEST_ACCEL, FORCE_TCG, FORCE_KVM };
+
/* Guestfs handle and associated structures. */
/* State. */
diff --git a/lib/guestfs.pod b/lib/guestfs.pod
index bce9eb79f..ff58aa0bb 100644
--- a/lib/guestfs.pod
+++ b/lib/guestfs.pod
@@ -1530,6 +1530,15 @@ Using:
will force the direct and libvirt backends to use TCG (software
emulation) instead of KVM (hardware accelerated virtualization).
+=head3 force_kvm
+
+Using:
+
+ export LIBGUESTFS_BACKEND_SETTINGS=force_kvm
+
+will force the direct and libvirt backends to use KVM (hardware
+accelerated virtualization) instead of TCG (software emulation).
+
=head3 gdb
The direct backend supports:
diff --git a/lib/launch-direct.c b/lib/launch-direct.c
index b6ed9766f..416a87fa0 100644
--- a/lib/launch-direct.c
+++ b/lib/launch-direct.c
@@ -385,6 +385,9 @@ launch_direct (guestfs_h *g, void *datav, const char *arg)
struct hv_param *hp;
bool has_kvm;
int force_tcg;
+ int force_kvm;
+ enum accel accel;
+ const char *accel_val;
const char *cpu_model;
CLEANUP_FREE char *append = NULL;
CLEANUP_FREE_STRING_LIST char **argv = NULL;
@@ -434,8 +437,35 @@ launch_direct (guestfs_h *g, void *datav, const char *arg)
if (force_tcg == -1)
return -1;
- if (!has_kvm && !force_tcg)
- debian_kvm_warning (g);
+ force_kvm = guestfs_int_get_backend_setting_bool (g, "force_kvm");
+ if (force_kvm == -1)
+ return -1;
+
+ if (force_kvm && force_tcg) {
+ error (g, "Both force_kvm and force_tcg backend settings supplied.");
+ return -1;
+ }
+ else if (force_tcg) {
+ accel = FORCE_TCG;
+ accel_val = "tcg";
+ }
+ else if (force_kvm) {
+ accel = FORCE_KVM;
+ accel_val = "kvm"
+ }
+ else {
+ accel = BEST_ACCEL;
+ accel_val = "kvm:tcg";
+ }
+
+ if (!has_kvm) {
+ if (accel != FORCE_TCG)
+ debian_kvm_warning (g);
+ if (accel == FORCE_KVM) {
+ error (g, "force_kvm supplied but kvm not available.");
+ return -1;
+ }
+ }
/* Using virtio-serial, we need to create a local Unix domain socket
* for qemu to connect to.
@@ -527,13 +557,13 @@ launch_direct (guestfs_h *g, void *datav, const char *arg)
append_list (MACHINE_TYPE);
#endif
#ifdef __aarch64__
- if (has_kvm && !force_tcg)
+ if (has_kvm && accel != FORCE_TCG)
append_list ("gic-version=host");
#endif
- append_list_format ("accel=%s", !force_tcg ? "kvm:tcg" : "tcg");
+ append_list_format ("accel=%s", accel_val);
} end_list ();
- cpu_model = guestfs_int_get_cpu_model (has_kvm && !force_tcg);
+ cpu_model = guestfs_int_get_cpu_model (has_kvm && accel != FORCE_TCG);
if (cpu_model)
arg ("-cpu", cpu_model);
@@ -690,7 +720,7 @@ launch_direct (guestfs_h *g, void *datav, const char *arg)
}
flags = 0;
- if (!has_kvm || force_tcg)
+ if (!has_kvm || accel == FORCE_TCG)
flags |= APPLIANCE_COMMAND_LINE_IS_TCG;
append = guestfs_int_appliance_command_line (g, appliance, flags);
arg ("-append", append);
diff --git a/lib/launch-libvirt.c b/lib/launch-libvirt.c
index eff1c8f7e..b8ad8d265 100644
--- a/lib/launch-libvirt.c
+++ b/lib/launch-libvirt.c
@@ -772,6 +772,8 @@ parse_capabilities (guestfs_h *g, const char *capabilities_xml,
xmlAttrPtr attr;
size_t seen_qemu, seen_kvm;
int force_tcg;
+ int force_kvm;
+ enum accel accel;
doc = xmlReadMemory (capabilities_xml, strlen (capabilities_xml),
NULL, NULL, XML_PARSE_NONET);
@@ -819,11 +821,30 @@ parse_capabilities (guestfs_h *g, const char *capabilities_xml,
}
}
+ force_tcg = guestfs_int_get_backend_setting_bool (g, "force_tcg");
+ if (force_tcg == -1)
+ return -1;
+
+ force_kvm = guestfs_int_get_backend_setting_bool (g, "force_kvm");
+ if (force_kvm == -1)
+ return -1;
+
+ if (force_kvm && force_tcg) {
+ error (g, "Both force_kvm and force_tcg backend settings supplied.");
+ return -1;
+ }
+ else if (force_tcg)
+ accel = FORCE_TCG;
+ else if (force_kvm)
+ accel = FORCE_KVM;
+ else
+ accel = BEST_ACCEL;
+
/* This was RHBZ#886915: in that case the default libvirt URI
* pointed to a Xen hypervisor, and so could not create the
* appliance VM.
*/
- if (!seen_qemu && !seen_kvm) {
+ if ((!seen_qemu || accel == FORCE_KVM) && !seen_kvm) {
CLEANUP_FREE char *backend = guestfs_get_backend (g);
error (g,
@@ -841,11 +862,9 @@ parse_capabilities (guestfs_h *g, const char *capabilities_xml,
return -1;
}
- force_tcg = guestfs_int_get_backend_setting_bool (g, "force_tcg");
- if (force_tcg == -1)
- return -1;
+ assert ((accel != FORCE_KVM) || seen_kvm);
- if (!force_tcg)
+ if (accel != FORCE_TCG)
data->is_kvm = seen_kvm;
else
data->is_kvm = 0;
--
2.31.0.rc2.261.g7f71774620-goog