[PATCH] build: Move po subdir after everything else except po-docs
by Martin Kletzander
Otherwise the po subdirectory build complains about missing files that it has
no rules for, for example `builder/index-parser.c`.
Signed-off-by: Martin Kletzander <mkletzan@redhat.com>
---
Another issue I found when compiling on CentOS 7. Not sure why it happens, but
it only makes sense that the directory that uses files from other directories
(which might be generated) is processed after all the code directories.
Makefile.am | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/Makefile.am b/Makefile.am
index 1cc21961ae89..e5a28d70c555 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -41,7 +41,7 @@ endif
SUBDIRS += common/errnostring common/protocol common/qemuopts
SUBDIRS += common/utils
SUBDIRS += common/structs
-SUBDIRS += lib docs examples po
+SUBDIRS += lib docs examples
# The daemon and the appliance.
SUBDIRS += common/mlutils
@@ -201,6 +201,9 @@ SUBDIRS += \
utils/qemu-boot \
utils/qemu-speed-test
+# After all source files were used we can generate the translation strings
+SUBDIRS += po
+
# po-docs must come after tools, inspector.
if HAVE_PO4A
SUBDIRS += po-docs
--
2.23.0
[nbdkit PATCH 0/4] Spec compliance patches
by Eric Blake
The first one is the nastiest - it is an assertion failure caused
by a spec-compliant client and introduced by our security fix
that was released in 1.14.1.
Eric Blake (4):
server: Fix regression for NBD_OPT_INFO before NBD_OPT_GO
server: Fix back-to-back SET_META_CONTEXT
server: Forbid NUL in export and context names
server: Fix OPT_GO on different export than SET_META_CONTEXT
server/internal.h | 5 ++-
server/backend.c | 13 ++++++
server/connections.c | 2 +-
server/filters.c | 5 +--
server/plugins.c | 4 --
server/protocol-handshake-newstyle.c | 59 ++++++++++++++++++++++------
6 files changed, 67 insertions(+), 21 deletions(-)
--
2.21.0
[PATCH 0/2] v2v: do not try to re-install qemu-guest-agent
by Pino Toscano
In case qemu-guest-agent is already installed in the guest, do not try
to install it again from the RHV Tools ISO.
Pino Toscano (2):
v2v: linux: install linux tools after unconfigurations
v2v: linux: do not install qemu-guest-agent if already installed
v2v/convert_linux.ml | 13 +++++++++++--
1 file changed, 11 insertions(+), 2 deletions(-)
--
2.21.0
[PATCH] v2v: -o rhv-upload: add -oo rhv-disk-uuid option
by Pino Toscano
This way it is possible to override the UUIDs of the uploaded disks,
instead of letting RHV generate them.
This can be useful to force certain UUIDs, and to specify the disks in
--no-copy mode (which can now be used).
---
v2v/output_rhv_upload.ml | 43 ++++++++++++++++++++++++++++++++-----
v2v/rhv-upload-plugin.py | 2 ++
v2v/virt-v2v-output-rhv.pod | 23 ++++++++++++++++++++
3 files changed, 63 insertions(+), 5 deletions(-)
diff --git a/v2v/output_rhv_upload.ml b/v2v/output_rhv_upload.ml
index 40902c371..eec9c5c79 100644
--- a/v2v/output_rhv_upload.ml
+++ b/v2v/output_rhv_upload.ml
@@ -32,6 +32,7 @@ type rhv_options = {
rhv_cluster : string option;
rhv_direct : bool;
rhv_verifypeer : bool;
+ rhv_disk_uuids : string list option;
}
let print_output_options () =
@@ -41,6 +42,11 @@ let print_output_options () =
-oo rhv-cluster=CLUSTERNAME Set RHV cluster name.
-oo rhv-direct[=true|false] Use direct transfer mode (default: false).
-oo rhv-verifypeer[=true|false] Verify server identity (default: false).
+
+You can override the UUIDs of the disks, instead of using autogenerated UUIDs
+after their uploads (if you do, you must supply one for each disk):
+
+ -oo rhv-disk-uuid=UUID Disk UUID
")
let parse_output_options options =
@@ -48,6 +54,7 @@ let parse_output_options options =
let rhv_cluster = ref None in
let rhv_direct = ref false in
let rhv_verifypeer = ref false in
+ let rhv_disk_uuids = ref None in
List.iter (
function
@@ -63,6 +70,8 @@ let parse_output_options options =
| "rhv-direct", v -> rhv_direct := bool_of_string v
| "rhv-verifypeer", "" -> rhv_verifypeer := true
| "rhv-verifypeer", v -> rhv_verifypeer := bool_of_string v
+ | "rhv-disk-uuid", v ->
+ rhv_disk_uuids := Some (v :: (Option.default [] !rhv_disk_uuids))
| k, _ ->
error (f_"-o rhv-upload: unknown output option ‘-oo %s’") k
) options;
@@ -75,8 +84,9 @@ let parse_output_options options =
let rhv_cluster = !rhv_cluster in
let rhv_direct = !rhv_direct in
let rhv_verifypeer = !rhv_verifypeer in
+ let rhv_disk_uuids = Option.map List.rev !rhv_disk_uuids in
- { rhv_cafile; rhv_cluster; rhv_direct; rhv_verifypeer }
+ { rhv_cafile; rhv_cluster; rhv_direct; rhv_verifypeer; rhv_disk_uuids }
let nbdkit_python_plugin = Config.virt_v2v_nbdkit_python_plugin
let pidfile_timeout = 30
@@ -283,6 +293,16 @@ object
method install_rhev_apt = true
method prepare_targets source overlays _ _ _ _ =
+ let uuids =
+ match rhv_options.rhv_disk_uuids with
+ | None ->
+ List.map (fun _ -> None) overlays
+ | Some uuids ->
+ if List.length uuids <> List.length overlays then
+ error (f_"the number of ‘-oo rhv-disk-uuid’ parameters passed on the command line has to match the number of guest disk images (for this guest: %d)")
+ (List.length overlays);
+ List.map (fun uuid -> Some uuid) uuids in
+
let output_name = source.s_name in
let json_params =
("output_name", JSON.String output_name) :: json_params in
@@ -306,7 +326,7 @@ object
* target URI to point to the NBD socket.
*)
List.map (
- fun (target_format, ov) ->
+ fun ((target_format, ov), uuid) ->
let id = ov.ov_source.s_disk_id in
let disk_name = sprintf "%s-%03d" output_name id in
let json_params =
@@ -332,6 +352,12 @@ object
let json_params =
("diskid_file", JSON.String diskid_file) :: json_params in
+ let json_params =
+ match uuid with
+ | None -> json_params
+ | Some uuid ->
+ ("rhv_disk_uuid", JSON.String uuid) :: json_params in
+
(* Write the JSON parameters to a file. *)
let json_param_file = tmpdir // sprintf "params%d.json" id in
with_open_out
@@ -401,7 +427,7 @@ If the messages above are not sufficient to diagnose the problem then add the
"file.export", JSON.String "/";
] in
TargetURI ("json:" ^ JSON.string_of_doc json_params)
- ) overlays
+ ) (List.combine overlays uuids)
method disk_copied t i nr_disks =
(* Get the UUID of the disk image. This file is written
@@ -417,7 +443,14 @@ If the messages above are not sufficient to diagnose the problem then add the
disks_uuids <- disks_uuids @ [diskid];
method create_metadata source targets _ guestcaps inspect target_firmware =
- assert (List.length disks_uuids = List.length targets);
+ let image_uuids =
+ match rhv_options.rhv_disk_uuids, disks_uuids with
+ | None, [] ->
+ error (f_"there must be ‘-oo rhv-disk-uuid’ parameters passed on the command line to specify the UUIDs of guest disk images (for this guest: %d)")
+ (List.length targets)
+ | Some uuids, _ -> uuids
+ | None, uuids -> uuids in
+ assert (List.length image_uuids = List.length targets);
(* The storage domain UUID. *)
let sd_uuid =
@@ -433,7 +466,7 @@ If the messages above are not sufficient to diagnose the problem then add the
let ovf =
Create_ovf.create_ovf source targets guestcaps inspect
target_firmware output_alloc
- sd_uuid disks_uuids vol_uuids vm_uuid
+ sd_uuid image_uuids vol_uuids vm_uuid
OVirt in
let ovf = DOM.doc_to_string ovf in
diff --git a/v2v/rhv-upload-plugin.py b/v2v/rhv-upload-plugin.py
index 685680213..896c17942 100644
--- a/v2v/rhv-upload-plugin.py
+++ b/v2v/rhv-upload-plugin.py
@@ -135,6 +135,8 @@ def open(readonly):
disk_format = types.DiskFormat.COW
disk = disks_service.add(
disk = types.Disk(
+ # The ID is optional.
+ id = params.get('rhv_disk_uuid'),
name = params['disk_name'],
description = "Uploaded by virt-v2v",
format = disk_format,
diff --git a/v2v/virt-v2v-output-rhv.pod b/v2v/virt-v2v-output-rhv.pod
index 651f61dae..c91477f62 100644
--- a/v2v/virt-v2v-output-rhv.pod
+++ b/v2v/virt-v2v-output-rhv.pod
@@ -9,6 +9,7 @@ virt-v2v-output-rhv - Using virt-v2v to convert guests to oVirt or RHV
[-oo rhv-cafile=FILE]
[-oo rhv-cluster=CLUSTER]
[-oo rhv-direct]
+ [-oo rhv-disk-uuid=UUID ...]
[-oo rhv-verifypeer]
virt-v2v [-i* options] -o rhv -os [esd:/path|/path]
@@ -104,6 +105,28 @@ F</etc/pki/ovirt-engine/ca.pem> on the oVirt engine.
Set the RHV Cluster Name. If not given it uses C<Default>.
+=item I<-oo rhv-disk-uuid=>C<UUID>
+
+This option is used to specify UUIDs for the disks when creating the
+virtual machine. However, please note that:
+
+=over 4
+
+=item *
+
+you B<must> pass as many I<-oo rhv-disk-uuid=UUID> options as the
+amount of disks in the guest
+
+=item *
+
+the specified UUIDs are used as they are, without checking whether
+they are already used by other disks
+
+=back
+
+This option is considered advanced, and to be used mostly in
+combination with I<--no-copy>.
+
=item I<-oo rhv-direct>
If this option is given then virt-v2v will attempt to directly upload
--
2.21.0
[PATCH] v2v: -o rhv-upload: check for a valid image transfer right away
by Pino Toscano
Check for the INITIALIZING state of the image transfer right away, without
sleeping 5 seconds before even the first check: this way, if the transfer is
already in the right state there is no need to wait at all.
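The general shape of the change, as a standalone C sketch (illustrative
only; the plugin itself is Python and transfer_ready is a hypothetical
stand-in for the SDK status check):

  #include <stdbool.h>
  #include <time.h>
  #include <unistd.h>

  extern bool transfer_ready (void);   /* hypothetical status check */

  static int
  wait_until_ready (int timeout_secs)
  {
    time_t deadline = time (NULL) + timeout_secs;
    while (!transfer_ready ()) {       /* checked before any sleep */
      if (time (NULL) > deadline)
        return -1;                     /* caller cancels the transfer */
      sleep (5);                       /* then poll every 5 seconds */
    }
    return 0;
  }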
---
v2v/rhv-upload-plugin.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/v2v/rhv-upload-plugin.py b/v2v/rhv-upload-plugin.py
index 51a7f381a..685680213 100644
--- a/v2v/rhv-upload-plugin.py
+++ b/v2v/rhv-upload-plugin.py
@@ -188,7 +188,6 @@ def open(readonly):
# actual transfer can start when its status is "Transferring".
endt = time.time() + timeout
while True:
- time.sleep(5)
transfer = transfer_service.get()
if transfer.phase != types.ImageTransferPhase.INITIALIZING:
break
@@ -196,6 +195,7 @@ def open(readonly):
transfer_service.cancel()
raise RuntimeError("timed out waiting for transfer status "
"!= INITIALIZING")
+ time.sleep(5)
# Now we have permission to start the transfer.
if params['rhv_direct']:
--
2.21.0
[PATCH] v2v: -o rhv-upload: cancel disk transfer on open failure
by Pino Toscano
Make sure to cancel the transfer in RHV in case of failure during the
open/creation of the disk, so that the disk is automatically removed.
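The shape of the fix as a hypothetical C sketch (the plugin is Python;
start_transfer, wait_ready, check_server and cancel_transfer are
illustrative stand-ins): every error path reached after the transfer
exists must cancel it, otherwise the half-created disk is leaked on the
server.

  #include <stddef.h>

  typedef struct transfer transfer_t;  /* opaque, illustrative */
  extern transfer_t *start_transfer (void);
  extern int wait_ready (transfer_t *t);
  extern int check_server (transfer_t *t);
  extern void cancel_transfer (transfer_t *t);

  static int
  open_for_upload (void)
  {
    transfer_t *t = start_transfer ();
    if (t == NULL)
      return -1;                       /* nothing to clean up yet */
    if (wait_ready (t) == -1)
      goto cancel;                     /* timed out in INITIALIZING */
    if (check_server (t) == -1)
      goto cancel;                     /* unusable transfer URL */
    return 0;
   cancel:
    cancel_transfer (t);               /* RHV then removes the disk */
    return -1;
  }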
---
v2v/rhv-upload-plugin.py | 4 ++++
1 file changed, 4 insertions(+)
diff --git a/v2v/rhv-upload-plugin.py b/v2v/rhv-upload-plugin.py
index 57e90484f..51a7f381a 100644
--- a/v2v/rhv-upload-plugin.py
+++ b/v2v/rhv-upload-plugin.py
@@ -193,12 +193,14 @@ def open(readonly):
if transfer.phase != types.ImageTransferPhase.INITIALIZING:
break
if time.time() > endt:
+ transfer_service.cancel()
raise RuntimeError("timed out waiting for transfer status "
"!= INITIALIZING")
# Now we have permission to start the transfer.
if params['rhv_direct']:
if transfer.transfer_url is None:
+ transfer_service.cancel()
raise RuntimeError("direct upload to host not supported, "
"requires ovirt-engine >= 4.2 and only works "
"when virt-v2v is run within the oVirt/RHV "
@@ -225,6 +227,7 @@ def open(readonly):
destination_url.port,
)
else:
+ transfer_service.cancel()
raise RuntimeError("unknown URL scheme (%s)" % destination_url.scheme)
# The first request is to fetch the features of the server.
@@ -259,6 +262,7 @@ def open(readonly):
pass
else:
+ transfer_service.cancel()
raise RuntimeError("could not use OPTIONS request: %d: %s" %
(r.status, r.reason))
--
2.21.0
[PATCH nbdkit] server: Remove tricksy initialization of struct b_conn_handle.
by Richard W.M. Jones
b_conn_handle fields exportsize and can_* have a special meaning when
they are -1. It means that the value of the field has not been
computed and cached yet.
The struct was being initialized using memset (_, -1, _) which does
indeed have the effect of setting all the fields to -1, although it's
somewhat non-obvious to say the least.
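As a standalone sketch (not part of the patch) of why the trick works:
memset writes the byte 0xFF into every byte of the struct, and in two's
complement an all-ones bit pattern is -1 for any signed integer width.

  #include <assert.h>
  #include <stdint.h>
  #include <string.h>

  int
  main (void)
  {
    struct { void *handle; int64_t exportsize; int can_write; } h;
    memset (&h, -1, sizeof h);
    assert (h.exportsize == -1);       /* all-ones bit pattern == -1 */
    assert (h.can_write == -1);
    /* h.handle is all-ones too, which is not NULL, hence the old
     * code's separate handle = NULL assignment. */
    return 0;
  }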
This commit replaces it with ordinary field initialization, also
setting h->handle to NULL. By keeping the initialization function
next to the struct definition, hopefully they will be updated in
tandem in future.
GCC 9.2.1 actually optimizes this back into the memset equivalent
using inline AVX instructions, so good job there!
Simple refactoring, should have no effect on how the code works.
See commit d60d0f4248610fc1d116dc9f249526d20913c9a3.
---
server/connections.c | 3 +--
server/internal.h | 17 +++++++++++++++++
2 files changed, 18 insertions(+), 2 deletions(-)
diff --git a/server/connections.c b/server/connections.c
index 819f7b8..ec28815 100644
--- a/server/connections.c
+++ b/server/connections.c
@@ -269,9 +269,8 @@ new_connection (int sockin, int sockout, int nworkers)
goto error;
}
conn->nr_handles = backend->i + 1;
- memset (conn->handles, -1, conn->nr_handles * sizeof *conn->handles);
for_each_backend (b)
- conn->handles[b->i].handle = NULL;
+ reset_b_conn_handle (&conn->handles[b->i]);
conn->status = 1;
conn->nworkers = nworkers;
diff --git a/server/internal.h b/server/internal.h
index c31bb34..604dd89 100644
--- a/server/internal.h
+++ b/server/internal.h
@@ -168,6 +168,23 @@ struct b_conn_handle {
int can_cache;
};
+static inline void
+reset_b_conn_handle (struct b_conn_handle *h)
+{
+ h->handle = NULL;
+ h->exportsize = -1;
+ h->can_write = -1;
+ h->can_flush = -1;
+ h->is_rotational = -1;
+ h->can_trim = -1;
+ h->can_zero = -1;
+ h->can_fast_zero = -1;
+ h->can_fua = -1;
+ h->can_multi_conn = -1;
+ h->can_extents = -1;
+ h->can_cache = -1;
+}
+
struct connection {
pthread_mutex_t request_lock;
pthread_mutex_t read_lock;
--
2.23.0
[PATCH nbdkit 0/2] Add new retry filter.
by Richard W.M. Jones
This is a retry filter implementation as outlined here:
https://www.redhat.com/archives/libguestfs/2019-September/msg00167.html
It is only lightly tested. One way to test it is to try an SSH copy
(see the commit message for patch 2/2), and in the middle of the copy
kill the per-connection sshd on the remote machine. You will see that
the copy recovers after a few seconds. Add the nbdkit -v option to
see some useful messages from the filter:
...
nbdkit: ssh[1]: debug: ssh: pread count=2097152 offset=216006656
nbdkit: ssh[1]: error: read failed: Socket error: Connection reset by peer (-1)
nbdkit: ssh[1]: debug: retry 1: original errno = 5
nbdkit: ssh[1]: debug: waiting 2 seconds before retrying
nbdkit: ssh[1]: debug: ssh: reopen
nbdkit: ssh[1]: debug: close
nbdkit: ssh[1]: error: cannot close file: Socket error: Connection reset by peer
nbdkit: ssh[1]: debug: ssh: open readonly=0
nbdkit: ssh[1]: debug: opened libssh handle
nbdkit: ssh[1]: debug: ssh: pread count=2097152 offset=216006656
...
It really needs an actual test, which is tricky to implement. My
thinking currently is that a custom (sh) plugin is the way to go, but
it would also be nice to test curl/ssh/vddk with this filter -- if
anyone has any suggestions on how to do that ...
This patch will probably conflict with Eric's series here:
https://www.redhat.com/archives/libguestfs/2019-September/msg00180.html
but that shouldn't be too hard to fix up.
Rich.
Thoughts on nbdkit automatic reconnection
by Richard W.M. Jones
We have a running problem with the nbdkit VDDK plugin where the VDDK
side apparently disconnects or the network connection is interrupted.
During a virt-v2v conversion this causes the entire operation to fail,
and since v2v conversions take many hours that's not a happy outcome.
(Aside: I should say that we see many cases where it's claimed that
the connection was dropped, but often when we examine them in detail
the cause is something else. But it seems like this disconnection
thing does happen sometimes.)
To put this in concrete terms which don't involve v2v, let's say
you were doing something like:
  nbdkit ssh host=remote /var/tmp/test.iso \
    --run 'qemu-img convert -p -f raw $nbd -O qcow2 test.qcow2'
which copies a file over ssh to the local machine. If /var/tmp/test.iso is very
large and/or the connection is very slow, and the network connection
is interrupted then the whole operation fails. If nbdkit could
retry/reconnect on failure then the operation might succeed.
There are lots of parameters associated with retrying, e.g.:
- how often should you retry before giving up?
- how long should you wait between retries?
- which errors should cause a retry, which are a hard failure?
So I had an idea we could implement this as a generic "retry" filter,
like:
  nbdkit ssh ... --filter=retry retries=5 retry-delay=5 retry-exponential=yes
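To sketch what the filter would do around each data call (hypothetical
code: retries, retry_delay, plugin_pread and plugin_reopen are
illustrative names, not nbdkit API):

  #include <stdint.h>
  #include <unistd.h>

  extern int plugin_pread (void *h, void *buf, uint32_t count,
                           uint64_t offset);
  extern int plugin_reopen (void *h);  /* the open question below */

  static int retries = 5, retry_delay = 5, retry_exponential = 1;

  static int
  retry_pread (void *h, void *buf, uint32_t count, uint64_t offset)
  {
    unsigned delay = retry_delay;
    for (int i = 0; ; ++i) {
      if (plugin_pread (h, buf, count, offset) == 0)
        return 0;                      /* success */
      if (i >= retries)
        return -1;                     /* give up, propagate the error */
      sleep (delay);
      if (retry_exponential)
        delay *= 2;
      plugin_reopen (h);               /* fresh connection for next try */
    }
  }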
This cannot be implemented with the current design of filters because
a filter would have to call the plugin .close and .open methods, but
filters don't have access to those from regular data functions, and in
any case this would cause a new plugin handle to be allocated.
We could probably do it if we added a special .reopen method to
plugins. We could either require plugins which support the concept of
retrying to implement this, or we could have a generic implementation
in server/backend.c which would call .close, .open and cope with the
new handle.
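In outline, the generic version might look something like this
(hypothetical sketch; the struct layouts are simplified stand-ins for
nbdkit's real internals):

  #include <stddef.h>

  struct connection;
  struct backend {
    void *(*open) (struct backend *, struct connection *, int readonly);
    void (*close) (struct backend *, struct connection *, void *handle);
  };
  struct connection { void *handle; };  /* simplified: one handle */

  static int
  backend_reopen (struct backend *b, struct connection *conn, int readonly)
  {
    if (conn->handle)
      b->close (b, conn, conn->handle); /* drop the dead handle */
    conn->handle = b->open (b, conn, readonly);
    if (conn->handle == NULL)
      return -1;                        /* reopen failed: hard error */
    return 0;
  }

The data functions would then retry through this instead of ever
touching a stale handle.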
Another way to do this would be to modify each plugin to add the
feature. nbdkit-nbd-plugin has this for a very limited case, but no
others do, and it's quite complex to implement in plugins. As far as
I can see it involves checking the return value of any data call that
the plugin makes and performing the reconnection logic, while not
changing the handle (so just calling self->close, self->open isn't
going to work).
If anyone has any thoughts about this I'd be happy to hear them.
Rich.
--
Richard Jones, Virtualization Group, Red Hat http://people.redhat.com/~rjones
Read my programming and virtualization blog: http://rwmj.wordpress.com
virt-builder quickly builds VMs from scratch
http://libguestfs.org/virt-builder.1.html