You can now use -o rhv (-o rhev is supported for compatibility).
---
v2v/Makefile.am | 8 +-
v2v/OVF.ml | 30 +++----
v2v/OVF.mli | 2 +-
v2v/changeuid.mli | 4 +-
v2v/cmdline.ml | 12 +--
v2v/convert_windows.ml | 2 +-
v2v/{output_rhev.ml => output_rhv.ml} | 26 +++---
v2v/{output_rhev.mli => output_rhv.mli} | 8 +-
v2v/output_vdsm.ml | 4 +-
v2v/test-harness/virt-v2v-test-harness.pod | 4 +-
v2v/test-v2v-no-copy.sh | 6 +-
v2v/{test-v2v-o-rhev.sh => test-v2v-o-rhv.sh} | 10 +--
v2v/types.mli | 2 +-
v2v/virt-v2v.pod | 110 +++++++++++++-------------
14 files changed, 116 insertions(+), 112 deletions(-)
rename v2v/{output_rhev.ml => output_rhv.ml} (90%)
rename v2v/{output_rhev.mli => output_rhv.mli} (83%)
rename v2v/{test-v2v-o-rhev.sh => test-v2v-o-rhv.sh} (95%)
diff --git a/v2v/Makefile.am b/v2v/Makefile.am
index 5e3c3eb..2802056 100644
--- a/v2v/Makefile.am
+++ b/v2v/Makefile.am
@@ -49,7 +49,7 @@ SOURCES_MLI = \
output_local.mli \
output_null.mli \
output_qemu.mli \
- output_rhev.mli \
+ output_rhv.mli \
output_vdsm.mli \
OVF.mli \
target_bus_assignment.mli \
@@ -94,7 +94,7 @@ SOURCES_ML = \
output_libvirt.ml \
output_local.ml \
output_qemu.ml \
- output_rhev.ml \
+ output_rhv.ml \
output_vdsm.ml \
inspect_source.ml \
target_bus_assignment.ml \
@@ -283,7 +283,7 @@ TESTS += \
test-v2v-o-libvirt.sh \
test-v2v-o-null.sh \
test-v2v-o-qemu.sh \
- test-v2v-o-rhev.sh \
+ test-v2v-o-rhv.sh \
test-v2v-o-vdsm-options.sh \
test-v2v-oa-option.sh \
test-v2v-of-option.sh \
@@ -373,7 +373,7 @@ EXTRA_DIST += \
test-v2v-o-libvirt.sh \
test-v2v-o-null.sh \
test-v2v-o-qemu.sh \
- test-v2v-o-rhev.sh \
+ test-v2v-o-rhv.sh \
test-v2v-o-vdsm-options.sh \
test-v2v-oa-option.sh \
test-v2v-of-option.sh \
diff --git a/v2v/OVF.ml b/v2v/OVF.ml
index eea1e63..4c93c12 100644
--- a/v2v/OVF.ml
+++ b/v2v/OVF.ml
@@ -108,11 +108,11 @@ and get_ostype = function
| { i_type = "linux" } -> "OtherLinux"
| { i_type = "windows"; i_major_version = 5; i_minor_version = 1 } ->
- "WindowsXP" (* no architecture differentiation of XP on RHEV *)
+ "WindowsXP" (* no architecture differentiation of XP on RHV *)
| { i_type = "windows"; i_major_version = 5; i_minor_version = 2;
i_product_name = product } when String.find product "XP" >= 0 ->
- "WindowsXP" (* no architecture differentiation of XP on RHEV *)
+ "WindowsXP" (* no architecture differentiation of XP on RHV *)
| { i_type = "windows"; i_major_version = 5; i_minor_version = 2;
i_arch = "i386" } ->
@@ -206,7 +206,7 @@ let origin_of_source_hypervisor = function
(* Generate the .meta file associated with each volume. *)
let create_meta_files output_alloc sd_uuid image_uuids targets =
(* Note: Upper case in the .meta, mixed case in the OVF. *)
- let output_alloc_for_rhev =
+ let output_alloc_for_rhv =
match output_alloc with
| Sparse -> "SPARSE"
| Preallocated -> "PREALLOCATED" in
@@ -220,12 +220,12 @@ let create_meta_files output_alloc sd_uuid image_uuids targets =
ov.ov_virtual_size;
ov.ov_virtual_size /^ 512L in
- let format_for_rhev =
+ let format_for_rhv =
match t.target_format with
| "raw" -> "RAW"
| "qcow2" -> "COW"
| _ ->
- error (f_"RHEV does not support the output format '%s', only raw
or qcow2") t.target_format in
+ error (f_"RHV does not support the output format '%s', only raw or
qcow2") t.target_format in
let buf = Buffer.create 256 in
let bpf fs = bprintf buf fs in
@@ -239,8 +239,8 @@ let create_meta_files output_alloc sd_uuid image_uuids targets =
bpf "LEGALITY=LEGAL\n";
bpf "POOL_UUID=\n";
bpf "SIZE=%Ld\n" size_in_sectors;
- bpf "FORMAT=%s\n" format_for_rhev;
- bpf "TYPE=%s\n" output_alloc_for_rhev;
+ bpf "FORMAT=%s\n" format_for_rhv;
+ bpf "TYPE=%s\n" output_alloc_for_rhv;
bpf "DESCRIPTION=%s\n" (String.replace generated_by "="
"_");
bpf "EOF\n";
Buffer.contents buf
@@ -329,7 +329,7 @@ let rec create_ovf source targets guestcaps inspect
e "rasd:ResourceType" [] [PCData "23"];
e "rasd:UsbPolicy" [] [PCData "Disabled"];
];
- (* We always add a qxl device when outputting to RHEV.
+ (* We always add a qxl device when outputting to RHV.
* See RHBZ#1213701 and RHBZ#1211231 for the reasoning
* behind that.
*)
@@ -366,7 +366,7 @@ let rec create_ovf source targets guestcaps inspect
*)
(match source with
| { s_display = Some { s_password = Some _ } } ->
- warning (f_"This guest required a password for connection to its display, but
this is not supported by RHEV. Therefore the converted guest's display will not
require a separate password to connect.");
+ warning (f_"This guest required a password for connection to its display, but
this is not supported by RHV. Therefore the converted guest's display will not
require a separate password to connect.");
| _ -> ());
if verbose () then (
@@ -408,7 +408,7 @@ and add_disks targets guestcaps output_alloc sd_uuid image_uuids
vol_uuids ovf =
let fileref = sprintf "%s/%s" image_uuid vol_uuid in
(* ovf:size and ovf:actual_size fields are integer GBs. If you
- * use floating point numbers then RHEV will fail to parse them.
+ * use floating point numbers then RHV will fail to parse them.
* In case the size is just below a gigabyte boundary, round up.
*)
let bytes_to_gb b =
@@ -425,15 +425,15 @@ and add_disks targets guestcaps output_alloc sd_uuid image_uuids
vol_uuids ovf =
| None, Some estimated_size -> Some (bytes_to_gb estimated_size), true
| None, None -> None, false in
- let format_for_rhev =
+ let format_for_rhv =
match t.target_format with
| "raw" -> "RAW"
| "qcow2" -> "COW"
| _ ->
- error (f_"RHEV does not support the output format '%s', only raw
or qcow2") t.target_format in
+ error (f_"RHV does not support the output format '%s', only raw or
qcow2") t.target_format in
(* Note: Upper case in the .meta, mixed case in the OVF. *)
- let output_alloc_for_rhev =
+ let output_alloc_for_rhv =
match output_alloc with
| Sparse -> "Sparse"
| Preallocated -> "Preallocated" in
@@ -456,8 +456,8 @@ and add_disks targets guestcaps output_alloc sd_uuid image_uuids
vol_uuids ovf =
"ovf:fileRef", fileref;
"ovf:parentRef", "";
"ovf:vm_snapshot_id", uuidgen ();
- "ovf:volume-format", format_for_rhev;
- "ovf:volume-type", output_alloc_for_rhev;
+ "ovf:volume-format", format_for_rhv;
+ "ovf:volume-type", output_alloc_for_rhv;
"ovf:format", "http://en.wikipedia.org/wiki/Byte"; (* wtf?
*)
"ovf:disk-interface",
(match guestcaps.gcaps_block_bus with
diff --git a/v2v/OVF.mli b/v2v/OVF.mli
index 3b260a5..89b96da 100644
--- a/v2v/OVF.mli
+++ b/v2v/OVF.mli
@@ -21,7 +21,7 @@
val create_meta_files : Types.output_allocation -> string -> string list ->
Types.target list -> string list
(** Create the .meta file associated with each target.
- Note this does not write them, since output_rhev has to do a
+ Note this does not write them, since output_rhv has to do a
permissions dance when writing files. Instead the contents of each
file is returned (one per target), and they must be written to
[target_file ^ ".meta"]. *)
diff --git a/v2v/changeuid.mli b/v2v/changeuid.mli
index 7838f0f..52f5359 100644
--- a/v2v/changeuid.mli
+++ b/v2v/changeuid.mli
@@ -18,8 +18,8 @@
(** Functions for making files and directories as another user.
- [-o rhev] output mode has to write files as UID:GID 36:36,
- otherwise RHEV cannot read them. Because the files are located on
+ [-o rhv] output mode has to write files as UID:GID 36:36,
+ otherwise RHV cannot read them. Because the files are located on
NFS (and hence might be root-squashed) we also cannot chown the
files. We cannot setuid the whole process to 36:36 because it
needs to do other root things like mounting and unmounting the NFS
diff --git a/v2v/cmdline.ml b/v2v/cmdline.ml
index 9a56d60..7b57a6f 100644
--- a/v2v/cmdline.ml
+++ b/v2v/cmdline.ml
@@ -123,7 +123,7 @@ let parse_cmdline () =
| "libvirt" -> output_mode := `Libvirt
| "disk" | "local" -> output_mode := `Local
| "null" -> output_mode := `Null
- | "ovirt" | "rhev" -> output_mode := `RHEV
+ | "ovirt" | "rhv" | "rhev" -> output_mode := `RHV
| "qemu" -> output_mode := `QEmu
| "vdsm" -> output_mode := `VDSM
| s ->
@@ -218,7 +218,7 @@ let parse_cmdline () =
virt-v2v -ic
vpx://vcenter.example.com/Datacenter/esxi -os imported esx_guest
virt-v2v -ic
vpx://vcenter.example.com/Datacenter/esxi esx_guest \
- -o rhev -os rhev.nfs:/export_domain --network rhevm
+ -o rhv -os rhv.nfs:/export_domain --network rhvm
virt-v2v -i libvirtxml guest-domain.xml -o local -os /var/tmp
@@ -389,15 +389,15 @@ read the man page virt-v2v(1).
| Some d -> d in
Output_qemu.output_qemu os qemu_boot
- | `RHEV ->
+ | `RHV ->
let os =
match output_storage with
| None ->
- error (f_"-o rhev: output storage was not specified, use
'-os'");
+ error (f_"-o rhv: output storage was not specified, use
'-os'");
| Some d -> d in
if qemu_boot then
- error (f_"-o rhev: --qemu-boot option cannot be used in this output
mode");
- Output_rhev.output_rhev os output_alloc
+ error (f_"-o rhv: --qemu-boot option cannot be used in this output
mode");
+ Output_rhv.output_rhv os output_alloc
| `VDSM ->
let os =
diff --git a/v2v/convert_windows.ml b/v2v/convert_windows.ml
index 25acdcc..f8337a0 100644
--- a/v2v/convert_windows.ml
+++ b/v2v/convert_windows.ml
@@ -319,7 +319,7 @@ echo Wait for PnP to complete
(String.concat "/" pnp_wait_path))
and configure_rhev_apt tool_path =
- (* Configure RHEV-APT (the RHEV guest agent). However if it doesn't
+ (* Configure RHEV-APT (the RHV guest agent). However if it doesn't
* exist just warn about it and continue.
*)
g#upload tool_path "/rhev-apt.exe"; (* XXX *)
diff --git a/v2v/output_rhev.ml b/v2v/output_rhv.ml
similarity index 90%
rename from v2v/output_rhev.ml
rename to v2v/output_rhv.ml
index 3280150..5ddcefa 100644
--- a/v2v/output_rhev.ml
+++ b/v2v/output_rhv.ml
@@ -66,7 +66,7 @@ and check_storage_domain domain_class os mp =
let entries =
try Sys.readdir mp
with Sys_error msg ->
- error (f_"could not read the %s specified by the '-os %s' parameter on
the command line. Is it really an OVirt or RHEV-M %s? The original error is: %s")
domain_class os domain_class msg in
+ error (f_"could not read the %s specified by the '-os %s' parameter on
the command line. Is it really an OVirt or RHV-M %s? The original error is: %s")
domain_class os domain_class msg in
let entries = Array.to_list entries in
let uuids = List.filter (
fun entry ->
@@ -78,7 +78,7 @@ and check_storage_domain domain_class os mp =
match uuids with
| [uuid] -> uuid
| [] ->
- error (f_"there are no UUIDs in the %s (%s). Is it really an OVirt or RHEV-M
%s?") domain_class os domain_class
+ error (f_"there are no UUIDs in the %s (%s). Is it really an OVirt or RHV-M
%s?") domain_class os domain_class
| _::_ ->
error (f_"there are multiple UUIDs in the %s (%s). This is unexpected, and
may be a bug in virt-v2v or OVirt.") domain_class os in
@@ -88,7 +88,7 @@ and check_storage_domain domain_class os mp =
let () =
let master_vms_dir = mp // uuid // "master" // "vms" in
if not (is_directory master_vms_dir) then
- error (f_"%s does not exist or is not a directory.\n\nMost likely cause:
Either the %s (%s) has not been attached to any Data Center, or the path %s is not an %s
at all.\n\nYou have to attach the %s to a Data Center using the RHEV-M / OVirt user
interface first.\n\nIf you don't know what the %s mount point should be then you can
also find this out through the RHEV-M user interface.")
+ error (f_"%s does not exist or is not a directory.\n\nMost likely cause:
Either the %s (%s) has not been attached to any Data Center, or the path %s is not an %s
at all.\n\nYou have to attach the %s to a Data Center using the RHV-M / OVirt user
interface first.\n\nIf you don't know what the %s mount point should be then you can
also find this out through the RHV-M user interface.")
master_vms_dir domain_class os os
domain_class domain_class domain_class in
@@ -98,7 +98,7 @@ and check_storage_domain domain_class os mp =
(* UID:GID required for files and directories when writing to ESD. *)
let uid = 36 and gid = 36
-class output_rhev os output_alloc =
+class output_rhv os output_alloc =
(* Create a UID-switching handle. If we're not root, create a dummy
* one because we cannot switch UIDs.
*)
@@ -111,11 +111,11 @@ class output_rhev os output_alloc =
object
inherit output
- method as_options = sprintf "-o rhev -os %s" os
+ method as_options = sprintf "-o rhv -os %s" os
method supported_firmware = [ TargetBIOS ]
- (* RHEV doesn't support serial consoles. This causes the conversion
+ (* RHV doesn't support serial consoles. This causes the conversion
* step to remove it.
*)
method keep_serial_console = false
@@ -157,7 +157,7 @@ object
mount_and_check_storage_domain (s_"Export Storage Domain") os in
esd_mp <- mp;
esd_uuid <- uuid;
- debug "RHEV: ESD mountpoint: %s\nRHEV: ESD UUID: %s" esd_mp esd_uuid;
+ debug "RHV: ESD mountpoint: %s\nRHV: ESD UUID: %s" esd_mp esd_uuid;
(* See if we can write files as UID:GID 36:36. *)
let () =
@@ -166,10 +166,10 @@ object
let stat = stat testfile in
Changeuid.unlink changeuid_t testfile;
let actual_uid = stat.st_uid and actual_gid = stat.st_gid in
- debug "RHEV: actual UID:GID of new files is %d:%d" actual_uid
actual_gid;
+ debug "RHV: actual UID:GID of new files is %d:%d" actual_uid actual_gid;
if uid <> actual_uid || gid <> actual_gid then (
if running_as_root then
- warning (f_"cannot write files to the NFS server as %d:%d, even though we
appear to be running as root. This probably means the NFS client or idmapd is not
configured properly.\n\nYou will have to chown the files that virt-v2v creates after the
run, otherwise RHEV-M will not be able to import the VM.") uid gid
+ warning (f_"cannot write files to the NFS server as %d:%d, even though we
appear to be running as root. This probably means the NFS client or idmapd is not
configured properly.\n\nYou will have to chown the files that virt-v2v creates after the
run, otherwise RHV-M will not be able to import the VM.") uid gid
else
warning (f_"cannot write files to the NFS server as %d:%d. You might want
to stop virt-v2v (^C) and rerun it as root.") uid gid
) in
@@ -225,7 +225,7 @@ object
fun ({ target_overlay = ov } as t, image_uuid, vol_uuid) ->
let ov_sd = ov.ov_sd in
let target_file = images_dir // image_uuid // vol_uuid in
- debug "RHEV: will export %s to %s" ov_sd target_file;
+ debug "RHV: will export %s to %s" ov_sd target_file;
{ t with target_file = target_file }
) (combine3 targets image_uuids vol_uuids) in
@@ -247,7 +247,7 @@ object
?clustersize path format size =
Changeuid.func changeuid_t (
fun () ->
- let g = open_guestfs ~identifier:"rhev_disk_create" () in
+ let g = open_guestfs ~identifier:"rhv_disk_create" () in
g#disk_create ?backingfile ?backingformat ?preallocation ?compat
?clustersize path format size;
(* Make it sufficiently writable so that possibly root, or
@@ -277,5 +277,5 @@ object
delete_target_directory <- false
end
-let output_rhev = new output_rhev
-let () = Modules_list.register_output_module "rhev"
+let output_rhv = new output_rhv
+let () = Modules_list.register_output_module "rhv"
diff --git a/v2v/output_rhev.mli b/v2v/output_rhv.mli
similarity index 83%
rename from v2v/output_rhev.mli
rename to v2v/output_rhv.mli
index 27df737..71ddd0a 100644
--- a/v2v/output_rhev.mli
+++ b/v2v/output_rhv.mli
@@ -16,12 +16,12 @@
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*)
-(** [-o rhev] target. *)
+(** [-o rhv] target. *)
val mount_and_check_storage_domain : string -> string -> (string * string)
(** This helper function is also used by the VDSM target. *)
-val output_rhev : string -> Types.output_allocation -> Types.output
-(** [output_rhev os output_alloc] creates and
+val output_rhv : string -> Types.output_allocation -> Types.output
+(** [output_rhv os output_alloc] creates and
returns a new {!Types.output} object specialized for writing
- output to RHEV-M or oVirt Export Storage Domain. *)
+ output to RHV-M or oVirt Export Storage Domain. *)
diff --git a/v2v/output_vdsm.ml b/v2v/output_vdsm.ml
index fbddee8..ee7cc4e 100644
--- a/v2v/output_vdsm.ml
+++ b/v2v/output_vdsm.ml
@@ -49,7 +49,7 @@ object
method supported_firmware = [ TargetBIOS ]
- (* RHEV doesn't support serial consoles. This causes the conversion
+ (* RHV doesn't support serial consoles. This causes the conversion
* step to remove it.
*)
method keep_serial_console = false
@@ -62,7 +62,7 @@ object
* name of the target files that eventually get written by the main
* code.
*
- * 'os' is the output storage domain (-os /rhev/data/<data
center>/<data domain>)
+ * 'os' is the output storage domain (-os /rhev/data/<data
center>/<data domain>)
* this is already mounted path.
*
* Note it's good to fail here (early) if there are any problems, since
diff --git a/v2v/test-harness/virt-v2v-test-harness.pod
b/v2v/test-harness/virt-v2v-test-harness.pod
index 4bbaca8..a323549 100644
--- a/v2v/test-harness/virt-v2v-test-harness.pod
+++ b/v2v/test-harness/virt-v2v-test-harness.pod
@@ -17,8 +17,8 @@ virt-v2v-test-harness - Used to test virt-v2v against real test cases
=head1 DESCRIPTION
L<virt-v2v(1)> converts guests from a foreign hypervisor to run on
-KVM, managed by libvirt, OpenStack, oVirt, Red Hat Enterprise
-Virtualisation (RHEV) or several other targets.
+KVM, managed by libvirt, OpenStack, oVirt, Red Hat Virtualisation
+(RHV) or several other targets.
Virt-v2v-test-harness is a small library (module name:
C<V2v_test_harness>) used to run virt-v2v against a set of test cases
diff --git a/v2v/test-v2v-no-copy.sh b/v2v/test-v2v-no-copy.sh
index 6309c92..23109ae 100755
--- a/v2v/test-v2v-no-copy.sh
+++ b/v2v/test-v2v-no-copy.sh
@@ -58,17 +58,17 @@ test -f $d/windows.xml
# Test the disk was NOT created.
! test -f $d/windows-sda
-# --no-copy with -o rhev.
+# --no-copy with -o rhv.
mkdir $d/12345678-1234-1234-1234-123456789abc
mkdir $d/12345678-1234-1234-1234-123456789abc/images
mkdir $d/12345678-1234-1234-1234-123456789abc/master
mkdir $d/12345678-1234-1234-1234-123456789abc/master/vms
-# $VG - XXX Disabled because the forking used to write files in -o rhev
+# $VG - XXX Disabled because the forking used to write files in -o rhv
# mode confuses valgrind.
virt-v2v --debug-gc --no-copy \
-i libvirt -ic "$libvirt_uri" windows \
- -o rhev -os $d
+ -o rhv -os $d
# Test the OVF metadata was created.
test -f $d/12345678-1234-1234-1234-123456789abc/master/vms/*/*.ovf
diff --git a/v2v/test-v2v-o-rhev.sh b/v2v/test-v2v-o-rhv.sh
similarity index 95%
rename from v2v/test-v2v-o-rhev.sh
rename to v2v/test-v2v-o-rhv.sh
index 43e2d42..ce43b27 100755
--- a/v2v/test-v2v-o-rhev.sh
+++ b/v2v/test-v2v-o-rhv.sh
@@ -16,13 +16,13 @@
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-# Test -o rhev.
+# Test -o rhv.
unset CDPATH
export LANG=C
set -e
-if [ -n "$SKIP_TEST_V2V_O_RHEV_SH" ]; then
+if [ -n "$SKIP_TEST_V2V_O_RHV_SH" ]; then
echo "$0: test skipped because environment variable is set"
exit 77
fi
@@ -43,7 +43,7 @@ fi
export VIRT_TOOLS_DATA_DIR="$srcdir/../test-data/fake-virt-tools"
-d=test-v2v-o-rhev.d
+d=test-v2v-o-rhv.d
rm -rf $d
mkdir $d
@@ -53,11 +53,11 @@ mkdir $d/12345678-1234-1234-1234-123456789abc/images
mkdir $d/12345678-1234-1234-1234-123456789abc/master
mkdir $d/12345678-1234-1234-1234-123456789abc/master/vms
-# $VG - XXX Disabled because the forking used to write files in -o rhev
+# $VG - XXX Disabled because the forking used to write files in -o rhv
# mode confuses valgrind.
virt-v2v --debug-gc \
-i libvirt -ic "$libvirt_uri" windows \
- -o rhev -os $d
+ -o rhv -os $d
# Test the OVF metadata was created.
test -f $d/12345678-1234-1234-1234-123456789abc/master/vms/*/*.ovf
diff --git a/v2v/types.mli b/v2v/types.mli
index 514565c..b007bf4 100644
--- a/v2v/types.mli
+++ b/v2v/types.mli
@@ -366,6 +366,6 @@ class virtual output : object
(** Called in order to create disks on the target. The method has the
same signature as Guestfs#disk_create. *)
method keep_serial_console : bool
- (** Whether this output supports serial consoles (RHEV does not). *)
+ (** Whether this output supports serial consoles (RHV does not). *)
end
(** Encapsulates all [-o], etc output arguments as an object. *)
diff --git a/v2v/virt-v2v.pod b/v2v/virt-v2v.pod
index 1654b35..0382c95 100644
--- a/v2v/virt-v2v.pod
+++ b/v2v/virt-v2v.pod
@@ -7,7 +7,7 @@ virt-v2v - Convert a guest to use KVM
virt-v2v -ic
vpx://vcenter.example.com/Datacenter/esxi vmware_guest
virt-v2v -ic
vpx://vcenter.example.com/Datacenter/esxi vmware_guest \
- -o rhev -os rhev.nfs:/export_domain --network rhevm
+ -o rhv -os rhv.nfs:/export_domain --network rhvm
virt-v2v -i libvirtxml guest-domain.xml -o local -os /var/tmp
@@ -22,8 +22,8 @@ virt-v2v - Convert a guest to use KVM
Virt-v2v converts guests from a foreign hypervisor to run on KVM. It
can read Linux and Windows guests running on VMware, Xen, Hyper-V and
some other hypervisors, and convert them to KVM managed by libvirt,
-OpenStack, oVirt, Red Hat Enterprise Virtualisation (RHEV) or several
-other targets.
+OpenStack, oVirt, Red Hat Virtualisation (RHV) or several other
+targets.
There is also a companion front-end called L<virt-p2v(1)> which comes
as an ISO, CD or PXE image that can be booted on physical machines to
@@ -42,7 +42,7 @@ libguestfs E<ge> 1.28.
Xen ───▶│ -i libvirt ──▶ │ │ │ (default) │
... ───▶│ (default) │ │ │ ──┐ └────────────┘
└────────────┘ │ │ ─┐└──────▶ -o glance
- -i libvirtxml ─────────▶ │ │ ┐└─────────▶ -o rhev
+ -i libvirtxml ─────────▶ │ │ ┐└─────────▶ -o rhv
└────────────┘ └──────────▶ -o vdsm
Virt-v2v has a number of possible input and output modes, selected
@@ -74,7 +74,7 @@ libvirt configuration file (mainly for testing).
I<-o qemu> writes to a local disk image with a shell script for
booting the guest directly in qemu (mainly for testing).
-I<-o rhev> is used to write to a RHEV-M / oVirt target. I<-o vdsm>
+I<-o rhv> is used to write to a RHV / oVirt target. I<-o vdsm>
is only used when virt-v2v runs under VDSM control.
I<--in-place> instructs virt-v2v to customize the guest OS in the input
@@ -97,23 +97,23 @@ disks to F</var/lib/libvirt/images>.
For more information see L</INPUT FROM VMWARE VCENTER SERVER> below.
-=head2 Convert from VMware to RHEV-M/oVirt
+=head2 Convert from VMware to RHV/oVirt
This is the same as the previous example, except you want to send the
-guest to a RHEV-M Export Storage Domain which is located remotely
-(over NFS) at C<rhev.nfs:/export_domain>. If you are unclear about
+guest to a RHV-M Export Storage Domain which is located remotely
+(over NFS) at C<rhv.nfs:/export_domain>. If you are unclear about
the location of the Export Storage Domain you should check the
-settings on your RHEV-M management console. Guest network
-interface(s) are connected to the target network called C<rhevm>.
+settings on your RHV-M management console. Guest network
+interface(s) are connected to the target network called C<rhvm>.
virt-v2v -ic
vpx://vcenter.example.com/Datacenter/esxi vmware_guest \
- -o rhev -os rhev.nfs:/export_domain --network rhevm
+ -o rhv -os rhv.nfs:/export_domain --network rhvm
In this case the host running virt-v2v acts as a B<conversion server>.
-Note that after conversion, the guest will appear in the RHEV-M Export
-Storage Domain, from where you will need to import it using the RHEV-M
-user interface. (See L</OUTPUT TO RHEV>).
+Note that after conversion, the guest will appear in the RHV-M Export
+Storage Domain, from where you will need to import it using the RHV-M
+user interface. (See L</OUTPUT TO RHV>).
=head2 Convert disk image to OpenStack glance
@@ -199,7 +199,7 @@ QEMU and KVM only.
=item OpenStack Glance
-=item Red Hat Enterprise Virtualization (RHEV) 4.1 and up
+=item Red Hat Virtualization (RHV) 4.1 and up
=item Local libvirt
@@ -452,7 +452,7 @@ written.
=item B<-o> B<ovirt>
-This is the same as I<-o rhev>.
+This is the same as I<-o rhv>.
=item B<-o> B<qemu>
@@ -467,22 +467,26 @@ option which boots the guest under qemu immediately.
=item B<-o> B<rhev>
-Set the output method to I<rhev>.
+This is the same as I<-o rhv>.
-The converted guest is written to a RHEV Export Storage Domain. The
+=item B<-o> B<rhv>
+
+Set the output method to I<rhv>.
+
+The converted guest is written to a RHV Export Storage Domain. The
I<-os> parameter must also be used to specify the location of the
Export Storage Domain. Note this does not actually import the guest
-into RHEV. You have to do that manually later using the UI.
+into RHV. You have to do that manually later using the UI.
-See L</OUTPUT TO RHEV> below.
+See L</OUTPUT TO RHV> below.
=item B<-o> B<vdsm>
Set the output method to I<vdsm>.
-This mode is similar to I<-o rhev>, but the full path to the
+This mode is similar to I<-o rhv>, but the full path to the
data domain must be given:
-F</rhev/data-center/E<lt>data-center-uuidE<gt>/E<lt>data-domain-uuidE<gt>>.
+F</rhev/data-center/E<lt>data-center-uuidE<gt>/E<lt>data-domain-uuidE<gt>>.
This mode is only used when virt-v2v runs under VDSM control.
=item B<-oa> B<sparse>
@@ -520,10 +524,10 @@ For I<-o libvirt>, this is a libvirt directory pool
For I<-o local> and I<-o qemu>, this is a directory name. The
directory must exist.
-For I<-o rhev>, this can be an NFS path of the Export Storage Domain
+For I<-o rhv>, this can be an NFS path of the Export Storage Domain
of the form C<E<lt>hostE<gt>:E<lt>pathE<gt>>, eg:
- rhev-storage.example.com:/rhev/export
+ rhv-storage.example.com:/rhv/export
The NFS export must be mountable and writable by the user and host
running virt-v2v, since the virt-v2v program has to actually mount it
@@ -621,7 +625,7 @@ default in a future version of virt-v2v.
=item B<--vdsm-ovf-output>
-Normally the RHEV output mode chooses random UUIDs for the target
+Normally the RHV output mode chooses random UUIDs for the target
guest. However VDSM needs to control the UUIDs and passes these
parameters when virt-v2v runs under VDSM control. The parameters
control:
@@ -898,7 +902,7 @@ On RHEL E<ge> 7.3, only qemu-kvm-rhev (not qemu-kvm) is
supported.
Not supported.
-=item UEFI on RHEV
+=item UEFI on RHV
Not supported.
@@ -929,13 +933,13 @@ This is typical of a libvirt guest: It has a single network
interface
connected to a network called C<default>.
To map a specific network to a target network, for example C<default>
-on the source to C<rhevm> on the target, use:
+on the source to C<rhvm> on the target, use:
- virt-v2v [...] --network default:rhevm
+ virt-v2v [...] --network default:rhvm
To map every network to a target network, use:
- virt-v2v [...] --network rhevm
+ virt-v2v [...] --network rhvm
Bridges are handled in the same way, but you have to use the
I<--bridge> option instead. For example:
@@ -1471,29 +1475,29 @@ Define the final guest in libvirt:
=back
-=head1 OUTPUT TO RHEV
+=head1 OUTPUT TO RHV
-This section only applies to the I<-o rhev> output mode. If you use
-virt-v2v from the RHEV-M user interface, then behind the scenes the
+This section only applies to the I<-o rhv> output mode. If you use
+virt-v2v from the RHV-M user interface, then behind the scenes the
import is managed by VDSM using the I<-o vdsm> output mode (which end
users should not try to use directly).
-You have to specify I<-o rhev> and an I<-os> option that points to the
-RHEV-M Export Storage Domain. You can either specify the NFS server
-and mountpoint, eg. S<C<-os rhev-storage:/rhev/export>>, or you can
+You have to specify I<-o rhv> and an I<-os> option that points to the
+RHV-M Export Storage Domain. You can either specify the NFS server
+and mountpoint, eg. S<C<-os rhv-storage:/rhv/export>>, or you can
mount that first and point to the directory where it is mounted,
eg. S<C<-os /tmp/mnt>>. Be careful not to point to the Data Storage
Domain by accident as that will not work.
On successful completion virt-v2v will have written the new guest to
the Export Storage Domain, but it will not yet be ready to run. It
-must be imported into RHEV using the UI before it can be used.
+must be imported into RHV using the UI before it can be used.
-In RHEV E<ge> 2.2 this is done from the Storage tab. Select the
+In RHV E<ge> 2.2 this is done from the Storage tab. Select the
export domain the guest was written to. A pane will appear underneath
the storage domain list displaying several tabs, one of which is "VM
Import". The converted guest will be listed here. Select the
-appropriate guest an click "Import". See the RHEV documentation for
+appropriate guest and click "Import". See the RHV documentation for
additional details.
If you export several guests, then you can import them all at the same
@@ -1735,7 +1739,7 @@ require either root or a special user:
=item Mounting the Export Storage Domain
-When using I<-o rhev -os server:/esd> virt-v2v has to have sufficient
+When using I<-o rhv -os server:/esd> virt-v2v has to have sufficient
privileges to NFS mount the Export Storage Domain from C<server>.
You can avoid needing root here by mounting it yourself before running
@@ -1744,11 +1748,11 @@ read the next S<section ...>
=item Writing to the Export Storage Domain as 36:36
-RHEV-M cannot read files and directories from the Export Storage
+RHV-M cannot read files and directories from the Export Storage
Domain unless they have UID:GID 36:36. You will see VM import
problems if the UID:GID is not correct.
-When you run virt-v2v I<-o rhev> as root, virt-v2v attempts to create
+When you run virt-v2v I<-o rhv> as root, virt-v2v attempts to create
files and directories with the correct ownership. If you run virt-v2v
as non-root, it will probably still work, but you will need to
manually change ownership after virt-v2v has finished.
@@ -1774,19 +1778,19 @@ documentation.
=back
-=head1 DEBUGGING RHEV-M IMPORT FAILURES
+=head1 DEBUGGING RHV-M IMPORT FAILURES
-When you export to the RHEV-M Export Storage Domain, and then import
-that guest through the RHEV-M UI, you may encounter an import failure.
+When you export to the RHV-M Export Storage Domain, and then import
+that guest through the RHV-M UI, you may encounter an import failure.
Diagnosing these failures is infuriatingly difficult as the UI
generally hides the true reason for the failure.
There are two log files of interest. The first is stored on the
-RHEV-M server itself, and is called
+RHV-M server itself, and is called
F</var/log/ovirt-engine/engine.log>
The second file, which is the most useful, is found on the SPM host
-(SPM stands for "Storage Pool Manager"). This is a RHEV node that is
+(SPM stands for "Storage Pool Manager"). This is a RHV node that is
elected to do all metadata modifications in the data center, such as
image or snapshot creation. You can find out which host is the
current SPM from the "Hosts" tab "Spm Status" column. Once you have
@@ -1972,14 +1976,14 @@ I<--firstboot> or I<--firstboot-command> options with
Windows guests.
(Optional)
-The RHEV Application Provisioning Tool (RHEV APT). If this file is
+The RHV Application Provisioning Tool (RHEV APT). If this file is
present, then RHEV APT will be installed in the Windows guest during
conversion. This tool is a guest agent which ensures that the virtio
drivers remain up to date when the guest is running on Red Hat
-Enterprise Virtualization (RHEV).
+Virtualization (RHV).
-This file comes from Red Hat Enterprise Virtualization (RHEV), and is
-not distributed with virt-v2v.
+This file comes from Red Hat Virtualization (RHV), and is not
+distributed with virt-v2v.
=back
@@ -2011,14 +2015,14 @@ conversion.
Variously called C<engine-image-uploader>, C<ovirt-image-uploader> or
C<rhevm-image-uploader>, this tool allows you to copy a guest from one
-oVirt or RHEV Export Storage Domain to another. It only permits
-importing a guest that was previously exported from another oVirt/RHEV
+oVirt or RHV Export Storage Domain to another. It only permits
+importing a guest that was previously exported from another oVirt/RHV
instance.
=item
L<import-to-ovirt.pl|http://git.annexia.org/?p=import-to-ovirt.git>
This script can be used to import guests that already run on KVM to
-oVirt or RHEV. For more information, see this blog posting by the
+oVirt or RHV. For more information, see this blog posting by the
author of virt-v2v:
L<https://rwmj.wordpress.com/2015/09/18/importing-kvm-guests-to-ovirt-...
--
2.10.2