PROBLEMS:
- Spools to a temporary disk instead of streaming
- Target cluster is hard-coded to "Default"
- Handling of sparseness in raw format disks

This adds a new output mode to virt-v2v.  virt-v2v -o rhv-upload
streams images directly to an oVirt or RHV >= 4.2 Data Domain using
the oVirt SDK v4.  It is more efficient than -o rhv because it does
not need to go via an Export Storage Domain, and, unlike -o vdsm, it
can be used directly by humans.

The implementation drives the Python SDK by running snippets of
Python code which use the ‘ovirtsdk4’ module.  Both Python 3 and the
Python SDK v4 must therefore be installed at run time, but they are
deliberately not hard dependencies of virt-v2v, since most users will
not have them installed.
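
Roughly, the generated snippets look like the following sketch, which
connects to the engine and looks up the storage domain ID (the URL,
password file and storage domain name are just the placeholder values
used in the documentation):

    import logging
    import ovirtsdk4 as sdk

    # Read the -op password file (whole file, trailing newline stripped).
    with open("/tmp/ovirt-admin-password", "r") as file:
        password = file.read().rstrip()

    # Connect to the engine REST API given with -oc.
    connection = sdk.Connection(
        url = "https://ovirt-engine.example.com/ovirt-engine/api",
        username = "admin@internal",
        password = password,
        log = logging.getLogger(),
        insecure = True,
    )
    system_service = connection.system_service()

    # Look up the -os storage domain by name and print its ID.
    sds_service = system_service.storage_domains_service()
    sd = sds_service.list(search = "name=ovirt-data")[0]
    print(sd.id)
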
---
v2v/Makefile.am | 2 +
v2v/cmdline.ml | 38 ++++
v2v/output_rhv_upload.ml | 472 ++++++++++++++++++++++++++++++++++++++++++++++
v2v/output_rhv_upload.mli | 27 +++
v2v/virt-v2v.pod | 90 +++++++--
5 files changed, 615 insertions(+), 14 deletions(-)
diff --git a/v2v/Makefile.am b/v2v/Makefile.am
index c2eb31097..cd44dfc2a 100644
--- a/v2v/Makefile.am
+++ b/v2v/Makefile.am
@@ -64,6 +64,7 @@ SOURCES_MLI = \
output_null.mli \
output_qemu.mli \
output_rhv.mli \
+ output_rhv_upload.mli \
output_vdsm.mli \
parse_ovf_from_ova.mli \
parse_libvirt_xml.mli \
@@ -116,6 +117,7 @@ SOURCES_ML = \
output_local.ml \
output_qemu.ml \
output_rhv.ml \
+ output_rhv_upload.ml \
output_vdsm.ml \
inspect_source.ml \
target_bus_assignment.ml \
diff --git a/v2v/cmdline.ml b/v2v/cmdline.ml
index d725ae022..c53d1703b 100644
--- a/v2v/cmdline.ml
+++ b/v2v/cmdline.ml
@@ -65,6 +65,8 @@ let parse_cmdline () =
let output_password = ref None in
let output_storage = ref None in
let password_file = ref None in
+ let rhv_cafile = ref None in
+ let rhv_direct = ref false in
let vddk_config = ref None in
let vddk_cookie = ref None in
let vddk_libdir = ref None in
@@ -143,6 +145,8 @@ let parse_cmdline () =
| "disk" | "local" -> output_mode := `Local
| "null" -> output_mode := `Null
| "ovirt" | "rhv" | "rhev" -> output_mode := `RHV
+ | "ovirt-upload" | "ovirt_upload" | "rhv-upload" |
"rhv_upload" ->
+ output_mode := `RHV_Upload
| "qemu" -> output_mode := `QEmu
| "vdsm" -> output_mode := `VDSM
| s ->
@@ -229,6 +233,9 @@ let parse_cmdline () =
[ L"print-source" ], Getopt.Set print_source,
s_"Print source and stop";
[ L"qemu-boot" ], Getopt.Set qemu_boot, s_"Boot in qemu (-o qemu
only)";
+ [ L"rhv-cafile" ], Getopt.String ("ca.pem",
set_string_option_once "--rhv-cafile" rhv_cafile),
+ s_"For -o rhv-upload, set ‘ca.pem’ file";
+ [ L"rhv-direct" ], Getopt.Set rhv_direct, s_"Use direct transfer
mode";
[ L"root" ], Getopt.String ("ask|... ", set_root_choice),
s_"How to choose root filesystem";
[ L"vddk-config" ], Getopt.String ("filename",
set_string_option_once "--vddk-config" vddk_config),
@@ -322,6 +329,8 @@ read the man page virt-v2v(1).
let password_file = !password_file in
let print_source = !print_source in
let qemu_boot = !qemu_boot in
+ let rhv_cafile = !rhv_cafile in
+ let rhv_direct = !rhv_direct in
let root_choice = !root_choice in
let vddk_options =
{ vddk_config = !vddk_config;
@@ -546,6 +555,35 @@ read the man page virt-v2v(1).
Output_rhv.output_rhv os output_alloc,
output_format, output_alloc
+ | `RHV_Upload ->
+ let output_conn =
+ match output_conn with
+ | None ->
+ error (f_"-o rhv-upload: use ‘-oc’ to point to the oVirt or RHV server
REST API URL, which is usually
https://servername/ovirt-engine/api")
+ | Some oc -> oc in
+ (* In theory we could make the password optional in future. *)
+ let output_password =
+ match output_password with
+ | None ->
+ error (f_"-o rhv-upload: output password file was not specified, use
‘-op’ to point to a file which contains the password used to connect to the oVirt or RHV
server")
+ | Some op -> op in
+ let os =
+ match output_storage with
+ | None ->
+ error (f_"-o rhv-upload: output storage was not specified, use
‘-os’");
+ | Some os -> os in
+ if qemu_boot then
+ error_option_cannot_be_used_in_output_mode "rhv-upload" "--qemu-boot";
+ let rhv_cafile =
+ match rhv_cafile with
+ | None ->
+ error (f_"-o rhv-upload: must use ‘--rhv-cafile’ to supply the path to
the oVirt or RHV server’s ‘ca.pem’ file")
+ | Some rhv_cafile -> rhv_cafile in
+ Output_rhv_upload.output_rhv_upload output_alloc output_conn
+ output_password os
+ rhv_cafile rhv_direct,
+ output_format, output_alloc
+
| `VDSM ->
if output_password <> None then
error_option_cannot_be_used_in_output_mode "vdsm" "-op";
diff --git a/v2v/output_rhv_upload.ml b/v2v/output_rhv_upload.ml
new file mode 100644
index 000000000..c0d84ff44
--- /dev/null
+++ b/v2v/output_rhv_upload.ml
@@ -0,0 +1,472 @@
+(* virt-v2v
+ * Copyright (C) 2009-2018 Red Hat Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ *)
+
+open Printf
+open Unix
+
+open Std_utils
+open Tools_utils
+open Unix_utils
+open Common_gettext.Gettext
+
+open Types
+open Utils
+
+(* Timeout to wait for oVirt disks to change status, or the transfer
+ * object to finish initializing [seconds].
+ *)
+let ovirt_timeout = 5*60
+
+(* Very loosely this struct contains whatever information is needed
+ * to make a connection.
+ *)
+type connection = {
+ python : string; (* Python interpreter. *)
+ url : string;
+ username : string;
+ output_password : string; (* -op parameter, a filename not the password *)
+}
+
+(* Python code fragments go first. Note these must not be
+ * indented because of Python's stupid whitespace thing.
+ *)
+
+(* Print the Python version. *)
+let python_get_version = "
+import sys
+print (sys.version[0]) # syntax works on py2 or py3
+"
+
+(* Import all the Python modules needed. *)
+let python_imports = "
+import logging
+import ovirtsdk4 as sdk
+import ovirtsdk4.types as types
+import ssl
+import sys
+import time
+
+from http.client import HTTPSConnection
+
+try:
+ from urllib.parse import urlparse
+except ImportError:
+ from urlparse import urlparse
+"
+
+(* Create the Python prologue which connects to the system service.
+ * This returns a string of Python code.
+ *)
+let python_connect conn =
+ sprintf "
+password_file = %s
+with open(password_file, 'r') as file:
+ password = file.read()
+password = password.rstrip()
+
+# Open the connection.
+connection = sdk.Connection(
+ url = %s,
+ username = %s,
+ password = password,
+ debug = %s,
+ log = logging.getLogger(),
+ insecure = True,
+)
+system_service = connection.system_service()
+" (py_quote conn.output_password)
+ (py_quote conn.url)
+ (py_quote conn.username)
+ (py_bool (verbose ()))
+
+let python_get_storage_domain_id output_storage =
+ let search_term = sprintf "name=%s" output_storage in
+ sprintf "
+sds_service = system_service.storage_domains_service()
+sd = sds_service.list(search=%s)[0]
+print(sd.id)
+" (py_quote search_term)
+
+let python_create_one_disk disk_name disk_format
+ output_alloc sd_id disk_size =
+ sprintf "
+disks_service = system_service.disks_service()
+disk = disks_service.add(
+ disk = types.Disk(
+ name = %s,
+ format = %s,
+ sparse = %s,
+ provisioned_size = %Ld,
+ storage_domains = [types.StorageDomain(id = %s)],
+ )
+)
+disk_id = disk.id
+
+# Wait until the disk is up. The transfer cannot start if the
+# disk is locked.
+disk_service = disks_service.disk_service(disk_id)
+timeout = time.time() + %d
+while True:
+ time.sleep(5)
+ disk = disk_service.get()
+ if disk.status == types.DiskStatus.OK:
+ break
+ if time.time() > timeout:
+ raise RuntimeError(\"Timed out waiting for disk to become unlocked\")
+
+# Return the disk ID.
+print(disk_id)
+" (py_quote disk_name)
+ disk_format (* it's a raw Python expression, don't quote *)
+ (py_bool (output_alloc = Sparse))
+ disk_size
+ (py_quote sd_id)
+ ovirt_timeout
+
+(* XXX Temporary function. *)
+let python_upload_one_disk disk_id disk_size filename rhv_cafile rhv_direct =
+ sprintf "
+transfers_service = system_service.image_transfers_service()
+
+transfer = transfers_service.add(
+ types.ImageTransfer(
+ image = types.Image (id=%s)
+ )
+)
+
+transfer_service = transfers_service.image_transfer_service(transfer.id)
+
+# After adding a new transfer for the disk, the transfer's status will
+# be INITIALIZING. Wait until the init phase is over.
+timeout = time.time() + %d
+while True:
+ time.sleep(5)
+ transfer = transfer_service.get()
+ if transfer.phase != types.ImageTransferPhase.INITIALIZING:
+ break
+ if time.time() > timeout:
+ raise RuntimeError(\"Timed out waiting for transfer status !=
INITIALIZING\")
+
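+# Decide where to send the data: directly to the destination host when
+# --rhv-direct was given, otherwise via the oVirt engine image I/O proxy.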
+if %s:
+ if transfer.transfer_url is None:
+ raise RuntimeError(\"Direct upload to host not supported, requires
ovirt-engine >= 4.2 and only works when virt-v2v is run within the oVirt/RHV
environment, eg. on an ovirt node.\")
+ destination_url = urlparse(transfer.transfer_url)
+else:
+ destination_url = urlparse(transfer.proxy_url)
+
+context = ssl.create_default_context()
+context.load_verify_locations(cafile = %s)
+
+proxy_connection = HTTPSConnection(
+ destination_url.hostname,
+ destination_url.port,
+ context = context
+)
+
+image_path = %s
+image_size = %Ld
+
+proxy_connection.putrequest(\"PUT\", destination_url.path)
+proxy_connection.putheader(\"Authorization\", transfer.signed_ticket)
+proxy_connection.putheader('Content-Length', image_size)
+proxy_connection.endheaders()
+
+# This seems to give the best throughput when uploading from Yaniv's
+# machine to a server that drops the data. You may need to tune this
+# on your setup.
+BUF_SIZE = 128 * 1024
+
+with open(image_path, \"rb\") as disk:
+ pos = 0
+ while pos < image_size:
+ # Send the next chunk to the proxy.
+ to_read = min(image_size - pos, BUF_SIZE)
+ chunk = disk.read(to_read)
+ if not chunk:
+ transfer_service.pause()
+ raise RuntimeError(\"Unexpected end of file at pos=%%d\" %% pos)
+
+ proxy_connection.send(chunk)
+ pos += len(chunk)
+
+# Get the response
+response = proxy_connection.getresponse()
+if response.status != 200:
+ transfer_service.pause()
+ raise RuntimeError(\"Upload failed: %%s %%s\" %%
+ (response.status, response.reason))
+
+# Successful cleanup
+transfer_service.finalize()
+connection.close()
+proxy_connection.close()
+" (py_quote disk_id)
+ ovirt_timeout
+ (py_bool rhv_direct)
+ (py_quote rhv_cafile)
+ (py_quote filename)
+ disk_size
+
+let python_create_virtual_machine ovf =
+ sprintf "
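+# Create the virtual machine, passing the OVF document as an OVA-type
+# initial configuration so that oVirt takes the VM metadata from it.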
+vms_service = system_service.vms_service()
+vm = vms_service.add(
+ types.Vm(
+ cluster=types.Cluster(name = %s),
+ initialization=types.Initialization(
+ configuration = types.Configuration(
+ type = types.ConfigurationType.OVA,
+ data = %s
+ )
+ )
+ )
+)
+" (py_quote "Default" (* XXX target cluster *))
+ (py_quote (DOM.doc_to_string ovf))
+
+let python_delete_disk disk_id =
+ sprintf "
+disk_id = %s
+disks_service = system_service.disks_service()
+disk_service = disks_service.disk_service(disk_id)
+disk_service.remove()
+" (py_quote disk_id)
+
+(* Find the Python 3 binary. *)
+let find_python3 () =
+ let rec loop = function
+ | [] ->
+ error "could not locate Python 3 binary on the $PATH. You may have to
install Python 3. If Python 3 is already installed then you may need to create a
directory containing a binary called ‘python3’ which runs Python 3."
+ | python :: rest ->
+ (* Use shell_command first to check the binary exists. *)
+ let cmd = sprintf "%s --help >/dev/null 2>&1" (quote python) in
+ if shell_command cmd = 0 &&
+ run_python ~python python_get_version = ["3"] then (
+ debug "rhv-upload: python binary: %s" python;
+ python
+ )
+ else
+ loop rest
+ in
+ loop ["python3"; "python"]
+
+(* Parse the -oc URI. *)
+let parse_output_conn oc =
+ let uri = Xml.parse_uri oc in
+ if uri.Xml.uri_scheme <> Some "https" then
+ error (f_"rhv-upload: -oc: URI must start with https://...");
+ if uri.Xml.uri_server = None then
+ error (f_"rhv-upload: -oc: no remote server name in the URI");
+ if uri.Xml.uri_path = None || uri.Xml.uri_path = Some "/" then
+ error (f_"rhv-upload: -oc: URI path component looks incorrect");
+ let username =
+ match uri.Xml.uri_user with
+ | None ->
+ warning (f_"rhv-upload: -oc: username was missing from URI, assuming
‘admin@internal’");
+ "admin@internal"
+ | Some user -> user in
+ (* Reconstruct the URI without the username. *)
+ let url = sprintf "%s://%s%s"
+ (Option.default "https" uri.Xml.uri_scheme)
+ (Option.default "localhost" uri.Xml.uri_server)
+ (Option.default "" uri.Xml.uri_path) in
+ debug "rhv-upload: reconstructed url: %s" url;
+
+ url, username
+
+(* Get the storage domain ID. *)
+let get_storage_domain_id conn output_storage =
+ let code =
+ python_imports ^
+ python_connect conn ^
+ python_get_storage_domain_id output_storage in
+ match run_python ~python:conn.python code with
+ | [id] -> id
+ | _ ->
+ error (f_"rhv-upload: get_storage_domain_id: could not fetch storage domain ID
of ‘%s’ (does it exist on the server?). See previous output for more details.")
output_storage
+
+(* Create a single, empty disk on the target. *)
+let create_one_disk conn output_format output_alloc sd_id source target =
+ (* Give the disk a predictable name based on the source
+ * name and disk index.
+ *)
+ let disk_name =
+ let id = target.target_overlay.ov_source.s_disk_id in
+ sprintf "%s-%03d" source.s_name id in
+
+ let disk_format =
+ match output_format with
+ | `Raw -> "types.DiskFormat.RAW"
+ | `COW -> "types.DiskFormat.COW" in
+
+ (* This is the virtual size in bytes. *)
+ let disk_size = target.target_overlay.ov_virtual_size in
+
+ let code =
+ python_imports ^
+ python_connect conn ^
+ python_create_one_disk disk_name disk_format
+ output_alloc sd_id disk_size in
+ match run_python ~python:conn.python code with
+ | [id] -> id
+ | _ ->
+ error (f_"rhv-upload: create_one_disk: error creating disks, see previous
output")
+
+(* XXX Temporary function to upload spooled disk. *)
+let upload_one_disk conn rhv_cafile rhv_direct t filename disk_id =
+ let disk_size = t.target_overlay.ov_virtual_size in
+
+ let code =
+ python_imports ^
+ python_connect conn ^
+ python_upload_one_disk disk_id disk_size filename rhv_cafile rhv_direct in
+ ignore (run_python ~python:conn.python code)
+
+(* Upload the virtual machine metadata (ie OVF) and create a VM. *)
+let create_virtual_machine conn ovf =
+ let code =
+ python_imports ^
+ python_connect conn ^
+ python_create_virtual_machine ovf in
+ ignore (run_python ~python:conn.python code)
+
+(* Delete a disk by ID. *)
+let delete_disk conn disk_id =
+ let code =
+ python_imports ^
+ python_connect conn ^
+ python_delete_disk disk_id in
+ ignore (run_python ~python:conn.python code)
+
+class output_rhv_upload output_alloc output_conn
+ output_password output_storage
+ rhv_cafile rhv_direct =
+ let conn =
+ let python = find_python3 () in
+ let url, username = parse_output_conn output_conn in
+ { python = python; url = url;
+ username = username; output_password = output_password } in
+
+ (* The temporary directory is used for a few things such as passing
+ * passwords securely and (temporarily) for spooling disks (XXX).
+ *)
+ let tmpdir =
+ let base_dir = (open_guestfs ())#get_cachedir () in
+ let t = Mkdtemp.temp_dir ~base_dir "rhvupload." in
+ rmdir_on_exit t;
+ t in
+
+ (* Storage domain ID. *)
+ let sd_id =
+ get_storage_domain_id conn output_storage in
+object (self)
+ inherit output
+
+ method precheck () =
+ (* Check all the dependencies including the Python 3 oVirt SDK v4
+ * module are installed. This will fail with a Python error message.
+ *)
+ ignore (run_python ~python:conn.python python_imports)
+
+ method as_options =
+ "-o rhv-upload" ^
+ (match output_alloc with
+ | Sparse -> "" (* default, don't need to print it *)
+ | Preallocated -> " -oa preallocated") ^
+ sprintf " -oc %s -op %s -os %s"
+ output_conn output_password output_storage
+
+ method supported_firmware = [ TargetBIOS ]
+
+ (* List of disks we have created. There will be one per target. *)
+ val mutable target_disk_ids = []
+
+ (* This is set to true until we have created the VM successfully. *)
+ val mutable delete_disks_on_exit = true
+
+ method private delete_disks_on_exit () =
+ if delete_disks_on_exit then (
+ (* Need to unset this first because if the delete_disk function
+ * calls exit, this method will be called recursively.
+ *)
+ delete_disks_on_exit <- false;
+ let disk_ids = target_disk_ids in
+ target_disk_ids <- [];
+ List.iter (delete_disk conn) disk_ids
+ )
+
+ method prepare_targets source targets =
+ List.map (
+ fun t ->
+ (* Only allow output format "raw" or "qcow2". *)
+ let output_format =
+ match t.target_format with
+ | "raw" -> `Raw
+ | "qcow2" -> `COW
+ | _ ->
+ error (f_"rhv-upload: -of %s: Only output format ‘raw’ or ‘qcow2’ is
supported. If the input is in a different format then force one of these output formats
by adding either ‘-of raw’ or ‘-of qcow2’ on the command line.")
+ t.target_format in
+
+ let disk_id = create_one_disk conn output_format output_alloc
+ sd_id source t in
+
+ target_disk_ids <- target_disk_ids @ [disk_id];
+
+ (* XXX Temporarily spool disks to tmpdir. *)
+ let target_file = TargetFile (tmpdir // t.target_overlay.ov_sd) in
+ { t with target_file }
+ ) targets
+
+ method create_metadata source targets _ guestcaps inspect target_firmware =
+ (* Upload the spooled disks. *)
+ List.iter (
+ fun (t, disk_id) ->
+ let filename =
+ match t.target_file with
+ | TargetFile filename -> filename
+ | TargetURI _ -> assert false in
+ upload_one_disk conn rhv_cafile rhv_direct t filename disk_id
+ ) (List.combine targets target_disk_ids);
+
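+ (* The OVF needs an image UUID and a volume UUID for each disk: reuse
+ * the oVirt disk IDs as the image UUIDs and generate fresh volume UUIDs.
+ *)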
+ let image_uuids = target_disk_ids
+ and vol_uuids = List.map (fun _ -> uuidgen ()) target_disk_ids
+ and vm_uuid = uuidgen () in
+
+ (* Create the metadata. *)
+ let ovf =
+ Create_ovf.create_ovf source targets guestcaps inspect
+ Sparse
+ sd_id (* storage UUID *)
+ image_uuids
+ vol_uuids
+ vm_uuid
+ OVirt in
+
+ (* Add the virtual machine. *)
+ create_virtual_machine conn ovf;
+
+ (* Don't delete the disks now. *)
+ delete_disks_on_exit <- false
+
+ initializer
+ (* Register an at-exit handler to delete the disks on (bad) exit. *)
+ at_exit self#delete_disks_on_exit
+end
+
+let output_rhv_upload = new output_rhv_upload
+let () = Modules_list.register_output_module "rhv-upload"
diff --git a/v2v/output_rhv_upload.mli b/v2v/output_rhv_upload.mli
new file mode 100644
index 000000000..3e7086f85
--- /dev/null
+++ b/v2v/output_rhv_upload.mli
@@ -0,0 +1,27 @@
+(* virt-v2v
+ * Copyright (C) 2009-2018 Red Hat Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ *)
+
+(** [-o rhv-upload] target. *)
+
+val output_rhv_upload : Types.output_allocation -> string -> string ->
+ string -> string -> bool ->
+ Types.output
+(** [output_rhv_upload output_alloc output_conn output_password output_storage
+ rhv_cafile rhv_direct]
+ creates and returns a new {!Types.output} object specialized for writing
+ output to oVirt or RHV directly via RHV APIs. *)
diff --git a/v2v/virt-v2v.pod b/v2v/virt-v2v.pod
index d51e7ed2f..11b03d14f 100644
--- a/v2v/virt-v2v.pod
+++ b/v2v/virt-v2v.pod
@@ -6,15 +6,18 @@ virt-v2v - Convert a guest to use KVM
virt-v2v -ic vpx://vcenter.example.com/Datacenter/esxi vmware_guest
- virt-v2v -ic vpx://vcenter.example.com/Datacenter/esxi vmware_guest \
- -o rhv -os rhv.nfs:/export_domain --network ovirtmgmt
-
virt-v2v -i libvirtxml guest-domain.xml -o local -os /var/tmp
virt-v2v -i disk disk.img -o local -os /var/tmp
virt-v2v -i disk disk.img -o glance
+ virt-v2v -ic vpx://vcenter.example.com/Datacenter/esxi vmware_guest \
+ -o rhv-upload -oc https://ovirt-engine.example.com/ovirt-engine/api \
+ -os ovirt-data -op /tmp/ovirt-admin-password \
+ --rhv-cafile /tmp/ca.pem --rhv-direct \
+ --network ovirtmgmt
+
virt-v2v -ic qemu:///system qemu_guest --in-place
=head1 DESCRIPTION
@@ -42,9 +45,9 @@ libguestfs E<ge> 1.28.
Xen ───▶│ -i libvirt ──▶ │ │ │ (default) │
... ───▶│ (default) │ │ │ ──┐ └────────────┘
└────────────┘ │ │ ─┐└──────▶ -o glance
- -i libvirtxml ─────────▶ │ │ ┐└─────────▶ -o rhv
- -i vmx ────────────────▶ │ │ └──────────▶ -o vdsm
- └────────────┘
+ -i libvirtxml ─────────▶ │ │ ┐├─────────▶ -o rhv
+ -i vmx ────────────────▶ │ │ │└─────────▶ -o vdsm
+ └────────────┘ └──────────▶ -o rhv-upload
Virt-v2v has a number of possible input and output modes, selected
using the I<-i> and I<-o> options. Only one input and output mode can
@@ -103,20 +106,18 @@ For more information see L</INPUT FROM VMWARE VCENTER SERVER> below.
=head2 Convert from VMware to RHV/oVirt
This is the same as the previous example, except you want to send the
-guest to a RHV-M Export Storage Domain which is located remotely
-(over NFS) at C<rhv.nfs:/export_domain>. If you are unclear about
-the location of the Export Storage Domain you should check the
-settings on your RHV-M management console. Guest network
+guest to a RHV Data Domain using the RHV REST API. Guest network
interface(s) are connected to the target network called C<ovirtmgmt>.
virt-v2v -ic vpx://vcenter.example.com/Datacenter/esxi vmware_guest \
- -o rhv -os rhv.nfs:/export_domain --network ovirtmgmt
+ -o rhv-upload -oc https://ovirt-engine.example.com/ovirt-engine/api \
+ -os ovirt-data -op /tmp/ovirt-admin-password \
+ --rhv-cafile /tmp/ca.pem --rhv-direct \
+ --network ovirtmgmt
In this case the host running virt-v2v acts as a B<conversion server>.
-Note that after conversion, the guest will appear in the RHV-M Export
-Storage Domain, from where you will need to import it using the RHV-M
-user interface. (See L</OUTPUT TO RHV>).
+For more information see L</OUTPUT TO RHV> below.
=head2 Convert from ESXi hypervisor over SSH to local libvirt
@@ -509,6 +510,10 @@ written.
This is the same as I<-o rhv>.
+=item B<-o> B<ovirt-upload>
+
+This is the same as I<-o rhv-upload>.
+
=item B<-o> B<qemu>
Set the output method to I<qemu>.
@@ -533,6 +538,16 @@ I<-os> parameter must also be used to specify the location of the
Export Storage Domain. Note this does not actually import the guest
into RHV. You have to do that manually later using the UI.
+See L</OUTPUT TO RHV (OLD METHOD)> below.
+
+=item B<-o> B<rhv-upload>
+
+Set the output method to I<rhv-upload>.
+
+The converted guest is written directly to a RHV Data Domain.
+This is a faster method than I<-o rhv>, but requires oVirt
+or RHV E<ge> 4.2.
+
See L</OUTPUT TO RHV> below.
=item B<-o> B<vdsm>
@@ -1870,6 +1885,53 @@ Define the final guest in libvirt:
=head1 OUTPUT TO RHV
+This new method to upload guests to oVirt or RHV directly via the REST
+API requires oVirt/RHV E<ge> 4.2.
+
+You need to specify I<-o rhv-upload> as well as the following extra
+parameters:
+
+=over 4
+
+=item I<-oc> C<https://ovirt-engine.example.com/ovirt-engine/api>
+
+The URL of the REST API.  This is usually the server name with
+C</ovirt-engine/api> appended, but it might be different if you
+installed the oVirt engine at a different path.
+
+You can optionally add a username and port number to the URL. If the
+username is not specified then virt-v2v defaults to using
+C<admin@internal> which is the typical superuser account for oVirt
+instances.
+
+=item I<-op> F<password-file>
+
+A file containing a password to be used when connecting to the oVirt
+engine. Note the file should contain the whole password, B<without
+any trailing newline>, and for security the file should have mode
+C<0600> so that others cannot read it.
+
+=item I<-os> C<ovirt-data>
+
+The name of the destination storage domain (the Data Domain that the
+converted disks are uploaded to).
+
+=item I<--rhv-cafile> F<ca.pem>
+
+The F<ca.pem> file (Certificate Authority), copied from
+F</etc/pki/ovirt-engine/ca.pem> on the oVirt engine.
+
+=item I<--rhv-direct>
+
+If this option is given then virt-v2v will attempt to directly upload
+the disk to the oVirt node, otherwise it will proxy the upload through
+the oVirt engine. Direct upload requires that you have network access
+to the oVirt nodes. Non-direct upload is slightly slower but should
+work in all situations.
+
+=back
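+
+As a quick sanity check of the values above before running virt-v2v,
+you can exercise the same Python SDK that this output mode uses.  The
+following is only an illustrative sketch, not part of virt-v2v, and it
+assumes the SDK's C<ca_file> parameter for TLS verification; replace
+the placeholder values with your own:
+
+ import ovirtsdk4 as sdk
+ connection = sdk.Connection(
+     url = "https://ovirt-engine.example.com/ovirt-engine/api",
+     username = "admin@internal",
+     password = open("/tmp/ovirt-admin-password").read().rstrip(),
+     ca_file = "/tmp/ca.pem",
+ )
+ sds = connection.system_service().storage_domains_service()
+ print([sd.name for sd in sds.list(search = "name=ovirt-data")])
+ connection.close()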
+
+=head1 OUTPUT TO RHV (OLD METHOD)
+
This section only applies to the I<-o rhv> output mode. If you use
virt-v2v from the RHV-M user interface, then behind the scenes the
import is managed by VDSM using the I<-o vdsm> output mode (which end
--
2.13.2