Allow the user to control the maximum request size. This can improve
performance and minimize memory usage. With the new option, it is easy
to test and tune the tool for a particular environment.
I tested this on our scale lab with FC storage, copying a 100 GiB image
with 66 GiB of data from a local fast SSD (Dell Express Flash PM1725b
3.2TB SFF) to a qcow2 preallocated volume on an FC storage domain
(NETAPP, LUN C-Mode).
The source and destination images are served by qemu-nbd, using the
same configuration used in oVirt:
qemu-nbd --persistent --shared=8 --format=qcow2 --cache=none --aio=native \
--read-only /scratch/nsoffer-v2v.qcow2 --socket /tmp/src.sock
qemu-nbd --persistent --shared=8 --format=qcow2 --cache=none --aio=native \
/dev/{vg-name}/{lv-name} --socket /tmp/dst.sock
Tested with hyperfine, using 10 runs for every request size.
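The exact hyperfine command line is not shown above; an invocation along
these lines is assumed, with only the request size changing between
benchmarks:
hyperfine --runs 10 \
    "./nbdcopy --request-size=1048576 nbd+unix:///?socket=/tmp/src.sock \
        nbd+unix:///?socket=/tmp/dst.sock"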
Benchmark #1: ./nbdcopy --request-size=262144 nbd+unix:///?socket=/tmp/src.sock \
nbd+unix:///?socket=/tmp/dst.sock
Time (mean ± σ): 113.299 s ± 1.160 s [User: 7.427 s, System: 23.862 s]
Range (min … max): 112.332 s … 115.598 s 10 runs
Benchmark #2: ./nbdcopy --request-size=524288 nbd+unix:///?socket=/tmp/src.sock \
nbd+unix:///?socket=/tmp/dst.sock
Time (mean ± σ): 107.952 s ± 0.800 s [User: 10.085 s, System: 24.392 s]
Range (min … max): 107.023 s … 109.368 s 10 runs
Benchmark #3: ./nbdcopy --request-size=1048576 nbd+unix:///?socket=/tmp/src.sock \
nbd+unix:///?socket=/tmp/dst.sock
Time (mean ± σ): 105.992 s ± 0.442 s [User: 11.809 s, System: 24.215 s]
Range (min … max): 105.391 s … 106.853 s 10 runs
Benchmark #4: ./nbdcopy --request-size=2097152 nbd+unix:///?socket=/tmp/src.sock \
nbd+unix:///?socket=/tmp/dst.sock
Time (mean ± σ): 107.625 s ± 1.011 s [User: 11.767 s, System: 26.629 s]
Range (min … max): 105.650 s … 109.466 s 10 runs
Benchmark #5: ./nbdcopy --request-size=4194304 nbd+unix:///?socket=/tmp/src.sock \
nbd+unix:///?socket=/tmp/dst.sock
Time (mean ± σ): 111.190 s ± 0.874 s [User: 11.160 s, System: 27.767 s]
Range (min … max): 109.967 s … 112.442 s 10 runs
Benchmark #6: ./nbdcopy --request-size=8388608 nbd+unix:///?socket=/tmp/src.sock \
nbd+unix:///?socket=/tmp/dst.sock
Time (mean ± σ): 117.950 s ± 1.051 s [User: 10.570 s, System: 28.344 s]
Range (min … max): 116.077 s … 119.758 s 10 runs
Benchmark #7: ./nbdcopy --request-size=16777216 nbd+unix:///?socket=/tmp/src.sock \
nbd+unix:///?socket=/tmp/dst.sock
Time (mean ± σ): 125.154 s ± 2.121 s [User: 10.213 s, System: 28.392 s]
Range (min … max): 122.395 s … 129.108 s 10 runs
Benchmark #8: ./nbdcopy --request-size=33554432 nbd+unix:///?socket=/tmp/src.sock \
nbd+unix:///?socket=/tmp/dst.sock
Time (mean ± σ): 130.694 s ± 1.315 s [User: 4.459 s, System: 38.734 s]
Range (min … max): 128.872 s … 133.255 s 10 runs
For reference, the same copy using qemu-img convert with the maximum
number of coroutines:
Benchmark #9: qemu-img convert -n -f raw -O raw -W -m 16 \
nbd+unix:///?socket=/tmp/src.sock nbd+unix:///?socket=/tmp/dst.sock
Time (mean ± σ): 106.093 s ± 4.616 s [User: 3.994 s, System: 24.768 s]
Range (min … max): 102.407 s … 115.493 s 10 runs
We can see that the current default 32 MiB request size is 23% slower
and uses 17% more CPU time compared with a 1 MiB request size.
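(The elapsed-time figure presumably compares benchmark #8 with
benchmark #3: 130.694 s / 105.992 s ≈ 1.23, i.e. about 23% slower.)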
Signed-off-by: Nir Soffer <nsoffer@redhat.com>
---
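The option validation relies on the is_power_of_2 helper that already
exists in the common headers; a minimal equivalent is sketched below for
reference only (an assumption, not part of this patch):

    #include <stdbool.h>

    /* True if v is a non-zero power of two. */
    static inline bool
    is_power_of_2 (unsigned long v)
    {
      return v && (v & (v - 1)) == 0;
    }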
copy/main.c | 18 ++++++++++++++++++
copy/multi-thread-copying.c | 6 +++---
copy/nbdcopy.h | 2 ++
copy/nbdcopy.pod | 7 ++++++-
copy/synch-copying.c | 17 ++++++++++++-----
5 files changed, 41 insertions(+), 9 deletions(-)
diff --git a/copy/main.c b/copy/main.c
index 55c2b53..4fe7ae4 100644
--- a/copy/main.c
+++ b/copy/main.c
@@ -50,6 +50,7 @@ bool flush; /* --flush flag */
unsigned max_requests = 64; /* --requests */
bool progress; /* -p flag */
int progress_fd = -1; /* --progress=FD */
+unsigned request_size = MAX_REQUEST_SIZE; /* --request-size */
unsigned sparse_size = 4096; /* --sparse */
bool synchronous; /* --synchronous flag */
unsigned threads; /* --threads */
@@ -91,6 +92,7 @@ main (int argc, char *argv[])
DESTINATION_IS_ZERO_OPTION,
FLUSH_OPTION,
NO_EXTENTS_OPTION,
+ REQUEST_SIZE_OPTION,
SYNCHRONOUS_OPTION,
};
const char *short_options = "C:pR:S:T:vV";
@@ -103,6 +105,7 @@ main (int argc, char *argv[])
{ "flush", no_argument, NULL, FLUSH_OPTION },
{ "no-extents", no_argument, NULL, NO_EXTENTS_OPTION },
{ "progress", optional_argument, NULL, 'p' },
+ { "request-size", optional_argument, NULL, REQUEST_SIZE_OPTION },
{ "requests", required_argument, NULL, 'R' },
{ "short-options", no_argument, NULL, SHORT_OPTIONS },
{ "sparse", required_argument, NULL, 'S' },
@@ -183,6 +186,21 @@ main (int argc, char *argv[])
}
break;
+ case REQUEST_SIZE_OPTION:
+ if (sscanf (optarg, "%u", &request_size) != 1) {
+ fprintf (stderr, "%s: --request-size: could not parse: %s\n",
+ prog, optarg);
+ exit (EXIT_FAILURE);
+ }
+ if (request_size < MIN_REQUEST_SIZE || request_size > MAX_REQUEST_SIZE ||
+ !is_power_of_2 (request_size)) {
+ fprintf (stderr,
+ "%s: --request-size: must be a power of 2 within %d-%d\n",
+ prog, MIN_REQUEST_SIZE, MAX_REQUEST_SIZE);
+ exit (EXIT_FAILURE);
+ }
+ break;
+
case 'R':
if (sscanf (optarg, "%u", &max_requests) != 1 || max_requests == 0)
{
fprintf (stderr, "%s: --requests: could not parse: %s\n",
diff --git a/copy/multi-thread-copying.c b/copy/multi-thread-copying.c
index b1cc9a5..c649d2b 100644
--- a/copy/multi-thread-copying.c
+++ b/copy/multi-thread-copying.c
@@ -183,8 +183,8 @@ worker_thread (void *indexp)
*/
while (exts.ptr[i].length > 0) {
len = exts.ptr[i].length;
- if (len > MAX_REQUEST_SIZE)
- len = MAX_REQUEST_SIZE;
+ if (len > request_size)
+ len = request_size;
data = malloc (len);
if (data == NULL) {
perror ("malloc");
@@ -518,7 +518,7 @@ fill_dst_range_with_zeroes (struct command *command)
/* Fall back to loop writing zeroes. This is going to be slow
* anyway, so do it synchronously. XXX
*/
- data_size = MIN (MAX_REQUEST_SIZE, command->slice.len);
+ data_size = MIN (request_size, command->slice.len);
data = calloc (1, data_size);
if (!data) {
perror ("calloc");
diff --git a/copy/nbdcopy.h b/copy/nbdcopy.h
index e4c3d4e..e7fe1ea 100644
--- a/copy/nbdcopy.h
+++ b/copy/nbdcopy.h
@@ -27,6 +27,7 @@
#include "vector.h"
+#define MIN_REQUEST_SIZE 4096
#define MAX_REQUEST_SIZE (32 * 1024 * 1024)
/* This must be a multiple of MAX_REQUEST_SIZE. Larger is better up
@@ -218,6 +219,7 @@ extern bool flush;
extern unsigned max_requests;
extern bool progress;
extern int progress_fd;
+extern unsigned request_size;
extern unsigned sparse_size;
extern bool synchronous;
extern unsigned threads;
diff --git a/copy/nbdcopy.pod b/copy/nbdcopy.pod
index ae92547..c265550 100644
--- a/copy/nbdcopy.pod
+++ b/copy/nbdcopy.pod
@@ -7,7 +7,7 @@ nbdcopy - copy to and from an NBD server
nbdcopy [--allocated] [-C N|--connections=N]
[--destination-is-zero|--target-is-zero] [--flush]
[--no-extents] [-p|--progress|--progress=FD]
- [-R N|--requests=N] [-S N|--sparse=N]
+ [--request-size=N] [-R N|--requests=N] [-S N|--sparse=N]
[--synchronous] [-T N|--threads=N] [-v|--verbose]
SOURCE DESTINATION
@@ -152,6 +152,11 @@ following shell commands:
nbdcopy --progress=3 ...
exec 3>&-
+=item B<--request-size=>N
+
+Set the maximum request size in bytes. The value must be a power of 2
+between 4096 bytes and 32 MiB, the maximum specified by the NBD protocol.
+
=item B<-R> N
=item B<--requests=>N
diff --git a/copy/synch-copying.c b/copy/synch-copying.c
index 17bda16..c63bd2d 100644
--- a/copy/synch-copying.c
+++ b/copy/synch-copying.c
@@ -28,12 +28,17 @@
#include "nbdcopy.h"
-static char buf[MAX_REQUEST_SIZE];
-
void
synch_copying (void)
{
uint64_t offset = 0;
+ unsigned char *buf;
+
+ buf = malloc (request_size);
+ if (buf == NULL) {
+ perror ("malloc");
+ exit (EXIT_FAILURE);
+ }
/* If the source size is unknown then we copy data and cannot use
* extent information.
@@ -41,7 +46,7 @@ synch_copying (void)
if (src->size == -1) {
size_t r;
- while ((r = src->ops->synch_read (src, buf, sizeof buf, offset)) > 0) {
+ while ((r = src->ops->synch_read (src, buf, request_size, offset)) > 0) {
dst->ops->synch_write (dst, buf, r, offset);
offset += r;
progress_bar (offset, src->size);
@@ -57,8 +62,8 @@ synch_copying (void)
uint64_t count = src->size - offset;
size_t i, r;
- if (count > sizeof buf)
- count = sizeof buf;
+ if (count > request_size)
+ count = request_size;
if (extents)
src->ops->get_extents (src, 0, offset, count, &exts);
@@ -99,4 +104,6 @@ synch_copying (void)
free (exts.ptr);
} /* while */
}
+
+ free (buf);
}
--
2.26.2