[PATCH nbdkit] plugins: python: Add imageio plugin example
by Nir Soffer
This is mainly for testing the new parallel python threading model, but
it is also an example of how to manage multiple connections from a plugin.
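The core of the plugin is a small thread-safe pool of imageio clients,
taken and returned around each request. As a standalone illustration of
the pattern (using a stub client here instead of ImageioClient), it boils
down to this:

    import queue
    from contextlib import contextmanager

    class StubClient:                  # stands in for ImageioClient here
        def read(self, offset, buf):
            buf[:] = b"\0" * len(buf)

    pool = queue.Queue()
    for _ in range(4):                 # connections=4
        pool.put(StubClient())

    @contextmanager
    def client():
        c = pool.get()                 # blocks until a client is free
        try:
            yield c
        finally:
            pool.put(c)

    # any nbdkit worker thread can then do:
    with client() as c:
        buf = bytearray(512)
        c.read(0, buf)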
I tested this with a local imageio server, serving a qcow2 image on a
local SSD.
Start imageio server from imageio source:
./ovirt-imageio -c test
Create test disk:
qemu-img create -f qcow2 /var/tmp/disk.qcow2 6g
Add a ticket for accessing the image, using the nbd example ticket:
curl --unix-socket ../daemon/test/daemon.sock \
--upload-file examples/nbd.json http://localhost/ticket/nbd
Start qemu-nbd, serving the image for imageio:
qemu-nbd --socket=/tmp/nbd.sock --persistent --shared=8 --format=qcow2 \
--aio=native --cache=none --discard=unmap /var/tmp/disk.qcow2
Start nbdkit with this plugin:
./nbdkit -U nbd.sock -t4 -f python ./plugins/python/examples/imageio.py \
transfer_url=https://localhost:54322/images/nbd connections=4 secure=no
Finally, upload the image using qemu-img:
time qemu-img convert -n -f raw -O raw -W /var/tmp/fedora-32.raw \
nbd+unix:///?socket=./nbd.sock
I tested with 1 and 4 threads/connections, creating a new empty qcow2
image before each test.
1 connection, 4 threads:
real 0m7.885s
user 0m0.663s
sys 0m0.803s
4 connections, 4 threads:
real 0m3.336s
user 0m0.439s
sys 0m0.651s
This is what we see on the imageio side:
1 connection:
[connection 1 ops, 7.866482 s]
[dispatch 2630 ops, 6.488580 s]
[extents 1 ops, 0.002326 s]
[zero 1176 ops, 0.661475 s, 4.73 GiB, 7.15 GiB/s]
[write 1451 ops, 5.475842 s, 1.27 GiB, 237.08 MiB/s]
[flush 2 ops, 0.029208 s]
4 connections:
[connection 1 ops, 3.289038 s]
[dispatch 670 ops, 2.679317 s]
[extents 1 ops, 0.010870 s]
[write 383 ops, 2.172633 s, 333.70 MiB, 153.59 MiB/s]
[zero 286 ops, 0.346506 s, 1.29 GiB, 3.72 GiB/s]
[connection 1 ops, 3.303300 s]
[dispatch 632 ops, 2.711896 s]
[zero 273 ops, 0.380406 s, 1.12 GiB, 2.93 GiB/s]
[extents 1 ops, 0.000485 s]
[write 358 ops, 2.182803 s, 310.67 MiB, 142.33 MiB/s]
[connection 1 ops, 3.318177 s]
[dispatch 669 ops, 2.759531 s]
[extents 1 ops, 0.064217 s]
[write 354 ops, 2.067320 s, 336.70 MiB, 162.87 MiB/s]
[zero 313 ops, 0.470069 s, 1.20 GiB, 2.55 GiB/s]
[flush 1 ops, 0.002421 s]
[connection 1 ops, 3.280020 s]
[dispatch 662 ops, 2.685547 s]
[zero 304 ops, 0.431782 s, 1.13 GiB, 2.62 GiB/s]
[extents 1 ops, 0.000424 s]
[write 356 ops, 2.101127 s, 317.17 MiB, 150.95 MiB/s]
[flush 1 ops, 0.000127 s]
Results are not very stable, but the trend is clear. We can use this
to optimize virt-v2v.
Signed-off-by: Nir Soffer <nsoffer@redhat.com>
---
plugins/python/examples/imageio.py | 167 +++++++++++++++++++++++++++++
1 file changed, 167 insertions(+)
create mode 100644 plugins/python/examples/imageio.py
diff --git a/plugins/python/examples/imageio.py b/plugins/python/examples/imageio.py
new file mode 100644
index 00000000..e77fd2f4
--- /dev/null
+++ b/plugins/python/examples/imageio.py
@@ -0,0 +1,167 @@
+# Example Python plugin.
+#
+# This example can be freely used for any purpose.
+#
+# Upload and download images to oVirt with nbdkit and qemu-img.
+#
+# Install ovirt-imageio-client
+#
+# dnf copr enable nsoffer/ovirt-imageio-preview
+# dnf install ovirt-imageio-client
+#
+# To upload or download images, you need to start an image transfer. The
+# easiest way is using the oVirt image_transfer.py example:
+#
+# /usr/share/doc/python3-ovirt-engine-sdk4/examples/image_transfer.py \
+# --engine-url https://my.engine \
+# --username admin@internal \
+# --password-file password \
+# --cafile my.engine.pem \
+# upload disk-uuid
+#
+# This will print the transfer URL for this image transfer.
+#
+# Run this example from the build directory:
+#
+# ./nbdkit -t4 -f -v -U /tmp/nbd.sock python \
+# ./plugins/python/examples/imageio.py \
+# transfer_url=https://server:54322/images/ticket-id \
+# connections=4 \
+# secure=no
+#
+# Note that the number of nbdkit threads and imageio connections should match.
+#
+# To upload an image run:
+#
+# qemu-img convert -f qcow2 -O raw disk.img nbd+unix:///?socket=/tmp/nbd.sock
+#
+# Downloading an image is not efficient with this version, since we don't report
+# extents yet.
+#
+# The -f -v arguments are optional. They cause the server to stay in
+# the foreground and print debugging, which is useful when testing.
+
+import queue
+import threading
+from contextlib import contextmanager
+
+from ovirt_imageio.client import ImageioClient
+
+import nbdkit
+
+# Using version 2 supporting the buffer protocol for better performance.
+API_VERSION = 2
+
+# Plugin configuration, can be set using key=value in the command line.
+params = {
+ "secure": True,
+ "ca_file": "",
+ "connections": 1,
+ "transfer_url": None,
+}
+
+
+def config(key, value):
+ """
+    Parse the plugin parameters. The transfer_url parameter contains the
+    transfer URL that we want to serve.
+ """
+ if key == "transfer_url":
+ params["transfer_url"] = value
+ elif key == "connections":
+ params["connections"] = int(value)
+ elif key == "ca_file":
+ params["ca_file"] = value
+ elif key == "secure":
+ params["secure"] = boolify(key, value)
+ else:
+ raise RuntimeError("unknown parameter: {!r}".format(key))
+
+
+def boolify(key, value):
+ v = value.lower()
+ if v in ("yes", "true", "1"):
+ return True
+ if v in ("no", "false", 0):
+ return False
+ raise RuntimeError("Invalid boolean value for {}: {!r}".format(key, value))
+
+
+def config_complete():
+ """
+    Called when configuration has completed.
+ """
+ if params["transfer_url"] is None:
+ raise RuntimeError("'transfer_url' parameter is required")
+
+
+def thread_model():
+ """
+    Use the parallel thread model to speed up the transfer with multiple
+    connections to the imageio server.
+ """
+ return nbdkit.THREAD_MODEL_PARALLEL
+
+
+def open(readonly):
+ """
+    Called when a client connection is opened. We create a pool of connected
+    clients that will be used for requests later.
+ """
+ pool = queue.Queue()
+ for i in range(params["connections"]):
+ client = ImageioClient(
+ params["transfer_url"],
+ cafile=params["ca_file"],
+ secure=params["secure"])
+ pool.put(client)
+ return { "pool": pool }
+
+
+def close(h):
+ """
+ Called when plugin is closed. Close and remove all clients from the pool.
+ """
+ pool = h["pool"]
+ while not pool.empty():
+ client = pool.get()
+ client.close()
+
+
+@contextmanager
+def client(h):
+ """
+ Context manager fetching an imageio client from the pool. Blocks until a
+ client is available.
+ """
+ pool = h["pool"]
+ client = pool.get()
+ try:
+ yield client
+ finally:
+ pool.put(client)
+
+
+def get_size(h):
+ with client(h) as c:
+ return c.size()
+
+
+def pread(h, buf, offset, flags):
+ with client(h) as c:
+ c.read(offset, buf)
+
+
+def pwrite(h, buf, offset, flags):
+ with client(h) as c:
+ c.write(offset, buf)
+
+
+def zero(h, count, offset, flags):
+ with client(h) as c:
+ c.zero(offset, count)
+
+
+def flush(h, flags):
+ with client(h) as c:
+ c.flush()
--
2.25.4
[nbdkit PATCH 0/4] More .list_exports uses
by Eric Blake
Here are changes to the file plugin (which I'm happy with) and a new
exportname filter (which is still at RFC stage; I need to finish
implementing strict mode in .open, and add tests).
I also discovered that we really want .list_exports and .open to know
when they are used on plaintext vs. tls clients for --tls=on, and we
may want to split out a new .default_export callback rather than
overloading .list_exports(default_only=true). Ah well, more to play
with tomorrow.
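To make the trade-off concrete, here is a rough Python sketch of the two
shapes being weighed (purely illustrative; these are not the actual
nbdkit callback signatures):

    exports = [("disk1", "first disk"), ("disk2", "second disk")]

    # Overloaded form: one callback, a flag changes what the result means.
    def list_exports(default_only=False):
        return exports[:1] if default_only else exports

    # Split form: enumeration and default-export resolution are separate.
    def default_export():
        return exports[0][0]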
Eric Blake (4):
file: Forbid non-regular, non-block file names
file: Add .list_exports support
file: Use dirent.dt_type for speed
exportname: New filter
.../exportname/nbdkit-exportname-filter.pod | 116 +++++++++++
filters/ext2/nbdkit-ext2-filter.pod | 5 +
plugins/file/nbdkit-file-plugin.pod | 30 ++-
configure.ac | 3 +
filters/exportname/Makefile.am | 67 +++++++
tests/Makefile.am | 4 +-
plugins/file/file.c | 135 +++++++++++--
filters/exportname/exportname.c | 180 ++++++++++++++++++
tests/test-file-dir.sh | 143 ++++++++++++++
TODO | 9 +
10 files changed, 669 insertions(+), 23 deletions(-)
create mode 100644 filters/exportname/nbdkit-exportname-filter.pod
create mode 100644 filters/exportname/Makefile.am
create mode 100644 filters/exportname/exportname.c
create mode 100755 tests/test-file-dir.sh
--
2.28.0
[PATCH nbdkit] Experiment with parallel python plugin
by Nir Soffer
This is a quick hack to experiment with the parallel threading model in
the python plugin.
Changes:
- Use aligned buffers to make it possible to use O_DIRECT (see the short
sketch after this list). Using parallel I/O does not buy us much when
using buffered I/O: pwrite() copies data to the page cache, and pread()
reads data from the page cache.
- Disable extents in the file plugin. This way we can compare it with
the python file example.
- Implement flush in the file example.
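As a minimal sketch of the alignment requirement (assuming Linux and a
local raw file; this is not part of the patch): O_DIRECT transfers need a
suitably aligned buffer, which an anonymous mmap provides, while a plain
bytearray may fail with EINVAL:

    import mmap, os

    fd = os.open("/var/tmp/disk.raw", os.O_RDONLY | os.O_DIRECT)
    buf = mmap.mmap(-1, 2 * 1024 * 1024)   # anonymous mapping, page-aligned
    n = os.preadv(fd, [buf], 0)            # direct read of the first 2 MiB
    os.close(fd)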
With these changes, I could compare the file plugin with the new python
file example, and it seems that the parallel threading model works
nicely, and we get similar performance for the case of a fully allocated
image.
I created a test image using:
$ virt-builder fedora-32 -o /var/tmp/fedora-32.raw --root-password=password:root
And a fully allocated test image using:
$ fallocate --length 6g /var/tmp/disk.raw
$ dd if=/var/tmp/fedora-32.raw bs=8M of=/var/tmp/disk.raw iflag=direct oflag=direct conv=fsync,notrunc
$ qemu-img map --output json /var/tmp/disk.raw
[{ "start": 0, "length": 6442450944, "depth": 0, "zero": false, "data": true, "offset": 0}]
For reference, copying this image with dd using direct I/O:
$ dd if=/var/tmp/disk.raw bs=2M of=/dev/shm/disk.raw iflag=direct conv=fsync status=progress
6442450944 bytes (6.4 GB, 6.0 GiB) copied, 10.4783 s, 615 MB/s
Copying same image with qemu-img convert, disabling zero detection,
using different number of coroutines:
$ time qemu-img convert -f raw -O raw -T none -S0 -m1 -W /var/tmp/disk.raw /dev/shm/disk.raw
real 0m11.527s
user 0m0.102s
sys 0m2.330s
$ time qemu-img convert -f raw -O raw -T none -S0 -m2 -W /var/tmp/disk.raw /dev/shm/disk.raw
real 0m5.971s
user 0m0.080s
sys 0m2.749s
$ time qemu-img convert -f raw -O raw -T none -S0 -m4 -W /var/tmp/disk.raw /dev/shm/disk.raw
real 0m3.674s
user 0m0.071s
sys 0m3.140s
$ time qemu-img convert -f raw -O raw -T none -S0 -m8 -W /var/tmp/disk.raw /dev/shm/disk.raw
real 0m3.408s
user 0m0.069s
sys 0m3.813s
$ time qemu-img convert -f raw -O raw -T none -S0 -m16 -W /var/tmp/disk.raw /dev/shm/disk.raw
real 0m3.305s
user 0m0.054s
sys 0m3.767s
Same with the modified file plugin, using direct I/O and without
extents:
$ rm -f /tmp/nbd.sock && ./nbdkit -U /tmp/nbd.sock -t1 -f -r file file=/var/tmp/disk.raw
$ time qemu-img convert -f raw -O raw -S0 -m16 -W nbd:unix:/tmp/nbd.sock /dev/shm/disk.raw
real 0m12.167s
user 0m5.798s
sys 0m2.477s
$ rm -f /tmp/nbd.sock && ./nbdkit -U /tmp/nbd.sock -t2 -f -r file file=/var/tmp/disk.raw
$ time qemu-img convert -f raw -O raw -S0 -m16 -W nbd:unix:/tmp/nbd.sock /dev/shm/disk.raw
real 0m7.981s
user 0m5.204s
sys 0m2.740s
$ rm -f /tmp/nbd.sock && ./nbdkit -U /tmp/nbd.sock -t4 -f -r file file=/var/tmp/disk.raw
$ time qemu-img convert -f raw -O raw -S0 -m16 -W nbd:unix:/tmp/nbd.sock /dev/shm/disk.raw
real 0m6.568s
user 0m4.996s
sys 0m3.167s
$ rm -f /tmp/nbd.sock && ./nbdkit -U /tmp/nbd.sock -t8 -f -r file file=/var/tmp/disk.raw
$ time qemu-img convert -f raw -O raw -S0 -m16 -W nbd:unix:/tmp/nbd.sock /dev/shm/disk.raw
real 0m6.493s
user 0m4.950s
sys 0m3.492s
$ rm -f /tmp/nbd.sock && ./nbdkit -U /tmp/nbd.sock -t16 -f -r file file=/var/tmp/disk.raw
$ time qemu-img convert -f raw -O raw -S0 -m16 -W nbd:unix:/tmp/nbd.sock /dev/shm/disk.raw
real 0m6.138s
user 0m4.621s
sys 0m3.550s
Finally, same with the python file example:
$ rm -f /tmp/nbd.sock && ./nbdkit -U /tmp/nbd.sock -t1 -f -r python ./plugins/python/examples/file.py file=/var/tmp/disk.raw
$ time qemu-img convert -f raw -O raw -S0 -m16 -W nbd:unix:/tmp/nbd.sock /dev/shm/disk.raw
real 0m12.398s
user 0m6.652s
sys 0m2.484s
$ rm -f /tmp/nbd.sock && ./nbdkit -U /tmp/nbd.sock -t2 -f -r python ./plugins/python/examples/file.py file=/var/tmp/disk.raw
$ time qemu-img convert -p -f raw -O raw -S0 -m16 -W nbd:unix:/tmp/nbd.sock /dev/shm/disk.raw
real 0m8.169s
user 0m5.418s
sys 0m2.736s
$ rm -f /tmp/nbd.sock && ./nbdkit -U /tmp/nbd.sock -t4 -f -r python ./plugins/python/examples/file.py file=/var/tmp/disk.raw
$ time qemu-img convert -p -f raw -O raw -S0 -m16 -W nbd:unix:/tmp/nbd.sock /dev/shm/disk.raw
real 0m6.419s
user 0m4.891s
sys 0m3.103s
$ rm -f /tmp/nbd.sock && ./nbdkit -U /tmp/nbd.sock -t8 -f -r python ./plugins/python/examples/file.py file=/var/tmp/disk.raw
$ time qemu-img convert -p -f raw -O raw -S0 -m16 -W nbd:unix:/tmp/nbd.sock /dev/shm/disk.raw
real 0m6.610s
user 0m5.115s
sys 0m3.377s
$ rm -f /tmp/nbd.sock && ./nbdkit -U /tmp/nbd.sock -t16 -f -r python ./plugins/python/examples/file.py file=/var/tmp/disk.raw
$ time qemu-img convert -p -f raw -O raw -S0 -m16 -W nbd:unix:/tmp/nbd.sock /dev/shm/disk.raw
real 0m6.093s
user 0m4.520s
sys 0m3.567s
I think this shows that the parallel threading model works as well for
the python plugin as for the file plugin.
---
plugins/file/file.c | 4 ++--
plugins/python/examples/file.py | 5 ++++-
server/plugins.c | 20 ++++++++++++++------
server/threadlocal.c | 7 +++++--
4 files changed, 25 insertions(+), 11 deletions(-)
diff --git a/plugins/file/file.c b/plugins/file/file.c
index dc99f992..27316b9f 100644
--- a/plugins/file/file.c
+++ b/plugins/file/file.c
@@ -170,7 +170,7 @@ file_open (int readonly)
return NULL;
}
- flags = O_CLOEXEC|O_NOCTTY;
+ flags = O_CLOEXEC|O_NOCTTY|O_DIRECT;
if (readonly)
flags |= O_RDONLY;
else
@@ -551,7 +551,7 @@ file_can_extents (void *handle)
nbdkit_debug ("extents disabled: lseek: SEEK_HOLE: %m");
return 0;
}
- return 1;
+ return 0;
}
static int
diff --git a/plugins/python/examples/file.py b/plugins/python/examples/file.py
index 866b8244..3652eb52 100644
--- a/plugins/python/examples/file.py
+++ b/plugins/python/examples/file.py
@@ -49,7 +49,7 @@ def open(readonly):
flags = os.O_RDONLY
else:
flags = os.O_RDWR
- fd = os.open(filename, flags)
+ fd = os.open(filename, flags | os.O_DIRECT)
return { 'fd': fd }
def get_size(h):
@@ -65,3 +65,6 @@ def pwrite(h, buf, offset, flags):
n = os.pwritev(h['fd'], [buf], offset)
if n != len(buf):
raise RuntimeError("short write")
+
+def flush(h, flags):
+ os.fsync(h['fd'])
diff --git a/server/plugins.c b/server/plugins.c
index d4364cd2..ce4700a3 100644
--- a/server/plugins.c
+++ b/server/plugins.c
@@ -631,6 +631,8 @@ plugin_zero (struct backend *b, void *handle,
bool fast_zero = flags & NBDKIT_FLAG_FAST_ZERO;
bool emulate = false;
bool need_flush = false;
+ void *zero_buffer = NULL;
+ int buffer_size = MIN (MAX_REQUEST_SIZE, count);
if (fua && backend_can_fua (b) != NBDKIT_FUA_NATIVE) {
flags &= ~NBDKIT_FLAG_FUA;
@@ -669,19 +671,25 @@ plugin_zero (struct backend *b, void *handle,
threadlocal_set_error (0);
*err = 0;
+ *err = posix_memalign(&zero_buffer, 4096, buffer_size);
+ if (*err != 0) {
+ r = -1;
+ goto done;
+ }
+
+ memset(zero_buffer, 0, buffer_size);
+
while (count) {
- /* Always contains zeroes, but we can't use const or else gcc 9
- * will use .rodata instead of .bss and inflate the binary size.
- */
- static /* const */ char buf[MAX_REQUEST_SIZE];
- uint32_t limit = MIN (count, sizeof buf);
+ uint32_t limit = MIN (count, buffer_size);
- r = plugin_pwrite (b, handle, buf, limit, offset, flags, err);
+ r = plugin_pwrite (b, handle, zero_buffer, limit, offset, flags, err);
if (r == -1)
break;
count -= limit;
}
+ free(zero_buffer);
+
done:
if (r != -1 && need_flush)
r = plugin_flush (b, handle, 0, err);
diff --git a/server/threadlocal.c b/server/threadlocal.c
index 90230028..04c82842 100644
--- a/server/threadlocal.c
+++ b/server/threadlocal.c
@@ -195,13 +195,16 @@ threadlocal_buffer (size_t size)
if (threadlocal->buffer_size < size) {
void *ptr;
+ int err;
- ptr = realloc (threadlocal->buffer, size);
- if (ptr == NULL) {
+ err = posix_memalign (&ptr, 4096, size);
+ if (err != 0) {
nbdkit_error ("threadlocal_buffer: realloc: %m");
return NULL;
}
+
memset (ptr, 0, size);
+ free(threadlocal->buffer);
threadlocal->buffer = ptr;
threadlocal->buffer_size = size;
}
--
2.25.4
[nbdkit PATCH v2 0/5] .list_exports
by Eric Blake
Since v1:
- patch 1: check size limits
- patch 2: better handling of default export name canonicalization
- patch 3: support filters as well as plugins
- patch 4: new
- patch 5: rewrite sh parser, fix testsuite to actually work and
cover more cases (now that libnbd.git is fixed)
Eric Blake (4):
server: Add exports list functions
server: Prepare to use export list from plugin
log: Add .list_exports support
sh, eval: Add .list_exports support
Richard W.M. Jones (1):
server: Implement list_exports.
docs/nbdkit-filter.pod | 92 +++++++++++++++--
docs/nbdkit-plugin.pod | 64 +++++++++++-
docs/nbdkit-protocol.pod | 4 +-
filters/log/nbdkit-log-filter.pod | 18 ++--
plugins/eval/nbdkit-eval-plugin.pod | 2 +
plugins/sh/nbdkit-sh-plugin.pod | 52 ++++++++++
include/nbdkit-common.h | 4 +
include/nbdkit-filter.h | 18 ++++
include/nbdkit-plugin.h | 3 +
server/Makefile.am | 2 +
tests/Makefile.am | 2 +
server/internal.h | 10 ++
common/utils/cleanup.h | 3 +
server/backend.c | 43 ++++++++
server/exports.c | 149 +++++++++++++++++++++++++++
server/filters.c | 13 +++
server/nbdkit.syms | 5 +
server/plugins.c | 15 +++
server/protocol-handshake-newstyle.c | 80 ++++++++------
common/utils/cleanup-nbdkit.c | 6 ++
plugins/sh/methods.h | 4 +-
plugins/eval/eval.c | 2 +
plugins/sh/methods.c | 106 +++++++++++++++++++
plugins/sh/sh.c | 1 +
plugins/sh/example.sh | 8 ++
filters/log/log.c | 50 ++++++++-
tests/test-eval-exports.sh | 108 +++++++++++++++++++
tests/test-layers-filter.c | 10 ++
tests/test-layers-plugin.c | 9 ++
tests/test-layers.c | 15 +++
30 files changed, 839 insertions(+), 59 deletions(-)
create mode 100644 server/exports.c
create mode 100755 tests/test-eval-exports.sh
--
2.28.0
More parallelism in VDDK driver (was: Re: CFME-5.11.7.3 Perf. Tests)
by Richard W.M. Jones
[NB: Adding PUBLIC mailing list because this is upstream discussion]
On Mon, Aug 03, 2020 at 06:27:04PM +0100, Richard W.M. Jones wrote:
> On Mon, Aug 03, 2020 at 06:03:23PM +0300, Nir Soffer wrote:
> > On Mon, Aug 3, 2020 at 5:47 PM Richard W.M. Jones <rjones@redhat.com> wrote:
> > All this make sense, but when we upload 10 disks we have 10 connections
> > but still we cannot push data fast enough. Avoiding copies will help,
> > but I don't
> > expect huge difference.
> >
> > My guess is the issue is on the other side - pulling data from vmware.
>
> I can believe this too. VDDK is really slow, and especially the way
> we use it is probably not optimal either -- but it has a confusing
> threading model and I don't know if we can safely use a more parallel
> thread model:
>
> https://github.com/libguestfs/nbdkit/blob/89a36b1fab8302ddc370695d386a28a...
>
> I may have a play around with this tomorrow.
The threading model allowed by VDDK is restrictive. The rules are here:
https://code.vmware.com/docs/11750/virtual-disk-development-kit-programmi...
I did a bit of testing, and it's possible to do better than what we
are doing at the moment. Not sure at present if this will be easy or
will add a lot of complexity. Read on ...
I found through experimentation that it is possible to open multiple
VDDK handles pointing to the same disk. This would allow us to use
SERIALIZE_REQUESTS (instead of SERIALIZE_ALL_REQUESTS) and have
overlapping calls through different handles all pointing back to the
same server/disk. We would have to change all open/close calls to
make the request through a single background thread - see document
above for why.
Adding a background thread and all the RPC needed to marshall these
calls is the part which would add the complexity.
However I suspect we might be able to get away with just adding a
mutex around open/close. The open/close requests would happen on
different threads but would not overlap. This is contrary to the
rules above, but it could be sufficient. This is what I'm testing at
the moment.
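As a sketch of that idea (in Python for brevity; the real code would be C
in the VDDK plugin, and the vddk_* names below are stand-ins, not real
VDDK API calls): open/close is serialized by a lock, while I/O on
different handles runs in parallel on separate threads:

    import threading

    open_close_lock = threading.Lock()        # open/close never overlap

    def vddk_open(path):                       # stand-in for the real open call
        return {"path": path}

    def vddk_read(handle, offset, length):     # stand-in for a read on one handle
        return b"\0" * length

    def worker(path):
        with open_close_lock:                  # serialize open/close only
            h = vddk_open(path)
        vddk_read(h, 0, 512)                   # reads on separate handles may overlap

    threads = [threading.Thread(target=worker, args=("[datastore] vm/vm.vmdk",))
               for _ in range(4)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()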
It is definitely *not* possible to move to PARALLEL since nbdkit would
make requests in parallel on the same VDDK handle, which is not
allowed. (I did try this to see if the document above was serious,
and it crashed in all kinds of strange ways, so I guess yes they are
serious.)
Rich.
--
Richard Jones, Virtualization Group, Red Hat http://people.redhat.com/~rjones
Read my programming and virtualization blog: http://rwmj.wordpress.com
Fedora Windows cross-compiler. Compile Windows programs, test, and
build Windows installers. Over 100 libraries supported.
http://fedoraproject.org/wiki/MinGW
[PATCH NOT WORKING nbdkit 0/3] python: Allow thread model to be set from Python plugins.
by Richard W.M. Jones
Patch 2 certainly allows you to set the thread model. However patch 3
shows that if you set it to nbdkit.THREAD_MODEL_PARALLEL it will
crash.
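For reference, the kind of test plugin being exercised here (loosely
reconstructed from the backtrace below; the sleep and the size are
guesses) is:

    import time
    import nbdkit

    def thread_model():
        return nbdkit.THREAD_MODEL_PARALLEL

    def open(readonly):
        return {}

    def get_size(h):
        return 1024 * 1024

    def pread(h, count, offset):
        time.sleep(1)              # hold the request so other threads run
        return bytearray(count)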
If you look closely at the stack trace (attached below) you can see
that ignoring threads which are in parts of nbdkit unrelated to
Python:
Thread 4: In pread, waiting in time.sleep(). This thread has released
the GIL.
Thread 2: Started to process a pread call but didn't reach Python code yet.
Thread 1: In pread, segfaults when checking if pread() is defined
in the Python code.
My understanding is this should all be OK and there's no reason for
Python to crash here. I wonder if it's because we're calling "down"
into Python from C, rather than the usual way of calling from Python
into C.
Rich.
Core was generated by `/home/rjones/d/nbdkit/server/nbdkit -v -P test-python-thread-model.pid -U /tmp/'.
Program terminated with signal SIGSEGV, Segmentation fault.
#0 0x00007fc4b943e97c in find_name_in_mro (type=<optimized out>,
name=0x7fc4aba406b0, error=0x7fc4aa21e7d4)
at /usr/src/debug/python3.9-3.9.0~b3-1.fc33.x86_64/Python/errors.c:221
221 PyErr_Occurred(void)
[Current thread is 1 (Thread 0x7fc4aa21f640 (LWP 109870))]
glibc-2.31.9000-21.fc33.x86_64 gmp-6.1.2-12.fc32.x86_64 libidn2-2.3.0-1.fc32.x86_64 libselinux-3.1-1.fc33.x86_64 libtasn1-4.15.0-1.fc32.x86_64 p11-kit-0.23.18.1-1.fc32.x86_64 pcre2-10.34-4.fc32.x86_64
(gdb) t a a bt
Thread 9 (Thread 0x7fc4a9a1e640 (LWP 109871)):
#0 0x00007fc4b9d32e5b in __lll_lock_wait_private () from /lib64/libpthread.so.0
#1 0x00007fc4b9d34115 in flockfile () from /lib64/libpthread.so.0
#2 0x000000000040a053 in nbdkit_debug (fs=0x41bada "starting worker thread %s") at debug.c:91
#3 0x00000000004081b7 in connection_worker (data=0x9ee9ff0) at connections.c:116
#4 0x00007fc4b9d293f9 in start_thread () from /lib64/libpthread.so.0
#5 0x00007fc4b9c55b23 in clone () from /lib64/libc.so.6
Thread 8 (Thread 0x7fc4aaa20640 (LWP 109869)):
#0 0x00007fc4b9d32e5b in __lll_lock_wait_private () from /lib64/libpthread.so.0
#1 0x00007fc4b9d34115 in flockfile () from /lib64/libpthread.so.0
#2 0x000000000040a053 in nbdkit_debug (fs=0x41b3ea "%s: pread count=%u offset=%lu") at debug.c:91
#3 0x0000000000406507 in backend_pread (b=0x9f07040, buf=0x9f80dc0, count=512, offset=0, flags=0, err=0x7fc4aaa1fa78) at backend.c:482
#4 0x0000000000411f7e in handle_request (cmd=0, flags=0, offset=0, count=512, buf=0x9f80dc0, extents=0x0) at protocol.c:241
#5 0x00000000004131c9 in protocol_recv_request_send_reply () at protocol.c:713
#6 0x00000000004081e7 in connection_worker (data=0x9ee9ff0) at connections.c:123
#7 0x00007fc4b9d293f9 in start_thread () from /lib64/libpthread.so.0
#8 0x00007fc4b9c55b23 in clone () from /lib64/libc.so.6
Thread 7 (Thread 0x7fc4aba22640 (LWP 109867)):
#0 0x00007fc4b9c5079b in mprotect () from /lib64/libc.so.6
#1 0x00007fc4b9d2a0e1 in pthread_create@@GLIBC_2.2.5 () from /lib64/libpthread.so.0
#2 0x0000000000408491 in handle_single_connection (sockin=8, sockout=8) at connections.c:204
#3 0x0000000000418824 in start_thread (datav=0x9ee9b10) at sockets.c:337
#4 0x00007fc4b9d293f9 in start_thread () from /lib64/libpthread.so.0
#5 0x00007fc4b9c55b23 in clone () from /lib64/libc.so.6
Thread 6 (Thread 0x7fc4b96bb200 (LWP 109858)):
#0 0x00007fc4b9c4aa2f in poll () from /lib64/libc.so.6
#1 0x0000000000418af3 in check_sockets_and_quit_fd (socks=0x7ffe7d9b3b80) at sockets.c:447
#2 0x0000000000418bd7 in accept_incoming_connections (socks=0x7ffe7d9b3b80) at sockets.c:475
#3 0x000000000040f6c9 in start_serving () at main.c:974
#4 0x000000000040ef8b in main (argc=9, argv=0x7ffe7d9b3de8) at main.c:736
Thread 5 (Thread 0x7fc4a921d640 (LWP 109872)):
#0 0x00007fc4b9c464ef in write () from /lib64/libc.so.6
#1 0x00007fc4b9bd5e5d in _IO_file_write@@GLIBC_2.2.5 () from /lib64/libc.so.6
#2 0x00007fc4b9bd5196 in new_do_write () from /lib64/libc.so.6
#3 0x00007fc4b9bd6f49 in __GI__IO_do_write () from /lib64/libc.so.6
#4 0x00007fc4b9bd73b3 in __GI__IO_file_overflow () from /lib64/libc.so.6
#5 0x00007fc4b9bd2423 in fputc () from /lib64/libc.so.6
#6 0x000000000040a0b9 in nbdkit_debug (fs=0x41bada "starting worker thread %s") at debug.c:98
#7 0x00000000004081b7 in connection_worker (data=0x9ee9cb0) at connections.c:116
#8 0x00007fc4b9d293f9 in start_thread () from /lib64/libpthread.so.0
#9 0x00007fc4b9c55b23 in clone () from /lib64/libc.so.6
Thread 4 (Thread 0x7fc4ab221640 (LWP 109868)):
#0 0x00007fc4b9c4d1bb in select () from /lib64/libc.so.6
#1 0x00007fc4b95321e9 in pysleep (secs=<optimized out>) at /usr/src/debug/python3.9-3.9.0~b3-1.fc33.x86_64/Modules/timemodule.c:1909
#2 time_sleep (self=<optimized out>, obj=<optimized out>) at /usr/src/debug/python3.9-3.9.0~b3-1.fc33.x86_64/Modules/timemodule.c:341
#3 0x00007fc4b9451a62 in cfunction_vectorcall_O (func=<built-in method sleep of module object at remote 0x7fc4abc41540>, args=0x7fc4abc3ffd0, nargsf=<optimized out>, kwnames=<optimized out>) at /usr/src/debug/python3.9-3.9.0~b3-1.fc33.x86_64/Objects/methodobject.c:510
#4 0x00007fc4b944a0a7 in _PyObject_VectorcallTstate (kwnames=0x0, nargsf=<optimized out>, args=0x7fc4abc3ffd0, callable=<built-in method sleep of module object at remote 0x7fc4abc41540>, tstate=0x9f0a610) at /usr/src/debug/python3.9-3.9.0~b3-1.fc33.x86_64/Include/cpython/abstract.h:118
#5 PyObject_Vectorcall (kwnames=0x0, nargsf=<optimized out>, args=0x7fc4abc3ffd0, callable=<built-in method sleep of module object at remote 0x7fc4abc41540>) at /usr/src/debug/python3.9-3.9.0~b3-1.fc33.x86_64/Include/cpython/abstract.h:127
#6 call_function (kwnames=0x0, oparg=<optimized out>, pp_stack=<synthetic pointer>, tstate=0x9f0a610) at /usr/src/debug/python3.9-3.9.0~b3-1.fc33.x86_64/Python/ceval.c:5044
#7 _PyEval_EvalFrameDefault (tstate=<optimized out>, f=<optimized out>, throwflag=<optimized out>) at /usr/src/debug/python3.9-3.9.0~b3-1.fc33.x86_64/Python/ceval.c:3459
#8 0x00007fc4b945270b in _PyEval_EvalFrame (throwflag=0, f=Frame 0x7fc4abc3fe40, for file ./python-thread-model.py, line 49, in pread (h={}, count=512, offset=0), tstate=0x9f0a610) at /usr/src/debug/python3.9-3.9.0~b3-1.fc33.x86_64/Include/internal/pycore_ceval.h:40
#9 function_code_fastcall (tstate=0x9f0a610, co=<optimized out>, args=<optimized out>, nargs=3, globals=<optimized out>) at /usr/src/debug/python3.9-3.9.0~b3-1.fc33.x86_64/Objects/call.c:329
#10 0x00007fc4b9455276 in _PyObject_VectorcallTstate (kwnames=0x0, nargsf=<optimized out>, args=0x7fc4ab220790, callable=<function at remote 0x7fc4aba34a60>, tstate=0x9f0a610) at /usr/src/debug/python3.9-3.9.0~b3-1.fc33.x86_64/Include/cpython/abstract.h:81
#11 _PyObject_CallFunctionVa (tstate=0x9f0a610, callable=<function at remote 0x7fc4aba34a60>, format=<optimized out>, va=<optimized out>, is_size_t=<optimized out>) at /usr/src/debug/python3.9-3.9.0~b3-1.fc33.x86_64/Objects/call.c:542
#12 0x00007fc4b94c8762 in _PyObject_CallFunction_SizeT (callable=<optimized out>, format=<optimized out>) at /usr/src/debug/python3.9-3.9.0~b3-1.fc33.x86_64/Objects/call.c:596
#13 0x00007fc4b9f7b840 in py_pread (handle=0x9ee9f20, buf=0x9f98ca0, count=512, offset=0, flags=0) at python.c:619
#14 0x0000000000410d89 in plugin_pread (b=0x9f07040, handle=0x9ee9f20, buf=0x9f98ca0, count=512, offset=0, flags=0, err=0x7fc4ab220a78) at plugins.c:524
#15 0x000000000040653f in backend_pread (b=0x9f07040, buf=0x9f98ca0, count=512, offset=0, flags=0, err=0x7fc4ab220a78) at backend.c:485
#16 0x0000000000411f7e in handle_request (cmd=0, flags=0, offset=0, count=512, buf=0x9f98ca0, extents=0x0) at protocol.c:241
#17 0x00000000004131c9 in protocol_recv_request_send_reply () at protocol.c:713
#18 0x00000000004081e7 in connection_worker (data=0x9ee9cb0) at connections.c:123
#19 0x00007fc4b9d293f9 in start_thread () from /lib64/libpthread.so.0
#20 0x00007fc4b9c55b23 in clone () from /lib64/libc.so.6
Thread 3 (Thread 0x7fc4a821b640 (LWP 109874)):
#0 0x00007fc4b9c55b15 in clone () from /lib64/libc.so.6
#1 0x00007fc4b9d29310 in annobin_start_thread.start () from /lib64/libpthread.so.0
#2 0x00007fc4a821b640 in ?? ()
#3 0x0000000000000000 in ?? ()
Thread 2 (Thread 0x7fc4a8a1c640 (LWP 109873)):
#0 0x00007fc4b9d32e5b in __lll_lock_wait_private () from /lib64/libpthread.so.0
#1 0x00007fc4b9d34115 in flockfile () from /lib64/libpthread.so.0
#2 0x000000000040a053 in nbdkit_debug (fs=0x41b3ea "%s: pread count=%u offset=%lu") at debug.c:91
#3 0x0000000000406507 in backend_pread (b=0x9f07040, buf=0x9f7cc40, count=512, offset=0, flags=0, err=0x7fc4a8a1ba78) at backend.c:482
#4 0x0000000000411f7e in handle_request (cmd=0, flags=0, offset=0, count=512, buf=0x9f7cc40, extents=0x0) at protocol.c:241
#5 0x00000000004131c9 in protocol_recv_request_send_reply () at protocol.c:713
#6 0x00000000004081e7 in connection_worker (data=0x9eea670) at connections.c:123
#7 0x00007fc4b9d293f9 in start_thread () from /lib64/libpthread.so.0
#8 0x00007fc4b9c55b23 in clone () from /lib64/libc.so.6
Thread 1 (Thread 0x7fc4aa21f640 (LWP 109870)):
#0 0x00007fc4b943e97c in find_name_in_mro (type=<optimized out>, name='pread', error=0x7fc4aa21e7d4) at /usr/src/debug/python3.9-3.9.0~b3-1.fc33.x86_64/Python/errors.c:221
#1 0x00007fc4b943e7e3 in _PyType_Lookup (type=0x7fc4b966c760 <PyModule_Type>, name='pread') at /usr/src/debug/python3.9-3.9.0~b3-1.fc33.x86_64/Objects/typeobject.c:3232
#2 0x00007fc4b94512bd in _PyObject_GenericGetAttrWithDict (obj=<module at remote 0x7fc4abbfabd0>, name='pread', dict=0x0, suppress=0) at /usr/src/debug/python3.9-3.9.0~b3-1.fc33.x86_64/Objects/object.c:1194
#3 0x00007fc4b9455194 in PyObject_GenericGetAttr (name='pread', obj=<module at remote 0x7fc4abbfabd0>) at /usr/src/debug/python3.9-3.9.0~b3-1.fc33.x86_64/Objects/object.c:1278
#4 module_getattro (m=0x7fc4abbfabd0, name='pread') at /usr/src/debug/python3.9-3.9.0~b3-1.fc33.x86_64/Objects/moduleobject.c:717
#5 0x00007fc4b94c2014 in PyObject_GetAttrString (v=<module at remote 0x7fc4abbfabd0>, name=<optimized out>) at /usr/src/debug/python3.9-3.9.0~b3-1.fc33.x86_64/Objects/object.c:795
#6 0x00007fc4b9f7a5e8 in callback_defined (name=0x7fc4b9f7d4b6 "pread", obj_rtn=0x7fc4aa21e940) at python.c:77
#7 0x00007fc4b9f7b7c8 in py_pread (handle=0x9ee9f20, buf=0x9f88890, count=512, offset=0, flags=0) at python.c:610
#8 0x0000000000410d89 in plugin_pread (b=0x9f07040, handle=0x9ee9f20, buf=0x9f88890, count=512, offset=0, flags=0, err=0x7fc4aa21ea78) at plugins.c:524
#9 0x000000000040653f in backend_pread (b=0x9f07040, buf=0x9f88890, count=512, offset=0, flags=0, err=0x7fc4aa21ea78) at backend.c:485
#10 0x0000000000411f7e in handle_request (cmd=0, flags=0, offset=0, count=512, buf=0x9f88890, extents=0x0) at protocol.c:241
#11 0x00000000004131c9 in protocol_recv_request_send_reply () at protocol.c:713
#12 0x00000000004081e7 in connection_worker (data=0x9ee9cb0) at connections.c:123
#13 0x00007fc4b9d293f9 in start_thread () from /lib64/libpthread.so.0
#14 0x00007fc4b9c55b23 in clone () from /lib64/libc.so.6
[v2v PATCH] libosinfo: remove auto-cleanup for OsinfoList
by Pino Toscano
Avoid using an auto-cleanup for OsinfoList, instead duplicating the
cleanup everywhere it is needed.
---
v2v/libosinfo-c.c | 18 +++++-------------
1 file changed, 5 insertions(+), 13 deletions(-)
diff --git a/v2v/libosinfo-c.c b/v2v/libosinfo-c.c
index 322e7d3d..75c2fae4 100644
--- a/v2v/libosinfo-c.c
+++ b/v2v/libosinfo-c.c
@@ -49,17 +49,6 @@
#if !IS_LIBOSINFO_VERSION(1, 8, 0)
G_DEFINE_AUTOPTR_CLEANUP_FUNC(OsinfoFilter, g_object_unref)
G_DEFINE_AUTOPTR_CLEANUP_FUNC(OsinfoLoader, g_object_unref)
-/*
- * Because of a bug in OsinfoList in libosinfo 1.7.0 (fixed in 1.8.0),
- * and a glib auto-cleanup addition for Module classes in 2.63.3,
- * avoid declaring this when:
- * - libosinfo is >= 1.7.0 and < 1.8.0
- * - glib is >= 2.63.3
- * (the 1.8.0 check is not done, as already covered by the check above)
- */
-#if !IS_LIBOSINFO_VERSION(1, 7, 0) || !GLIB_CHECK_VERSION(2, 63, 3)
-G_DEFINE_AUTOPTR_CLEANUP_FUNC(OsinfoList, g_object_unref)
-#endif
G_DEFINE_AUTOPTR_CLEANUP_FUNC(OsinfoOsList, g_object_unref)
#endif
@@ -157,7 +146,7 @@ v2v_osinfo_os_find_os_by_short_id (value dbv, value osv)
CAMLlocal1 (rv);
g_autoptr(OsinfoFilter) filter = NULL;
g_autoptr(OsinfoOsList) os_list = NULL;
- g_autoptr(OsinfoList) list = NULL;
+ OsinfoList *list;
OsinfoOs *os;
os_list = osinfo_db_get_os_list (OsinfoDb_t_val (dbv));
@@ -165,11 +154,14 @@ v2v_osinfo_os_find_os_by_short_id (value dbv, value osv)
osinfo_filter_add_constraint (filter, OSINFO_PRODUCT_PROP_SHORT_ID, String_val (osv));
list = osinfo_list_new_filtered (OSINFO_LIST(os_list), filter);
- if (osinfo_list_get_length (list) == 0)
+ if (osinfo_list_get_length (list) == 0) {
+ g_object_unref (list);
caml_raise_not_found ();
+ }
os = OSINFO_OS(osinfo_list_get_nth (list, 0));
rv = Val_OsinfoOs_t (dbv, os);
+ g_object_unref (list);
CAMLreturn (rv);
}
--
2.26.2
4 years, 3 months