[libnbd PATCH] python: Allow control over copy/share of nbd.Buffer
by Eric Blake
Add new methods nbd.Buffer.{to,from}_buffer that avoid the copying
present in the existing nbd.Buffer.{to,from}_bytearray. The reduction
in copies is no longer as important as it once was, now that
aio_p{read,write} have been converted to take buffers directly.
However, there is still one (marginal) benefit: if you use
h.set_pread_initialize(False) and create a new buffer for every I/O
(rather than reusing a buffer pool), handing nbd.Buffer(n) to
h.aio_pread and then calling buf.to_buffer() is slightly faster than
handing a bytearray(n) directly to h.aio_pread, because we are able to
skip the step of pre-zeroing the buffer.
---
Instead of adding a copy=True/False parameter to the existing API as I
had previously suggested, I decided that a new API made more sense.
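To illustrate the new API, the sharing semantics work roughly like this
(a minimal sketch, assuming nbdkit is installed so that connect_command
can spawn a throwaway server; the polling loops are just the simplest
way to wait for an AIO command to complete):

    import nbd

    h = nbd.NBD()
    h.connect_command(["nbdkit", "-s", "--exit-with-parent", "memory", "1M"])

    # Write: nbd.Buffer.from_buffer shares ba instead of copying it.
    ba = bytearray(b"A" * 512)
    wbuf = nbd.Buffer.from_buffer(ba)
    cookie = h.aio_pwrite(wbuf, 0)
    while not h.aio_command_completed(cookie):
        h.poll(-1)

    # Read: the reply lands directly in rba through the shared buffer.
    rba = bytearray(512)
    rbuf = nbd.Buffer.from_buffer(rba)
    cookie = h.aio_pread(rbuf, 0)
    while not h.aio_command_completed(cookie):
        h.poll(-1)
    assert rba == ba
    assert rbuf.to_buffer() is rba  # shared view, not a copy

    h.shutdown()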
generator/Python.ml | 39 ++++++++++++++++++++++----
python/t/585-aio-buffer-share.py | 47 ++++++++++++++++++++++++++++++++
2 files changed, 80 insertions(+), 6 deletions(-)
create mode 100644 python/t/585-aio-buffer-share.py
diff --git a/generator/Python.ml b/generator/Python.ml
index 03a7e6b..cb89ccd 100644
--- a/generator/Python.ml
+++ b/generator/Python.ml
@@ -735,6 +735,21 @@ let
'''Allocate an uninitialized AIO buffer used for nbd.aio_pread.'''
self._o = libnbdmod.alloc_aio_buffer(len)
+ @classmethod
+ def from_buffer(cls, buf):
+ '''Create an AIO buffer that shares an existing buffer-like object.
+
+ Because the buffer is shared, changes to the original are visible
+ to nbd.aio_pwrite, and changes in nbd.aio_pread are visible to the
+ original.
+ '''
+ self = cls(0)
+ # Ensure that buf is already buffer-like
+ with memoryview(buf):
+ self._o = buf
+ self._init = True
+ return self
+
@classmethod
def from_bytearray(cls, ba):
'''Create an AIO buffer from a bytearray or other buffer-like object.
@@ -743,16 +758,28 @@ let
bytearray constructor. Otherwise, ba is copied. Either way, the
resulting AIO buffer is independent from the original.
'''
- self = cls(0)
- self._o = bytearray(ba)
- self._init = True
- return self
+ return cls.from_buffer(bytearray(ba))
- def to_bytearray(self):
- '''Copy an AIO buffer into a bytearray.'''
+ def to_buffer(self):
+ '''Return a shared view of the AIO buffer contents.
+
+ This exposes the underlying buffer; changes to the buffer are
+ visible to nbd.aio_pwrite, and changes from nbd.aio_pread are
+ visible in the buffer.
+ '''
if not hasattr(self, '_init'):
self._o = bytearray(len(self._o))
self._init = True
+ return self._o
+
+ def to_bytearray(self):
+ '''Copy an AIO buffer into a bytearray.
+
+ This copies the contents of an AIO buffer to a new bytearray, which
+ remains independent from the original.
+ '''
+ if not hasattr(self, '_init'):
+ return bytearray(len(self._o))
return bytearray(self._o)
def size(self):
diff --git a/python/t/585-aio-buffer-share.py b/python/t/585-aio-buffer-share.py
new file mode 100644
index 0000000..21752b7
--- /dev/null
+++ b/python/t/585-aio-buffer-share.py
@@ -0,0 +1,47 @@
+# libnbd Python bindings
+# Copyright (C) 2010-2022 Red Hat Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+# This tests the nbd.Buffer copy/share methods. We can do this
+# entirely without using the NBD protocol.
+
+import nbd
+
+# Use of to/from_bytearray always creates copies
+ba = bytearray(512)
+buf = nbd.Buffer.from_bytearray(ba)
+ba.append(1)
+assert len(ba) == 513
+assert len(buf) == 512
+assert buf.is_zero()
+assert buf.to_bytearray() is not ba
+
+# Use of to/from_buffer shares the same buffer
+buf = nbd.Buffer.from_buffer(ba)
+assert not buf.is_zero()
+assert len(buf) == 513
+ba.pop()
+assert buf.is_zero()
+assert len(buf) == 512
+assert buf.to_buffer() is ba
+
+# Even though nbd.Buffer(n) starts uninitialized, we sanitize before exporting.
+# This test cheats and examines the private member ._init
+buf = nbd.Buffer(512)
+assert buf.is_zero()
+assert not hasattr(buf, '_init')
+assert buf.to_buffer() == bytearray(512)
+assert hasattr(buf, '_init')
--
2.36.1
Libguestfs Rsync-in with delta transfer enabled
by Ajay Nemade
Hi,
I am exploring libguestfs rsync-in to copy incremental changes into a
qcow2 image. I am following the rsync example
<https://rwmj.wordpress.com/2013/04/22/using-rsync-with-libguestfs/> to use
rsync-in. As in the example, I am running an rsync daemon, and rsync-in is
working for me. However, if the changes are within an existing file, the
complete file is transferred rather than just the delta.
For example, if my file is 100MiB, the first qcow2 is approximately
105MiB. I then append 20MiB of data and create a second qcow2 using the
first qcow2 as a backing file. The second qcow2 should then be roughly
22MiB, but qemu-img info shows 125MiB.
From the above observation, I think the delta-transfer algorithm is not
being used during rsync-in.
Out of curiosity, I tried adding the --no-whole-file argument in this
function
<https://github.com/libguestfs/libguestfs/blob/master/daemon/rsync.c#L40>,
but it still does not help.
Can anyone point me to how to enable the delta-transfer algorithm? Is it
already supported, or do I need to make changes in the code? If so,
please let me know where the changes should go.
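For reference, the call can be driven from the Python bindings roughly
like this (a minimal sketch; the image name, guest device, rsync daemon
URL and paths are placeholders for my setup):

    import guestfs

    g = guestfs.GuestFS(python_return_dict=True)
    g.add_drive_opts("second.qcow2", format="qcow2")
    # rsync-in needs the appliance network up to reach the rsync daemon.
    g.set_network(True)
    g.launch()
    g.mount("/dev/sda1", "/")
    # Pull files from the rsync daemon into the guest filesystem.
    g.rsync_in("rsync://user@host/share/dir/", "/dir/", archive=True)
    g.shutdown()
    g.close()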
Thanks,
Ajay
[v2v PATCH v2] RHV outputs: limit copied disk count to 23
by Laszlo Ersek
We currently support virtio-blk (commonly) or IDE (unusually) for exposing
disks to the converted guest; refer to "guestcaps.gcaps_block_bus" in
"lib/create_ovf.ml". When using virtio-blk (i.e., in the common case), RHV
can deal with at most 23 disks, as it plugs each virtio-blk device in a
separate slot on the PCI(e) root bus; and the other slots are reserved for
various purposes. When a domain has too many disks, the problem only
becomes apparent once the copying finishes and an import is attempted.
Modify the RHV outputs to fail relatively early when a domain has more
than 23 disks that need to be copied.
Notes:
- With IDE, the theoretical limit may even be as low as 4. However, in the
"Output_module.setup" function, we don't have access to
"guestcaps.gcaps_block_bus", and in practice the IDE limitation has not
caused surprises. So for now stick with 23, assuming virtio-blk.
Modifying the "Output_module.setup" parameter list just for this seems
overkill.
- We could move the new check to an even earlier step, namely
"Output_module.parse_options", because the v2v directory already exists
(and has been populated with input sockets) at that time.
However, even discounting the fact that "parse_options" is not a good
name for including this kind of step, "parse_options" does not have
access to the v2v directory name, and modifying the signature just for
this is (again) overkill.
- By adding the check to "Output_module.setup", we waste *some* effort
(namely, the conversion occurs between "parse_options" and "setup"),
but: (a) the "rhv-disk-uuid" count check (against the disk count) is
already being done in the rhv-upload module's "setup" function, (b) in
practice the slowest step ought to be the copying, and placing the new
check in "setup" is early enough to prevent that.
Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=2051564
Signed-off-by: Laszlo Ersek <lersek(a)redhat.com>
---
Notes:
v2:
- rename "bail_if_disk_count_gt" to "error_if_disk_count_gt" [Rich]
v1:
This patch can be tested easily by replacing 23 with 0 in all three
affected output modules -- then the following test cases all fail in
"make check":
FAIL: test-v2v-o-rhv.sh
FAIL: test-v2v-o-vdsm-options.sh
FAIL: test-v2v-o-rhv-upload.sh
> [ 11.3] Setting up the destination: -o rhv
> virt-v2v: error: this output module doesn't support copying more than 0 disks
> [ 9.0] Setting up the destination: -o vdsm
> virt-v2v: error: this output module doesn't support copying more than 0 disks
> [ 11.2] Setting up the destination: -o rhv-upload -oc https://example.com/ovirt-engine/api -os Storage
> virt-v2v: error: this output module doesn't support copying more than 0 disks
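The check itself is tiny. In Python terms, the OCaml added to output.ml
below amounts to the following (an illustrative transliteration, not part
of the patch; note that the input sockets are 0-indexed, so the existence
of "in23" implies at least 24 disks):

    import os
    import sys

    def error_if_disk_count_gt(v2vdir, n):
        # Sockets are named in0, in1, ...; if in<n> exists, the domain
        # has more than n disks that need to be copied.
        if os.path.exists(os.path.join(v2vdir, "in%d" % n)):
            sys.exit("error: this output module doesn't support "
                     "copying more than %d disks" % n)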
output/output.mli | 7 +++++++
output/output.ml | 5 +++++
output/output_rhv.ml | 1 +
output/output_rhv_upload.ml | 1 +
output/output_vdsm.ml | 1 +
5 files changed, 15 insertions(+)
diff --git a/output/output.mli b/output/output.mli
index 533a0c51d31c..8d3d6865945f 100644
--- a/output/output.mli
+++ b/output/output.mli
@@ -76,6 +76,13 @@ val get_disks : string -> (int * int64) list
(** Examines the v2v directory and opens each input socket (in0 etc),
returning a list of input disk index and size. *)
+val error_if_disk_count_gt : string -> int -> unit
+(** This function lets an output module enforce a maximum disk count.
+ [error_if_disk_count_gt dir n] checks whether the domain has more than [n]
+ disks that need to be copied, by examining the existence of input NBD socket
+ "in[n]" in the v2v directory [dir]. If the socket exists, [error] is
+ called. *)
+
val output_to_local_file : ?changeuid:((unit -> unit) -> unit) ->
Types.output_allocation ->
string -> string -> int64 -> string ->
diff --git a/output/output.ml b/output/output.ml
index 10e685c46926..5c6670b99c69 100644
--- a/output/output.ml
+++ b/output/output.ml
@@ -64,6 +64,11 @@ let get_disks dir =
in
loop [] 0
+let error_if_disk_count_gt dir n =
+ let socket = sprintf "%s/in%d" dir n in
+ if Sys.file_exists socket then
+ error (f_"this output module doesn't support copying more than %d disks") n
+
let output_to_local_file ?(changeuid = fun f -> f ())
output_alloc output_format filename size socket =
(* Check nbdkit is installed and has the required plugin. *)
diff --git a/output/output_rhv.ml b/output/output_rhv.ml
index 119207fdc065..8571e07b0cc3 100644
--- a/output/output_rhv.ml
+++ b/output/output_rhv.ml
@@ -56,6 +56,7 @@ module RHV = struct
(options.output_alloc, options.output_format, output_name, output_storage)
let rec setup dir options source =
+ error_if_disk_count_gt dir 23;
let disks = get_disks dir in
let output_alloc, output_format, output_name, output_storage = options in
diff --git a/output/output_rhv_upload.ml b/output/output_rhv_upload.ml
index 828996b36261..f2ced4f4e98e 100644
--- a/output/output_rhv_upload.ml
+++ b/output/output_rhv_upload.ml
@@ -133,6 +133,7 @@ after their uploads (if you do, you must supply one for each disk):
else PCRE.matches (Lazy.force rex_uuid) uuid
let rec setup dir options source =
+ error_if_disk_count_gt dir 23;
let disks = get_disks dir in
let output_conn, output_format,
output_password, output_name, output_storage,
diff --git a/output/output_vdsm.ml b/output/output_vdsm.ml
index a1e8c2465810..23d1b9cd25b4 100644
--- a/output/output_vdsm.ml
+++ b/output/output_vdsm.ml
@@ -119,6 +119,7 @@ For each disk you must supply one of each of these options:
compat, ovf_flavour)
let setup dir options source =
+ error_if_disk_count_gt dir 23;
let disks = get_disks dir in
let output_alloc, output_format,
output_name, output_storage,
--
2.19.1.3.g30247aa5d201
[v2v PATCH] RHV outputs: limit copied disk count to 23
by Laszlo Ersek
We currently support virtio-blk (commonly) or IDE (unusually) for exposing
disks to the converted guest; refer to "guestcaps.gcaps_block_bus" in
"lib/create_ovf.ml". When using virtio-blk (i.e., in the common case), RHV
can deal with at most 23 disks, as it plugs each virtio-blk device in a
separate slot on the PCI(e) root bus; and the other slots are reserved for
various purposes. When a domain has too many disks, the problem only
becomes apparent once the copying finishes and an import is attempted.
Modify the RHV outputs to fail relatively early when a domain has more
than 23 disks that need to be copied.
Notes:
- With IDE, the theoretical limit may even be as low as 4. However, in the
"Output_module.setup" function, we don't have access to
"guestcaps.gcaps_block_bus", and in practice the IDE limitation has not
caused surprises. So for now stick with 23, assuming virtio-blk.
Modifying the "Output_module.setup" parameter list just for this seems
overkill.
- We could move the new check to an even earlier step, namely
"Output_module.parse_options", because the v2v directory already exists
(and has been populated with input sockets) at that time.
However, even discounting the fact that "parse_options" is not a good
name for including this kind of step, "parse_options" does not have
access to the v2v directory name, and modifying the signature just for
this is (again) overkill.
- By adding the check to "Output_module.setup", we waste *some* effort
(namely, the conversion occurs between "parse_options" and "setup"),
but: (a) the "rhv-disk-uuid" count check (against the disk count) is
already being done in the rhv-upload module's "setup" function, (b) in
practice the slowest step ought to be the copying, and placing the new
check in "setup" is early enough to prevent that.
Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=2051564
Signed-off-by: Laszlo Ersek <lersek(a)redhat.com>
---
Notes:
This patch can be tested easily by replacing 23 with 0 in all three
affected output modules -- then the following test cases all fail in
"make check":
FAIL: test-v2v-o-rhv.sh
FAIL: test-v2v-o-vdsm-options.sh
FAIL: test-v2v-o-rhv-upload.sh
> [ 11.3] Setting up the destination: -o rhv
> virt-v2v: error: this output module doesn't support copying more than 0 disks
> [ 9.0] Setting up the destination: -o vdsm
> virt-v2v: error: this output module doesn't support copying more than 0 disks
> [ 11.2] Setting up the destination: -o rhv-upload -oc https://example.com/ovirt-engine/api -os Storage
> virt-v2v: error: this output module doesn't support copying more than 0 disks
output/output.mli | 7 +++++++
output/output.ml | 5 +++++
output/output_rhv.ml | 1 +
output/output_rhv_upload.ml | 1 +
output/output_vdsm.ml | 1 +
5 files changed, 15 insertions(+)
diff --git a/output/output.mli b/output/output.mli
index 533a0c51d31c..2dec8ccdc690 100644
--- a/output/output.mli
+++ b/output/output.mli
@@ -76,6 +76,13 @@ val get_disks : string -> (int * int64) list
(** Examines the v2v directory and opens each input socket (in0 etc),
returning a list of input disk index and size. *)
+val bail_if_disk_count_gt : string -> int -> unit
+(** This function lets an output module enforce a maximum disk count.
+ [bail_if_disk_count_gt dir n] checks whether the domain has more than [n]
+ disks that need to be copied, by examining the existence of input NBD socket
+ "in[n]" in the v2v directory [dir]. If the socket exists, [error] is
+ called. *)
+
val output_to_local_file : ?changeuid:((unit -> unit) -> unit) ->
Types.output_allocation ->
string -> string -> int64 -> string ->
diff --git a/output/output.ml b/output/output.ml
index 10e685c46926..0c4b437997d4 100644
--- a/output/output.ml
+++ b/output/output.ml
@@ -64,6 +64,11 @@ let get_disks dir =
in
loop [] 0
+let bail_if_disk_count_gt dir n =
+ let socket = sprintf "%s/in%d" dir n in
+ if Sys.file_exists socket then
+ error (f_"this output module doesn't support copying more than %d disks") n
+
let output_to_local_file ?(changeuid = fun f -> f ())
output_alloc output_format filename size socket =
(* Check nbdkit is installed and has the required plugin. *)
diff --git a/output/output_rhv.ml b/output/output_rhv.ml
index 119207fdc065..a0c0be270755 100644
--- a/output/output_rhv.ml
+++ b/output/output_rhv.ml
@@ -56,6 +56,7 @@ module RHV = struct
(options.output_alloc, options.output_format, output_name, output_storage)
let rec setup dir options source =
+ bail_if_disk_count_gt dir 23;
let disks = get_disks dir in
let output_alloc, output_format, output_name, output_storage = options in
diff --git a/output/output_rhv_upload.ml b/output/output_rhv_upload.ml
index 828996b36261..6a9abf4eecdf 100644
--- a/output/output_rhv_upload.ml
+++ b/output/output_rhv_upload.ml
@@ -133,6 +133,7 @@ after their uploads (if you do, you must supply one for each disk):
else PCRE.matches (Lazy.force rex_uuid) uuid
let rec setup dir options source =
+ bail_if_disk_count_gt dir 23;
let disks = get_disks dir in
let output_conn, output_format,
output_password, output_name, output_storage,
diff --git a/output/output_vdsm.ml b/output/output_vdsm.ml
index a1e8c2465810..02a2b5817fbc 100644
--- a/output/output_vdsm.ml
+++ b/output/output_vdsm.ml
@@ -119,6 +119,7 @@ For each disk you must supply one of each of these options:
compat, ovf_flavour)
let setup dir options source =
+ bail_if_disk_count_gt dir 23;
let disks = get_disks dir in
let output_alloc, output_format,
output_name, output_storage,
--
2.19.1.3.g30247aa5d201
bug
by Marcos J Tavarez
marcos@marcos-MacBookPro:~$ cd /home/marcos/qemu_vms
marcos@marcos-MacBookPro:~/qemu_vms$ ls
2017-04-10-raspbian-jessie.img 2017-04-10-raspbian-jessie.zip
qemu-rpi-kernel-master.zip
marcos@marcos-MacBookPro:~/qemu_vms$ sudo mount -v -o offset=47185920 -t
ext4 ~/qemu_vms/2017-04-10-raspbian-jessie.img/mnt/raspbian
[sudo] password for marcos:
mount: /home/marcos/qemu_vms/2017-04-10-raspbian-jessie.img/mnt/raspbian:
can't find in /etc/fstab.
marcos@marcos-MacBookPro:~/qemu_vms$ virt-filesystems -a
2017-04-10-raspbian-jessie.img --all --long --uuid -h
libguestfs: error: /usr/bin/supermin exited with error status 1.
To see full error messages you may need to enable debugging.
Do:
export LIBGUESTFS_DEBUG=1 LIBGUESTFS_TRACE=1
and run the command again. For further information, read:
http://libguestfs.org/guestfs-faq.1.html#debugging-libguestfs
You can also run 'libguestfs-test-tool' and post the *complete* output
into a bug report or message to the libguestfs mailing list.
marcos@marcos-MacBookPro:~/qemu_vms$ libguestfs-test-tool
************************************************************
* IMPORTANT NOTICE
*
* When reporting bugs, include the COMPLETE, UNEDITED
* output below in your bug report.
*
************************************************************
PATH=/usr/lib/ccache:/home/linuxbrew/.linuxbrew/bin:/home/linuxbrew/.linuxbrew/sbin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr
/games:/usr/local/games:/snap/bin:/snap/bin
XDG_RUNTIME_DIR=/run/user/1000
SELinux: sh: 1: getenforce: not found
guestfs_get_append: (null)
guestfs_get_autosync: 1
guestfs_get_backend: direct
guestfs_get_backend_settings: []
guestfs_get_cachedir: /var/tmp
guestfs_get_hv: /usr/bin/qemu-system-x86_64
guestfs_get_memsize: 1280
guestfs_get_network: 0
guestfs_get_path: /usr/lib/x86_64-linux-gnu/guestfs
guestfs_get_pgroup: 0
guestfs_get_program: libguestfs-test-tool
guestfs_get_recovery_proc: 1
guestfs_get_smp: 1
guestfs_get_sockdir: /run/user/1000
guestfs_get_tmpdir: /tmp
guestfs_get_trace: 0
guestfs_get_verbose: 1
host_cpu: x86_64
Launching appliance, timeout set to 600 seconds.
libguestfs: launch: program=libguestfs-test-tool
libguestfs: launch: version=1.46.2
libguestfs: launch: backend registered: unix
libguestfs: launch: backend registered: uml
libguestfs: launch: backend registered: libvirt
libguestfs: launch: backend registered: direct
libguestfs: launch: backend=direct
libguestfs: launch: tmpdir=/tmp/libguestfsAKALOx
libguestfs: launch: umask=0002
libguestfs: launch: euid=1000
libguestfs: begin building supermin appliance
libguestfs: run supermin
libguestfs: command: run: /usr/bin/supermin
libguestfs: command: run: \ --build
libguestfs: command: run: \ --verbose
libguestfs: command: run: \ --if-newer
libguestfs: command: run: \ --lock /var/tmp/.guestfs-1000/lock
libguestfs: command: run: \ --copy-kernel
libguestfs: command: run: \ -f ext2
libguestfs: command: run: \ --host-cpu x86_64
libguestfs: command: run: \ /usr/lib/x86_64-linux-gnu/guestfs/supermin.d
libguestfs: command: run: \ -o /var/tmp/.guestfs-1000/appliance.d
supermin: version: 5.2.1
supermin: package handler: debian/dpkg
supermin: acquiring lock on /var/tmp/.guestfs-1000/lock
supermin: build: /usr/lib/x86_64-linux-gnu/guestfs/supermin.d
supermin: reading the supermin appliance
supermin: build: visiting
/usr/lib/x86_64-linux-gnu/guestfs/supermin.d/base.tar.gz type gzip base
image (tar)
supermin: build: visiting
/usr/lib/x86_64-linux-gnu/guestfs/supermin.d/daemon.tar.gz type gzip base
image (tar)
supermin: build: visiting
/usr/lib/x86_64-linux-gnu/guestfs/supermin.d/excludefiles type uncompressed
excludefiles
supermin: build: visiting
/usr/lib/x86_64-linux-gnu/guestfs/supermin.d/hostfiles type uncompressed
hostfiles
supermin: build: visiting
/usr/lib/x86_64-linux-gnu/guestfs/supermin.d/init.tar.gz type gzip base
image (tar)
supermin: build: visiting
/usr/lib/x86_64-linux-gnu/guestfs/supermin.d/packages type uncompressed
packages
supermin: build: visiting
/usr/lib/x86_64-linux-gnu/guestfs/supermin.d/packages-hfsplus type
uncompressed packages
supermin: build: visiting
/usr/lib/x86_64-linux-gnu/guestfs/supermin.d/packages-reiserfs type
uncompressed packages
supermin: build: visiting
/usr/lib/x86_64-linux-gnu/guestfs/supermin.d/packages-xfs type uncompressed
packages
supermin: build: visiting
/usr/lib/x86_64-linux-gnu/guestfs/supermin.d/udev-rules.tar.gz type gzip
base image (tar)
supermin: mapping package names to installed packages
supermin: resolving full list of package dependencies
supermin: build: 216 packages, including dependencies
supermin: build: 8426 files
supermin: build: 4965 files, after matching excludefiles
supermin: build: 4968 files, after adding hostfiles
supermin: build: 4965 files, after removing unreadable files
supermin: build: 4971 files, after munging
supermin: kernel: looking for kernel using environment variables ...
supermin: kernel: looking for kernels in /lib/modules/*/vmlinuz ...
supermin: kernel: looking for kernels in /boot ...
supermin: kernel: kernel version of /boot/vmlinuz-5.18.0-051800rc1-generic
= 5.18.0-051800rc1-generic (from filename)
supermin: kernel: picked modules path /lib/modules/5.18.0-051800rc1-generic
supermin: kernel: kernel version of /boot/vmlinuz-5.15.0-39-generic =
5.15.0-39-generic (from filename)
supermin: kernel: picked modules path /lib/modules/5.15.0-39-generic
supermin: kernel: kernel version of /boot/vmlinuz-5.15.0-37-generic =
5.15.0-37-generic (from filename)
supermin: kernel: picked modules path /lib/modules/5.15.0-37-generic
supermin: kernel: picked vmlinuz /boot/vmlinuz-5.18.0-051800rc1-generic
supermin: kernel: kernel_version 5.18.0-051800rc1-generic
supermin: kernel: modpath /lib/modules/5.18.0-051800rc1-generic
cp: cannot open '/boot/vmlinuz-5.18.0-051800rc1-generic' for reading:
Permission denied
supermin: cp -p '/boot/vmlinuz-5.18.0-051800rc1-generic'
'/var/tmp/.guestfs-1000/appliance.d.5fqk1kxl/kernel': command failed, see
earlier errors
libguestfs: error: /usr/bin/supermin exited with error status 1, see debug
messages above
libguestfs: closing guestfs handle 0x55781ba04360 (state 0)
libguestfs: command: run: rm
libguestfs: command: run: \ -rf /tmp/libguestfsAKALOx
marcos@marcos-MacBookPro:~/qemu_vms$
[PATCH] php: add arginfo to php bindings
by Geoff Amey
Starting with PHP8, arginfo is mandatory for PHP extensions. This patch
updates the generator for the PHP bindings to generate the arginfo
structures, using the Zend API macros. Only basic arginfo is added,
without full documentation of argument and return types, in order to
ensure compatibility with as many versions of PHP as possible.
---
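For context, this is roughly the stanza the generator now emits for a
hypothetical API guestfs_foo taking two required arguments plus one
optional argument (the required-argument count passed to
ZEND_BEGIN_ARG_INFO_EX is the argument count plus one, for the handle g):

    ZEND_BEGIN_ARG_INFO_EX(arginfo_foo, 0, 0, 3)
      ZEND_ARG_INFO(0, g)
      ZEND_ARG_INFO(0, path)
      ZEND_ARG_INFO(0, size)
      ZEND_ARG_INFO(0, readonly)
    ZEND_END_ARG_INFO()

and the matching function table entry becomes:

    PHP_FE (guestfs_foo, arginfo_foo)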
generator/php.ml | 37 ++++++++++++++++++++++++++++++++++---
1 file changed, 34 insertions(+), 3 deletions(-)
diff --git a/generator/php.ml b/generator/php.ml
index 5c7ef48e8..acdc7b877 100644
--- a/generator/php.ml
+++ b/generator/php.ml
@@ -130,6 +130,37 @@ typedef size_t guestfs_string_length;
typedef int guestfs_string_length;
#endif
+/* Declare argument info structures */
+ZEND_BEGIN_ARG_INFO_EX(arginfo_create, 0, 0, 0)
+ZEND_END_ARG_INFO()
+
+ZEND_BEGIN_ARG_INFO_EX(arginfo_last_error, 0, 0, 1)
+ ZEND_ARG_INFO(0, g)
+ZEND_END_ARG_INFO()
+
+";
+ List.iter (
+ fun { name = shortname; style = ret, args, optargs; } ->
+ let len = List.length args in
+ pr "ZEND_BEGIN_ARG_INFO_EX(arginfo_%s, 0, 0, %d)\n" shortname (len + 1);
+ pr " ZEND_ARG_INFO(0, g)\n";
+ List.iter (
+ function
+ | BufferIn n | Bool n | Int n | Int64 n | OptString n
+ | Pointer(_, n) | String (_, n) | StringList (_, n) ->
+ pr " ZEND_ARG_INFO(0, %s)\n" n
+ ) args;
+
+ List.iter (
+ function
+ | OBool n | OInt n | OInt64 n | OString n | OStringList n ->
+ pr " ZEND_ARG_INFO(0, %s)\n" n
+ ) optargs;
+ pr "ZEND_END_ARG_INFO()\n\n";
+ ) (actions |> external_functions |> sort);
+
+ pr "
+
/* Convert array to list of strings.
* http://marc.info/?l=pecl-dev&m=112205192100631&w=2
*/
@@ -204,12 +235,12 @@ PHP_MINIT_FUNCTION (guestfs_php)
}
static zend_function_entry guestfs_php_functions[] = {
- PHP_FE (guestfs_create, NULL)
- PHP_FE (guestfs_last_error, NULL)
+ PHP_FE (guestfs_create, arginfo_create)
+ PHP_FE (guestfs_last_error, arginfo_last_error)
";
List.iter (
- fun { name } -> pr " PHP_FE (guestfs_%s, NULL)\n" name
+ fun { name } -> pr " PHP_FE (guestfs_%s, arginfo_%s)\n" name name
) (actions |> external_functions |> sort);
pr " { NULL, NULL, NULL }
--
2.25.1