Acquire the per-handle lock on entry to each public API function.
The lock is released by a cleanup handler when the function returns,
so we only need to place the ACQUIRE_LOCK_FOR_CURRENT_SCOPE macro at
the top of each function (a standalone sketch of this pattern
follows the '---' marker below).
Note that this means we require __attribute__((cleanup)).  On
platforms where this is not supported the lock is never released, so
the code will probably hang whenever a libguestfs function is called.
The only definitive list of public APIs is found indirectly in the
generator (in generator/c.ml : globals).
---
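[Not part of the patch] For reviewers unfamiliar with the idiom, here is a
minimal, self-contained sketch of the __attribute__((cleanup)) scope-lock
pattern that ACQUIRE_LOCK_FOR_CURRENT_SCOPE relies on.  It uses a plain
pthreads recursive mutex in place of gnulib's gl_recursive_lock_t, and the
names cleanup_unlock and some_api_function are illustrative only.

  /* Compile with: cc -pthread sketch.c */
  #define _GNU_SOURCE
  #include <stdio.h>
  #include <pthread.h>

  static pthread_mutex_t lock;

  /* The cleanup handler receives the ADDRESS of the annotated variable,
   * hence the double indirection -- the same reason
   * guestfs_int_cleanup_gl_recursive_lock_unlock casts through
   * gl_recursive_lock_t **.
   */
  static void
  cleanup_unlock (void *ptr)
  {
    pthread_mutex_t *l = * (pthread_mutex_t **) ptr;
    pthread_mutex_unlock (l);
  }

  /* Lock now; the unlock runs when _lock goes out of scope, i.e. on
   * every return path from the enclosing function.
   */
  #define ACQUIRE_LOCK_FOR_CURRENT_SCOPE()                              \
    __attribute__((cleanup(cleanup_unlock)))                            \
      pthread_mutex_t *_lock = &lock;                                   \
    pthread_mutex_lock (_lock)

  static int
  some_api_function (void)
  {
    ACQUIRE_LOCK_FOR_CURRENT_SCOPE ();
    /* ... critical section ... */
    return 42;
  }

  int
  main (void)
  {
    pthread_mutexattr_t attr;

    pthread_mutexattr_init (&attr);
    pthread_mutexattr_settype (&attr, PTHREAD_MUTEX_RECURSIVE);
    pthread_mutex_init (&lock, &attr);

    printf ("%d\n", some_api_function ());

    pthread_mutex_destroy (&lock);
    return 0;
  }

The real macro (added to lib/guestfs-internal.h below) differs only in
taking the handle as an argument and locking g->lock through gnulib's
gl_recursive_lock_* wrappers.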
common/utils/cleanup.c                   | 10 +++++++++-
common/utils/guestfs-internal-frontend.h |  4 ++++
generator/c.ml                           |  4 ++++
lib/errors.c                             |  8 ++++++++
lib/events.c                             |  8 ++++++++
lib/guestfs-internal.h                   |  8 ++++++++
lib/handle.c                             | 17 ++++++++++++++++-
lib/private-data.c                       |  7 +++++++
8 files changed, 64 insertions(+), 2 deletions(-)
diff --git a/common/utils/cleanup.c b/common/utils/cleanup.c
index 6c4558c39..b0b0362fd 100644
--- a/common/utils/cleanup.c
+++ b/common/utils/cleanup.c
@@ -1,5 +1,5 @@
/* libguestfs
- * Copyright (C) 2013 Red Hat Inc.
+ * Copyright (C) 2013-2017 Red Hat Inc.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
@@ -68,6 +68,7 @@
#include <libxml/xpath.h>
#include <libxml/xmlwriter.h>
+#include "glthread/lock.h"
#include "hash.h"
#include "guestfs.h"
@@ -185,3 +186,10 @@ guestfs_int_cleanup_pclose (void *ptr)
if (f)
pclose (f);
}
+
+void
+guestfs_int_cleanup_gl_recursive_lock_unlock (void *ptr)
+{
+ gl_recursive_lock_t *lockp = * (gl_recursive_lock_t **) ptr;
+ gl_recursive_lock_unlock (*lockp);
+}
diff --git a/common/utils/guestfs-internal-frontend.h b/common/utils/guestfs-internal-frontend.h
index e48f4eb49..0cdceafcc 100644
--- a/common/utils/guestfs-internal-frontend.h
+++ b/common/utils/guestfs-internal-frontend.h
@@ -63,6 +63,8 @@
__attribute__((cleanup(guestfs_int_cleanup_xmlXPathFreeObject)))
#define CLEANUP_FCLOSE __attribute__((cleanup(guestfs_int_cleanup_fclose)))
#define CLEANUP_PCLOSE __attribute__((cleanup(guestfs_int_cleanup_pclose)))
+#define CLEANUP_GL_RECURSIVE_LOCK_UNLOCK \
+ __attribute__((cleanup(guestfs_int_cleanup_gl_recursive_lock_unlock)))
#else
#define CLEANUP_FREE
#define CLEANUP_FREE_STRING_LIST
@@ -77,6 +79,7 @@
#define CLEANUP_XMLXPATHFREEOBJECT
#define CLEANUP_FCLOSE
#define CLEANUP_PCLOSE
+/* XXX no safe equivalent to CLEANUP_GL_RECURSIVE_LOCK_UNLOCK */
#endif
/* utils.c */
@@ -130,6 +133,7 @@ extern void guestfs_int_cleanup_xmlXPathFreeContext (void *ptr);
extern void guestfs_int_cleanup_xmlXPathFreeObject (void *ptr);
extern void guestfs_int_cleanup_fclose (void *ptr);
extern void guestfs_int_cleanup_pclose (void *ptr);
+extern void guestfs_int_cleanup_gl_recursive_lock_unlock (void *ptr);
/* These are in a separate header so the header can be generated.
* Don't include the following file directly:
diff --git a/generator/c.ml b/generator/c.ml
index 1f099a221..dd5f99559 100644
--- a/generator/c.ml
+++ b/generator/c.ml
@@ -1693,6 +1693,7 @@ and generate_client_actions actions () =
~dll_public:true
c_name style;
pr "{\n";
+ pr " ACQUIRE_LOCK_FOR_CURRENT_SCOPE (g);\n";
handle_null_optargs optargs c_name;
@@ -1779,6 +1780,7 @@ and generate_client_actions actions () =
c_name style;
pr "{\n";
+ pr " ACQUIRE_LOCK_FOR_CURRENT_SCOPE (g);\n";
handle_null_optargs optargs c_name;
@@ -2124,6 +2126,7 @@ and generate_client_actions_variants () =
~handle:"g" ~prefix:"guestfs_" ~suffix:"_va"
~optarg_proto:VA
c_name style;
pr "{\n";
+ pr " ACQUIRE_LOCK_FOR_CURRENT_SCOPE (g);\n";
pr " struct guestfs_%s_argv optargs_s;\n" c_name;
pr " struct guestfs_%s_argv *optargs = &optargs_s;\n" c_name;
pr " int i;\n";
@@ -2181,6 +2184,7 @@ and generate_client_actions_variants () =
~handle:"g" ~prefix:"guestfs_"
name (ret, args, []);
pr "{\n";
+ pr " ACQUIRE_LOCK_FOR_CURRENT_SCOPE (g);\n";
pr " struct guestfs_%s_opts_argv optargs_s = { .bitmask = 0 };\n" name;
pr " struct guestfs_%s_opts_argv *optargs = &optargs_s;\n" name;
pr "\n";
diff --git a/lib/errors.c b/lib/errors.c
index ace6a89cf..def1d3c89 100644
--- a/lib/errors.c
+++ b/lib/errors.c
@@ -54,12 +54,14 @@
const char *
guestfs_last_error (guestfs_h *g)
{
+ ACQUIRE_LOCK_FOR_CURRENT_SCOPE (g);
return g->last_error;
}
int
guestfs_last_errno (guestfs_h *g)
{
+ ACQUIRE_LOCK_FOR_CURRENT_SCOPE (g);
return g->last_errnum;
}
@@ -217,12 +219,14 @@ guestfs_int_perrorf (guestfs_h *g, const char *fs, ...)
void
guestfs_set_out_of_memory_handler (guestfs_h *g, guestfs_abort_cb cb)
{
+ ACQUIRE_LOCK_FOR_CURRENT_SCOPE (g);
g->abort_cb = cb;
}
guestfs_abort_cb
guestfs_get_out_of_memory_handler (guestfs_h *g)
{
+ ACQUIRE_LOCK_FOR_CURRENT_SCOPE (g);
return g->abort_cb;
}
@@ -230,6 +234,7 @@ void
guestfs_set_error_handler (guestfs_h *g,
guestfs_error_handler_cb cb, void *data)
{
+ ACQUIRE_LOCK_FOR_CURRENT_SCOPE (g);
g->error_cb = cb;
g->error_cb_data = data;
}
@@ -237,6 +242,7 @@ guestfs_set_error_handler (guestfs_h *g,
guestfs_error_handler_cb
guestfs_get_error_handler (guestfs_h *g, void **data_rtn)
{
+ ACQUIRE_LOCK_FOR_CURRENT_SCOPE (g);
if (data_rtn) *data_rtn = g->error_cb_data;
return g->error_cb;
}
@@ -245,6 +251,7 @@ void
guestfs_push_error_handler (guestfs_h *g,
guestfs_error_handler_cb cb, void *data)
{
+ ACQUIRE_LOCK_FOR_CURRENT_SCOPE (g);
struct error_cb_stack *old_stack;
old_stack = g->error_cb_stack;
@@ -259,6 +266,7 @@ guestfs_push_error_handler (guestfs_h *g,
void
guestfs_pop_error_handler (guestfs_h *g)
{
+ ACQUIRE_LOCK_FOR_CURRENT_SCOPE (g);
struct error_cb_stack *next_stack;
if (g->error_cb_stack) {
diff --git a/lib/events.c b/lib/events.c
index 1bddd7611..8005b1cc8 100644
--- a/lib/events.c
+++ b/lib/events.c
@@ -35,6 +35,7 @@ guestfs_set_event_callback (guestfs_h *g,
int flags,
void *opaque)
{
+ ACQUIRE_LOCK_FOR_CURRENT_SCOPE (g);
int event_handle;
if (flags != 0) {
@@ -69,6 +70,8 @@ guestfs_set_event_callback (guestfs_h *g,
void
guestfs_delete_event_callback (guestfs_h *g, int event_handle)
{
+ ACQUIRE_LOCK_FOR_CURRENT_SCOPE (g);
+
if (event_handle < 0 || event_handle >= (int) g->nr_events)
return;
@@ -296,6 +299,7 @@ void
guestfs_set_log_message_callback (guestfs_h *g,
guestfs_log_message_cb cb, void *opaque)
{
+ ACQUIRE_LOCK_FOR_CURRENT_SCOPE (g);
replace_old_style_event_callback (g, log_message_callback_wrapper,
GUESTFS_EVENT_APPLIANCE,
opaque, cb);
@@ -318,6 +322,7 @@ void
guestfs_set_subprocess_quit_callback (guestfs_h *g,
guestfs_subprocess_quit_cb cb, void *opaque)
{
+ ACQUIRE_LOCK_FOR_CURRENT_SCOPE (g);
replace_old_style_event_callback (g, subprocess_quit_callback_wrapper,
GUESTFS_EVENT_SUBPROCESS_QUIT,
opaque, cb);
@@ -340,6 +345,7 @@ void
guestfs_set_launch_done_callback (guestfs_h *g,
guestfs_launch_done_cb cb, void *opaque)
{
+ ACQUIRE_LOCK_FOR_CURRENT_SCOPE (g);
replace_old_style_event_callback (g, launch_done_callback_wrapper,
GUESTFS_EVENT_LAUNCH_DONE,
opaque, cb);
@@ -362,6 +368,7 @@ void
guestfs_set_close_callback (guestfs_h *g,
guestfs_close_cb cb, void *opaque)
{
+ ACQUIRE_LOCK_FOR_CURRENT_SCOPE (g);
replace_old_style_event_callback (g, close_callback_wrapper,
GUESTFS_EVENT_CLOSE,
opaque, cb);
@@ -385,6 +392,7 @@ void
guestfs_set_progress_callback (guestfs_h *g,
guestfs_progress_cb cb, void *opaque)
{
+ ACQUIRE_LOCK_FOR_CURRENT_SCOPE (g);
replace_old_style_event_callback (g, progress_callback_wrapper,
GUESTFS_EVENT_PROGRESS,
opaque, cb);
diff --git a/lib/guestfs-internal.h b/lib/guestfs-internal.h
index c4a14e962..37878b88d 100644
--- a/lib/guestfs-internal.h
+++ b/lib/guestfs-internal.h
@@ -78,6 +78,14 @@
#define TRACE4(name, arg1, arg2, arg3, arg4)
#endif
+/* Acquire and release the per-handle lock.  Note the release happens
+ * in an __attribute__((cleanup)) handler, so the lock is dropped
+ * automatically on every exit path from the scope.
+ */
+#define ACQUIRE_LOCK_FOR_CURRENT_SCOPE(g) \
+ CLEANUP_GL_RECURSIVE_LOCK_UNLOCK gl_recursive_lock_t *_lock = &(g)->lock; \
+ gl_recursive_lock_lock (*_lock)
+
/* Default and minimum appliance memory size. */
/* Needs to be larger on ppc64 because of the larger page size (64K).
diff --git a/lib/handle.c b/lib/handle.c
index 09c29ed84..183f247fb 100644
--- a/lib/handle.c
+++ b/lib/handle.c
@@ -322,6 +322,7 @@ guestfs_close (guestfs_h *g)
{
struct hv_param *hp, *hp_next;
guestfs_h **gg;
+ int r;
if (g->state == NO_HANDLE) {
/* Not safe to call ANY callbacks here, so ... */
@@ -402,7 +403,21 @@ guestfs_close (guestfs_h *g)
free (g->backend_data);
guestfs_int_free_string_list (g->backend_settings);
free (g->append);
- gl_recursive_lock_destroy (g->lock);
+ r = glthread_recursive_lock_destroy (&g->lock);
+ if (r != 0) {
+ /* If pthread_mutex_destroy returns EBUSY, it means the lock is
+ * still held somewhere, which is a programming error if the main
+ * program is using threads.
+ */
+ errno = r;
+ perror ("guestfs_close: g->lock");
+ /* While we're debugging locks in libguestfs I want this to fail
+ * noisily.  Remove this later, since there are valid situations
+ * where it can fail, such as the program exiting in the middle of
+ * a libguestfs operation.
+ */
+ abort ();
+ }
free (g);
}
diff --git a/lib/private-data.c b/lib/private-data.c
index f448894b4..65b902260 100644
--- a/lib/private-data.c
+++ b/lib/private-data.c
@@ -81,6 +81,7 @@ freer (void *x)
void
guestfs_set_private (guestfs_h *g, const char *key, void *data)
{
+ ACQUIRE_LOCK_FOR_CURRENT_SCOPE (g);
struct pda_entry *new_entry, *old_entry, *entry;
if (g->pda == NULL) {
@@ -105,6 +106,8 @@ guestfs_set_private (guestfs_h *g, const char *key, void *data)
void *
guestfs_get_private (guestfs_h *g, const char *key)
{
+ ACQUIRE_LOCK_FOR_CURRENT_SCOPE (g);
+
if (g->pda == NULL)
return NULL; /* no keys have been set */
@@ -120,6 +123,8 @@ guestfs_get_private (guestfs_h *g, const char *key)
void *
guestfs_first_private (guestfs_h *g, const char **key_rtn)
{
+ ACQUIRE_LOCK_FOR_CURRENT_SCOPE (g);
+
if (g->pda == NULL)
return NULL;
@@ -139,6 +144,8 @@ guestfs_first_private (guestfs_h *g, const char **key_rtn)
void *
guestfs_next_private (guestfs_h *g, const char **key_rtn)
{
+ ACQUIRE_LOCK_FOR_CURRENT_SCOPE (g);
+
if (g->pda == NULL)
return NULL;
--
2.13.0