On 01/27/22 17:39, Richard W.M. Jones wrote:
On platforms with 64K page sizes (RHEL on aarch64 or ppc64le) we
have hit a problem in test-memory-allocator-malloc-mlock.sh which
appears to be caused by the following sequence:
1. The test requests a 2K mlocked buffer.
2. Sometimes the allocated buffer straddles a page boundary.
3. Linux expands the mlock request to cover both pages, i.e. the
   locked size becomes 2 * 64K = 128K (see the sketch after this
   list).
4. The default mlock limit (RLIMIT_MEMLOCK) on Linux is 64K, so the
   mlock request is rejected and the test fails.
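To illustrate step 3, here is a small self-contained sketch of the
rounding the kernel applies to an mlock range; the buffer address
and the hard-coded 64K page size are hypothetical values chosen to
show the straddling case:

/* Illustration only: how an mlock range is rounded outward to page
 * boundaries.  The buffer address and page size are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

int
main (void)
{
  const uintptr_t page_size = 64 * 1024; /* 64K page (aarch64/ppc64le) */
  const uintptr_t buf = 0xfc00;          /* 1K before a page boundary */
  const size_t len = 2048;               /* the 2K buffer from the test */

  /* The kernel rounds the start down and the end up to page
   * boundaries, so the locked range covers whole pages.
   */
  uintptr_t start = buf & ~(page_size - 1);
  uintptr_t end = (buf + len + page_size - 1) & ~(page_size - 1);

  /* Prints 131072 (= 2 * 64K), which exceeds a 64K RLIMIT_MEMLOCK. */
  printf ("mlock locks %zu bytes\n", (size_t) (end - start));
  return 0;
}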
Also, POSIX permits the implementation to require mlock parameters
to be page aligned (although Linux does not require this).
This commit attempts to fix both problems by ensuring the mlock
request is always page aligned, using the new
vector_reserve_page_aligned function.
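For reference, here is a minimal sketch of what a page-aligned
reserve can look like.  This is not the actual
vector_reserve_page_aligned implementation; the helper name and the
use of posix_memalign are assumptions for illustration:

/* Sketch only, assuming posix_memalign; the real
 * vector_reserve_page_aligned may be implemented differently.
 */
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

static int
reserve_page_aligned_sketch (void **ptr, size_t *cap, size_t extra)
{
  size_t page = (size_t) sysconf (_SC_PAGESIZE);
  size_t required = *cap + extra;
  /* Round the new capacity up to a whole number of pages. */
  size_t new_cap = (required + page - 1) & ~(page - 1);
  void *new_ptr;

  /* posix_memalign guarantees the buffer starts on a page boundary,
   * so locking [ptr, ptr+cap) never pulls in a partial extra page
   * at either end.
   */
  if (posix_memalign (&new_ptr, page, new_cap) != 0)
    return -1;

  if (*ptr != NULL) {
    memcpy (new_ptr, *ptr, *cap);
    free (*ptr);
  }
  *ptr = new_ptr;
  *cap = new_cap;
  return 0;
}

With the start page aligned and the capacity a whole number of
pages, mlock (ptr, cap) locks exactly cap / page_size pages, so the
2K test buffer fits in a single locked 64K page.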
Thanks: Laszlo Ersek
---
common/allocators/malloc.c | 53 ++++++++++++++++++++++++++++++--------
1 file changed, 42 insertions(+), 11 deletions(-)
diff --git a/common/allocators/malloc.c b/common/allocators/malloc.c
index 9a2dd549..19df666a 100644
--- a/common/allocators/malloc.c
+++ b/common/allocators/malloc.c
@@ -81,11 +81,32 @@ m_alloc_free (struct allocator *a)
}
}
-/* Extend the underlying bytearray if needed. mlock if requested. */
+/* Extend the underlying bytearray if needed. */
static int
-extend (struct m_alloc *ma, uint64_t new_size)
+extend_without_mlock (struct m_alloc *ma, uint64_t new_size)
+{
+ size_t old_size, n;
+
+ if (ma->ba.cap < new_size) {
+ old_size = ma->ba.cap;
+ n = new_size - ma->ba.cap;
+
+ if (bytearray_reserve (&ma->ba, n) == -1) {
+ nbdkit_error ("realloc: %m");
+ return -1;
+ }
+
+ /* Initialize the newly allocated memory to 0. */
+ memset (ma->ba.ptr + old_size, 0, n);
+ }
+
+ return 0;
+}
+
+#ifdef HAVE_MLOCK
+static int
+extend_with_mlock (struct m_alloc *ma, uint64_t new_size)
{
- ACQUIRE_WRLOCK_FOR_CURRENT_SCOPE (&ma->lock);
size_t old_size, n;
if (ma->ba.cap < new_size) {
@@ -100,7 +121,7 @@ extend (struct m_alloc *ma, uint64_t new_size)
munlock (ma->ba.ptr, ma->ba.cap);
#endif
- if (bytearray_reserve (&ma->ba, n) == -1) {
+ if (bytearray_reserve_page_aligned (&ma->ba, n) == -1) {
nbdkit_error ("realloc: %m");
return -1;
}
@@ -108,18 +129,28 @@ extend (struct m_alloc *ma, uint64_t new_size)
/* Initialize the newly allocated memory to 0. */
memset (ma->ba.ptr + old_size, 0, n);
-#ifdef HAVE_MLOCK
- if (ma->use_mlock) {
- if (mlock (ma->ba.ptr, ma->ba.cap) == -1) {
- nbdkit_error ("allocator=malloc: mlock: %m");
- return -1;
- }
+ if (mlock (ma->ba.ptr, ma->ba.cap) == -1) {
+ nbdkit_error ("allocator=malloc: mlock: %m");
+ return -1;
}
-#endif
}
return 0;
}
+#endif /* HAVE_MLOCK */
+
+static int
+extend (struct m_alloc *ma, uint64_t new_size)
+{
+ ACQUIRE_WRLOCK_FOR_CURRENT_SCOPE (&ma->lock);
+
+#ifdef HAVE_MLOCK
+ if (ma->use_mlock)
+ return extend_with_mlock (ma, new_size);
+#endif
+
+ return extend_without_mlock (ma, new_size);
+}
static int
m_alloc_set_size_hint (struct allocator *a, uint64_t size_hint)
Reviewed-by: Laszlo Ersek <lersek@redhat.com>