Add a benchmark for copying a buffer using 3 strategies: reusing the
same buffer, making a new uninitialized buffer per copy, and making a
new zeroed buffer per copy. This benchmark measures the worst possible
case, copying a buffer to memory. Any real I/O will be much slower,
hiding the overhead of allocating or zeroing buffers.
$ go test -run=AioBuffer -bench=Copy -benchtime=5s
goos: linux
goarch: amd64
pkg: libguestfs.org/libnbd
cpu: Intel(R) Core(TM) i7-10850H CPU @ 2.70GHz
BenchmarkAioBufferCopyBaseline-12 1142508 4523 ns/op
BenchmarkAioBufferCopyMake-12 1000000 5320 ns/op
BenchmarkAioBufferCopyMakeZero-12 728940 8218 ns/op
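
For context, the pattern that CopyMake models in a real read path looks
roughly like the sketch below (illustration only, not part of this
patch). readChunk is a hypothetical stand-in for an NBD read that fills
the buffer; its cost dominates the MakeAioBuffer call:

// Hypothetical sketch: allocate an uninitialized buffer per request,
// let the (much slower) I/O fill it, then free it.
func readWithNewBuffer(readChunk func([]byte) error) error {
	buf := MakeAioBuffer(bufferSize)
	defer buf.Free()
	return readChunk(buf.Slice())
}
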
Signed-off-by: Nir Soffer <nsoffer@redhat.com>
---
golang/libnbd_020_aio_buffer_test.go | 32 ++++++++++++++++++++++++++++
1 file changed, 32 insertions(+)
diff --git a/golang/libnbd_020_aio_buffer_test.go b/golang/libnbd_020_aio_buffer_test.go
index e07f8973..f38866e7 100644
--- a/golang/libnbd_020_aio_buffer_test.go
+++ b/golang/libnbd_020_aio_buffer_test.go
@@ -195,10 +195,42 @@ func BenchmarkAioBufferBytes(b *testing.B) {
 
 func BenchmarkAioBufferSlice(b *testing.B) {
 	buf := MakeAioBuffer(bufferSize)
 	defer buf.Free()
 	var r int
 	b.ResetTimer()
 	for i := 0; i < b.N; i++ {
 		r += len(buf.Slice())
 	}
 }
+
+var data = make([]byte, bufferSize)
+
+// Benchmark copying into the same buffer, used as a baseline for the
+// CopyMake and CopyMakeZero benchmarks.
+func BenchmarkAioBufferCopyBaseline(b *testing.B) {
+	buf := MakeAioBufferZero(bufferSize)
+	defer buf.Free()
+
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		copy(buf.Slice(), data)
+	}
+}
+
+// Benchmark the overhead of making a new uninitialized buffer per read.
+func BenchmarkAioBufferCopyMake(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		buf := MakeAioBuffer(bufferSize)
+		copy(buf.Slice(), data)
+		buf.Free()
+	}
+}
+
+// Benchmark the overhead of making a new zeroed buffer per read.
+func BenchmarkAioBufferCopyMakeZero(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		buf := MakeAioBufferZero(bufferSize)
+		copy(buf.Slice(), data)
+		buf.Free()
+	}
+}
--
2.34.1