Now that we have an efficient way to use AioBuffer, we don't need the
hacks to create AioBuffer from a Go slice.
Benchmarking AioBuffer shows that allocating a 256k buffer is practically
free, so there is no need for the buffer pool. Now we allocate a new
buffer per request, keep it in the command, and free it when the request
is finished.
Signed-off-by: Nir Soffer <nsoffer@redhat.com>
---
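Not part of the patch: the benchmark behind the "practically free" claim
above, as a minimal sketch (the test file and exact loop shape are
assumptions; MakeAioBuffer and Free are the real Go binding calls):

package libnbd_test

import (
    "testing"

    "libguestfs.org/libnbd"
)

// Allocate and free a 256k buffer per iteration. Per the commit
// message, this measures as practically free, so no pool is needed.
func BenchmarkMakeAioBuffer(b *testing.B) {
    for i := 0; i < b.N; i++ {
        buf := libnbd.MakeAioBuffer(256 * 1024)
        buf.Free()
    }
}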
golang/examples/aio_copy/aio_copy.go | 29 +++++-----------------------
1 file changed, 5 insertions(+), 24 deletions(-)
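For reference, a minimal sketch of the new per-request buffer lifecycle
using the same Go binding calls as the diff below (a single synchronous
read for clarity; the socket URI comes from the example's usage line,
and error handling is trimmed to panics as in the example):

package main

import (
    "os"

    "libguestfs.org/libnbd"
)

func main() {
    h, err := libnbd.Create()
    if err != nil {
        panic(err)
    }
    defer h.Close()

    if err = h.ConnectUri("nbd+unix:///?socket=/tmp.nbd"); err != nil {
        panic(err)
    }

    // Allocate a new buffer for this request; no pool, no unsafe
    // pointer into a Go slice.
    buf := libnbd.MakeAioBuffer(256 * 1024)

    done := false
    args := libnbd.AioPreadOptargs{
        CompletionCallbackSet: true,
        CompletionCallback: func(error *int) int {
            done = true
            return 1
        },
    }
    if _, err = h.AioPread(buf, 0, &args); err != nil {
        panic(err)
    }

    // Wait until the completion callback fires.
    for !done {
        if _, err = h.Poll(-1); err != nil {
            panic(err)
        }
    }

    // Slice() exposes the buffer to Go code for the write.
    if _, err = os.Stdout.Write(buf.Slice()); err != nil {
        panic(err)
    }

    // Free the buffer when the request is finished.
    buf.Free()
}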
diff --git a/golang/examples/aio_copy/aio_copy.go b/golang/examples/aio_copy/aio_copy.go
index b6f5def1..bb20b478 100644
--- a/golang/examples/aio_copy/aio_copy.go
+++ b/golang/examples/aio_copy/aio_copy.go
@@ -37,53 +37,43 @@
// Example:
//
// ./aio_copy nbd+unix:///?socket=/tmp.nbd >/dev/null
//
package main
import (
"container/list"
"flag"
"os"
- "sync"
"syscall"
- "unsafe"
"libguestfs.org/libnbd"
)
var (
// These options give best performance with fast NVMe drive.
requestSize = flag.Uint("request-size", 256*1024, "maximum request size in bytes")
requests = flag.Uint("requests", 4, "maximum number of requests in flight")
h *libnbd.Libnbd
// Keeping commands in a queue ensures commands are written in the right
// order, even if they complete out of order. This allows parallel reads
// with non-seekable output.
queue list.List
-
- // Buffer pool allocating buffers as needed and reusing them.
- bufPool = sync.Pool{
- New: func() interface{} {
- return make([]byte, *requestSize)
- },
- }
)
// command keeps the state of a single AioPread call while the read is handled
// by libnbd, until the command reaches the front of the queue and can be
// written to
// the output.
type command struct {
- buf []byte
- length uint
+ buf libnbd.AioBuffer
ready bool
}
func main() {
flag.Parse()
var err error
h, err = libnbd.Create()
if err != nil {
@@ -139,60 +129,51 @@ func waitForCompletion() {
panic(err)
}
if inflightRequests() < start {
break // A read completed.
}
}
}
func startRead(offset uint64, length uint) {
- buf := bufPool.Get().([]byte)
-
- // Keep buffer in command so we can put it back into the pool when the
- // command completes.
- cmd := &command{buf: buf, length: length}
-
- // Create aio buffer from pool buffer to avoid unneeded allocation for
- // every read, and unneeded copy when completing the read.
- abuf := libnbd.AioBuffer{P: unsafe.Pointer(&buf[0]), Size: length}
+ cmd := &command{buf: libnbd.MakeAioBuffer(length)}
args := libnbd.AioPreadOptargs{
CompletionCallbackSet: true,
CompletionCallback: func(error *int) int {
if *error != 0 {
// This is not documented, but *error is an errno value translated
// from the NBD server error.
err := syscall.Errno(*error).Error()
panic(err)
}
cmd.ready = true
return 1
},
}
- _, err := h.AioPread(abuf, offset, &args)
+ _, err := h.AioPread(cmd.buf, offset, &args)
if err != nil {
panic(err)
}
queue.PushBack(cmd)
}
func readReady() bool {
return queue.Len() > 0 && queue.Front().Value.(*command).ready
}
func finishRead() {
e := queue.Front()
queue.Remove(e)
cmd := e.Value.(*command)
- b := cmd.buf[:cmd.length]
- _, err := os.Stdout.Write(b)
+ _, err := os.Stdout.Write(cmd.buf.Slice())
if err != nil {
panic(err)
}
- bufPool.Put(cmd.buf)
+ cmd.buf.Free()
}
--
2.34.1