Skip to content

Commit

Permalink
feat: new pkg mempool
Browse files Browse the repository at this point in the history
compared to bytedance/gopkg:
* No allocation when Get and Put
* No linkname to runtime.mallocgc
* Safe version of `Free`
* Min mem pool size = 4k for better mem reuse
* New `Cap` func, because there are some constraints on how cap can be used.
* New `Append` and `AppendStr` funcs for better mem reuse
  • Loading branch information
xiaost committed Aug 21, 2024
1 parent 9fbe815 commit 1a8a7b3
Show file tree
Hide file tree
Showing 2 changed files with 289 additions and 0 deletions.
187 changes: 187 additions & 0 deletions cache/mempool/mempool.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,187 @@
/*
* Copyright 2024 CloudWeGo Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package mempool

import (
"math/bits"
"sync"
"unsafe"
)

// memPool wraps a sync.Pool of fixed-size buffers.
// Size is the capacity of every buffer handed out by this pool.
type memPool struct {
	sync.Pool

	Size int
}

// pools holds one memPool per power-of-two size class,
// from minMemPoolSize up to maxMemPoolSize; filled by init.
var pools []*memPool

const (
	minMemPoolSize = 4 << 10   // 4KB, `Malloc` returns buf with cap >= the number
	maxMemPoolSize = 128 << 30 // 128GB, `Malloc` will panic if > the number
)

const (
	// footer is a [8]byte, it contains two parts: magic(58 bits) and index (6 bits)
	footerLen = 8

	footerMagicMask = uint64(0xFFFFFFFFFFFFFFC0) // 58 bits mask for `footerMagic`
	footerIndexMask = uint64(0x000000000000003F) // 6 bits mask, [0, 63], for `pools`, `bits2idx`
	footerMagic     = uint64(0xBADC0DEBADC0DEC0) // uint64 ends with 6 zero bits
)

// bits2idx maps bits.Len of a pool's Size to that pool's index in `pools`;
// filled by init and read by poolIndex.
var bits2idx [64]int // bits.Len -> `pools[i]`

// init registers one memPool per power-of-two size class and fills
// bits2idx so poolIndex can translate bits.Len(size) into a pool index.
func init() {
	i := 0
	for sz := minMemPoolSize; sz <= maxMemPoolSize; sz <<= 1 {
		// `p` is declared inside the loop so the New closure below
		// captures this iteration's pool, not the loop's final value.
		p := &memPool{Size: sz}
		p.New = func() interface{} {
			// Allocate the backing array and hand out a pointer to its
			// first byte. Storing a *byte (pointer-shaped value) in the
			// sync.Pool avoids an extra allocation on Get/Put, and the
			// pointer keeps the whole backing array alive for the GC.
			b := make([]byte, 0, p.Size)
			b = b[:p.Size]
			return &b[0]
		}
		pools = append(pools, p)
		bits2idx[bits.Len(uint(p.Size))] = i
		i++
	}
}

// poolIndex returns the index of the smallest pool whose buffers can
// hold `sz` bytes.
func poolIndex(sz int) int {
	// anything at or below the minimum class lands in the first pool
	if sz <= minMemPoolSize {
		return 0
	}
	idx := bits2idx[bits.Len(uint(sz))]
	if uint(sz)&(uint(sz)-1) != 0 {
		// not a power of two: round up to the next size class,
		// e.g. 8192 fits pools[1] exactly while 8193 needs pools[2]
		idx++
	}
	return idx
}

// sliceHeader mirrors the runtime layout of a slice header
// (see reflect.SliceHeader). It is used to build the returned
// []byte manually in Malloc without an extra allocation.
// NOTE: field order and types must match the runtime representation.
type sliceHeader struct {
	Data unsafe.Pointer
	Len  int
	Cap  int
}

// Malloc creates a buf from pool.
// Tips for usage:
// * buf returned by Malloc may not be initialized with zeros, use at your own risk.
// * call `Free` when buf is no longer use, DO NOT REUSE buf after calling `Free`
// * use `buf = buf[:mempool.Cap(buf)]` to make use of the cap of a returned buf.
// * DO NOT USE `cap` or `append` to resize, coz bytes at the end of buf are used for storing malloc info.
func Malloc(size int) []byte {
if size == 0 {
return []byte{}
}
c := size + footerLen // reserve for footer
i := poolIndex(c)
pool := pools[i]
p := pool.Get().(*byte)

// prepare for return
ret := []byte{}
h := (*sliceHeader)(unsafe.Pointer(&ret))
h.Data = unsafe.Pointer(p)
h.Len = size
h.Cap = pool.Size // update to the correct cap

// add mallocMemMagic & index to the end of bytes
// it will check later when `Free`
*(*uint64)(unsafe.Add(h.Data, h.Cap-footerLen)) = footerMagic | uint64(i)
return ret
}

// Cap returns the max cap a buf can be resized to.
// It panics when buf was not created by Malloc of this package, or when
// its length was changed without going through Cap/Append.
// See comment of `Malloc` for details.
func Cap(buf []byte) int {
	// A malloc'ed buf always keeps footerLen spare bytes at the tail and
	// those bytes still hold the magic stamp written by Malloc. Checking
	// the magic bits (rather than only footer != 0) applies the same
	// validation `Free` performs, so a clobbered footer is caught here.
	if cap(buf)-len(buf) < footerLen || footer(buf)&footerMagicMask != footerMagic {
		panic("buf not malloc by this package or buf len changed without using Cap func")
	}
	return cap(buf) - footerLen
}

// Append appends bytes to the given `[]byte`.
// It frees `a` and creates a new one if needed.
// Please make sure you're calling the func like `b = mempool.Append(b, data...)`
func Append(a []byte, b ...byte) []byte {
	// In-place append is safe while len(a)+len(b)+footerLen <= cap(a),
	// i.e. the footer at the tail is not overwritten. Using `>=` instead
	// of the off-by-one strict `>` lets the last usable byte be filled
	// without forcing a reallocation.
	if cap(a)-len(a)-footerLen >= len(b) {
		return append(a, b...)
	}
	return appendSlow(a, b)
}

// appendSlow allocates a fresh buf large enough for a+b, copies both
// parts into it, and releases the old buf back to its pool.
func appendSlow(a, b []byte) []byte {
	merged := Malloc(len(a) + len(b))
	n := copy(merged, a)
	copy(merged[n:], b)
	Free(a)
	return merged
}

// AppendStr ... same as Append for string.
// See comment of `Append` for details.
func AppendStr(a []byte, b string) []byte {
	// Same capacity math as Append: `>=` (not `>`) is sufficient to keep
	// the trailing footer intact, avoiding a needless realloc when the
	// payload fits exactly.
	if cap(a)-len(a)-footerLen >= len(b) {
		return append(a, b...)
	}
	return appendStrSlow(a, b)
}

// appendStrSlow allocates a fresh buf large enough for a+b, copies both
// parts into it, and releases the old buf back to its pool.
func appendStrSlow(a []byte, b string) []byte {
	merged := Malloc(len(a) + len(b))
	n := copy(merged, a)
	copy(merged[n:], b)
	Free(a)
	return merged
}

// Free should be called when a buf is no longer used.
// Every check below is best-effort safety: if any fails, Free silently
// does nothing, so passing a buf that was not created by Malloc is safe.
// See comment of `Malloc` for details.
func Free(buf []byte) {
	c := cap(buf)
	if c < minMemPoolSize { // too small to have come from any pool
		return
	}
	if uint(c)&uint(c-1) != 0 { // not malloc by this package: pooled caps are powers of two
		return
	}
	size := len(buf)
	// The footer read below touches bytes [c-footerLen, c); it is only
	// safe (and meaningful) if those bytes are past the buf's length.
	if c-size < footerLen { // no spare tail left for a footer
		return
	}
	footer := footer(buf)
	// checks magic
	if footer&footerMagicMask != footerMagic {
		return
	}
	// checks index
	i := int(footer & footerIndexMask)
	if i < len(pools) {
		// the pool's buffer size must match this cap exactly,
		// otherwise the buf belongs to a different (or no) pool
		if p := pools[i]; p.Size == c {
			p.Put(&buf[0])
		}
	}
}

// footer reads the 8-byte footer stored in the last footerLen bytes of
// buf's full capacity (not its length).
// Precondition: cap(buf) >= footerLen — callers must check before calling.
func footer(buf []byte) uint64 {
	h := (*sliceHeader)(unsafe.Pointer(&buf))
	return *(*uint64)(unsafe.Add(h.Data, h.Cap-footerLen))
}
102 changes: 102 additions & 0 deletions cache/mempool/mempool_test.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,102 @@
/*
* Copyright 2024 CloudWeGo Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package mempool

import (
"runtime/debug"
"testing"
"unsafe"

"github.com/stretchr/testify/require"
)

// TestMallocFree round-trips buffers of many sizes through Malloc/Free.
func TestMallocFree(t *testing.T) {
	// sizes from 127B up to 1MB, stepping by 1000 bytes
	for size := 127; size < 1<<20; size += 1000 {
		buf := Malloc(size)
		Free(buf)
	}
}

// TestCap checks the usable cap reported for pooled buffers.
func TestCap(t *testing.T) {
	const sz8k = 8 << 10

	// requesting exactly 8KB needs footer room too, which pushes the
	// allocation into the next size class, so usable cap exceeds 8KB
	b := Malloc(sz8k)
	require.Greater(t, Cap(b), sz8k)
	Free(b)

	// requesting 8KB minus the footer fits the 8KB class exactly
	b = Malloc(sz8k - footerLen)
	require.Equal(t, sz8k-footerLen, Cap(b))
	require.Equal(t, sz8k, cap(b))
	Free(b)
}

// TestAppend grows a pooled buf across several size classes via
// Append and AppendStr.
func TestAppend(t *testing.T) {
	debug.SetGCPercent(-1)        // make sure the buf in pools will not be recycled
	defer debug.SetGCPercent(100) // reset to 100

	b := Malloc(0)
	for i := 0; i < 2000; i++ {
		b = Append(b, []byte("TestAppend")...)
	}
	Free(b)

	b = Malloc(0)
	for i := 0; i < 2000; i++ {
		b = AppendStr(b, "TestAppendStr")
	}
	Free(b)
}

// TestFree exercises every rejection branch of Free, then a successful Put.
// It fabricates footers by hand: `_ = append(b, footer...)` writes into
// b's spare capacity (mutating the shared backing array) without
// changing b itself, which is exactly where Free looks for the footer.
func TestFree(t *testing.T) {
	minsz := minMemPoolSize

	Free([]byte{})                     // case: cap == 0
	Free(make([]byte, 0, minsz+1))     // case: not power of two
	Free(make([]byte, minsz-1, minsz)) // case: < footerLen

	b := make([]byte, minsz-footerLen, minsz)
	footer := make([]byte, footerLen)

	Free(b) // case: magic err (the zeroed tail has no magic stamp)

	*(*uint64)(unsafe.Pointer(&footer[0])) = footerMagic | 1
	_ = append(b, footer...) // plant footer with index 1 in b's tail
	Free(b)                  // case: index err (pools[1].Size != cap(b))

	*(*uint64)(unsafe.Pointer(&footer[0])) = footerMagic | 0
	_ = append(b, footer...) // plant footer with index 0 in b's tail
	Free(b)                  // all good
}

// Benchmark_AppendStr measures parallel AppendStr throughput, periodically
// freeing and re-allocating the buf so pool reuse is exercised.
func Benchmark_AppendStr(b *testing.B) {
	str := "Benchmark_AppendStr"
	b.ReportAllocs()
	b.SetBytes(int64(len(str)))
	b.RunParallel(func(pb *testing.PB) {
		i := 1
		// renamed from `b` to avoid shadowing the *testing.B receiver,
		// which made the benchmark handle unreachable inside the loop
		buf := Malloc(1)
		for pb.Next() {
			if i&0xff == 0 { // 255 * len(str) ~ 4845 > minMemPoolSize
				Free(buf)
				buf = Malloc(1)
			}
			buf = AppendStr(buf, str)
			i++
		}
		Free(buf)
	})
}

0 comments on commit 1a8a7b3

Please sign in to comment.