22
vendor/github.com/VictoriaMetrics/fastcache/LICENSE
generated
vendored
Normal file
22
vendor/github.com/VictoriaMetrics/fastcache/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,22 @@
|
||||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2018 VictoriaMetrics
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
|
||||
116
vendor/github.com/VictoriaMetrics/fastcache/README.md
generated
vendored
Normal file
116
vendor/github.com/VictoriaMetrics/fastcache/README.md
generated
vendored
Normal file
@@ -0,0 +1,116 @@
|
||||
[](https://github.com/VictoriaMetrics/fastcache/actions)
|
||||
[](http://godoc.org/github.com/VictoriaMetrics/fastcache)
|
||||
[](https://goreportcard.com/report/github.com/VictoriaMetrics/fastcache)
|
||||
[](https://codecov.io/gh/VictoriaMetrics/fastcache)
|
||||
|
||||
# fastcache - fast thread-safe inmemory cache for big number of entries in Go
|
||||
|
||||
### Features
|
||||
|
||||
* Fast. Performance scales on multi-core CPUs. See benchmark results below.
|
||||
* Thread-safe. Concurrent goroutines may read and write into a single
|
||||
cache instance.
|
||||
* The fastcache is designed for storing big number of entries without
|
||||
[GC overhead](https://syslog.ravelin.com/further-dangers-of-large-heaps-in-go-7a267b57d487).
|
||||
* Fastcache automatically evicts old entries when reaching the maximum cache size
|
||||
set on its creation.
|
||||
* [Simple API](http://godoc.org/github.com/VictoriaMetrics/fastcache).
|
||||
* Simple source code.
|
||||
* Cache may be [saved to file](https://godoc.org/github.com/VictoriaMetrics/fastcache#Cache.SaveToFile)
|
||||
and [loaded from file](https://godoc.org/github.com/VictoriaMetrics/fastcache#LoadFromFile).
|
||||
* Works on [Google AppEngine](https://cloud.google.com/appengine/docs/go/).
|
||||
|
||||
|
||||
### Benchmarks
|
||||
|
||||
`Fastcache` performance is compared with [BigCache](https://github.com/allegro/bigcache), standard Go map
|
||||
and [sync.Map](https://golang.org/pkg/sync/#Map).
|
||||
|
||||
```
|
||||
GOMAXPROCS=4 go test github.com/VictoriaMetrics/fastcache -bench='Set|Get' -benchtime=10s
|
||||
goos: linux
|
||||
goarch: amd64
|
||||
pkg: github.com/VictoriaMetrics/fastcache
|
||||
BenchmarkBigCacheSet-4 2000 10566656 ns/op 6.20 MB/s 4660369 B/op 6 allocs/op
|
||||
BenchmarkBigCacheGet-4 2000 6902694 ns/op 9.49 MB/s 684169 B/op 131076 allocs/op
|
||||
BenchmarkBigCacheSetGet-4 1000 17579118 ns/op 7.46 MB/s 5046744 B/op 131083 allocs/op
|
||||
BenchmarkCacheSet-4 5000 3808874 ns/op 17.21 MB/s 1142 B/op 2 allocs/op
|
||||
BenchmarkCacheGet-4 5000 3293849 ns/op 19.90 MB/s 1140 B/op 2 allocs/op
|
||||
BenchmarkCacheSetGet-4 2000 8456061 ns/op 15.50 MB/s 2857 B/op 5 allocs/op
|
||||
BenchmarkStdMapSet-4 2000 10559382 ns/op 6.21 MB/s 268413 B/op 65537 allocs/op
|
||||
BenchmarkStdMapGet-4 5000 2687404 ns/op 24.39 MB/s 2558 B/op 13 allocs/op
|
||||
BenchmarkStdMapSetGet-4 100 154641257 ns/op 0.85 MB/s 387405 B/op 65558 allocs/op
|
||||
BenchmarkSyncMapSet-4 500 24703219 ns/op 2.65 MB/s 3426543 B/op 262411 allocs/op
|
||||
BenchmarkSyncMapGet-4 5000 2265892 ns/op 28.92 MB/s 2545 B/op 79 allocs/op
|
||||
BenchmarkSyncMapSetGet-4 1000 14595535 ns/op 8.98 MB/s 3417190 B/op 262277 allocs/op
|
||||
```
|
||||
|
||||
`MB/s` column here actually means `millions of operations per second`.
|
||||
As you can see, `fastcache` is faster than the `BigCache` in all the cases.
|
||||
`fastcache` is faster than the standard Go map and `sync.Map` on workloads
|
||||
with inserts.
|
||||
|
||||
|
||||
### Limitations
|
||||
|
||||
* Keys and values must be byte slices. Other types must be marshaled before
|
||||
storing them in the cache.
|
||||
* Big entries with sizes exceeding 64KB must be stored via [distinct API](http://godoc.org/github.com/VictoriaMetrics/fastcache#Cache.SetBig).
|
||||
* There is no cache expiration. Entries are evicted from the cache only
|
||||
on cache size overflow. Entry deadline may be stored inside the value in order
|
||||
to implement cache expiration.
|
||||
|
||||
|
||||
### Architecture details
|
||||
|
||||
The cache uses ideas from [BigCache](https://github.com/allegro/bigcache):
|
||||
|
||||
* The cache consists of many buckets, each with its own lock.
|
||||
This helps scaling the performance on multi-core CPUs, since multiple
|
||||
CPUs may concurrently access distinct buckets.
|
||||
* Each bucket consists of a `hash(key) -> (key, value) position` map
|
||||
and 64KB-sized byte slices (chunks) holding encoded `(key, value)` entries.
|
||||
Each bucket contains only `O(chunksCount)` pointers. For instance, 64GB cache
|
||||
would contain ~1M pointers, while similarly-sized `map[string][]byte`
|
||||
would contain ~1B pointers for short keys and values. This would lead to
|
||||
[huge GC overhead](https://syslog.ravelin.com/further-dangers-of-large-heaps-in-go-7a267b57d487).
|
||||
|
||||
64KB-sized chunks reduce memory fragmentation and the total memory usage compared
to a single big chunk per bucket.
|
||||
Chunks are allocated off-heap if possible. This reduces total memory usage because
|
||||
GC collects unused memory more frequently without the need in `GOGC` tweaking.
|
||||
|
||||
|
||||
### Users
|
||||
|
||||
* `Fastcache` has been extracted from [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics) sources.
|
||||
See [this article](https://medium.com/devopslinks/victoriametrics-creating-the-best-remote-storage-for-prometheus-5d92d66787ac)
|
||||
for more info about `VictoriaMetrics`.
|
||||
|
||||
|
||||
### FAQ
|
||||
|
||||
#### What is the difference between `fastcache` and other similar caches like [BigCache](https://github.com/allegro/bigcache) or [FreeCache](https://github.com/coocood/freecache)?
|
||||
|
||||
* `Fastcache` is faster. See benchmark results above.
|
||||
* `Fastcache` uses less memory due to lower heap fragmentation. This allows
|
||||
saving many GBs of memory on multi-GB caches.
|
||||
* `Fastcache` API [is simpler](http://godoc.org/github.com/VictoriaMetrics/fastcache).
|
||||
The API is designed to be used in zero-allocation mode.
|
||||
|
||||
|
||||
#### Why `fastcache` doesn't support cache expiration?
|
||||
|
||||
Because we don't need cache expiration in [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics).
|
||||
Cached entries inside `VictoriaMetrics` never expire. They are automatically evicted on cache size overflow.
|
||||
|
||||
It is easy to implement cache expiration on top of `fastcache` by caching values
|
||||
with marshaled deadlines and verifying deadlines after reading these values
|
||||
from the cache.
|
||||
|
||||
|
||||
#### Why `fastcache` doesn't support advanced features such as [thundering herd protection](https://en.wikipedia.org/wiki/Thundering_herd_problem) or callbacks on entries' eviction?
|
||||
|
||||
Because these features would complicate the code and would make it slower.
|
||||
`Fastcache` source code is simple - just copy-paste it and implement the feature you want
|
||||
on top of it.
|
||||
160
vendor/github.com/VictoriaMetrics/fastcache/bigcache.go
generated
vendored
Normal file
160
vendor/github.com/VictoriaMetrics/fastcache/bigcache.go
generated
vendored
Normal file
@@ -0,0 +1,160 @@
|
||||
package fastcache
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
|
||||
xxhash "github.com/cespare/xxhash/v2"
|
||||
)
|
||||
|
||||
// maxSubvalueLen is the maximum size of subvalue chunk.
|
||||
//
|
||||
// - 16 bytes are for subkey encoding
|
||||
// - 4 bytes are for len(key)+len(value) encoding inside fastcache
|
||||
// - 1 byte is implementation detail of fastcache
|
||||
const maxSubvalueLen = chunkSize - 16 - 4 - 1
|
||||
|
||||
// maxKeyLen is the maximum size of key.
|
||||
//
|
||||
// - 16 bytes are for (hash + valueLen)
|
||||
// - 4 bytes are for len(key)+len(subkey)
|
||||
// - 1 byte is implementation detail of fastcache
|
||||
const maxKeyLen = chunkSize - 16 - 4 - 1
|
||||
|
||||
// SetBig sets (k, v) to c where len(v) may exceed 64KB.
//
// GetBig must be used for reading stored values.
//
// The stored entry may be evicted at any time either due to cache
// overflow or due to unlikely hash collision.
// Pass higher maxBytes value to New if the added items disappear
// frequently.
//
// It is safe to store entries smaller than 64KB with SetBig.
//
// k and v contents may be modified after returning from SetBig.
func (c *Cache) SetBig(k, v []byte) {
	atomic.AddUint64(&c.bigStats.SetBigCalls, 1)
	if len(k) > maxKeyLen {
		// The key wouldn't fit into a single chunk together with the 16-byte metavalue.
		atomic.AddUint64(&c.bigStats.TooBigKeyErrors, 1)
		return
	}
	valueLen := len(v)
	valueHash := xxhash.Sum64(v)

	// Split v into chunks with up to 64Kb each.
	// Each chunk is stored via Set under the subkey (valueHash, chunkIndex),
	// so GetBig can re-assemble the value in order.
	subkey := getSubkeyBuf()
	var i uint64
	for len(v) > 0 {
		subkey.B = marshalUint64(subkey.B[:0], valueHash)
		subkey.B = marshalUint64(subkey.B, uint64(i))
		i++
		subvalueLen := maxSubvalueLen
		if len(v) < subvalueLen {
			subvalueLen = len(v)
		}
		subvalue := v[:subvalueLen]
		v = v[subvalueLen:]
		c.Set(subkey.B, subvalue)
	}

	// Write metavalue, which consists of valueHash and valueLen.
	// GetBig reads it first in order to locate and verify the chunks.
	subkey.B = marshalUint64(subkey.B[:0], valueHash)
	subkey.B = marshalUint64(subkey.B, uint64(valueLen))
	c.Set(k, subkey.B)
	putSubkeyBuf(subkey)
}
|
||||
|
||||
// GetBig searches for the value for the given k, appends it to dst
// and returns the result.
//
// GetBig returns only values stored via SetBig. It doesn't work
// with values stored via other methods.
//
// k contents may be modified after returning from GetBig.
func (c *Cache) GetBig(dst, k []byte) (r []byte) {
	atomic.AddUint64(&c.bigStats.GetBigCalls, 1)
	subkey := getSubkeyBuf()
	dstWasNil := dst == nil
	defer func() {
		putSubkeyBuf(subkey)
		if len(r) == 0 && dstWasNil {
			// Guarantee that if the caller provided nil and this is a cache miss that
			// the caller can accurately test for a cache miss with `if r == nil`.
			r = nil
		}
	}()

	// Read and parse metavalue
	subkey.B = c.Get(subkey.B[:0], k)
	if len(subkey.B) == 0 {
		// Nothing found.
		return dst
	}
	if len(subkey.B) != 16 {
		// The metavalue written by SetBig is always 16 bytes (hash + length).
		atomic.AddUint64(&c.bigStats.InvalidMetavalueErrors, 1)
		return dst
	}
	valueHash := unmarshalUint64(subkey.B)
	valueLen := unmarshalUint64(subkey.B[8:])

	// Collect result from chunks.
	// Grow dst capacity upfront so the chunk reads below don't re-allocate.
	dstLen := len(dst)
	if n := dstLen + int(valueLen) - cap(dst); n > 0 {
		dst = append(dst[:cap(dst)], make([]byte, n)...)
	}
	dst = dst[:dstLen]
	var i uint64
	for uint64(len(dst)-dstLen) < valueLen {
		// Chunks are stored under (valueHash, chunkIndex) subkeys - see SetBig.
		subkey.B = marshalUint64(subkey.B[:0], valueHash)
		subkey.B = marshalUint64(subkey.B, uint64(i))
		i++
		dstNew := c.Get(dst, subkey.B)
		if len(dstNew) == len(dst) {
			// Cannot find subvalue
			return dst[:dstLen]
		}
		dst = dstNew
	}

	// Verify the obtained value.
	v := dst[dstLen:]
	if uint64(len(v)) != valueLen {
		atomic.AddUint64(&c.bigStats.InvalidValueLenErrors, 1)
		return dst[:dstLen]
	}
	h := xxhash.Sum64(v)
	if h != valueHash {
		// A hash mismatch means some chunk was evicted or replaced by an
		// unrelated entry stored under a colliding subkey.
		atomic.AddUint64(&c.bigStats.InvalidValueHashErrors, 1)
		return dst[:dstLen]
	}
	return dst
}
|
||||
|
||||
func getSubkeyBuf() *bytesBuf {
|
||||
v := subkeyPool.Get()
|
||||
if v == nil {
|
||||
return &bytesBuf{}
|
||||
}
|
||||
return v.(*bytesBuf)
|
||||
}
|
||||
|
||||
// putSubkeyBuf returns bb to subkeyPool for later reuse by getSubkeyBuf.
// The buffer is truncated (its capacity is kept) before pooling.
func putSubkeyBuf(bb *bytesBuf) {
	bb.B = bb.B[:0]
	subkeyPool.Put(bb)
}
|
||||
|
||||
// subkeyPool is a pool of reusable subkey buffers used by SetBig and GetBig
// in order to avoid allocating a buffer on every call.
var subkeyPool sync.Pool

// bytesBuf is a reusable byte buffer, pooled in subkeyPool.
type bytesBuf struct {
	// B is the underlying byte slice.
	B []byte
}
|
||||
|
||||
// marshalUint64 appends the 8-byte big-endian encoding of u to dst
// and returns the extended slice.
func marshalUint64(dst []byte, u uint64) []byte {
	// Emit bytes from the most significant to the least significant.
	for shift := 56; shift >= 0; shift -= 8 {
		dst = append(dst, byte(u>>uint(shift)))
	}
	return dst
}
|
||||
|
||||
// unmarshalUint64 decodes a big-endian uint64 from the first 8 bytes of src.
// src must hold at least 8 bytes.
func unmarshalUint64(src []byte) uint64 {
	_ = src[7] // bounds-check hint: one check covers all eight reads below
	var u uint64
	for i := 0; i < 8; i++ {
		u = u<<8 | uint64(src[i])
	}
	return u
}
|
||||
415
vendor/github.com/VictoriaMetrics/fastcache/fastcache.go
generated
vendored
Normal file
415
vendor/github.com/VictoriaMetrics/fastcache/fastcache.go
generated
vendored
Normal file
@@ -0,0 +1,415 @@
|
||||
// Package fastcache implements fast in-memory cache.
|
||||
//
|
||||
// The package has been extracted from https://victoriametrics.com/
|
||||
package fastcache
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
|
||||
xxhash "github.com/cespare/xxhash/v2"
|
||||
)
|
||||
|
||||
// bucketsCount is the number of buckets in a Cache. Entries are spread
// over buckets by hash(key) % bucketsCount, so each bucket has its own lock.
const bucketsCount = 512

// chunkSize is the size in bytes of a single chunk in a bucket's ring buffer.
const chunkSize = 64 * 1024

// bucketSizeBits is the number of bits used for encoding the byte offset
// inside a bucket (see the idx | gen<<bucketSizeBits encoding in bucket.m).
const bucketSizeBits = 40

// genSizeBits is the number of bits left for encoding the bucket generation.
const genSizeBits = 64 - bucketSizeBits

// maxGen is the maximum generation value before the generation wraps around.
const maxGen = 1<<genSizeBits - 1

// maxBucketSize is the maximum per-bucket capacity in bytes that can be
// addressed with bucketSizeBits bits.
const maxBucketSize uint64 = 1 << bucketSizeBits
|
||||
|
||||
// Stats represents cache stats.
//
// All counters are cumulative since cache creation (or the last Cache.Reset).
//
// Use Cache.UpdateStats for obtaining fresh stats from the cache.
type Stats struct {
	// GetCalls is the number of Get calls.
	GetCalls uint64

	// SetCalls is the number of Set calls.
	SetCalls uint64

	// Misses is the number of cache misses.
	Misses uint64

	// Collisions is the number of cache collisions.
	//
	// Usually the number of collisions must be close to zero.
	// High number of collisions suggest something wrong with cache.
	Collisions uint64

	// Corruptions is the number of detected corruptions of the cache.
	//
	// Corruptions may occur when corrupted cache is loaded from file.
	Corruptions uint64

	// EntriesCount is the current number of entries in the cache.
	EntriesCount uint64

	// BytesSize is the current size of the cache in bytes.
	BytesSize uint64

	// BigStats contains stats for GetBig/SetBig methods.
	BigStats
}
|
||||
|
||||
// Reset resets s, so it may be re-used again in Cache.UpdateStats.
func (s *Stats) Reset() {
	// Overwrite every counter with its zero value in a single assignment.
	*s = Stats{}
}
|
||||
|
||||
// BigStats contains stats for GetBig/SetBig methods.
//
// All the counters are updated via the sync/atomic functions.
type BigStats struct {
	// GetBigCalls is the number of GetBig calls.
	GetBigCalls uint64

	// SetBigCalls is the number of SetBig calls.
	SetBigCalls uint64

	// TooBigKeyErrors is the number of calls to SetBig with too big key.
	TooBigKeyErrors uint64

	// InvalidMetavalueErrors is the number of calls to GetBig resulting
	// to invalid metavalue.
	InvalidMetavalueErrors uint64

	// InvalidValueLenErrors is the number of calls to GetBig resulting
	// to a chunk with invalid length.
	InvalidValueLenErrors uint64

	// InvalidValueHashErrors is the number of calls to GetBig resulting
	// to a chunk with invalid hash value.
	InvalidValueHashErrors uint64
}
|
||||
|
||||
// reset zeroes all the BigStats counters.
// Atomic stores are used since the counters may be concurrently
// incremented by SetBig/GetBig.
func (bs *BigStats) reset() {
	atomic.StoreUint64(&bs.GetBigCalls, 0)
	atomic.StoreUint64(&bs.SetBigCalls, 0)
	atomic.StoreUint64(&bs.TooBigKeyErrors, 0)
	atomic.StoreUint64(&bs.InvalidMetavalueErrors, 0)
	atomic.StoreUint64(&bs.InvalidValueLenErrors, 0)
	atomic.StoreUint64(&bs.InvalidValueHashErrors, 0)
}
|
||||
|
||||
// Cache is a fast thread-safe inmemory cache optimized for big number
// of entries.
//
// It has much lower impact on GC compared to a simple `map[string][]byte`.
//
// Use New or LoadFromFile* for creating new cache instance.
// Concurrent goroutines may call any Cache methods on the same cache instance.
//
// Call Reset when the cache is no longer needed. This reclaims the allocated
// memory.
type Cache struct {
	// buckets hold the cache data, sharded by hash(key) % bucketsCount.
	buckets [bucketsCount]bucket

	// bigStats holds counters for SetBig/GetBig calls; updated atomically.
	bigStats BigStats
}
|
||||
|
||||
// New returns new cache with the given maxBytes capacity in bytes.
|
||||
//
|
||||
// maxBytes must be smaller than the available RAM size for the app,
|
||||
// since the cache holds data in memory.
|
||||
//
|
||||
// If maxBytes is less than 32MB, then the minimum cache capacity is 32MB.
|
||||
func New(maxBytes int) *Cache {
|
||||
if maxBytes <= 0 {
|
||||
panic(fmt.Errorf("maxBytes must be greater than 0; got %d", maxBytes))
|
||||
}
|
||||
var c Cache
|
||||
maxBucketBytes := uint64((maxBytes + bucketsCount - 1) / bucketsCount)
|
||||
for i := range c.buckets[:] {
|
||||
c.buckets[i].Init(maxBucketBytes)
|
||||
}
|
||||
return &c
|
||||
}
|
||||
|
||||
// Set stores (k, v) in the cache.
|
||||
//
|
||||
// Get must be used for reading the stored entry.
|
||||
//
|
||||
// The stored entry may be evicted at any time either due to cache
|
||||
// overflow or due to unlikely hash collision.
|
||||
// Pass higher maxBytes value to New if the added items disappear
|
||||
// frequently.
|
||||
//
|
||||
// (k, v) entries with summary size exceeding 64KB aren't stored in the cache.
|
||||
// SetBig can be used for storing entries exceeding 64KB.
|
||||
//
|
||||
// k and v contents may be modified after returning from Set.
|
||||
func (c *Cache) Set(k, v []byte) {
|
||||
h := xxhash.Sum64(k)
|
||||
idx := h % bucketsCount
|
||||
c.buckets[idx].Set(k, v, h)
|
||||
}
|
||||
|
||||
// Get appends value by the key k to dst and returns the result.
|
||||
//
|
||||
// Get allocates new byte slice for the returned value if dst is nil.
|
||||
//
|
||||
// Get returns only values stored in c via Set.
|
||||
//
|
||||
// k contents may be modified after returning from Get.
|
||||
func (c *Cache) Get(dst, k []byte) []byte {
|
||||
h := xxhash.Sum64(k)
|
||||
idx := h % bucketsCount
|
||||
dst, _ = c.buckets[idx].Get(dst, k, h, true)
|
||||
return dst
|
||||
}
|
||||
|
||||
// HasGet works identically to Get, but also returns whether the given key
|
||||
// exists in the cache. This method makes it possible to differentiate between a
|
||||
// stored nil/empty value versus and non-existing value.
|
||||
func (c *Cache) HasGet(dst, k []byte) ([]byte, bool) {
|
||||
h := xxhash.Sum64(k)
|
||||
idx := h % bucketsCount
|
||||
return c.buckets[idx].Get(dst, k, h, true)
|
||||
}
|
||||
|
||||
// Has returns true if entry for the given key k exists in the cache.
|
||||
func (c *Cache) Has(k []byte) bool {
|
||||
h := xxhash.Sum64(k)
|
||||
idx := h % bucketsCount
|
||||
_, ok := c.buckets[idx].Get(nil, k, h, false)
|
||||
return ok
|
||||
}
|
||||
|
||||
// Del deletes value for the given k from the cache.
|
||||
//
|
||||
// k contents may be modified after returning from Del.
|
||||
func (c *Cache) Del(k []byte) {
|
||||
h := xxhash.Sum64(k)
|
||||
idx := h % bucketsCount
|
||||
c.buckets[idx].Del(h)
|
||||
}
|
||||
|
||||
// Reset removes all the items from the cache.
|
||||
func (c *Cache) Reset() {
|
||||
for i := range c.buckets[:] {
|
||||
c.buckets[i].Reset()
|
||||
}
|
||||
c.bigStats.reset()
|
||||
}
|
||||
|
||||
// UpdateStats adds cache stats to s.
//
// Call s.Reset before calling UpdateStats if s is re-used.
func (c *Cache) UpdateStats(s *Stats) {
	for i := range c.buckets[:] {
		c.buckets[i].UpdateStats(s)
	}
	// BigStats counters live on the Cache itself (not per-bucket),
	// so load them atomically here.
	s.GetBigCalls += atomic.LoadUint64(&c.bigStats.GetBigCalls)
	s.SetBigCalls += atomic.LoadUint64(&c.bigStats.SetBigCalls)
	s.TooBigKeyErrors += atomic.LoadUint64(&c.bigStats.TooBigKeyErrors)
	s.InvalidMetavalueErrors += atomic.LoadUint64(&c.bigStats.InvalidMetavalueErrors)
	s.InvalidValueLenErrors += atomic.LoadUint64(&c.bigStats.InvalidValueLenErrors)
	s.InvalidValueHashErrors += atomic.LoadUint64(&c.bigStats.InvalidValueHashErrors)
}
|
||||
|
||||
// bucket is a single shard of the cache, protected by its own lock.
type bucket struct {
	mu sync.RWMutex

	// chunks is a ring buffer with encoded (k, v) pairs.
	// It consists of 64KB chunks.
	chunks [][]byte

	// m maps hash(k) to idx of (k, v) pair in chunks.
	// The value encodes both the byte offset and the generation:
	// idx | (gen << bucketSizeBits).
	m map[uint64]uint64

	// idx points to chunks for writing the next (k, v) pair.
	idx uint64

	// gen is the generation of chunks.
	gen uint64

	// The following counters are updated via sync/atomic, so they may be
	// read without acquiring mu.
	getCalls    uint64
	setCalls    uint64
	misses      uint64
	collisions  uint64
	corruptions uint64
}
|
||||
|
||||
func (b *bucket) Init(maxBytes uint64) {
|
||||
if maxBytes == 0 {
|
||||
panic(fmt.Errorf("maxBytes cannot be zero"))
|
||||
}
|
||||
if maxBytes >= maxBucketSize {
|
||||
panic(fmt.Errorf("too big maxBytes=%d; should be smaller than %d", maxBytes, maxBucketSize))
|
||||
}
|
||||
maxChunks := (maxBytes + chunkSize - 1) / chunkSize
|
||||
b.chunks = make([][]byte, maxChunks)
|
||||
b.m = make(map[uint64]uint64)
|
||||
b.Reset()
|
||||
}
|
||||
|
||||
// Reset removes all the entries from b, returns chunk memory to the chunk
// pool and zeroes the stats counters.
func (b *bucket) Reset() {
	b.mu.Lock()
	chunks := b.chunks
	for i := range chunks {
		putChunk(chunks[i])
		chunks[i] = nil
	}
	// Delete keys in place instead of allocating a new map,
	// so the existing map storage is re-used.
	bm := b.m
	for k := range bm {
		delete(bm, k)
	}
	b.idx = 0
	// gen starts from 1, since a zero value in b.m is treated
	// as a missing entry by Get (see the `v > 0` check there).
	b.gen = 1
	atomic.StoreUint64(&b.getCalls, 0)
	atomic.StoreUint64(&b.setCalls, 0)
	atomic.StoreUint64(&b.misses, 0)
	atomic.StoreUint64(&b.collisions, 0)
	atomic.StoreUint64(&b.corruptions, 0)
	b.mu.Unlock()
}
|
||||
|
||||
// Clean removes stale entries from b.m, i.e. entries whose encoded
// (gen, idx) position has already been overwritten by the ring buffer.
// It is called periodically from Set so b.m doesn't grow without bound.
func (b *bucket) Clean() {
	b.mu.Lock()
	bGen := b.gen & ((1 << genSizeBits) - 1)
	bIdx := b.idx
	bm := b.m
	for k, v := range bm {
		gen := v >> bucketSizeBits
		idx := v & ((1 << bucketSizeBits) - 1)
		// Keep entries still inside the live window: current generation
		// before the write position, previous generation at/after it, or
		// the generation wrap-around case. This is the same validity
		// condition used in Get.
		if gen == bGen && idx < bIdx || gen+1 == bGen && idx >= bIdx || gen == maxGen && bGen == 1 && idx >= bIdx {
			continue
		}
		delete(bm, k)
	}
	b.mu.Unlock()
}
|
||||
|
||||
// UpdateStats adds b's stats to s.
func (b *bucket) UpdateStats(s *Stats) {
	// The counters are maintained atomically, so they can be read
	// without acquiring the bucket lock.
	s.GetCalls += atomic.LoadUint64(&b.getCalls)
	s.SetCalls += atomic.LoadUint64(&b.setCalls)
	s.Misses += atomic.LoadUint64(&b.misses)
	s.Collisions += atomic.LoadUint64(&b.collisions)
	s.Corruptions += atomic.LoadUint64(&b.corruptions)

	// The map size and chunk capacities are protected by b.mu.
	b.mu.RLock()
	s.EntriesCount += uint64(len(b.m))
	for _, chunk := range b.chunks {
		s.BytesSize += uint64(cap(chunk))
	}
	b.mu.RUnlock()
}
|
||||
|
||||
// Set stores the (k, v) pair under hash h in b's ring buffer.
//
// The entry is encoded as a 4-byte length header (2 bytes for len(k),
// 2 bytes for len(v)) followed by k and v. Entries that cannot be encoded
// this way are silently skipped.
func (b *bucket) Set(k, v []byte, h uint64) {
	setCalls := atomic.AddUint64(&b.setCalls, 1)
	// Periodically drop stale b.m entries, every 2^14 Set calls.
	if setCalls%(1<<14) == 0 {
		b.Clean()
	}

	if len(k) >= (1<<16) || len(v) >= (1<<16) {
		// Too big key or value - its length cannot be encoded
		// with 2 bytes (see below). Skip the entry.
		return
	}
	var kvLenBuf [4]byte
	kvLenBuf[0] = byte(uint16(len(k)) >> 8)
	kvLenBuf[1] = byte(len(k))
	kvLenBuf[2] = byte(uint16(len(v)) >> 8)
	kvLenBuf[3] = byte(len(v))
	kvLen := uint64(len(kvLenBuf) + len(k) + len(v))
	if kvLen >= chunkSize {
		// Do not store too big keys and values, since they do not
		// fit a chunk.
		return
	}

	b.mu.Lock()
	idx := b.idx
	idxNew := idx + kvLen
	chunkIdx := idx / chunkSize
	chunkIdxNew := idxNew / chunkSize
	// An entry never straddles a chunk boundary: when it wouldn't fit into
	// the current chunk, advance to the next chunk (or wrap around).
	if chunkIdxNew > chunkIdx {
		if chunkIdxNew >= uint64(len(b.chunks)) {
			// Ring buffer wrap-around: restart from chunk 0 and bump the
			// generation so entries from the previous pass are invalidated.
			idx = 0
			idxNew = kvLen
			chunkIdx = 0
			b.gen++
			// Generation 0 is never used - see the `v > 0` check in Get.
			if b.gen&((1<<genSizeBits)-1) == 0 {
				b.gen++
			}
		} else {
			idx = chunkIdxNew * chunkSize
			idxNew = idx + kvLen
			chunkIdx = chunkIdxNew
		}
		b.chunks[chunkIdx] = b.chunks[chunkIdx][:0]
	}
	chunk := b.chunks[chunkIdx]
	if chunk == nil {
		// Chunks are allocated lazily on first use.
		chunk = getChunk()
		chunk = chunk[:0]
	}
	chunk = append(chunk, kvLenBuf[:]...)
	chunk = append(chunk, k...)
	chunk = append(chunk, v...)
	b.chunks[chunkIdx] = chunk
	// Encode the byte offset together with the generation - see bucket.m.
	b.m[h] = idx | (b.gen << bucketSizeBits)
	b.idx = idxNew
	b.mu.Unlock()
}
|
||||
|
||||
// Get looks up the entry stored under hash h. When found and the stored key
// equals k, it appends the stored value to dst (only when returnDst is true).
// It returns the (possibly extended) dst and whether the entry was found.
func (b *bucket) Get(dst, k []byte, h uint64, returnDst bool) ([]byte, bool) {
	atomic.AddUint64(&b.getCalls, 1)
	found := false
	b.mu.RLock()
	v := b.m[h]
	bGen := b.gen & ((1 << genSizeBits) - 1)
	// v == 0 means the hash is missing from b.m (gen starts from 1).
	if v > 0 {
		gen := v >> bucketSizeBits
		idx := v & ((1 << bucketSizeBits) - 1)
		// The entry is live only if it hasn't been overwritten by the ring
		// buffer yet: same condition as in Clean.
		if gen == bGen && idx < b.idx || gen+1 == bGen && idx >= b.idx || gen == maxGen && bGen == 1 && idx >= b.idx {
			chunkIdx := idx / chunkSize
			if chunkIdx >= uint64(len(b.chunks)) {
				// Corrupted data during the load from file. Just skip it.
				atomic.AddUint64(&b.corruptions, 1)
				goto end
			}
			chunk := b.chunks[chunkIdx]
			idx %= chunkSize
			if idx+4 >= chunkSize {
				// Corrupted data during the load from file. Just skip it.
				atomic.AddUint64(&b.corruptions, 1)
				goto end
			}
			// The 4-byte header holds big-endian 16-bit key and value lengths.
			kvLenBuf := chunk[idx : idx+4]
			keyLen := (uint64(kvLenBuf[0]) << 8) | uint64(kvLenBuf[1])
			valLen := (uint64(kvLenBuf[2]) << 8) | uint64(kvLenBuf[3])
			idx += 4
			if idx+keyLen+valLen >= chunkSize {
				// Corrupted data during the load from file. Just skip it.
				atomic.AddUint64(&b.corruptions, 1)
				goto end
			}
			// Compare the full key in order to rule out hash collisions.
			if string(k) == string(chunk[idx:idx+keyLen]) {
				idx += keyLen
				if returnDst {
					dst = append(dst, chunk[idx:idx+valLen]...)
				}
				found = true
			} else {
				atomic.AddUint64(&b.collisions, 1)
			}
		}
	}
end:
	b.mu.RUnlock()
	if !found {
		atomic.AddUint64(&b.misses, 1)
	}
	return dst, found
}
|
||||
|
||||
func (b *bucket) Del(h uint64) {
|
||||
b.mu.Lock()
|
||||
delete(b.m, h)
|
||||
b.mu.Unlock()
|
||||
}
|
||||
419
vendor/github.com/VictoriaMetrics/fastcache/file.go
generated
vendored
Normal file
419
vendor/github.com/VictoriaMetrics/fastcache/file.go
generated
vendored
Normal file
@@ -0,0 +1,419 @@
|
||||
package fastcache
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"runtime"
|
||||
|
||||
"github.com/golang/snappy"
|
||||
)
|
||||
|
||||
// SaveToFile atomically saves cache data to the given filePath using a single
// CPU core.
//
// SaveToFile may be called concurrently with other operations on the cache.
//
// The saved data may be loaded with LoadFromFile*.
//
// See also SaveToFileConcurrent for faster saving to file.
func (c *Cache) SaveToFile(filePath string) error {
	// Delegate to the concurrent variant with a single worker.
	return c.SaveToFileConcurrent(filePath, 1)
}
|
||||
|
||||
// SaveToFileConcurrent saves cache data to the given filePath using concurrency
// CPU cores.
//
// SaveToFileConcurrent may be called concurrently with other operations
// on the cache.
//
// The saved data may be loaded with LoadFromFile*.
//
// See also SaveToFile.
func (c *Cache) SaveToFileConcurrent(filePath string, concurrency int) error {
	// Create dir if it doesn't exist.
	dir := filepath.Dir(filePath)
	if _, err := os.Stat(dir); err != nil {
		if !os.IsNotExist(err) {
			return fmt.Errorf("cannot stat %q: %s", dir, err)
		}
		if err := os.MkdirAll(dir, 0755); err != nil {
			return fmt.Errorf("cannot create dir %q: %s", dir, err)
		}
	}

	// Save cache data into a temporary directory, then atomically rename it
	// into place below, so readers never observe a partially-written dump.
	tmpDir, err := ioutil.TempDir(dir, "fastcache.tmp.")
	if err != nil {
		return fmt.Errorf("cannot create temporary dir inside %q: %s", dir, err)
	}
	defer func() {
		// Remove the temporary dir on failure. On success tmpDir is set
		// to "" below, which disables this cleanup.
		if tmpDir != "" {
			_ = os.RemoveAll(tmpDir)
		}
	}()
	// Clamp concurrency to [1, GOMAXPROCS].
	gomaxprocs := runtime.GOMAXPROCS(-1)
	if concurrency <= 0 || concurrency > gomaxprocs {
		concurrency = gomaxprocs
	}
	if err := c.save(tmpDir, concurrency); err != nil {
		return fmt.Errorf("cannot save cache data to temporary dir %q: %s", tmpDir, err)
	}

	// Remove old filePath contents, since os.Rename may return
	// error if filePath dir exists.
	if err := os.RemoveAll(filePath); err != nil {
		return fmt.Errorf("cannot remove old contents at %q: %s", filePath, err)
	}
	if err := os.Rename(tmpDir, filePath); err != nil {
		return fmt.Errorf("cannot move temporary dir %q to %q: %s", tmpDir, filePath, err)
	}
	tmpDir = ""
	return nil
}
|
||||
|
||||
// LoadFromFile loads cache data from the given filePath.
//
// See SaveToFile* for saving cache data to file.
func LoadFromFile(filePath string) (*Cache, error) {
	// maxBytes=0 disables the capacity check in load.
	return load(filePath, 0)
}
|
||||
|
||||
// LoadFromFileOrNew tries loading cache data from the given filePath.
|
||||
//
|
||||
// The function falls back to creating new cache with the given maxBytes
|
||||
// capacity if error occurs during loading the cache from file.
|
||||
func LoadFromFileOrNew(filePath string, maxBytes int) *Cache {
|
||||
c, err := load(filePath, maxBytes)
|
||||
if err == nil {
|
||||
return c
|
||||
}
|
||||
return New(maxBytes)
|
||||
}
|
||||
|
||||
// save writes the cache metadata and all the bucket data into dir using
// workersCount concurrent workers.
func (c *Cache) save(dir string, workersCount int) error {
	if err := saveMetadata(c, dir); err != nil {
		return err
	}

	// Save buckets by workersCount concurrent workers.
	workCh := make(chan int, workersCount)
	results := make(chan error)
	for i := 0; i < workersCount; i++ {
		go func(workerNum int) {
			results <- saveBuckets(c.buckets[:], workCh, dir, workerNum)
		}(i)
	}
	// Feed workers with work
	for i := range c.buckets[:] {
		workCh <- i
	}
	close(workCh)

	// Read results.
	// Drain every worker so no goroutine leaks; keep only the first error.
	var err error
	for i := 0; i < workersCount; i++ {
		result := <-results
		if result != nil && err == nil {
			err = result
		}
	}
	return err
}
|
||||
|
||||
// load reads a cache previously written by Cache.save from the filePath
// directory and returns it.
//
// When maxBytes > 0 the per-bucket chunk count stored in the metadata file
// must match the chunk count implied by maxBytes; otherwise an error is
// returned.
func load(filePath string, maxBytes int) (*Cache, error) {
	maxBucketChunks, err := loadMetadata(filePath)
	if err != nil {
		return nil, err
	}
	if maxBytes > 0 {
		// Verify the on-disk geometry matches the requested capacity.
		maxBucketBytes := uint64((maxBytes + bucketsCount - 1) / bucketsCount)
		expectedBucketChunks := (maxBucketBytes + chunkSize - 1) / chunkSize
		if maxBucketChunks != expectedBucketChunks {
			return nil, fmt.Errorf("cache file %s contains maxBytes=%d; want %d", filePath, maxBytes, expectedBucketChunks*chunkSize*bucketsCount)
		}
	}

	// Read bucket files from filePath dir.
	d, err := os.Open(filePath)
	if err != nil {
		return nil, fmt.Errorf("cannot open %q: %s", filePath, err)
	}
	defer func() {
		_ = d.Close()
	}()
	fis, err := d.Readdir(-1)
	if err != nil {
		return nil, fmt.Errorf("cannot read files from %q: %s", filePath, err)
	}
	// Load every data.<N>.bin file concurrently; each goroutine reports its
	// result on results. NOTE(review): workers write into shared c.buckets;
	// this is race-free only while distinct data files reference distinct
	// bucket numbers, which is how saveBuckets partitions them — confirm
	// before loading files from untrusted sources.
	results := make(chan error)
	workersCount := 0
	var c Cache
	for _, fi := range fis {
		fn := fi.Name()
		if fi.IsDir() || !dataFileRegexp.MatchString(fn) {
			continue
		}
		workersCount++
		go func(dataPath string) {
			results <- loadBuckets(c.buckets[:], dataPath, maxBucketChunks)
		}(filePath + "/" + fn)
	}
	// Wait for all workers, remembering only the first error so every
	// goroutine is drained.
	err = nil
	for i := 0; i < workersCount; i++ {
		result := <-results
		if result != nil && err == nil {
			err = result
		}
	}
	if err != nil {
		return nil, err
	}
	// Initialize buckets, which could be missing due to incomplete or corrupted files in the cache.
	// It is better initializing such buckets instead of returning error, since the rest of buckets
	// contain valid data.
	for i := range c.buckets[:] {
		b := &c.buckets[i]
		if len(b.chunks) == 0 {
			b.chunks = make([][]byte, maxBucketChunks)
			b.m = make(map[uint64]uint64)
		}
	}
	return &c, nil
}
|
||||
|
||||
func saveMetadata(c *Cache, dir string) error {
|
||||
metadataPath := dir + "/metadata.bin"
|
||||
metadataFile, err := os.Create(metadataPath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot create %q: %s", metadataPath, err)
|
||||
}
|
||||
defer func() {
|
||||
_ = metadataFile.Close()
|
||||
}()
|
||||
maxBucketChunks := uint64(cap(c.buckets[0].chunks))
|
||||
if err := writeUint64(metadataFile, maxBucketChunks); err != nil {
|
||||
return fmt.Errorf("cannot write maxBucketChunks=%d to %q: %s", maxBucketChunks, metadataPath, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func loadMetadata(dir string) (uint64, error) {
|
||||
metadataPath := dir + "/metadata.bin"
|
||||
metadataFile, err := os.Open(metadataPath)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("cannot open %q: %s", metadataPath, err)
|
||||
}
|
||||
defer func() {
|
||||
_ = metadataFile.Close()
|
||||
}()
|
||||
maxBucketChunks, err := readUint64(metadataFile)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("cannot read maxBucketChunks from %q: %s", metadataPath, err)
|
||||
}
|
||||
if maxBucketChunks == 0 {
|
||||
return 0, fmt.Errorf("invalid maxBucketChunks=0 read from %q", metadataPath)
|
||||
}
|
||||
return maxBucketChunks, nil
|
||||
}
|
||||
// dataFileRegexp matches the per-worker bucket data files (data.<N>.bin)
// created by saveBuckets and scanned by load.
var dataFileRegexp = regexp.MustCompile(`^data\.\d+\.bin$`)
|
||||
// saveBuckets writes the buckets whose indexes arrive on workCh into a
// snappy-compressed per-worker file dir/data.<workerNum>.bin.
//
// Each bucket is stored as its number (uint64) followed by its serialized
// contents, so loadBuckets can restore buckets regardless of which worker
// file they landed in. Returns on the first write error or after workCh is
// closed and drained.
func saveBuckets(buckets []bucket, workCh <-chan int, dir string, workerNum int) error {
	dataPath := fmt.Sprintf("%s/data.%d.bin", dir, workerNum)
	dataFile, err := os.Create(dataPath)
	if err != nil {
		return fmt.Errorf("cannot create %q: %s", dataPath, err)
	}
	defer func() {
		_ = dataFile.Close()
	}()
	zw := snappy.NewBufferedWriter(dataFile)
	for bucketNum := range workCh {
		if err := writeUint64(zw, uint64(bucketNum)); err != nil {
			return fmt.Errorf("cannot write bucketNum=%d to %q: %s", bucketNum, dataPath, err)
		}
		if err := buckets[bucketNum].Save(zw); err != nil {
			return fmt.Errorf("cannot save bucket[%d] to %q: %s", bucketNum, dataPath, err)
		}
	}
	// Flush buffered compressed data before the deferred file close.
	if err := zw.Close(); err != nil {
		return fmt.Errorf("cannot close snappy.Writer for %q: %s", dataPath, err)
	}
	return nil
}
|
||||
|
||||
func loadBuckets(buckets []bucket, dataPath string, maxChunks uint64) error {
|
||||
dataFile, err := os.Open(dataPath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot open %q: %s", dataPath, err)
|
||||
}
|
||||
defer func() {
|
||||
_ = dataFile.Close()
|
||||
}()
|
||||
zr := snappy.NewReader(dataFile)
|
||||
for {
|
||||
bucketNum, err := readUint64(zr)
|
||||
if err == io.EOF {
|
||||
// Reached the end of file.
|
||||
return nil
|
||||
}
|
||||
if bucketNum >= uint64(len(buckets)) {
|
||||
return fmt.Errorf("unexpected bucketNum read from %q: %d; must be smaller than %d", dataPath, bucketNum, len(buckets))
|
||||
}
|
||||
if err := buckets[bucketNum].Load(zr, maxChunks); err != nil {
|
||||
return fmt.Errorf("cannot load bucket[%d] from %q: %s", bucketNum, dataPath, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Save serializes the bucket to w: b.idx, b.gen, the key->offset map b.m
// (entry count followed by little-endian key/value pairs), then the count
// of populated chunks and each chunk padded to exactly chunkSize bytes.
func (b *bucket) Save(w io.Writer) error {
	// Drop stale map entries before snapshotting.
	b.Clean()

	// Hold the read lock for the whole serialization so idx/gen/m/chunks
	// form a consistent snapshot.
	b.mu.RLock()
	defer b.mu.RUnlock()

	// Store b.idx, b.gen and b.m to w.

	bIdx := b.idx
	bGen := b.gen
	// Chunks are populated sequentially, so the first nil terminates the
	// populated prefix.
	chunksLen := 0
	for _, chunk := range b.chunks {
		if chunk == nil {
			break
		}
		chunksLen++
	}
	// Flatten the map into alternating 8-byte little-endian keys/values.
	kvs := make([]byte, 0, 2*8*len(b.m))
	var u64Buf [8]byte
	for k, v := range b.m {
		binary.LittleEndian.PutUint64(u64Buf[:], k)
		kvs = append(kvs, u64Buf[:]...)
		binary.LittleEndian.PutUint64(u64Buf[:], v)
		kvs = append(kvs, u64Buf[:]...)
	}

	if err := writeUint64(w, bIdx); err != nil {
		return fmt.Errorf("cannot write b.idx: %s", err)
	}
	if err := writeUint64(w, bGen); err != nil {
		return fmt.Errorf("cannot write b.gen: %s", err)
	}
	// The stored value is the number of map entries, not the byte length.
	if err := writeUint64(w, uint64(len(kvs))/2/8); err != nil {
		return fmt.Errorf("cannot write len(b.m): %s", err)
	}
	if _, err := w.Write(kvs); err != nil {
		return fmt.Errorf("cannot write b.m: %s", err)
	}

	// Store b.chunks to w.
	if err := writeUint64(w, uint64(chunksLen)); err != nil {
		return fmt.Errorf("cannot write len(b.chunks): %s", err)
	}
	for chunkIdx := 0; chunkIdx < chunksLen; chunkIdx++ {
		// Write the full chunkSize bytes even for the partially filled
		// current chunk; Load re-derives the live length from b.idx.
		chunk := b.chunks[chunkIdx][:chunkSize]
		if _, err := w.Write(chunk); err != nil {
			return fmt.Errorf("cannot write b.chunks[%d]: %s", chunkIdx, err)
		}
	}

	return nil
}
|
||||
|
||||
// Load restores the bucket from r (the format produced by Save), replacing
// any existing contents. maxChunks caps the number of chunks the restored
// bucket may hold and must be non-zero.
func (b *bucket) Load(r io.Reader, maxChunks uint64) error {
	if maxChunks == 0 {
		return fmt.Errorf("the number of chunks per bucket cannot be zero")
	}
	bIdx, err := readUint64(r)
	if err != nil {
		return fmt.Errorf("cannot read b.idx: %s", err)
	}
	bGen, err := readUint64(r)
	if err != nil {
		return fmt.Errorf("cannot read b.gen: %s", err)
	}
	kvsLen, err := readUint64(r)
	if err != nil {
		return fmt.Errorf("cannot read len(b.m): %s", err)
	}
	// Stored as entry count; convert to bytes (8-byte key + 8-byte value
	// per entry).
	kvsLen *= 2 * 8
	kvs := make([]byte, kvsLen)
	if _, err := io.ReadFull(r, kvs); err != nil {
		return fmt.Errorf("cannot read b.m: %s", err)
	}
	m := make(map[uint64]uint64, kvsLen/2/8)
	for len(kvs) > 0 {
		k := binary.LittleEndian.Uint64(kvs)
		kvs = kvs[8:]
		v := binary.LittleEndian.Uint64(kvs)
		kvs = kvs[8:]
		m[k] = v
	}

	// Validate the requested capacity against the global bucket limit.
	maxBytes := maxChunks * chunkSize
	if maxBytes >= maxBucketSize {
		return fmt.Errorf("too big maxBytes=%d; should be smaller than %d", maxBytes, maxBucketSize)
	}
	chunks := make([][]byte, maxChunks)
	chunksLen, err := readUint64(r)
	if err != nil {
		return fmt.Errorf("cannot read len(b.chunks): %s", err)
	}
	if chunksLen > uint64(maxChunks) {
		return fmt.Errorf("chunksLen=%d cannot exceed maxChunks=%d", chunksLen, maxChunks)
	}
	// The write offset must point inside the populated chunk range.
	currChunkIdx := bIdx / chunkSize
	if currChunkIdx > 0 && currChunkIdx >= chunksLen {
		return fmt.Errorf("too big bIdx=%d; should be smaller than %d", bIdx, chunksLen*chunkSize)
	}
	for chunkIdx := uint64(0); chunkIdx < chunksLen; chunkIdx++ {
		chunk := getChunk()
		chunks[chunkIdx] = chunk
		if _, err := io.ReadFull(r, chunk); err != nil {
			// Free up allocated chunks before returning the error.
			for _, chunk := range chunks {
				if chunk != nil {
					putChunk(chunk)
				}
			}
			return fmt.Errorf("cannot read b.chunks[%d]: %s", chunkIdx, err)
		}
	}
	// Adjust len for the chunk pointed by currChunkIdx.
	if chunksLen > 0 {
		chunkLen := bIdx % chunkSize
		chunks[currChunkIdx] = chunks[currChunkIdx][:chunkLen]
	}

	// Swap in the restored state under the bucket lock, recycling the
	// previously held chunks.
	b.mu.Lock()
	for _, chunk := range b.chunks {
		putChunk(chunk)
	}
	b.chunks = chunks
	b.m = m
	b.idx = bIdx
	b.gen = bGen
	b.mu.Unlock()

	return nil
}
|
||||
|
||||
func writeUint64(w io.Writer, u uint64) error {
|
||||
var u64Buf [8]byte
|
||||
binary.LittleEndian.PutUint64(u64Buf[:], u)
|
||||
_, err := w.Write(u64Buf[:])
|
||||
return err
|
||||
}
|
||||
|
||||
func readUint64(r io.Reader) (uint64, error) {
|
||||
var u64Buf [8]byte
|
||||
if _, err := io.ReadFull(r, u64Buf[:]); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
u := binary.LittleEndian.Uint64(u64Buf[:])
|
||||
return u, nil
|
||||
}
|
||||
11
vendor/github.com/VictoriaMetrics/fastcache/malloc_heap.go
generated
vendored
Normal file
11
vendor/github.com/VictoriaMetrics/fastcache/malloc_heap.go
generated
vendored
Normal file
@@ -0,0 +1,11 @@
|
||||
// +build appengine windows
|
||||
|
||||
package fastcache
|
||||
|
||||
// getChunk returns a fresh chunkSize-byte chunk allocated on the Go heap.
// This is the fallback allocator for platforms without mmap support
// (see the appengine/windows build tag on this file).
func getChunk() []byte {
	return make([]byte, chunkSize)
}
|
||||
|
||||
// putChunk releases a chunk obtained from getChunk. Heap-allocated chunks
// are reclaimed by the garbage collector, so nothing needs to happen here.
func putChunk(chunk []byte) {
	// No-op.
}
|
||||
53
vendor/github.com/VictoriaMetrics/fastcache/malloc_mmap.go
generated
vendored
Normal file
53
vendor/github.com/VictoriaMetrics/fastcache/malloc_mmap.go
generated
vendored
Normal file
@@ -0,0 +1,53 @@
|
||||
// +build !appengine,!windows
|
||||
|
||||
package fastcache
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync"
|
||||
"unsafe"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
// chunksPerAlloc is the number of chunkSize chunks reserved per mmap call.
const chunksPerAlloc = 1024

var (
	// freeChunks holds currently unused chunks carved out of mmap'ed
	// regions; getChunk pops from it and putChunk pushes onto it.
	freeChunks []*[chunkSize]byte
	// freeChunksLock serializes all access to freeChunks.
	freeChunksLock sync.Mutex
)
|
||||
// getChunk returns a chunkSize-byte chunk backed by anonymous mmap'ed
// memory, refilling the free list with chunksPerAlloc chunks when it is
// empty. Panics if the mmap call fails.
func getChunk() []byte {
	freeChunksLock.Lock()
	if len(freeChunks) == 0 {
		// Allocate offheap memory, so GOGC won't take into account cache size.
		// This should reduce free memory waste.
		data, err := unix.Mmap(-1, 0, chunkSize*chunksPerAlloc, unix.PROT_READ|unix.PROT_WRITE, unix.MAP_ANON|unix.MAP_PRIVATE)
		if err != nil {
			panic(fmt.Errorf("cannot allocate %d bytes via mmap: %s", chunkSize*chunksPerAlloc, err))
		}
		// Carve the mapping into fixed-size pieces and stash their
		// array pointers on the free list.
		for len(data) > 0 {
			p := (*[chunkSize]byte)(unsafe.Pointer(&data[0]))
			freeChunks = append(freeChunks, p)
			data = data[chunkSize:]
		}
	}
	// Pop the last free chunk; nil out the slot so the slice does not keep
	// a pointer to the handed-out chunk.
	n := len(freeChunks) - 1
	p := freeChunks[n]
	freeChunks[n] = nil
	freeChunks = freeChunks[:n]
	freeChunksLock.Unlock()
	return p[:]
}
|
||||
|
||||
// putChunk returns a chunk obtained from getChunk to the free list for
// reuse. Passing nil is a safe no-op.
func putChunk(chunk []byte) {
	if chunk == nil {
		return
	}
	// Restore the full-capacity view (callers may hold a shortened slice)
	// before recovering the backing array pointer.
	chunk = chunk[:chunkSize]
	p := (*[chunkSize]byte)(unsafe.Pointer(&chunk[0]))

	freeChunksLock.Lock()
	freeChunks = append(freeChunks, p)
	freeChunksLock.Unlock()
}
|
||||
Reference in New Issue
Block a user