Package cache — API reference for the cache package.
Imports (23)
context
STD
errors
STD
time
PKG
github.com/redis/go-redis/v9
INT
github.com/mirkobrombin/go-warp/v1/errors
STD
reflect
STD
testing
PKG
github.com/alicebob/miniredis/v2
STD
bytes
STD
encoding/gob
STD
encoding/json
STD
log/slog
PKG
github.com/dgraph-io/ristretto
STD
sync/atomic
STD
strconv
STD
container/list
STD
sync
STD
unsafe
PKG
github.com/prometheus/client_golang/prometheus
PKG
go.opentelemetry.io/otel
PKG
go.opentelemetry.io/otel/attribute
PKG
go.opentelemetry.io/otel/trace
STD
fmt
RedisCache
RedisCache implements Cache using a Redis backend.
type RedisCache struct
Fields
| Name | Type | Description |
|---|---|---|
| client | *redis.Client | |
| codec | Codec |
Uses
NewRedis
NewRedis returns a new RedisCache using the provided Redis client.
If codec is nil, JSONCodec is used by default.
Parameters
Returns
func NewRedis[T any](client *redis.Client, codec Codec) *RedisCache[T]
{
if codec == nil {
codec = JSONCodec{}
}
return &RedisCache[T]{client: client, codec: codec}
}
Uses
newRedisCache
newRedisCache returns a Redis-backed cache and context for testing.
It also registers cleanup to flush data, close the client and stop the
underlying miniredis server.
Parameters
Returns
func newRedisCache[T any](t *testing.T) (*RedisCache[T], context.Context)
{
t.Helper()
mr, err := miniredis.Run()
if err != nil {
t.Fatalf("miniredis run: %v", err)
}
client := redis.NewClient(&redis.Options{Addr: mr.Addr()})
ctx := context.Background()
t.Cleanup(func() {
_ = client.FlushDB(ctx).Err()
_ = client.Close()
mr.Close()
})
return NewRedis[T](client, nil), ctx
}
TestRedisCacheGetSetInvalidate
Parameters
func TestRedisCacheGetSetInvalidate(t *testing.T)
{
c, ctx := newRedisCache[string](t)
if err := c.Set(ctx, "foo", "bar", time.Minute); err != nil {
t.Fatalf("Set: %v", err)
}
if v, ok, err := c.Get(ctx, "foo"); err != nil || !ok || v != "bar" {
t.Fatalf("Get: expected bar, got %v err %v", v, err)
}
if err := c.Invalidate(ctx, "foo"); err != nil {
t.Fatalf("Invalidate: %v", err)
}
if _, ok, err := c.Get(ctx, "foo"); ok || err != nil {
t.Fatalf("expected miss after invalidate")
}
}
TestRedisCacheComplexStruct
Parameters
func TestRedisCacheComplexStruct(t *testing.T)
{
type complex struct {
Name string
Age int
Tags []string
}
c, ctx := newRedisCache[complex](t)
expected := complex{Name: "Alice", Age: 30, Tags: []string{"go", "redis"}}
if err := c.Set(ctx, "user:1", expected, time.Minute); err != nil {
t.Fatalf("Set: %v", err)
}
got, ok, err := c.Get(ctx, "user:1")
if err != nil || !ok {
t.Fatalf("expected value, got miss err %v", err)
}
if !reflect.DeepEqual(got, expected) {
t.Fatalf("expected %+v, got %+v", expected, got)
}
}
TestRedisCacheGetErrors
Parameters
func TestRedisCacheGetErrors(t *testing.T)
{
t.Run("client error", func(t *testing.T) {
c, ctx := newRedisCache[string](t)
_ = c.client.Close()
if _, _, err := c.Get(ctx, "foo"); !errors.Is(err, warperrors.ErrConnectionClosed) {
t.Fatalf("expected connection closed error, got %v", err)
}
})
t.Run("unmarshal error", func(t *testing.T) {
c, ctx := newRedisCache[string](t)
// store invalid JSON to trigger unmarshal error
if err := c.client.Set(ctx, "foo", "{invalid", 0).Err(); err != nil {
t.Fatalf("setup: %v", err)
}
if _, _, err := c.Get(ctx, "foo"); err == nil {
t.Fatalf("expected unmarshal error")
}
})
t.Run("timeout", func(t *testing.T) {
c, ctx := newRedisCache[string](t)
tCtx, cancel := context.WithTimeout(ctx, time.Nanosecond)
defer cancel()
time.Sleep(time.Millisecond)
if _, _, err := c.Get(tCtx, "foo"); !errors.Is(err, warperrors.ErrTimeout) {
t.Fatalf("expected timeout error, got %v", err)
}
})
}
JSONCodec
JSONCodec implements Codec using encoding/json.
type JSONCodec struct
GobCodec
GobCodec implements Codec using encoding/gob.
type GobCodec struct
Methods
Parameters
Returns
func (GobCodec) Marshal(v any) ([]byte, error)
{
var b bytes.Buffer
enc := gob.NewEncoder(&b)
if err := enc.Encode(v); err != nil {
return nil, err
}
return b.Bytes(), nil
}
Parameters
Returns
func (GobCodec) Unmarshal(data []byte, v any) error
{
b := bytes.NewBuffer(data)
dec := gob.NewDecoder(b)
return dec.Decode(v)
}
ByteCodec
ByteCodec implements Codec for raw byte slices (Zero-Allocation/Zero-Copy friendly).
It fails if the value is not []byte.
type ByteCodec struct
Methods
Parameters
Returns
func (ByteCodec) Marshal(v any) ([]byte, error)
{
if b, ok := v.([]byte); ok {
return b, nil
}
return nil, stdErrors.New("ByteCodec: value is not []byte")
}
Parameters
Returns
func (ByteCodec) Unmarshal(data []byte, v any) error
{
if ptr, ok := v.(*[]byte); ok {
*ptr = data
return nil
}
return stdErrors.New("ByteCodec: v is not *[]byte")
}
TestByteCodec
Parameters
func TestByteCodec(t *testing.T)
{
codec := ByteCodec{}
t.Run("Marshal []byte", func(t *testing.T) {
input := []byte("hello")
data, err := codec.Marshal(input)
if err != nil {
t.Fatalf("Marshal failed: %v", err)
}
if !bytes.Equal(data, input) {
t.Fatalf("Marshal returned unexpected data: got %s, want %s", data, input)
}
})
t.Run("Marshal Invalid Type", func(t *testing.T) {
input := "string"
_, err := codec.Marshal(input)
if err == nil {
t.Fatal("Marshal expected error for non-[]byte input")
}
})
t.Run("Unmarshal *[]byte", func(t *testing.T) {
input := []byte("world")
var output []byte
if err := codec.Unmarshal(input, &output); err != nil {
t.Fatalf("Unmarshal failed: %v", err)
}
if !bytes.Equal(output, input) {
t.Fatalf("Unmarshal returned unexpected data: got %s, want %s", output, input)
}
})
t.Run("Unmarshal Invalid Type", func(t *testing.T) {
input := []byte("world")
var output string
if err := codec.Unmarshal(input, &output); err == nil {
t.Fatal("Unmarshal expected error for non-*[]byte target")
}
})
}
TestAdaptiveCacheInvalidate
Parameters
func TestAdaptiveCacheInvalidate(t *testing.T)
{
ctx := context.Background()
ac := NewAdaptive[string]()
defer ac.Close()
if err := ac.Set(ctx, "foo", "bar", time.Minute); err != nil {
t.Fatalf("unexpected error: %v", err)
}
if err := ac.Invalidate(ctx, "foo"); err != nil {
t.Fatalf("invalidate: %v", err)
}
if _, ok, _ := ac.lru.Get(ctx, "foo"); ok {
t.Fatalf("expected lru cache to remove key")
}
if _, ok, _ := ac.lfu.Get(ctx, "foo"); ok {
t.Fatalf("expected lfu cache to remove key")
}
}
TestAdaptiveCacheClose
Parameters
func TestAdaptiveCacheClose(t *testing.T)
{
ctx := context.Background()
ac := NewAdaptive[string]()
if err := ac.Set(ctx, "foo", "bar", time.Minute); err != nil {
t.Fatalf("unexpected error: %v", err)
}
ac.Close()
if c, ok := ac.lru.(*InMemoryCache[string]); ok {
c.mu.RLock()
size := len(c.items)
c.mu.RUnlock()
if size != 0 {
t.Fatalf("expected lru cache to be empty after close")
}
}
if _, ok, _ := ac.lfu.Get(ctx, "foo"); ok {
t.Fatalf("expected lfu cache to be empty after close")
}
}
Strategy
Strategy defines the cache eviction policy used by cache.New.
type Strategy int
Option
Option configures cache.New.
type Option func(*factoryConfig[T])
factoryConfig
type factoryConfig struct
Fields
| Name | Type | Description |
|---|---|---|
| strategy | Strategy |
Uses
WithStrategy
WithStrategy selects the eviction strategy to use. The default is LRUStrategy.
Parameters
Returns
func WithStrategy[T any](s Strategy) Option[T]
{
return func(cfg *factoryConfig[T]) {
cfg.strategy = s
}
}
Uses
New
New returns a Cache using the selected strategy.
By default an LRU cache is created. LFU and Adaptive strategies can be
requested via WithStrategy.
Parameters
Returns
func New[T any](opts ...Option[T]) Cache[T]
{
cfg := factoryConfig[T]{strategy: LRUStrategy}
for _, opt := range opts {
opt(&cfg)
}
switch cfg.strategy {
case LFUStrategy:
return NewLFU[T]()
case AdaptiveStrategy:
return NewAdaptive[T]()
default:
return NewLRU[T]()
}
}
LFUCache
LFUCache provides a cache with a least-frequently-used eviction policy.
It is backed by Ristretto which implements a TinyLFU algorithm.
type LFUCache struct
NewLFU
NewLFU returns a new LFUCache instance.
It reuses the Ristretto implementation under the hood.
Parameters
Returns
func NewLFU[T any](opts ...RistrettoOption) *LFUCache[T]
{
return &LFUCache[T]{NewRistretto[T](opts...)}
}
LRUCache
LRUCache is an in-memory cache using a least-recently-used eviction policy.
It is an alias of InMemoryCache for clarity when selecting cache strategies.
type LRUCache InMemoryCache[T]
NewLRU
NewLRU returns a new LRUCache instance.
Parameters
Returns
func NewLRU[T any](opts ...InMemoryOption[T]) *LRUCache[T]
{
return NewInMemory[T](opts...)
}
ResilientCache
ResilientCache wraps a Cache implementation and suppresses errors,
logging them instead of returning them. This ensures that cache failures
(e.g. Redis down) do not propagate to the application, treating them
as cache misses or successful (but skipped) writes.
type ResilientCache struct
Fields
| Name | Type | Description |
|---|---|---|
| inner | Cache[T] |
NewResilient
NewResilient creates a new ResilientCache wrapper.
Parameters
Returns
func NewResilient[T any](inner Cache[T]) *ResilientCache[T]
{
return &ResilientCache[T]{inner: inner}
}
RistrettoCache
RistrettoCache implements Cache using dgraph-io/ristretto.
type RistrettoCache struct
Fields
| Name | Type | Description |
|---|---|---|
| c | *ristretto.Cache |
RistrettoOption
RistrettoOption configures the underlying ristretto cache.
type RistrettoOption func(*ristretto.Config)
WithRistretto
WithRistretto applies a custom ristretto configuration.
If cfg is nil, defaults are used.
Parameters
Returns
func WithRistretto(cfg *ristretto.Config) RistrettoOption
{
return func(c *ristretto.Config) {
if cfg == nil {
return
}
*c = *cfg
}
}
NewRistretto
NewRistretto returns a Cache backed by ristretto.
Default configuration aims for a generous in-memory cache.
Parameters
Returns
func NewRistretto[T any](opts ...RistrettoOption) *RistrettoCache[T]
{
cfg := &ristretto.Config{
NumCounters: 1e4, // number of keys to track frequency of (10k).
MaxCost: 1 << 20, // maximum cost of cache (1MB by default).
BufferItems: 64, // number of keys per Get buffer.
}
for _, opt := range opts {
opt(cfg)
}
rc, err := ristretto.NewCache(cfg)
if err != nil {
panic(err)
}
return &RistrettoCache[T]{c: rc}
}
newRistrettoCache
newRistrettoCache returns a Ristretto-backed cache for testing.
Parameters
Returns
func newRistrettoCache[T any](t *testing.T) (*RistrettoCache[T], context.Context)
{
t.Helper()
c := NewRistretto[T]()
ctx := context.Background()
t.Cleanup(func() { c.Close() })
return c, ctx
}
TestRistrettoCacheGetSetInvalidate
Parameters
func TestRistrettoCacheGetSetInvalidate(t *testing.T)
{
c, ctx := newRistrettoCache[string](t)
if err := c.Set(ctx, "foo", "bar", time.Minute); err != nil {
t.Fatalf("Set: %v", err)
}
if v, ok, err := c.Get(ctx, "foo"); err != nil || !ok || v != "bar" {
t.Fatalf("Get: expected bar, got %v err %v", v, err)
}
if err := c.Invalidate(ctx, "foo"); err != nil {
t.Fatalf("Invalidate: %v", err)
}
if _, ok, err := c.Get(ctx, "foo"); ok || err != nil {
t.Fatalf("expected miss after invalidate")
}
}
TestRistrettoCacheExpiration
Parameters
func TestRistrettoCacheExpiration(t *testing.T)
{
c, ctx := newRistrettoCache[string](t)
if err := c.Set(ctx, "foo", "bar", 10*time.Millisecond); err != nil {
t.Fatalf("Set: %v", err)
}
time.Sleep(20 * time.Millisecond)
if _, ok, err := c.Get(ctx, "foo"); ok || err != nil {
t.Fatalf("expected key to expire")
}
}
TestRistrettoCacheContext
Parameters
func TestRistrettoCacheContext(t *testing.T)
{
c, _ := newRistrettoCache[string](t)
defer c.Close()
ctxSet, cancelSet := context.WithCancel(context.Background())
cancelSet()
if err := c.Set(ctxSet, "a", "b", time.Minute); !errors.Is(err, context.Canceled) {
t.Fatalf("expected context canceled error, got %v", err)
}
if _, ok, err := c.Get(context.Background(), "a"); ok || err != nil {
t.Fatalf("item should not be stored when context is canceled")
}
if err := c.Set(context.Background(), "foo", "bar", time.Minute); err != nil {
t.Fatalf("unexpected error: %v", err)
}
ctxGet, cancelGet := context.WithCancel(context.Background())
cancelGet()
if v, ok, err := c.Get(ctxGet, "foo"); !errors.Is(err, context.Canceled) || ok || v != "" {
t.Fatalf("expected canceled context to prevent retrieval")
}
ctxInv, cancelInv := context.WithCancel(context.Background())
cancelInv()
if err := c.Invalidate(ctxInv, "foo"); !errors.Is(err, context.Canceled) {
t.Fatalf("expected context canceled error, got %v", err)
}
if v, ok, err := c.Get(context.Background(), "foo"); err != nil || !ok || v != "bar" {
t.Fatalf("item should remain after canceled invalidate")
}
}
TTLStrategy
TTLStrategy provides dynamic TTL values based on access patterns.
Implementations may adjust and observe key usage to decide the
appropriate expiration for a cache entry.
type TTLStrategy interface
Methods
TTLOptions
TTLOptions configures TTL behavior for cache entries.
When Sliding is true, accessing a key resets its expiration using the
current TTL. The other fields enable simple dynamic adjustments of the TTL
based on the time elapsed between consecutive accesses.
If the elapsed time between two accesses is less than FreqThreshold the TTL
is increased by Increment, up to MaxTTL if set. Otherwise the TTL is
decreased by Decrement but not below MinTTL.
type TTLOptions struct
Methods
Adjust updates the TTL based on the configured options and returns the new TTL. last is the time of the previous access; now is the current time.
Parameters
Returns
func (TTLOptions) Adjust(cur time.Duration, last, now time.Time) time.Duration
{
if o.FreqThreshold <= 0 {
return cur
}
if now.Sub(last) <= o.FreqThreshold {
cur += o.Increment
if o.MaxTTL > 0 && cur > o.MaxTTL {
cur = o.MaxTTL
}
} else {
cur -= o.Decrement
if cur < o.MinTTL {
cur = o.MinTTL
}
}
return cur
}
Fields
| Name | Type | Description |
|---|---|---|
| Sliding | bool | |
| FreqThreshold | time.Duration | |
| Increment | time.Duration | |
| Decrement | time.Duration | |
| MinTTL | time.Duration | |
| MaxTTL | time.Duration | |
| FailSafeGracePeriod | time.Duration | |
| SoftTimeout | time.Duration | |
| EagerRefreshThreshold | float64 |
TTLOption
TTLOption mutates TTLOptions.
type TTLOption func(*TTLOptions)
WithSliding
WithSliding enables sliding expiration for a key.
Returns
func WithSliding() TTLOption
{ return func(o *TTLOptions) { o.Sliding = true } }
Uses
WithFailSafe
WithFailSafe enables the stale-if-error pattern.
If the backend fails, the cache will return the expired value if it is within the grace period.
Parameters
Returns
func WithFailSafe(grace time.Duration) TTLOption
{
return func(o *TTLOptions) {
o.FailSafeGracePeriod = grace
}
}
Uses
WithSoftTimeout
WithSoftTimeout sets a timeout for backend fetch operations.
If the backend takes longer than the duration, the cache returns the stale value (if available)
instead of waiting or failing.
Parameters
Returns
func WithSoftTimeout(d time.Duration) TTLOption
{
return func(o *TTLOptions) {
o.SoftTimeout = d
}
}
Uses
WithEagerRefresh
WithEagerRefresh enables proactive background refreshing of cache entries.
If an item’s remaining TTL falls below the specified threshold (e.g., 0.1 for 10%),
a refresh is triggered in the background, serving the current item immediately.
The threshold must be between 0.0 and 1.0.
Parameters
Returns
func WithEagerRefresh(threshold float64) TTLOption
{
return func(o *TTLOptions) {
if threshold >= 0.0 && threshold <= 1.0 {
o.EagerRefreshThreshold = threshold
}
}
}
Uses
WithDynamicTTL
WithDynamicTTL configures simple frequency based TTL adjustments.
freq defines the maximum duration between two accesses for the key to be
considered “hot”. When a hot access is detected the TTL is increased by inc
up to max. Cold accesses reduce the TTL by dec but not below min.
Parameters
Returns
func WithDynamicTTL(freq, inc, dec, min, max time.Duration) TTLOption
{
return func(o *TTLOptions) {
o.FreqThreshold = freq
o.Increment = inc
o.Decrement = dec
o.MinTTL = min
o.MaxTTL = max
}
}
Uses
AdaptiveCache
AdaptiveCache switches between LRU and LFU strategies based on access patterns.
It monitors hit/miss ratios and selects the strategy with more hits.
type AdaptiveCache struct
Fields
| Name | Type | Description |
|---|---|---|
| lru | Cache[T] | |
| lfu | Cache[T] | |
| useLFU | atomic.Bool | |
| hits | atomic.Uint64 | |
| misses | atomic.Uint64 | |
| switchEvery | uint64 |
NewAdaptive
NewAdaptive creates a new AdaptiveCache.
The cache starts with an LRU strategy and evaluates the hit/miss ratio
every 100 operations, switching to LFU when misses dominate and back to
LRU when hits dominate.
Returns
func NewAdaptive[T any]() *AdaptiveCache[T]
{
ac := &AdaptiveCache[T]{
lru: NewLRU[T](),
lfu: NewLFU[T](),
switchEvery: 100,
}
ac.useLFU.Store(false)
return ac
}
benchmarkSet
benchmarkSet measures Set performance for a cache.
Parameters
func benchmarkSet(b *testing.B, c Cache[string])
{
ctx := context.Background()
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
if err := c.Set(ctx, strconv.Itoa(i), "val", time.Minute); err != nil {
b.Fatalf("set failed: %v", err)
}
}
}
benchmarkGet
benchmarkGet measures Get performance for a cache.
Parameters
func benchmarkGet(b *testing.B, c Cache[string])
{
ctx := context.Background()
if err := c.Set(ctx, "key", "val", time.Minute); err != nil {
b.Fatalf("setup failed: %v", err)
}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
if _, ok, err := c.Get(ctx, "key"); err != nil || !ok {
b.Fatalf("get failed: %v ok=%v", err, ok)
}
}
}
BenchmarkInMemoryCacheSet
Parameters
func BenchmarkInMemoryCacheSet(b *testing.B)
{
c := NewInMemory[string]()
defer c.Close()
benchmarkSet(b, c)
}
BenchmarkInMemoryCacheGet
Parameters
func BenchmarkInMemoryCacheGet(b *testing.B)
{
c := NewInMemory[string]()
defer c.Close()
benchmarkGet(b, c)
}
BenchmarkRistrettoCacheSet
Parameters
func BenchmarkRistrettoCacheSet(b *testing.B)
{
c := NewRistretto[string]()
defer c.Close()
benchmarkSet(b, c)
}
BenchmarkRistrettoCacheGet
Parameters
func BenchmarkRistrettoCacheGet(b *testing.B)
{
c := NewRistretto[string]()
defer c.Close()
benchmarkGet(b, c)
}
benchRedisCache
benchRedisCache returns a RedisCache backed by an in-memory Redis server.
Returns
func benchRedisCache() (*RedisCache[string], func())
{
mr, _ := miniredis.Run()
client := redis.NewClient(&redis.Options{Addr: mr.Addr()})
cleanup := func() {
_ = client.Close()
mr.Close()
}
return NewRedis[string](client, nil), cleanup
}
BenchmarkRedisCacheSet
Parameters
func BenchmarkRedisCacheSet(b *testing.B)
{
c, cleanup := benchRedisCache()
defer cleanup()
benchmarkSet(b, c)
}
BenchmarkRedisCacheGet
Parameters
func BenchmarkRedisCacheGet(b *testing.B)
{
c, cleanup := benchRedisCache()
defer cleanup()
benchmarkGet(b, c)
}
Sizer
Cache defines the basic operations for a cache layer.
T represents the type of values stored in the cache.
type Sizer interface
Methods
Cache
Cache defines the basic operations for a cache layer.
type Cache interface
Methods
InMemoryCache
InMemoryCache is a simple in-memory cache implementation with TTL support.
type InMemoryCache struct
Fields
| Name | Type | Description |
|---|---|---|
| mu | sync.RWMutex | |
| items | map[string]item[T] | |
| order | *list.List | |
| hits | atomic.Uint64 | |
| misses | atomic.Uint64 | |
| sweepInterval | time.Duration | |
| ctx | context.Context | |
| cancel | context.CancelFunc | |
| wg | sync.WaitGroup | |
| maxEntries | int | |
| maxMemory | int64 | |
| currentMemory | atomic.Int64 | |
| hitCounter | prometheus.Counter | |
| missCounter | prometheus.Counter | |
| evictionCounter | prometheus.Counter | |
| latencyHist | prometheus.Histogram | |
| traceEnabled | bool |
item
type item struct
Fields
| Name | Type | Description |
|---|---|---|
| value | T | |
| size | int64 | |
| expiresAt | time.Time | |
| element | *list.Element |
InMemoryOption
InMemoryOption configures an InMemoryCache.
type InMemoryOption func(*InMemoryCache[T])
WithSweepInterval
WithSweepInterval sets the interval at which expired items are removed.
A zero or negative duration disables the background sweeper.
Parameters
Returns
func WithSweepInterval[T any](d time.Duration) InMemoryOption[T]
{
return func(c *InMemoryCache[T]) {
c.sweepInterval = d
}
}
WithMaxEntries
WithMaxEntries sets the maximum number of entries the cache can hold.
A non-positive value means the cache size is unbounded.
Parameters
Returns
func WithMaxEntries[T any](n int) InMemoryOption[T]
{
return func(c *InMemoryCache[T]) {
c.maxEntries = n
}
}
WithMaxMemory
WithMaxMemory sets the maximum memory in bytes the cache can hold.
A non-positive value means the memory size is unbounded.
Parameters
Returns
func WithMaxMemory[T any](bytes int64) InMemoryOption[T]
{
return func(c *InMemoryCache[T]) {
c.maxMemory = bytes
}
}
WithMetrics
WithMetrics enables Prometheus metrics collection using the provided registerer.
Parameters
Returns
func WithMetrics[T any](reg prometheus.Registerer) InMemoryOption[T]
{
return func(c *InMemoryCache[T]) {
c.hitCounter = prometheus.NewCounter(prometheus.CounterOpts{
Name: "warp_cache_hits_total",
Help: "Total number of cache hits",
})
c.missCounter = prometheus.NewCounter(prometheus.CounterOpts{
Name: "warp_cache_misses_total",
Help: "Total number of cache misses",
})
c.evictionCounter = prometheus.NewCounter(prometheus.CounterOpts{
Name: "warp_cache_evictions_total",
Help: "Total number of cache evictions",
})
c.latencyHist = prometheus.NewHistogram(prometheus.HistogramOpts{
Name: "warp_cache_latency_seconds",
Help: "Latency of cache operations",
Buckets: prometheus.DefBuckets,
})
reg.MustRegister(c.hitCounter, c.missCounter, c.evictionCounter, c.latencyHist)
}
}
NewInMemory
NewInMemory returns a new InMemoryCache instance.
An optional sweep interval can be provided using WithSweepInterval. When
enabled, a background goroutine periodically removes expired items from the
cache. The default interval is one minute.
Parameters
Returns
func NewInMemory[T any](opts ...InMemoryOption[T]) *InMemoryCache[T]
{
ctx, cancel := context.WithCancel(context.Background())
c := &InMemoryCache[T]{
items: make(map[string]item[T]),
order: list.New(),
sweepInterval: defaultSweepInterval,
ctx: ctx,
cancel: cancel,
}
for _, opt := range opts {
opt(c)
}
if c.sweepInterval > 0 {
c.wg.Add(1)
go c.sweeper()
}
return c
}
Stats
Stats reports basic metrics about cache usage.
type Stats struct
Fields
| Name | Type | Description |
|---|---|---|
| Hits | uint64 | |
| Misses | uint64 | |
| Size | int |
WithTracing
WithTracing enables OpenTelemetry tracing for cache operations.
Returns
func WithTracing[T any]() InMemoryOption[T]
{
return func(c *InMemoryCache[T]) {
c.traceEnabled = true
}
}
EstimateSize
EstimateSize estimates the size of a value in bytes.
Parameters
Returns
func EstimateSize[T any](v T) int64
{
// 1. Check if it implements Sizer
if s, ok := any(v).(Sizer); ok {
return s.Size()
}
// 2. Common types
switch val := any(v).(type) {
case string:
return int64(len(val))
case []byte:
return int64(len(val))
case int, uint, int64, uint64, float64, complex128:
return 8
case int32, uint32, float32:
return 4
case int16, uint16:
return 2
case int8, uint8, bool:
return 1
}
// 3. Last resort: unsafe.Sizeof (Shallow)
return int64(unsafe.Sizeof(v))
}
TestInMemoryCache
Parameters
func TestInMemoryCache(t *testing.T)
{
ctx := context.Background()
c := NewInMemory[string]()
defer c.Close()
if err := c.Set(ctx, "foo", "bar", time.Millisecond); err != nil {
t.Fatalf("unexpected error: %v", err)
}
if v, ok, err := c.Get(ctx, "foo"); err != nil || !ok || v != "bar" {
t.Fatalf("expected bar, got %v err %v", v, err)
}
time.Sleep(2 * time.Millisecond)
if _, ok, err := c.Get(ctx, "foo"); ok || err != nil {
t.Fatalf("expected key to expire")
}
m := c.Metrics()
if m.Hits != 1 || m.Misses != 1 {
t.Fatalf("unexpected metrics: %+v", m)
}
}
TestInMemoryCacheSweeper
Parameters
func TestInMemoryCacheSweeper(t *testing.T)
{
ctx := context.Background()
c := NewInMemory[string](WithSweepInterval[string](5 * time.Millisecond))
defer c.Close()
if err := c.Set(ctx, "foo", "bar", 5*time.Millisecond); err != nil {
t.Fatalf("unexpected error: %v", err)
}
time.Sleep(20 * time.Millisecond)
c.mu.RLock()
_, ok := c.items["foo"]
c.mu.RUnlock()
if ok {
t.Fatalf("expected key to be swept")
}
}
TestInMemoryCacheContext
Parameters
func TestInMemoryCacheContext(t *testing.T)
{
c := NewInMemory[string]()
defer c.Close()
// Set with canceled context should fail and not store the item.
ctxSet, cancelSet := context.WithCancel(context.Background())
cancelSet()
if err := c.Set(ctxSet, "a", "b", time.Minute); !errors.Is(err, context.Canceled) {
t.Fatalf("expected context canceled error, got %v", err)
}
if _, ok, err := c.Get(context.Background(), "a"); ok || err != nil {
t.Fatalf("item should not be stored when context is canceled")
}
// Prepare an item for further context tests.
if err := c.Set(context.Background(), "foo", "bar", time.Minute); err != nil {
t.Fatalf("unexpected error: %v", err)
}
// Get with canceled context should not retrieve the item.
ctxGet, cancelGet := context.WithCancel(context.Background())
cancelGet()
if v, ok, err := c.Get(ctxGet, "foo"); !errors.Is(err, context.Canceled) || ok || v != "" {
t.Fatalf("expected canceled context to prevent retrieval")
}
// Invalidate with canceled context should fail and keep the item.
ctxInv, cancelInv := context.WithCancel(context.Background())
cancelInv()
if err := c.Invalidate(ctxInv, "foo"); !errors.Is(err, context.Canceled) {
t.Fatalf("expected context canceled error, got %v", err)
}
if v, ok, err := c.Get(context.Background(), "foo"); err != nil || !ok || v != "bar" {
t.Fatalf("item should remain after canceled invalidate")
}
}
TestInMemoryCacheEviction
Parameters
func TestInMemoryCacheEviction(t *testing.T)
{
ctx := context.Background()
c := NewInMemory[string](WithMaxEntries[string](2))
defer c.Close()
if err := c.Set(ctx, "a", "1", time.Minute); err != nil {
t.Fatalf("unexpected error: %v", err)
}
if err := c.Set(ctx, "b", "2", time.Minute); err != nil {
t.Fatalf("unexpected error: %v", err)
}
// Access "a" so that "b" becomes the least recently used.
if _, ok, err := c.Get(ctx, "a"); err != nil || !ok {
t.Fatalf("expected to retrieve a: %v", err)
}
if err := c.Set(ctx, "c", "3", time.Minute); err != nil {
t.Fatalf("unexpected error: %v", err)
}
if _, ok, _ := c.Get(ctx, "b"); ok {
t.Fatalf("expected b to be evicted")
}
if _, ok, _ := c.Get(ctx, "a"); !ok {
t.Fatalf("expected a to remain in cache")
}
if _, ok, _ := c.Get(ctx, "c"); !ok {
t.Fatalf("expected c to be present")
}
}
TestInMemoryCache_MaxMemory
Parameters
func TestInMemoryCache_MaxMemory(t *testing.T)
{
ctx := context.Background()
c := NewInMemory[string](WithMaxMemory[string](20))
defer c.Close()
c.Set(ctx, "k1", "12345", time.Minute)
c.Set(ctx, "k2", "12345", time.Minute)
c.Set(ctx, "k3", "12345", time.Minute)
if c.GetCurrentMemory() != 15 {
t.Fatalf("expected 15 bytes, got %d", c.GetCurrentMemory())
}
c.Set(ctx, "k4", "1234567890", time.Minute)
if _, ok, _ := c.Get(ctx, "k1"); ok {
t.Fatalf("expected k1 to be evicted due to memory pressure")
}
if _, ok, _ := c.Get(ctx, "k4"); !ok {
t.Fatalf("expected k4 to be present")
}
mem := c.GetCurrentMemory()
if mem > 20 {
t.Fatalf("memory usage %d exceeds limit 20", mem)
}
}
TestNewStrategies
Parameters
func TestNewStrategies(t *testing.T)
{
c := New[int]()
if _, ok := c.(*InMemoryCache[int]); !ok {
t.Fatalf("expected LRU cache by default")
}
c = New[int](WithStrategy[int](LFUStrategy))
if _, ok := c.(*LFUCache[int]); !ok {
t.Fatalf("expected LFU cache")
}
}
TestAdaptiveSwitch
Parameters
func TestAdaptiveSwitch(t *testing.T)
{
ctx := context.Background()
c := New[int](WithStrategy[int](AdaptiveStrategy))
ac, ok := c.(*AdaptiveCache[int])
if !ok {
t.Fatalf("expected AdaptiveCache")
}
defer ac.Close()
for i := 0; i < 150; i++ {
key := fmt.Sprintf("k%d", i)
c.Get(ctx, key)
}
if !ac.useLFU.Load() {
t.Fatalf("expected adaptive cache to switch to LFU")
}
}
TestTTLOptionsAdjust
Parameters
func TestTTLOptionsAdjust(t *testing.T)
{
now := time.Now()
opts := TTLOptions{
FreqThreshold: 10 * time.Millisecond,
Increment: 5 * time.Millisecond,
Decrement: 3 * time.Millisecond,
MinTTL: 4 * time.Millisecond,
MaxTTL: 20 * time.Millisecond,
}
t.Run("increment", func(t *testing.T) {
ttl := opts.Adjust(10*time.Millisecond, now.Add(-5*time.Millisecond), now)
if ttl != 15*time.Millisecond {
t.Fatalf("expected 15ms, got %v", ttl)
}
})
t.Run("decrement", func(t *testing.T) {
ttl := opts.Adjust(15*time.Millisecond, now.Add(-20*time.Millisecond), now)
if ttl != 12*time.Millisecond {
t.Fatalf("expected 12ms, got %v", ttl)
}
})
t.Run("sliding", func(t *testing.T) {
opts.Sliding = true
ttl := opts.Adjust(12*time.Millisecond, now.Add(-5*time.Millisecond), now)
if ttl != 17*time.Millisecond {
t.Fatalf("expected 17ms, got %v", ttl)
}
if !opts.Sliding {
t.Fatalf("expected sliding to remain enabled")
}
})
}