cache API

cache

package

API reference for the cache package.

S
struct

RedisCache

RedisCache implements Cache using a Redis backend.

v1/cache/redis.go:16-19
type RedisCache struct

Fields

Name Type Description
client *redis.Client
codec Codec
F
function

NewRedis

NewRedis returns a new RedisCache using the provided Redis client.
If codec is nil, JSONCodec is used by default.

Parameters

client
codec

Returns

*RedisCache[T]
v1/cache/redis.go:23-28
func NewRedis[T any](client *redis.Client, codec Codec) *RedisCache[T]

{
	if codec == nil {
		codec = JSONCodec{}
	}
	return &RedisCache[T]{client: client, codec: codec}
}
F
function

newRedisCache

newRedisCache returns a Redis-backed cache and context for testing.
It also registers cleanup to flush data, close the client and stop the
underlying miniredis server.

Parameters

Returns

*RedisCache[T]
v1/cache/redis_test.go:19-36
func newRedisCache[T any](t *testing.T) (*RedisCache[T], context.Context)

{
	t.Helper()
	mr, err := miniredis.Run()
	if err != nil {
		t.Fatalf("miniredis run: %v", err)
	}

	client := redis.NewClient(&redis.Options{Addr: mr.Addr()})
	ctx := context.Background()

	t.Cleanup(func() {
		_ = client.FlushDB(ctx).Err()
		_ = client.Close()
		mr.Close()
	})

	return NewRedis[T](client, nil), ctx
}
F
function

TestRedisCacheGetSetInvalidate

Parameters

v1/cache/redis_test.go:38-56
func TestRedisCacheGetSetInvalidate(t *testing.T)

{
	c, ctx := newRedisCache[string](t)

	if err := c.Set(ctx, "foo", "bar", time.Minute); err != nil {
		t.Fatalf("Set: %v", err)
	}

	if v, ok, err := c.Get(ctx, "foo"); err != nil || !ok || v != "bar" {
		t.Fatalf("Get: expected bar, got %v err %v", v, err)
	}

	if err := c.Invalidate(ctx, "foo"); err != nil {
		t.Fatalf("Invalidate: %v", err)
	}

	if _, ok, err := c.Get(ctx, "foo"); ok || err != nil {
		t.Fatalf("expected miss after invalidate")
	}
}
F
function

TestRedisCacheComplexStruct

Parameters

v1/cache/redis_test.go:58-79
func TestRedisCacheComplexStruct(t *testing.T)

{
	type complex struct {
		Name string
		Age  int
		Tags []string
	}

	c, ctx := newRedisCache[complex](t)

	expected := complex{Name: "Alice", Age: 30, Tags: []string{"go", "redis"}}
	if err := c.Set(ctx, "user:1", expected, time.Minute); err != nil {
		t.Fatalf("Set: %v", err)
	}

	got, ok, err := c.Get(ctx, "user:1")
	if err != nil || !ok {
		t.Fatalf("expected value, got miss err %v", err)
	}
	if !reflect.DeepEqual(got, expected) {
		t.Fatalf("expected %+v, got %+v", expected, got)
	}
}
F
function

TestRedisCacheGetErrors

Parameters

v1/cache/redis_test.go:81-110
func TestRedisCacheGetErrors(t *testing.T)

{
	t.Run("client error", func(t *testing.T) {
		c, ctx := newRedisCache[string](t)
		_ = c.client.Close()
		if _, _, err := c.Get(ctx, "foo"); !errors.Is(err, warperrors.ErrConnectionClosed) {
			t.Fatalf("expected connection closed error, got %v", err)
		}
	})

	t.Run("unmarshal error", func(t *testing.T) {
		c, ctx := newRedisCache[string](t)
		// store invalid JSON to trigger unmarshal error
		if err := c.client.Set(ctx, "foo", "{invalid", 0).Err(); err != nil {
			t.Fatalf("setup: %v", err)
		}
		if _, _, err := c.Get(ctx, "foo"); err == nil {
			t.Fatalf("expected unmarshal error")
		}
	})

	t.Run("timeout", func(t *testing.T) {
		c, ctx := newRedisCache[string](t)
		tCtx, cancel := context.WithTimeout(ctx, time.Nanosecond)
		defer cancel()
		time.Sleep(time.Millisecond)
		if _, _, err := c.Get(tCtx, "foo"); !errors.Is(err, warperrors.ErrTimeout) {
			t.Fatalf("expected timeout error, got %v", err)
		}
	})
}
I
interface

Codec

Codec defines methods for encoding and decoding values.

v1/cache/codec.go:11-14
type Codec interface

Methods

Marshal
Method

Parameters

v any

Returns

[]byte
error
func Marshal(...)
Unmarshal
Method

Parameters

data []byte
v any

Returns

error
func Unmarshal(...)
S
struct
Implements: Codec

JSONCodec

JSONCodec implements Codec using encoding/json.

v1/cache/codec.go:17-17
type JSONCodec struct

Methods

Marshal
Method

Parameters

v any

Returns

[]byte
error
func (JSONCodec) Marshal(v any) ([]byte, error)
{ return json.Marshal(v) }
Unmarshal
Method

Parameters

data []byte
v any

Returns

error
func (JSONCodec) Unmarshal(data []byte, v any) error
{ return json.Unmarshal(data, v) }
S
struct
Implements: Codec

GobCodec

GobCodec implements Codec using encoding/gob.

v1/cache/codec.go:23-23
type GobCodec struct

Methods

Marshal
Method

Parameters

v any

Returns

[]byte
error
func (GobCodec) Marshal(v any) ([]byte, error)
{
	var b bytes.Buffer
	enc := gob.NewEncoder(&b)
	if err := enc.Encode(v); err != nil {
		return nil, err
	}
	return b.Bytes(), nil
}
Unmarshal
Method

Parameters

data []byte
v any

Returns

error
func (GobCodec) Unmarshal(data []byte, v any) error
{
	b := bytes.NewBuffer(data)
	dec := gob.NewDecoder(b)
	return dec.Decode(v)
}
S
struct
Implements: Codec

ByteCodec

ByteCodec implements Codec for raw byte slices (Zero-Allocation/Zero-Copy friendly).
It fails if the value is not []byte.

v1/cache/codec.go:42-42
type ByteCodec struct

Methods

Marshal
Method

Parameters

v any

Returns

[]byte
error
func (ByteCodec) Marshal(v any) ([]byte, error)
{
	if b, ok := v.([]byte); ok {
		return b, nil
	}
	return nil, stdErrors.New("ByteCodec: value is not []byte")
}
Unmarshal
Method

Parameters

data []byte
v any

Returns

error
func (ByteCodec) Unmarshal(data []byte, v any) error
{
	if ptr, ok := v.(*[]byte); ok {
		*ptr = data
		return nil
	}
	return stdErrors.New("ByteCodec: v is not *[]byte")
}
F
function

TestByteCodec

Parameters

v1/cache/codec_test.go:8-48
func TestByteCodec(t *testing.T)

{
	codec := ByteCodec{}

	t.Run("Marshal []byte", func(t *testing.T) {
		input := []byte("hello")
		data, err := codec.Marshal(input)
		if err != nil {
			t.Fatalf("Marshal failed: %v", err)
		}
		if !bytes.Equal(data, input) {
			t.Fatalf("Marshal returned unexpected data: got %s, want %s", data, input)
		}
	})

	t.Run("Marshal Invalid Type", func(t *testing.T) {
		input := "string"
		_, err := codec.Marshal(input)
		if err == nil {
			t.Fatal("Marshal expected error for non-[]byte input")
		}
	})

	t.Run("Unmarshal *[]byte", func(t *testing.T) {
		input := []byte("world")
		var output []byte
		if err := codec.Unmarshal(input, &output); err != nil {
			t.Fatalf("Unmarshal failed: %v", err)
		}
		if !bytes.Equal(output, input) {
			t.Fatalf("Unmarshal returned unexpected data: got %s, want %s", output, input)
		}
	})

	t.Run("Unmarshal Invalid Type", func(t *testing.T) {
		input := []byte("world")
		var output string
		if err := codec.Unmarshal(input, &output); err == nil {
			t.Fatal("Unmarshal expected error for non-*[]byte target")
		}
	})
}
F
function

TestAdaptiveCacheInvalidate

Parameters

v1/cache/adaptive_cache_test.go:9-26
func TestAdaptiveCacheInvalidate(t *testing.T)

{
	ctx := context.Background()
	ac := NewAdaptive[string]()
	defer ac.Close()

	if err := ac.Set(ctx, "foo", "bar", time.Minute); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if err := ac.Invalidate(ctx, "foo"); err != nil {
		t.Fatalf("invalidate: %v", err)
	}
	if _, ok, _ := ac.lru.Get(ctx, "foo"); ok {
		t.Fatalf("expected lru cache to remove key")
	}
	if _, ok, _ := ac.lfu.Get(ctx, "foo"); ok {
		t.Fatalf("expected lfu cache to remove key")
	}
}
F
function

TestAdaptiveCacheClose

Parameters

v1/cache/adaptive_cache_test.go:28-46
func TestAdaptiveCacheClose(t *testing.T)

{
	ctx := context.Background()
	ac := NewAdaptive[string]()
	if err := ac.Set(ctx, "foo", "bar", time.Minute); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	ac.Close()
	if c, ok := ac.lru.(*InMemoryCache[string]); ok {
		c.mu.RLock()
		size := len(c.items)
		c.mu.RUnlock()
		if size != 0 {
			t.Fatalf("expected lru cache to be empty after close")
		}
	}
	if _, ok, _ := ac.lfu.Get(ctx, "foo"); ok {
		t.Fatalf("expected lfu cache to be empty after close")
	}
}
T
type

Strategy

Strategy defines the cache eviction policy used by cache.New.

v1/cache/factory.go:4-4
type Strategy int
T
type

Option

Option configures cache.New.

v1/cache/factory.go:16-16
type Option[T any] func(*factoryConfig[T])
S
struct

factoryConfig

v1/cache/factory.go:18-20
type factoryConfig struct

Fields

Name Type Description
strategy Strategy
F
function

WithStrategy

WithStrategy selects the eviction strategy to use. The default is LRUStrategy.

Parameters

Returns

Option[T]
v1/cache/factory.go:23-27
func WithStrategy[T any](s Strategy) Option[T]

{
	return func(cfg *factoryConfig[T]) {
		cfg.strategy = s
	}
}
F
function

New

New returns a Cache using the selected strategy.

By default an LRU cache is created. LFU and Adaptive strategies can be
requested via WithStrategy.

Parameters

opts
...Option[T]

Returns

Cache[T]
v1/cache/factory.go:33-46
func New[T any](opts ...Option[T]) Cache[T]

{
	cfg := factoryConfig[T]{strategy: LRUStrategy}
	for _, opt := range opts {
		opt(&cfg)
	}
	switch cfg.strategy {
	case LFUStrategy:
		return NewLFU[T]()
	case AdaptiveStrategy:
		return NewAdaptive[T]()
	default:
		return NewLRU[T]()
	}
}
S
struct

LFUCache

LFUCache provides a cache with a least-frequently-used eviction policy.

It is backed by Ristretto which implements a TinyLFU algorithm.

v1/cache/lfu.go:6-8
type LFUCache struct
F
function

NewLFU

NewLFU returns a new LFUCache instance.

It reuses the Ristretto implementation under the hood.

Parameters

opts
...RistrettoOption

Returns

*LFUCache[T]
v1/cache/lfu.go:13-15
func NewLFU[T any](opts ...RistrettoOption) *LFUCache[T]

{
	return &LFUCache[T]{NewRistretto[T](opts...)}
}
T
type

LRUCache

LRUCache is an in-memory cache using a least-recently-used eviction policy.

It is an alias of InMemoryCache for clarity when selecting cache strategies.

v1/cache/lru.go:6-6
type LRUCache[T any] = InMemoryCache[T]
F
function

NewLRU

NewLRU returns a new LRUCache instance.

Parameters

opts
...InMemoryOption[T]

Returns

*LRUCache[T]
v1/cache/lru.go:9-11
func NewLRU[T any](opts ...InMemoryOption[T]) *LRUCache[T]

{
	return NewInMemory[T](opts...)
}
S
struct

ResilientCache

ResilientCache wraps a Cache implementation and suppresses errors,
logging them instead of returning them. This ensures that cache failures
(e.g. Redis down) do not propagate to the application, treating them
as cache misses or successful (but skipped) writes.

v1/cache/resilient.go:13-15
type ResilientCache struct

Fields

Name Type Description
inner Cache[T]
F
function

NewResilient

NewResilient creates a new ResilientCache wrapper.

Parameters

inner
Cache[T]

Returns

*ResilientCache[T]
v1/cache/resilient.go:18-20
func NewResilient[T any](inner Cache[T]) *ResilientCache[T]

{
	return &ResilientCache[T]{inner: inner}
}
S
struct

RistrettoCache

RistrettoCache implements Cache using dgraph-io/ristretto.

v1/cache/ristretto.go:11-13
type RistrettoCache struct

Fields

Name Type Description
c *ristretto.Cache
T
type

RistrettoOption

RistrettoOption configures the underlying ristretto cache.

v1/cache/ristretto.go:16-16
type RistrettoOption func(*ristretto.Config)
F
function

WithRistretto

WithRistretto applies a custom ristretto configuration.

If cfg is nil, defaults are used.

Parameters

Returns

v1/cache/ristretto.go:21-28
func WithRistretto(cfg *ristretto.Config) RistrettoOption

{
	return func(c *ristretto.Config) {
		if cfg == nil {
			return
		}
		*c = *cfg
	}
}
F
function

NewRistretto

NewRistretto returns a Cache backed by ristretto.

Default configuration aims for a generous in-memory cache.

Parameters

opts
...RistrettoOption

Returns

*RistrettoCache[T]
v1/cache/ristretto.go:33-47
func NewRistretto[T any](opts ...RistrettoOption) *RistrettoCache[T]

{
	cfg := &ristretto.Config{
		NumCounters: 1e4,     // number of keys to track frequency of (10k).
		MaxCost:     1 << 20, // maximum cost of cache (1MB by default).
		BufferItems: 64,      // number of keys per Get buffer.
	}
	for _, opt := range opts {
		opt(cfg)
	}
	rc, err := ristretto.NewCache(cfg)
	if err != nil {
		panic(err)
	}
	return &RistrettoCache[T]{c: rc}
}
F
function

newRistrettoCache

newRistrettoCache returns a Ristretto-backed cache for testing.

Parameters

Returns

*RistrettoCache[T]
v1/cache/ristretto_test.go:11-17
func newRistrettoCache[T any](t *testing.T) (*RistrettoCache[T], context.Context)

{
	t.Helper()
	c := NewRistretto[T]()
	ctx := context.Background()
	t.Cleanup(func() { c.Close() })
	return c, ctx
}
F
function

TestRistrettoCacheGetSetInvalidate

Parameters

v1/cache/ristretto_test.go:19-34
func TestRistrettoCacheGetSetInvalidate(t *testing.T)

{
	c, ctx := newRistrettoCache[string](t)

	if err := c.Set(ctx, "foo", "bar", time.Minute); err != nil {
		t.Fatalf("Set: %v", err)
	}
	if v, ok, err := c.Get(ctx, "foo"); err != nil || !ok || v != "bar" {
		t.Fatalf("Get: expected bar, got %v err %v", v, err)
	}
	if err := c.Invalidate(ctx, "foo"); err != nil {
		t.Fatalf("Invalidate: %v", err)
	}
	if _, ok, err := c.Get(ctx, "foo"); ok || err != nil {
		t.Fatalf("expected miss after invalidate")
	}
}
F
function

TestRistrettoCacheExpiration

Parameters

v1/cache/ristretto_test.go:36-46
func TestRistrettoCacheExpiration(t *testing.T)

{
	c, ctx := newRistrettoCache[string](t)

	if err := c.Set(ctx, "foo", "bar", 10*time.Millisecond); err != nil {
		t.Fatalf("Set: %v", err)
	}
	time.Sleep(20 * time.Millisecond)
	if _, ok, err := c.Get(ctx, "foo"); ok || err != nil {
		t.Fatalf("expected key to expire")
	}
}
F
function

TestRistrettoCacheContext

Parameters

v1/cache/ristretto_test.go:48-79
func TestRistrettoCacheContext(t *testing.T)

{
	c, _ := newRistrettoCache[string](t)
	defer c.Close()

	ctxSet, cancelSet := context.WithCancel(context.Background())
	cancelSet()
	if err := c.Set(ctxSet, "a", "b", time.Minute); !errors.Is(err, context.Canceled) {
		t.Fatalf("expected context canceled error, got %v", err)
	}
	if _, ok, err := c.Get(context.Background(), "a"); ok || err != nil {
		t.Fatalf("item should not be stored when context is canceled")
	}

	if err := c.Set(context.Background(), "foo", "bar", time.Minute); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	ctxGet, cancelGet := context.WithCancel(context.Background())
	cancelGet()
	if v, ok, err := c.Get(ctxGet, "foo"); !errors.Is(err, context.Canceled) || ok || v != "" {
		t.Fatalf("expected canceled context to prevent retrieval")
	}

	ctxInv, cancelInv := context.WithCancel(context.Background())
	cancelInv()
	if err := c.Invalidate(ctxInv, "foo"); !errors.Is(err, context.Canceled) {
		t.Fatalf("expected context canceled error, got %v", err)
	}
	if v, ok, err := c.Get(context.Background(), "foo"); err != nil || !ok || v != "bar" {
		t.Fatalf("item should remain after canceled invalidate")
	}
}
I
interface

TTLStrategy

TTLStrategy provides dynamic TTL values based on access patterns.
Implementations may adjust and observe key usage to decide the
appropriate expiration for a cache entry.

v1/cache/ttl.go:8-13
type TTLStrategy interface

Methods

Record
Method

Parameters

key string
func Record(...)
TTL
Method

Parameters

key string

Returns

func TTL(...)
S
struct

TTLOptions

TTLOptions configures TTL behavior for cache entries.

When Sliding is true, accessing a key resets its expiration using the
current TTL. The other fields enable simple dynamic adjustments of the TTL
based on the time elapsed between consecutive accesses.

If the elapsed time between two accesses is less than FreqThreshold the TTL
is increased by Increment, up to MaxTTL if set. Otherwise the TTL is
decreased by Decrement but not below MinTTL.

v1/cache/ttl.go:24-34
type TTLOptions struct

Methods

Adjust
Method

Adjust updates the TTL based on the configured options and returns the new TTL. last is the time of the previous access; now is the current time.

Parameters

Returns

func (TTLOptions) Adjust(cur time.Duration, last, now time.Time) time.Duration
{
	if o.FreqThreshold <= 0 {
		return cur
	}
	if now.Sub(last) <= o.FreqThreshold {
		cur += o.Increment
		if o.MaxTTL > 0 && cur > o.MaxTTL {
			cur = o.MaxTTL
		}
	} else {
		cur -= o.Decrement
		if cur < o.MinTTL {
			cur = o.MinTTL
		}
	}
	return cur
}

Fields

Name Type Description
Sliding bool
FreqThreshold time.Duration
Increment time.Duration
Decrement time.Duration
MinTTL time.Duration
MaxTTL time.Duration
FailSafeGracePeriod time.Duration
SoftTimeout time.Duration
EagerRefreshThreshold float64
T
type

TTLOption

TTLOption mutates TTLOptions.

v1/cache/ttl.go:37-37
type TTLOption func(*TTLOptions)
F
function

WithSliding

WithSliding enables sliding expiration for a key.

Returns

v1/cache/ttl.go:40-40
func WithSliding() TTLOption

{ return func(o *TTLOptions) { o.Sliding = true } }
F
function

WithFailSafe

WithFailSafe enables the stale-if-error pattern.
If the backend fails, the cache will return the expired value if it is within the grace period.

Parameters

Returns

v1/cache/ttl.go:44-48
func WithFailSafe(grace time.Duration) TTLOption

{
	return func(o *TTLOptions) {
		o.FailSafeGracePeriod = grace
	}
}
F
function

WithSoftTimeout

WithSoftTimeout sets a timeout for backend fetch operations.
If the backend takes longer than the duration, the cache returns the stale value (if available)
instead of waiting or failing.

Parameters

Returns

v1/cache/ttl.go:53-57
func WithSoftTimeout(d time.Duration) TTLOption

{
	return func(o *TTLOptions) {
		o.SoftTimeout = d
	}
}
F
function

WithEagerRefresh

WithEagerRefresh enables proactive background refreshing of cache entries.
If an item’s remaining TTL falls below the specified threshold (e.g., 0.1 for 10%),
a refresh is triggered in the background, serving the current item immediately.
The threshold must be between 0.0 and 1.0.

Parameters

threshold
float64

Returns

v1/cache/ttl.go:63-69
func WithEagerRefresh(threshold float64) TTLOption

{
	return func(o *TTLOptions) {
		if threshold >= 0.0 && threshold <= 1.0 {
			o.EagerRefreshThreshold = threshold
		}
	}
}
F
function

WithDynamicTTL

WithDynamicTTL configures simple frequency based TTL adjustments.

freq defines the maximum duration between two accesses for the key to be
considered “hot”. When a hot access is detected the TTL is increased by inc
up to max. Cold accesses reduce the TTL by dec but not below min.

v1/cache/ttl.go:76-84
func WithDynamicTTL(freq, inc, dec, min, max time.Duration) TTLOption

{
	return func(o *TTLOptions) {
		o.FreqThreshold = freq
		o.Increment = inc
		o.Decrement = dec
		o.MinTTL = min
		o.MaxTTL = max
	}
}
S
struct

AdaptiveCache

AdaptiveCache switches between LRU and LFU strategies based on access patterns.

It monitors hit/miss ratios and selects the strategy with more hits.

v1/cache/adaptive_cache.go:12-21
type AdaptiveCache struct

Fields

Name Type Description
lru Cache[T]
lfu Cache[T]
useLFU atomic.Bool
hits atomic.Uint64
misses atomic.Uint64
switchEvery uint64
F
function

NewAdaptive

NewAdaptive creates a new AdaptiveCache.

The cache starts with an LRU strategy and evaluates the hit/miss ratio
every 100 operations, switching to LFU when misses dominate and back to
LRU when hits dominate.

Returns

*AdaptiveCache[T]
v1/cache/adaptive_cache.go:28-36
func NewAdaptive[T any]() *AdaptiveCache[T]

{
	ac := &AdaptiveCache[T]{
		lru:         NewLRU[T](),
		lfu:         NewLFU[T](),
		switchEvery: 100,
	}
	ac.useLFU.Store(false)
	return ac
}
F
function

benchmarkSet

benchmarkSet measures Set performance for a cache.

Parameters

c
Cache[string]
v1/cache/bench_test.go:14-23
func benchmarkSet(b *testing.B, c Cache[string])

{
	ctx := context.Background()
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		if err := c.Set(ctx, strconv.Itoa(i), "val", time.Minute); err != nil {
			b.Fatalf("set failed: %v", err)
		}
	}
}
F
function

benchmarkGet

benchmarkGet measures Get performance for a cache.

Parameters

c
Cache[string]
v1/cache/bench_test.go:26-38
func benchmarkGet(b *testing.B, c Cache[string])

{
	ctx := context.Background()
	if err := c.Set(ctx, "key", "val", time.Minute); err != nil {
		b.Fatalf("setup failed: %v", err)
	}
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		if _, ok, err := c.Get(ctx, "key"); err != nil || !ok {
			b.Fatalf("get failed: %v ok=%v", err, ok)
		}
	}
}
F
function

BenchmarkInMemoryCacheSet

Parameters

v1/cache/bench_test.go:40-44
func BenchmarkInMemoryCacheSet(b *testing.B)

{
	c := NewInMemory[string]()
	defer c.Close()
	benchmarkSet(b, c)
}
F
function

BenchmarkInMemoryCacheGet

Parameters

v1/cache/bench_test.go:46-50
func BenchmarkInMemoryCacheGet(b *testing.B)

{
	c := NewInMemory[string]()
	defer c.Close()
	benchmarkGet(b, c)
}
F
function

BenchmarkRistrettoCacheSet

Parameters

v1/cache/bench_test.go:52-56
func BenchmarkRistrettoCacheSet(b *testing.B)

{
	c := NewRistretto[string]()
	defer c.Close()
	benchmarkSet(b, c)
}
F
function

BenchmarkRistrettoCacheGet

Parameters

v1/cache/bench_test.go:58-62
func BenchmarkRistrettoCacheGet(b *testing.B)

{
	c := NewRistretto[string]()
	defer c.Close()
	benchmarkGet(b, c)
}
F
function

benchRedisCache

benchRedisCache returns a RedisCache backed by an in-memory Redis server.

Returns

*RedisCache[string]
func()
v1/cache/bench_test.go:65-73
func benchRedisCache() (*RedisCache[string], func())

{
	mr, _ := miniredis.Run()
	client := redis.NewClient(&redis.Options{Addr: mr.Addr()})
	cleanup := func() {
		_ = client.Close()
		mr.Close()
	}
	return NewRedis[string](client, nil), cleanup
}
F
function

BenchmarkRedisCacheSet

Parameters

v1/cache/bench_test.go:75-79
func BenchmarkRedisCacheSet(b *testing.B)

{
	c, cleanup := benchRedisCache()
	defer cleanup()
	benchmarkSet(b, c)
}
F
function

BenchmarkRedisCacheGet

Parameters

v1/cache/bench_test.go:81-85
func BenchmarkRedisCacheGet(b *testing.B)

{
	c, cleanup := benchRedisCache()
	defer cleanup()
	benchmarkGet(b, c)
}
I
interface

Sizer

Sizer is implemented by values that can report their own storage size in bytes,
letting the cache account for memory usage precisely.

v1/cache/cache.go:23-25
type Sizer interface

Methods

Size
Method

Returns

int64
func Size(...)
I
interface

Cache

Cache defines the basic operations for a cache layer.

v1/cache/cache.go:28-37
type Cache interface

Methods

Get
Method

Parameters

key string

Returns

T
bool
error
func Get(...)
Set
Method

Parameters

key string
value T

Returns

error
func Set(...)
Invalidate
Method

Parameters

key string

Returns

error
func Invalidate(...)
S
struct

InMemoryCache

InMemoryCache is a simple in-memory cache implementation with TTL support.

v1/cache/cache.go:40-59
type InMemoryCache struct

Fields

Name Type Description
mu sync.RWMutex
items map[string]item[T]
order *list.List
hits atomic.Uint64
misses atomic.Uint64
sweepInterval time.Duration
ctx context.Context
cancel context.CancelFunc
wg sync.WaitGroup
maxEntries int
maxMemory int64
currentMemory atomic.Int64
hitCounter prometheus.Counter
missCounter prometheus.Counter
evictionCounter prometheus.Counter
latencyHist prometheus.Histogram
traceEnabled bool
S
struct

item

v1/cache/cache.go:61-66
type item struct

Fields

Name Type Description
value T
size int64
expiresAt time.Time
element *list.Element
T
type

InMemoryOption

InMemoryOption configures an InMemoryCache.

v1/cache/cache.go:69-69
type InMemoryOption[T any] func(*InMemoryCache[T])
F
function

WithSweepInterval

WithSweepInterval sets the interval at which expired items are removed.
A zero or negative duration disables the background sweeper.

Parameters

Returns

InMemoryOption[T]
v1/cache/cache.go:73-77
func WithSweepInterval[T any](d time.Duration) InMemoryOption[T]

{
	return func(c *InMemoryCache[T]) {
		c.sweepInterval = d
	}
}
F
function

WithMaxEntries

WithMaxEntries sets the maximum number of entries the cache can hold.
A non-positive value means the cache size is unbounded.

Parameters

n
int

Returns

InMemoryOption[T]
v1/cache/cache.go:81-85
func WithMaxEntries[T any](n int) InMemoryOption[T]

{
	return func(c *InMemoryCache[T]) {
		c.maxEntries = n
	}
}
F
function

WithMaxMemory

WithMaxMemory sets the maximum memory in bytes the cache can hold.
A non-positive value means the memory size is unbounded.

Parameters

bytes
int64

Returns

InMemoryOption[T]
v1/cache/cache.go:89-93
func WithMaxMemory[T any](bytes int64) InMemoryOption[T]

{
	return func(c *InMemoryCache[T]) {
		c.maxMemory = bytes
	}
}
F
function

WithMetrics

WithMetrics enables Prometheus metrics collection using the provided registerer.

Parameters

Returns

InMemoryOption[T]
v1/cache/cache.go:96-117
func WithMetrics[T any](reg prometheus.Registerer) InMemoryOption[T]

{
	return func(c *InMemoryCache[T]) {
		c.hitCounter = prometheus.NewCounter(prometheus.CounterOpts{
			Name: "warp_cache_hits_total",
			Help: "Total number of cache hits",
		})
		c.missCounter = prometheus.NewCounter(prometheus.CounterOpts{
			Name: "warp_cache_misses_total",
			Help: "Total number of cache misses",
		})
		c.evictionCounter = prometheus.NewCounter(prometheus.CounterOpts{
			Name: "warp_cache_evictions_total",
			Help: "Total number of cache evictions",
		})
		c.latencyHist = prometheus.NewHistogram(prometheus.HistogramOpts{
			Name:    "warp_cache_latency_seconds",
			Help:    "Latency of cache operations",
			Buckets: prometheus.DefBuckets,
		})
		reg.MustRegister(c.hitCounter, c.missCounter, c.evictionCounter, c.latencyHist)
	}
}
F
function

NewInMemory

NewInMemory returns a new InMemoryCache instance.

An optional sweep interval can be provided using WithSweepInterval. When
enabled, a background goroutine periodically removes expired items from the
cache. The default interval is one minute.

Parameters

opts
...InMemoryOption[T]

Returns

*InMemoryCache[T]
v1/cache/cache.go:128-145
func NewInMemory[T any](opts ...InMemoryOption[T]) *InMemoryCache[T]

{
	ctx, cancel := context.WithCancel(context.Background())
	c := &InMemoryCache[T]{
		items:         make(map[string]item[T]),
		order:         list.New(),
		sweepInterval: defaultSweepInterval,
		ctx:           ctx,
		cancel:        cancel,
	}
	for _, opt := range opts {
		opt(c)
	}
	if c.sweepInterval > 0 {
		c.wg.Add(1)
		go c.sweeper()
	}
	return c
}
S
struct

Stats

Stats reports basic metrics about cache usage.

v1/cache/cache.go:436-440
type Stats struct

Fields

Name Type Description
Hits uint64
Misses uint64
Size int
F
function

WithTracing

WithTracing enables OpenTelemetry tracing for cache operations.

Returns

InMemoryOption[T]
v1/cache/cache.go:455-459
func WithTracing[T any]() InMemoryOption[T]

{
	return func(c *InMemoryCache[T]) {
		c.traceEnabled = true
	}
}
F
function

EstimateSize

EstimateSize estimates the size of a value in bytes.

Parameters

v
T

Returns

int64
v1/cache/cache.go:462-486
func EstimateSize[T any](v T) int64

{
	// 1. Check if it implements Sizer
	if s, ok := any(v).(Sizer); ok {
		return s.Size()
	}

	// 2. Common types
	switch val := any(v).(type) {
	case string:
		return int64(len(val))
	case []byte:
		return int64(len(val))
	case int, uint, int64, uint64, float64, complex128:
		return 8
	case int32, uint32, float32:
		return 4
	case int16, uint16:
		return 2
	case int8, uint8, bool:
		return 1
	}

	// 3. Last resort: unsafe.Sizeof (Shallow)
	return int64(unsafe.Sizeof(v))
}
F
function

TestInMemoryCache

Parameters

v1/cache/cache_test.go:11-32
func TestInMemoryCache(t *testing.T)

{
	ctx := context.Background()
	c := NewInMemory[string]()
	defer c.Close()
	if err := c.Set(ctx, "foo", "bar", time.Millisecond); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	if v, ok, err := c.Get(ctx, "foo"); err != nil || !ok || v != "bar" {
		t.Fatalf("expected bar, got %v err %v", v, err)
	}

	time.Sleep(2 * time.Millisecond)
	if _, ok, err := c.Get(ctx, "foo"); ok || err != nil {
		t.Fatalf("expected key to expire")
	}

	m := c.Metrics()
	if m.Hits != 1 || m.Misses != 1 {
		t.Fatalf("unexpected metrics: %+v", m)
	}
}
F
function

TestInMemoryCacheSweeper

Parameters

v1/cache/cache_test.go:34-48
func TestInMemoryCacheSweeper(t *testing.T)

{
	ctx := context.Background()
	c := NewInMemory[string](WithSweepInterval[string](5 * time.Millisecond))
	defer c.Close()
	if err := c.Set(ctx, "foo", "bar", 5*time.Millisecond); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	time.Sleep(20 * time.Millisecond)
	c.mu.RLock()
	_, ok := c.items["foo"]
	c.mu.RUnlock()
	if ok {
		t.Fatalf("expected key to be swept")
	}
}
F
function

TestInMemoryCacheContext

Parameters

v1/cache/cache_test.go:50-85
func TestInMemoryCacheContext(t *testing.T)

{
	c := NewInMemory[string]()
	defer c.Close()

	// Set with canceled context should fail and not store the item.
	ctxSet, cancelSet := context.WithCancel(context.Background())
	cancelSet()
	if err := c.Set(ctxSet, "a", "b", time.Minute); !errors.Is(err, context.Canceled) {
		t.Fatalf("expected context canceled error, got %v", err)
	}
	if _, ok, err := c.Get(context.Background(), "a"); ok || err != nil {
		t.Fatalf("item should not be stored when context is canceled")
	}

	// Prepare an item for further context tests.
	if err := c.Set(context.Background(), "foo", "bar", time.Minute); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	// Get with canceled context should not retrieve the item.
	ctxGet, cancelGet := context.WithCancel(context.Background())
	cancelGet()
	if v, ok, err := c.Get(ctxGet, "foo"); !errors.Is(err, context.Canceled) || ok || v != "" {
		t.Fatalf("expected canceled context to prevent retrieval")
	}

	// Invalidate with canceled context should fail and keep the item.
	ctxInv, cancelInv := context.WithCancel(context.Background())
	cancelInv()
	if err := c.Invalidate(ctxInv, "foo"); !errors.Is(err, context.Canceled) {
		t.Fatalf("expected context canceled error, got %v", err)
	}
	if v, ok, err := c.Get(context.Background(), "foo"); err != nil || !ok || v != "bar" {
		t.Fatalf("item should remain after canceled invalidate")
	}
}
F
function

TestInMemoryCacheEviction

Parameters

v1/cache/cache_test.go:87-117
func TestInMemoryCacheEviction(t *testing.T)

{
	ctx := context.Background()
	c := NewInMemory[string](WithMaxEntries[string](2))
	defer c.Close()

	if err := c.Set(ctx, "a", "1", time.Minute); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if err := c.Set(ctx, "b", "2", time.Minute); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	// Access "a" so that "b" becomes the least recently used.
	if _, ok, err := c.Get(ctx, "a"); err != nil || !ok {
		t.Fatalf("expected to retrieve a: %v", err)
	}

	if err := c.Set(ctx, "c", "3", time.Minute); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	if _, ok, _ := c.Get(ctx, "b"); ok {
		t.Fatalf("expected b to be evicted")
	}
	if _, ok, _ := c.Get(ctx, "a"); !ok {
		t.Fatalf("expected a to remain in cache")
	}
	if _, ok, _ := c.Get(ctx, "c"); !ok {
		t.Fatalf("expected c to be present")
	}
}
F
function

TestInMemoryCache_MaxMemory

Parameters

v1/cache/cache_test.go:119-145
func TestInMemoryCache_MaxMemory(t *testing.T)

{
	ctx := context.Background()
	c := NewInMemory[string](WithMaxMemory[string](20))
	defer c.Close()

	c.Set(ctx, "k1", "12345", time.Minute)
	c.Set(ctx, "k2", "12345", time.Minute)
	c.Set(ctx, "k3", "12345", time.Minute)

	if c.GetCurrentMemory() != 15 {
		t.Fatalf("expected 15 bytes, got %d", c.GetCurrentMemory())
	}

	c.Set(ctx, "k4", "1234567890", time.Minute)

	if _, ok, _ := c.Get(ctx, "k1"); ok {
		t.Fatalf("expected k1 to be evicted due to memory pressure")
	}
	if _, ok, _ := c.Get(ctx, "k4"); !ok {
		t.Fatalf("expected k4 to be present")
	}

	mem := c.GetCurrentMemory()
	if mem > 20 {
		t.Fatalf("memory usage %d exceeds limit 20", mem)
	}
}
F
function

TestNewStrategies

Parameters

v1/cache/cache_test.go:147-156
func TestNewStrategies(t *testing.T)

{
	c := New[int]()
	if _, ok := c.(*InMemoryCache[int]); !ok {
		t.Fatalf("expected LRU cache by default")
	}
	c = New[int](WithStrategy[int](LFUStrategy))
	if _, ok := c.(*LFUCache[int]); !ok {
		t.Fatalf("expected LFU cache")
	}
}
F
function

TestAdaptiveSwitch

Parameters

v1/cache/cache_test.go:158-173
func TestAdaptiveSwitch(t *testing.T)

{
	ctx := context.Background()
	c := New[int](WithStrategy[int](AdaptiveStrategy))
	ac, ok := c.(*AdaptiveCache[int])
	if !ok {
		t.Fatalf("expected AdaptiveCache")
	}
	defer ac.Close()
	for i := 0; i < 150; i++ {
		key := fmt.Sprintf("k%d", i)
		c.Get(ctx, key)
	}
	if !ac.useLFU.Load() {
		t.Fatalf("expected adaptive cache to switch to LFU")
	}
}
F
function

TestTTLOptionsAdjust

Parameters

v1/cache/cache_test.go:175-209
func TestTTLOptionsAdjust(t *testing.T)

{
	now := time.Now()
	opts := TTLOptions{
		FreqThreshold: 10 * time.Millisecond,
		Increment:     5 * time.Millisecond,
		Decrement:     3 * time.Millisecond,
		MinTTL:        4 * time.Millisecond,
		MaxTTL:        20 * time.Millisecond,
	}

	t.Run("increment", func(t *testing.T) {
		ttl := opts.Adjust(10*time.Millisecond, now.Add(-5*time.Millisecond), now)
		if ttl != 15*time.Millisecond {
			t.Fatalf("expected 15ms, got %v", ttl)
		}
	})

	t.Run("decrement", func(t *testing.T) {
		ttl := opts.Adjust(15*time.Millisecond, now.Add(-20*time.Millisecond), now)
		if ttl != 12*time.Millisecond {
			t.Fatalf("expected 12ms, got %v", ttl)
		}
	})

	t.Run("sliding", func(t *testing.T) {
		opts.Sliding = true
		ttl := opts.Adjust(12*time.Millisecond, now.Add(-5*time.Millisecond), now)
		if ttl != 17*time.Millisecond {
			t.Fatalf("expected 17ms, got %v", ttl)
		}
		if !opts.Sliding {
			t.Fatalf("expected sliding to remain enabled")
		}
	})
}