• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

orneryd / NornicDB / 22888612440

10 Mar 2026 05:30AM UTC coverage: 87.388% (+0.3%) from 87.113%
22888612440

push

github

orneryd
increasing test coverage

3 of 3 new or added lines in 1 file covered. (100.0%)

15 existing lines in 4 files now uncovered.

87683 of 100338 relevant lines covered (87.39%)

1.01 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

98.64
/pkg/cache/query_cache.go
1
// Package cache provides query plan caching for NornicDB.
2
//
3
// Query plan caching avoids re-parsing identical Cypher queries,
4
// significantly improving throughput for repeated queries.
5
//
6
// Features:
7
// - LRU eviction for bounded memory
8
// - TTL expiration for stale plans
9
// - Thread-safe operations
10
// - Cache hit/miss statistics
11
//
12
// Usage:
13
//
14
//        cache := NewQueryCache(1000, 5*time.Minute)
15
//
16
//        // Check cache before parsing
17
//        if plan, ok := cache.Get(query); ok {
18
//                return plan // Cache hit
19
//        }
20
//
21
//        // Parse and cache
22
//        plan := parseQuery(query)
23
//        cache.Put(query, plan)
24
package cache
25

26
import (
	"container/list"
	"hash/fnv"
	"sort"
	"sync"
	"sync/atomic"
	"time"
)
33

34
// QueryCache is a thread-safe LRU cache for parsed query plans.
//
// Internally it combines:
//   - a hash map for O(1) key lookups
//   - a doubly-linked list for LRU ordering (front = most recently used)
//   - an optional TTL for automatic expiration of stale entries
//
// Example:
//
//	cache := NewQueryCache(1000, 5*time.Minute)
//
//	key := cache.Key(query, params)
//	if plan, ok := cache.Get(key); ok {
//		return plan.(*ParsedPlan)
//	}
//
//	plan := parseQuery(query)
//	cache.Put(key, plan)
type QueryCache struct {
	mu sync.RWMutex

	// Configuration
	maxSize int           // maximum entries before LRU eviction kicks in
	ttl     time.Duration // per-entry time-to-live; 0 disables expiration
	enabled bool          // when false, Get always misses and Put is a no-op

	// LRU list and map; list front is the most recently used entry,
	// each element's Value is a *cacheEntry.
	list  *list.List
	items map[uint64]*list.Element

	// Statistics, updated atomically so Stats can read them without
	// taking the mutex.
	hits   uint64
	misses uint64
}
70

71
// cacheEntry holds a cached value plus the metadata needed for
// LRU eviction (key) and TTL expiration (expiresAt).
type cacheEntry struct {
	key       uint64      // cache key, kept so eviction can delete the map entry
	value     interface{} // the cached plan; opaque to the cache
	expiresAt time.Time   // zero value when TTL is disabled
}
77

78
// NewQueryCache creates a new query cache.
79
//
80
// Parameters:
81
//   - maxSize: Maximum number of cached plans (LRU eviction when exceeded)
82
//   - ttl: Time-to-live for cached entries (0 = no expiration)
83
//
84
// Example:
85
//
86
//        // Cache up to 1000 plans for 5 minutes each
87
//        cache := NewQueryCache(1000, 5*time.Minute)
88
//
89
//        // Unlimited TTL (only LRU eviction)
90
//        cache = NewQueryCache(1000, 0)
91
func NewQueryCache(maxSize int, ttl time.Duration) *QueryCache {
1✔
92
        if maxSize <= 0 {
2✔
93
                maxSize = 1000
1✔
94
        }
1✔
95
        return &QueryCache{
1✔
96
                maxSize: maxSize,
1✔
97
                ttl:     ttl,
1✔
98
                enabled: true,
1✔
99
                list:    list.New(),
1✔
100
                items:   make(map[uint64]*list.Element, maxSize),
1✔
101
        }
1✔
102
}
103

104
// Key generates a cache key from query and parameters.
105
//
106
// The key is a 64-bit hash (FNV-1a algorithm) that uniquely identifies a query
107
// pattern. The hash includes the query text and parameter keys (but not values),
108
// allowing parameterized queries to be cached efficiently.
109
//
110
// Parameters:
111
//   - query: The Cypher query string
112
//   - params: Query parameters (only keys are hashed, not values)
113
//
114
// Returns:
115
//   - uint64 hash suitable for map lookups
116
//
117
// Example 1 - Basic Usage:
118
//
119
//        cache := cache.NewQueryCache(1000, 5*time.Minute)
120
//
121
//        query := "MATCH (n:Person {name: $name}) RETURN n"
122
//        params := map[string]interface{}{"name": "Alice"}
123
//
124
//        key := cache.Key(query, params)
125
//        fmt.Printf("Cache key: %d\n", key)
126
//
127
// Example 2 - Same Query, Different Values:
128
//
129
//        // These produce the SAME key (parameter values don't matter)
130
//        key1 := cache.Key("MATCH (n {id: $id}) RETURN n", map[string]interface{}{"id": 1})
131
//        key2 := cache.Key("MATCH (n {id: $id}) RETURN n", map[string]interface{}{"id": 2})
132
//        // key1 == key2 (same query pattern)
133
//
134
//        // This produces a DIFFERENT key (different query)
135
//        key3 := cache.Key("MATCH (n {name: $name}) RETURN n", map[string]interface{}{"name": "Bob"})
136
//        // key3 != key1 (different query pattern)
137
//
138
// Example 3 - Integration with Parser:
139
//
140
//        func executeQuery(query string, params map[string]interface{}) (*Result, error) {
141
//                cache := cache.GlobalQueryCache()
142
//                key := cache.Key(query, params)
143
//
144
//                // Try cache first
145
//                if plan, ok := cache.Get(key); ok {
146
//                        return executePlan(plan.(*ParsedPlan), params)
147
//                }
148
//
149
//                // Parse and cache
150
//                plan, err := parseQuery(query)
151
//                if err != nil {
152
//                        return nil, err
153
//                }
154
//                cache.Put(key, plan)
155
//
156
//                return executePlan(plan, params)
157
//        }
158
//
159
// Performance:
160
//   - FNV-1a hash: ~50-100 ns for typical queries
161
//   - O(1) lookup in cache map
162
//   - Parameter keys included for correctness
163
//   - Parameter values excluded for reusability
164
//
165
// ELI12:
166
//
167
// Think of the cache key like a fingerprint for a query:
168
//   - Same query pattern = same fingerprint
169
//   - Different values (like "Alice" vs "Bob") = same fingerprint
170
//   - Different query = different fingerprint
171
//
172
// Why? Because the query structure is what we cache, not the specific values.
173
// It's like caching a recipe (the steps) rather than the actual meal (with
174
// specific ingredients). You can use the same recipe with different ingredients!
175
func (c *QueryCache) Key(query string, params map[string]interface{}) uint64 {
1✔
176
        h := fnv.New64a()
1✔
177
        h.Write([]byte(query))
1✔
178

1✔
179
        // Include parameter keys (not values - they might differ)
1✔
180
        // This allows caching parameterized queries
1✔
181
        for k := range params {
2✔
182
                h.Write([]byte(k))
1✔
183
        }
1✔
184

185
        return h.Sum64()
1✔
186
}
187

188
// Get retrieves a cached plan if present and not expired.
189
//
190
// This method performs an O(1) lookup in the cache map and automatically:
191
//   - Checks TTL expiration (removes expired entries)
192
//   - Updates LRU ordering (moves accessed entry to front)
193
//   - Tracks hit/miss statistics
194
//
195
// Parameters:
196
//   - key: Cache key from Key() method
197
//
198
// Returns:
199
//   - (value, true) on cache hit
200
//   - (nil, false) on cache miss or expiration
201
//
202
// Example 1 - Basic Cache Check:
203
//
204
//        cache := cache.NewQueryCache(1000, 5*time.Minute)
205
//        key := cache.Key(query, params)
206
//
207
//        if plan, ok := cache.Get(key); ok {
208
//                fmt.Println("Cache hit!")
209
//                return plan.(*ParsedPlan)
210
//        }
211
//        fmt.Println("Cache miss - need to parse")
212
//
213
// Example 2 - Query Executor Pattern:
214
//
215
//        func (e *Executor) Execute(query string, params map[string]interface{}) (*Result, error) {
216
//                key := e.cache.Key(query, params)
217
//
218
//                // Fast path: cached plan
219
//                if cached, ok := e.cache.Get(key); ok {
220
//                        plan := cached.(*ParsedPlan)
221
//                        return e.executePlan(plan, params)
222
//                }
223
//
224
//                // Slow path: parse and cache
225
//                plan, err := e.parser.Parse(query)
226
//                if err != nil {
227
//                        return nil, err
228
//                }
229
//                e.cache.Put(key, plan)
230
//
231
//                return e.executePlan(plan, params)
232
//        }
233
//
234
// Example 3 - TTL Expiration:
235
//
236
//        cache := cache.NewQueryCache(1000, 1*time.Second)
237
//        key := cache.Key("MATCH (n) RETURN n", nil)
238
//
239
//        cache.Put(key, parsedPlan)
240
//
241
//        // Immediate access: cache hit
242
//        if _, ok := cache.Get(key); ok {
243
//                fmt.Println("Hit!") // Prints
244
//        }
245
//
246
//        // After TTL: cache miss (auto-removed)
247
//        time.Sleep(2 * time.Second)
248
//        if _, ok := cache.Get(key); !ok {
249
//                fmt.Println("Expired!") // Prints
250
//        }
251
//
252
// Example 4 - Type Assertion:
253
//
254
//        if cached, ok := cache.Get(key); ok {
255
//                // Type assert to your plan type
256
//                plan, ok := cached.(*ParsedPlan)
257
//                if !ok {
258
//                        return nil, fmt.Errorf("invalid cached type")
259
//                }
260
//                return executePlan(plan, params)
261
//        }
262
//
263
// Performance:
264
//   - Cache hit: O(1) map lookup + O(1) list move
265
//   - Cache miss: O(1) map lookup
266
//   - TTL check: O(1) time comparison
267
//   - Typical latency: <100 ns
268
//
269
// Thread Safety:
270
//   - Safe for concurrent reads (RLock)
271
//   - Safe for concurrent writes (Lock)
272
//   - Statistics updated atomically
273
//
274
// ELI12:
275
//
276
// Imagine a library with a "recently returned" shelf:
277
//   - Get checks if your book is on the shelf
278
//   - If found, you take it and move it to the front (most recent)
279
//   - If the book is too old (expired), it's thrown away
280
//   - If not found, you have to go find it in the main stacks (parse)
281
//
282
// The cache remembers what you looked at recently so you don't have to
283
// search the whole library every time!
284
func (c *QueryCache) Get(key uint64) (interface{}, bool) {
1✔
285
        if !c.enabled {
2✔
286
                atomic.AddUint64(&c.misses, 1)
1✔
287
                return nil, false
1✔
288
        }
1✔
289

290
        c.mu.RLock()
1✔
291
        elem, ok := c.items[key]
1✔
292
        c.mu.RUnlock()
1✔
293

1✔
294
        if !ok {
2✔
295
                atomic.AddUint64(&c.misses, 1)
1✔
296
                return nil, false
1✔
297
        }
1✔
298

299
        entry := elem.Value.(*cacheEntry)
1✔
300

1✔
301
        // Check TTL
1✔
302
        if c.ttl > 0 && time.Now().After(entry.expiresAt) {
2✔
303
                // Expired - remove and return miss
1✔
304
                c.mu.Lock()
1✔
305
                c.removeElement(elem)
1✔
306
                c.mu.Unlock()
1✔
307
                atomic.AddUint64(&c.misses, 1)
1✔
308
                return nil, false
1✔
309
        }
1✔
310

311
        // Move to front (most recently used)
312
        c.mu.Lock()
1✔
313
        c.list.MoveToFront(elem)
1✔
314
        c.mu.Unlock()
1✔
315

1✔
316
        atomic.AddUint64(&c.hits, 1)
1✔
317
        return entry.value, true
1✔
318
}
319

320
// Put adds a plan to the cache.
321
//
322
// This method stores a parsed query plan in the cache for future reuse.
323
// It automatically handles:
324
//   - LRU eviction when cache is full
325
//   - TTL timestamp setting
326
//   - Updating existing entries
327
//   - Moving entry to front of LRU list
328
//
329
// Parameters:
330
//   - key: Cache key from Key() method
331
//   - value: Parsed query plan (typically *ParsedPlan)
332
//
333
// Example 1 - Basic Caching:
334
//
335
//        cache := cache.NewQueryCache(1000, 5*time.Minute)
336
//
337
//        query := "MATCH (n:Person) RETURN n"
338
//        plan := parseQuery(query) // Your parser
339
//
340
//        key := cache.Key(query, nil)
341
//        cache.Put(key, plan)
342
//
343
//        // Later: instant retrieval
344
//        if cached, ok := cache.Get(key); ok {
345
//                fmt.Println("Reusing cached plan!")
346
//        }
347
//
348
// Example 2 - Parse-Once Pattern:
349
//
350
//        func getOrParsePlan(query string, params map[string]interface{}) (*ParsedPlan, error) {
351
//                cache := cache.GlobalQueryCache()
352
//                key := cache.Key(query, params)
353
//
354
//                // Try cache
355
//                if cached, ok := cache.Get(key); ok {
356
//                        return cached.(*ParsedPlan), nil
357
//                }
358
//
359
//                // Parse (expensive operation)
360
//                plan, err := parser.Parse(query)
361
//                if err != nil {
362
//                        return nil, err
363
//                }
364
//
365
//                // Cache for next time
366
//                cache.Put(key, plan)
367
//                return plan, nil
368
//        }
369
//
370
// Example 3 - Updating Cached Entry:
371
//
372
//        // First put
373
//        key := cache.Key(query, nil)
374
//        cache.Put(key, plan1)
375
//
376
//        // Later: update with optimized plan
377
//        optimizedPlan := optimizePlan(plan1)
378
//        cache.Put(key, optimizedPlan) // Replaces old value
379
//
380
// Example 4 - LRU Eviction:
381
//
382
//        cache := cache.NewQueryCache(3, 0) // Only 3 entries, no TTL
383
//
384
//        cache.Put(1, "plan-A")
385
//        cache.Put(2, "plan-B")
386
//        cache.Put(3, "plan-C")
387
//        // Cache: [C, B, A] (most recent first)
388
//
389
//        cache.Get(1) // Access A
390
//        // Cache: [A, C, B]
391
//
392
//        cache.Put(4, "plan-D") // Cache full, evicts B (least recent)
393
//        // Cache: [D, A, C]
394
//
395
// Performance:
396
//   - O(1) insertion or update
397
//   - O(1) eviction when full
398
//   - No allocations for updates
399
//   - Typical latency: <200 ns
400
//
401
// Memory Management:
402
//   - LRU eviction prevents unbounded growth
403
//   - TTL expiration removes stale entries
404
//   - Eviction happens synchronously on Put
405
//
406
// Thread Safety:
407
//   - Exclusive lock held during Put
408
//   - Safe for concurrent Put/Get operations
409
//
410
// ELI12:
411
//
412
// Think of Put like adding a book to the "recently returned" shelf:
413
//   - If there's space, just add it to the front
414
//   - If the shelf is full, remove the oldest book from the back
415
//   - If the book is already there, move it to the front with new info
416
//   - Mark when it was added so we know when it's too old
417
//
418
// The shelf always keeps the most recently used books, automatically
419
// throwing away old ones you haven't touched in a while!
420
func (c *QueryCache) Put(key uint64, value interface{}) {
1✔
421
        if !c.enabled {
2✔
422
                return
1✔
423
        }
1✔
424

425
        c.mu.Lock()
1✔
426
        defer c.mu.Unlock()
1✔
427

1✔
428
        // Check if already exists
1✔
429
        if elem, ok := c.items[key]; ok {
2✔
430
                // Update existing entry
1✔
431
                entry := elem.Value.(*cacheEntry)
1✔
432
                entry.value = value
1✔
433
                if c.ttl > 0 {
2✔
434
                        entry.expiresAt = time.Now().Add(c.ttl)
1✔
435
                }
1✔
436
                c.list.MoveToFront(elem)
1✔
437
                return
1✔
438
        }
439

440
        // Evict if at capacity
441
        for c.list.Len() >= c.maxSize {
2✔
442
                c.evictOldest()
1✔
443
        }
1✔
444

445
        // Add new entry
446
        entry := &cacheEntry{
1✔
447
                key:   key,
1✔
448
                value: value,
1✔
449
        }
1✔
450
        if c.ttl > 0 {
2✔
451
                entry.expiresAt = time.Now().Add(c.ttl)
1✔
452
        }
1✔
453

454
        elem := c.list.PushFront(entry)
1✔
455
        c.items[key] = elem
1✔
456
}
457

458
// Remove removes an entry from the cache.
459
//
460
// Use this to manually invalidate a cached query plan, for example when
461
// the underlying data schema changes or when you know a plan is no longer
462
// valid.
463
//
464
// Parameters:
465
//   - key: Cache key to remove
466
//
467
// Example 1 - Schema Change Invalidation:
468
//
469
//        func createIndex(label, property string) error {
470
//                if err := db.CreateIndex(label, property); err != nil {
471
//                        return err
472
//                }
473
//
474
//                // Invalidate affected queries
475
//                cache := cache.GlobalQueryCache()
476
//                for _, query := range affectedQueries {
477
//                        key := cache.Key(query, nil)
478
//                        cache.Remove(key)
479
//                }
480
//                return nil
481
//        }
482
//
483
// Example 2 - Selective Invalidation:
484
//
485
//        // Remove specific query from cache
486
//        query := "MATCH (n:Person) RETURN n"
487
//        key := cache.Key(query, nil)
488
//        cache.Remove(key)
489
//
490
//        // Next execution will re-parse
491
//        result := executeQuery(query, nil) // Cache miss
492
//
493
// Performance:
494
//   - O(1) removal from map and list
495
//   - No-op if key doesn't exist
496
func (c *QueryCache) Remove(key uint64) {
1✔
497
        c.mu.Lock()
1✔
498
        defer c.mu.Unlock()
1✔
499

1✔
500
        if elem, ok := c.items[key]; ok {
2✔
501
                c.removeElement(elem)
1✔
502
        }
1✔
503
}
504

505
// Clear removes all entries from the cache.
506
//
507
// Use this to completely reset the cache, for example during testing,
508
// after major schema changes, or when switching databases.
509
//
510
// Example 1 - Testing:
511
//
512
//        func TestQueryExecution(t *testing.T) {
513
//                cache := cache.NewQueryCache(100, 0)
514
//
515
//                // Test with cache
516
//                result1 := executeQuery("MATCH (n) RETURN n", nil)
517
//
518
//                // Clear for next test
519
//                cache.Clear()
520
//
521
//                // Test without cache
522
//                result2 := executeQuery("MATCH (n) RETURN n", nil)
523
//        }
524
//
525
// Example 2 - Schema Migration:
526
//
527
//        func migrateSchema() error {
528
//                // Perform migration
529
//                if err := db.Migrate(); err != nil {
530
//                        return err
531
//                }
532
//
533
//                // Invalidate all cached plans
534
//                cache.GlobalQueryCache().Clear()
535
//                return nil
536
//        }
537
//
538
// Example 3 - Memory Pressure:
539
//
540
//        // Free memory under pressure
541
//        if memoryPressure() {
542
//                cache.GlobalQueryCache().Clear()
543
//                runtime.GC()
544
//        }
545
//
546
// Performance:
547
//   - O(n) where n is cache size
548
//   - Reinitializes internal structures
549
//   - Resets statistics
550
func (c *QueryCache) Clear() {
1✔
551
        c.mu.Lock()
1✔
552
        defer c.mu.Unlock()
1✔
553

1✔
554
        c.list.Init()
1✔
555
        c.items = make(map[uint64]*list.Element, c.maxSize)
1✔
556
}
1✔
557

558
// Len returns the number of cached entries.
559
//
560
// Use this to monitor cache utilization or for debugging.
561
//
562
// Returns:
563
//   - Current number of entries in the cache
564
//
565
// Example 1 - Monitoring:
566
//
567
//        cache := cache.GlobalQueryCache()
568
//        fmt.Printf("Cache size: %d/%d\n", cache.Len(), 1000)
569
//
570
// Example 2 - Metrics:
571
//
572
//        func collectMetrics() {
573
//                cache := cache.GlobalQueryCache()
574
//                stats := cache.Stats()
575
//
576
//                metrics.Gauge("cache.size", float64(cache.Len()))
577
//                metrics.Gauge("cache.hit_rate", stats.HitRate)
578
//        }
579
//
580
// Performance:
581
//   - O(1) with read lock
582
func (c *QueryCache) Len() int {
1✔
583
        c.mu.RLock()
1✔
584
        defer c.mu.RUnlock()
1✔
585
        return c.list.Len()
1✔
586
}
1✔
587

588
// Stats returns cache statistics.
589
//
590
// Use this to monitor cache performance and tune cache size and TTL settings.
591
// Statistics are tracked atomically and have minimal performance overhead.
592
//
593
// Returns:
594
//   - CacheStats with hit rate, size, and access counts
595
//
596
// Example 1 - Performance Monitoring:
597
//
598
//        cache := cache.GlobalQueryCache()
599
//        stats := cache.Stats()
600
//
601
//        fmt.Printf("Cache Performance:\n")
602
//        fmt.Printf("  Size: %d/%d (%.1f%% full)\n",
603
//                stats.Size, stats.MaxSize,
604
//                float64(stats.Size)/float64(stats.MaxSize)*100)
605
//        fmt.Printf("  Hit Rate: %.2f%%\n", stats.HitRate)
606
//        fmt.Printf("  Hits: %d\n", stats.Hits)
607
//        fmt.Printf("  Misses: %d\n", stats.Misses)
608
//
609
// Example 2 - Metrics Collection:
610
//
611
//        func recordCacheMetrics() {
612
//                cache := cache.GlobalQueryCache()
613
//                stats := cache.Stats()
614
//
615
//                metrics.Gauge("query_cache.size", float64(stats.Size))
616
//                metrics.Gauge("query_cache.hit_rate", stats.HitRate)
617
//                metrics.Counter("query_cache.hits", float64(stats.Hits))
618
//                metrics.Counter("query_cache.misses", float64(stats.Misses))
619
//        }
620
//
621
// Example 3 - Tuning Decisions:
622
//
623
//        stats := cache.GlobalQueryCache().Stats()
624
//
625
//        if stats.HitRate < 50 {
626
//                log.Println("Low hit rate - consider increasing cache size")
627
//        }
628
//
629
//        if stats.Size == stats.MaxSize {
630
//                log.Println("Cache full - consider increasing maxSize")
631
//        }
632
//
633
// Example 4 - Periodic Reporting:
634
//
635
//        go func() {
636
//                ticker := time.NewTicker(1 * time.Minute)
637
//                for range ticker.C {
638
//                        stats := cache.GlobalQueryCache().Stats()
639
//                        log.Printf("Cache: %d entries, %.1f%% hit rate",
640
//                                stats.Size, stats.HitRate)
641
//                }
642
//        }()
643
//
644
// Interpreting Hit Rate:
645
//   - >80%: Excellent - cache is very effective
646
//   - 60-80%: Good - cache is helping
647
//   - 40-60%: Fair - consider tuning
648
//   - <40%: Poor - cache may be too small or TTL too short
649
//
650
// Performance:
651
//   - O(1) with read lock
652
//   - Atomic statistics access
653
//   - No allocations
654
//
655
// ELI12:
656
//
657
// Stats tells you how well your cache is working:
658
//   - Hit Rate: How often you find what you're looking for (higher is better)
659
//   - Size: How many things are in the cache right now
660
//   - Hits: How many times you found what you wanted
661
//   - Misses: How many times you had to go searching
662
//
663
// It's like checking your homework success rate - if you're getting most
664
// answers from your notes (high hit rate), your notes are working well!
665
func (c *QueryCache) Stats() CacheStats {
1✔
666
        hits := atomic.LoadUint64(&c.hits)
1✔
667
        misses := atomic.LoadUint64(&c.misses)
1✔
668

1✔
669
        c.mu.RLock()
1✔
670
        size := c.list.Len()
1✔
671
        c.mu.RUnlock()
1✔
672

1✔
673
        total := hits + misses
1✔
674
        var hitRate float64
1✔
675
        if total > 0 {
2✔
676
                hitRate = float64(hits) / float64(total) * 100
1✔
677
        }
1✔
678

679
        return CacheStats{
1✔
680
                Size:    size,
1✔
681
                MaxSize: c.maxSize,
1✔
682
                Hits:    hits,
1✔
683
                Misses:  misses,
1✔
684
                HitRate: hitRate,
1✔
685
        }
1✔
686
}
687

688
// CacheStats holds a point-in-time snapshot of cache performance
// statistics, as returned by Stats. All fields are plain values copied
// out of the cache, so a CacheStats is safe to read and pass around
// freely.
//
// Use it for health checks, dashboards, or metrics export:
//
//	stats := cache.Stats()
//	if stats.HitRate < 50 {
//		log.Println("low hit rate - consider increasing cache size")
//	}
//	if stats.Size == stats.MaxSize {
//		log.Println("cache full - consider increasing maxSize")
//	}
type CacheStats struct {
	Size    int     // Current number of entries
	MaxSize int     // Maximum capacity
	Hits    uint64  // Number of cache hits
	Misses  uint64  // Number of cache misses
	HitRate float64 // Hit rate percentage (0-100)
}
760

761
// SetEnabled enables or disables the cache.
762
//
763
// When disabled, all Get operations return cache misses and Put operations
764
// are no-ops. The cache is also cleared when disabled. Use this for debugging
765
// or when you want to bypass caching temporarily.
766
//
767
// Parameters:
768
//   - enabled: true to enable caching, false to disable
769
//
770
// Example 1 - Debugging:
771
//
772
//        // Disable cache to test parsing performance
773
//        cache := cache.GlobalQueryCache()
774
//        cache.SetEnabled(false)
775
//
776
//        start := time.Now()
777
//        for i := 0; i < 1000; i++ {
778
//                executeQuery("MATCH (n) RETURN n", nil)
779
//        }
780
//        fmt.Printf("Without cache: %v\n", time.Since(start))
781
//
782
//        // Re-enable for comparison
783
//        cache.SetEnabled(true)
784
//        start = time.Now()
785
//        for i := 0; i < 1000; i++ {
786
//                executeQuery("MATCH (n) RETURN n", nil)
787
//        }
788
//        fmt.Printf("With cache: %v\n", time.Since(start))
789
//
790
// Example 2 - Conditional Caching:
791
//
792
//        func executeQuery(query string, useCache bool) (*Result, error) {
793
//                cache := cache.GlobalQueryCache()
794
//                cache.SetEnabled(useCache)
795
//
796
//                // Execute query (cache behavior depends on useCache)
797
//                return executor.Execute(query, nil)
798
//        }
799
//
800
// Example 3 - Testing:
801
//
802
//        func TestParserWithoutCache(t *testing.T) {
803
//                cache := cache.NewQueryCache(100, 0)
804
//                cache.SetEnabled(false) // Force re-parsing
805
//
806
//                // All queries will be parsed fresh
807
//                for _, query := range testQueries {
808
//                        result := executeQuery(query, nil)
809
//                        // Verify parsing logic...
810
//                }
811
//        }
812
//
813
// Performance Impact:
814
//   - Disabled: All Get() returns false (cache miss)
815
//   - Disabled: All Put() are no-ops
816
//   - Disabling clears the cache (frees memory)
817
//
818
// Thread Safety:
819
//   - Safe to call concurrently
820
//   - Exclusive lock held during state change
821
func (c *QueryCache) SetEnabled(enabled bool) {
1✔
822
        c.mu.Lock()
1✔
823
        defer c.mu.Unlock()
1✔
824
        c.enabled = enabled
1✔
825

1✔
826
        if !enabled {
2✔
827
                c.list.Init()
1✔
828
                c.items = make(map[uint64]*list.Element, c.maxSize)
1✔
829
        }
1✔
830
}
831

832
// evictOldest removes the least recently used entry.
833
// Caller must hold the lock.
834
func (c *QueryCache) evictOldest() {
1✔
835
        elem := c.list.Back()
1✔
836
        if elem != nil {
2✔
837
                c.removeElement(elem)
1✔
838
        }
1✔
839
}
840

841
// removeElement removes an element from the cache.
842
// Caller must hold the lock.
843
func (c *QueryCache) removeElement(elem *list.Element) {
1✔
844
        c.list.Remove(elem)
1✔
845
        entry := elem.Value.(*cacheEntry)
1✔
846
        delete(c.items, entry.key)
1✔
847
}
1✔
848

849
// =============================================================================
850
// Global Query Cache (singleton for convenience)
851
// =============================================================================
852

853
var (
854
        globalQueryCache     *QueryCache
855
        globalQueryCacheOnce sync.Once
856
)
857

858
// GlobalQueryCache returns the global query cache instance.
859
//
860
// The global cache is a singleton that's lazily initialized with default
861
// settings (1000 entries, 5-minute TTL). Use ConfigureGlobalCache to
862
// customize the cache before first use.
863
//
864
// Returns:
865
//   - Shared QueryCache instance
866
//
867
// Example 1 - Simple Usage:
868
//
869
//        func executeQuery(query string, params map[string]interface{}) (*Result, error) {
870
//                cache := cache.GlobalQueryCache()
871
//                key := cache.Key(query, params)
872
//
873
//                if plan, ok := cache.Get(key); ok {
874
//                        return executePlan(plan.(*ParsedPlan), params)
875
//                }
876
//
877
//                plan, err := parseQuery(query)
878
//                if err != nil {
879
//                        return nil, err
880
//                }
881
//                cache.Put(key, plan)
882
//
883
//                return executePlan(plan, params)
884
//        }
885
//
886
// Example 2 - With Custom Configuration:
887
//
888
//        func init() {
889
//                // Configure before first use
890
//                cache.ConfigureGlobalCache(5000, 10*time.Minute)
891
//        }
892
//
893
//        func main() {
894
//                // Now uses custom configuration
895
//                cache := cache.GlobalQueryCache()
896
//                fmt.Printf("Cache size: %d\n", cache.Len())
897
//        }
898
//
899
// Example 3 - Monitoring:
900
//
901
//        go func() {
902
//                ticker := time.NewTicker(1 * time.Minute)
903
//                for range ticker.C {
904
//                        stats := cache.GlobalQueryCache().Stats()
905
//                        log.Printf("Cache hit rate: %.1f%%", stats.HitRate)
906
//                }
907
//        }()
908
//
909
// Default Configuration:
910
//   - MaxSize: 1000 entries
911
//   - TTL: 5 minutes
912
//   - Enabled: true
913
//
914
// Thread Safety:
915
//   - Singleton initialization is thread-safe
916
//   - All cache operations are thread-safe
917
//
918
// ELI12:
919
//
920
// GlobalQueryCache is like having ONE shared notebook for the whole class:
921
//   - Everyone uses the same notebook (singleton)
922
//   - First person to open it sets it up (lazy initialization)
923
//   - Everyone can read and write at the same time (thread-safe)
924
//   - No need to pass the notebook around - just call GlobalQueryCache()!
925
func GlobalQueryCache() *QueryCache {
1✔
926
        globalQueryCacheOnce.Do(func() {
1✔
UNCOV
927
                globalQueryCache = NewQueryCache(1000, 5*time.Minute)
×
UNCOV
928
        })
×
929
        return globalQueryCache
1✔
930
}
931

932
// ConfigureGlobalCache configures the global query cache.
933
//
934
// This function must be called before the first use of GlobalQueryCache()
935
// to customize the cache settings. Subsequent calls are no-ops (first call wins).
936
//
937
// Parameters:
938
//   - maxSize: Maximum number of cached plans (LRU eviction when exceeded)
939
//   - ttl: Time-to-live for cached entries (0 = no expiration)
940
//
941
// Example 1 - Application Initialization:
942
//
943
//        func main() {
944
//                // Configure cache early in main()
945
//                cache.ConfigureGlobalCache(5000, 10*time.Minute)
946
//
947
//                // Start application
948
//                server.Start()
949
//        }
950
//
951
// Example 2 - Environment-Based Configuration:
952
//
953
//        func init() {
954
//                maxSize := getEnvInt("CACHE_SIZE", 1000)
955
//                ttl := getEnvDuration("CACHE_TTL", 5*time.Minute)
956
//
957
//                cache.ConfigureGlobalCache(maxSize, ttl)
958
//        }
959
//
960
// Example 3 - Production vs Development:
961
//
962
//        func init() {
963
//                if os.Getenv("ENV") == "production" {
964
//                        // Large cache for production
965
//                        cache.ConfigureGlobalCache(10000, 15*time.Minute)
966
//                } else {
967
//                        // Small cache for development
968
//                        cache.ConfigureGlobalCache(100, 1*time.Minute)
969
//                }
970
//        }
971
//
972
// Example 4 - Testing:
973
//
974
//        func TestMain(m *testing.M) {
975
//                // Small cache for tests
976
//                cache.ConfigureGlobalCache(10, 0)
977
//                os.Exit(m.Run())
978
//        }
979
//
980
// Timing:
981
//   - Call in init() or early in main()
982
//   - Before any query execution
983
//   - Before starting HTTP server
984
//
985
// Thread Safety:
986
//   - First call wins (sync.Once)
987
//   - Subsequent calls are ignored
988
//   - Safe to call from multiple goroutines
989
//
990
// ELI12:
991
//
992
// ConfigureGlobalCache is like setting up the classroom before students arrive:
993
//   - You decide how big the shared notebook should be (maxSize)
994
//   - You decide how long notes stay valid (ttl)
995
//   - Once students arrive, you can't change the notebook (first call wins)
996
//   - Do this in init() or main() before anyone uses the cache!
997
func ConfigureGlobalCache(maxSize int, ttl time.Duration) {
1✔
998
        globalQueryCacheOnce.Do(func() {
2✔
999
                globalQueryCache = NewQueryCache(maxSize, ttl)
1✔
1000
        })
1✔
1001
}
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc