dgraph-io / ristretto / build 5920607970

02 Jul 2023 06:33AM UTC. Coverage: 73.696% (remained the same).
Push by web-flow: chore: fix typo error (#341) ("remove typo error")

1 of 1 new or added line in 1 file covered (100.0%)
2219 of 3011 relevant lines covered (73.7%)
2509404.21 hits per line

Source File: /cache.go (91.3% covered)
/*
 * Copyright 2019 Dgraph Labs, Inc. and Contributors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// Ristretto is a fast, fixed size, in-memory cache with a dual focus on
// throughput and hit ratio performance. You can easily add Ristretto to an
// existing system and keep the most valuable data where you need it.
package ristretto

import (
        "bytes"
        "errors"
        "fmt"
        "sync"
        "sync/atomic"
        "time"
        "unsafe"

        "github.com/dgraph-io/ristretto/z"
)

var (
        // TODO: find the optimal value for this or make it configurable
        setBufSize = 32 * 1024
)

type itemCallback func(*Item)

const itemSize = int64(unsafe.Sizeof(storeItem{}))

// Cache is a thread-safe implementation of a hashmap with a TinyLFU admission
// policy and a Sampled LFU eviction policy. You can use the same Cache instance
// from as many goroutines as you want.
type Cache struct {
        // store is the central concurrent hashmap where key-value items are stored.
        store store
        // policy determines what gets let into the cache and what gets kicked out.
        policy policy
        // getBuf is a custom ring buffer implementation that gets pushed to when
        // keys are read.
        getBuf *ringBuffer
        // setBuf is a buffer allowing us to batch/drop Sets during times of high
        // contention.
        setBuf chan *Item
        // onEvict is called for item evictions.
        onEvict itemCallback
        // onReject is called when an item is rejected via admission policy.
        onReject itemCallback
        // onExit is called whenever a value goes out of scope from the cache.
        onExit (func(interface{}))
        // keyToHash is used to customize the key hashing algorithm. Each key
        // will be hashed using the provided function. If keyToHash is not set,
        // the default keyToHash function is used.
        keyToHash func(interface{}) (uint64, uint64)
        // stop is used to stop the processItems goroutine.
        stop chan struct{}
        // isClosed indicates whether the cache is closed.
        isClosed bool
        // cost calculates cost from a value.
        cost func(value interface{}) int64
        // ignoreInternalCost dictates whether to ignore the cost of internally storing
        // the item in the cost calculation.
        ignoreInternalCost bool
        // cleanupTicker is used to periodically check for entries whose TTL has passed.
        cleanupTicker *time.Ticker
        // Metrics contains a running log of important statistics like hits, misses,
        // and dropped items.
        Metrics *Metrics
}

// Config is passed to NewCache for creating new Cache instances.
type Config struct {
        // NumCounters determines the number of counters (keys) to keep that hold
        // access frequency information. It's generally a good idea to have more
        // counters than the max cache capacity, as this will improve eviction
        // accuracy and subsequent hit ratios.
        //
        // For example, if you expect your cache to hold 1,000,000 items when full,
        // NumCounters should be 10,000,000 (10x). Each counter takes up roughly
        // 3 bytes (4 bits for each counter * 4 copies plus about a byte per
        // counter for the bloom filter). Note that the number of counters is
        // internally rounded up to the nearest power of 2, so the space usage
        // may be a little larger than 3 bytes * NumCounters.
        NumCounters int64
        // MaxCost can be considered as the cache capacity, in whatever units you
        // choose to use.
        //
        // For example, if you want the cache to have a max capacity of 100MB, you
        // would set MaxCost to 100,000,000 and pass an item's number of bytes as
        // the `cost` parameter for calls to Set. If new items are accepted, the
        // eviction process will take care of making room for the new item and not
        // overflowing the MaxCost value.
        MaxCost int64
        // BufferItems determines the size of Get buffers.
        //
        // Unless you have a rare use case, using `64` as the BufferItems value
        // results in good performance.
        BufferItems int64
        // Metrics determines whether cache statistics are kept during the cache's
        // lifetime. There *is* some overhead to keeping statistics, so you should
        // only set this flag to true when testing or when throughput performance
        // isn't a major factor.
        Metrics bool
        // OnEvict is called for every eviction and passes the hashed key, value,
        // and cost to the function.
        OnEvict func(item *Item)
        // OnReject is called for every rejection done via the policy.
        OnReject func(item *Item)
        // OnExit is called whenever a value is removed from the cache. This can be
        // used to do manual memory deallocation. It is also called on eviction
        // and rejection of the value.
        OnExit func(val interface{})
        // KeyToHash is used to customize the key hashing algorithm. Each key
        // will be hashed using the provided function. If KeyToHash is not set,
        // the default keyToHash function is used.
        KeyToHash func(key interface{}) (uint64, uint64)
        // Cost evaluates a value and outputs a corresponding cost. This function
        // is run after Set is called for a new item or an item update with a cost
        // param of 0.
        Cost func(value interface{}) int64
        // IgnoreInternalCost, when set to true, indicates to the cache that the
        // cost of internally storing the value should be ignored. This is useful
        // when the cost passed to Set is not using bytes as units. Keep in mind
        // that setting this to true will increase the memory usage.
        IgnoreInternalCost bool
}

type itemFlag byte

const (
        itemNew itemFlag = iota
        itemDelete
        itemUpdate
)

// Item is passed to setBuf so items can eventually be added to the cache.
type Item struct {
        flag       itemFlag
        Key        uint64
        Conflict   uint64
        Value      interface{}
        Cost       int64
        Expiration time.Time
        wg         *sync.WaitGroup
}

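// Illustrative usage sketch (not part of cache.go): a Config that wires up the
// costing and callback fields documented above. The value type, the cost
// function body, and the OnEvict handler are arbitrary assumptions, not
// recommendations from this file.
//
//      cfg := &ristretto.Config{
//              NumCounters:        1e6,
//              MaxCost:            100_000_000, // e.g. ~100 MB if Cost returns bytes.
//              BufferItems:        64,
//              IgnoreInternalCost: false, // include per-item storage overhead in the cost.
//              Cost: func(value interface{}) int64 {
//                      // Only consulted when Set is called with a cost of 0.
//                      return int64(len(value.(string)))
//              },
//              OnEvict: func(item *ristretto.Item) {
//                      // item.Key is the hashed key; item.Value and item.Cost are as stored.
//              },
//      }
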
// NewCache returns a new Cache instance, along with any configuration errors.
func NewCache(config *Config) (*Cache, error) {
        switch {
        case config.NumCounters == 0:
                return nil, errors.New("NumCounters can't be zero")
        case config.MaxCost == 0:
                return nil, errors.New("MaxCost can't be zero")
        case config.BufferItems == 0:
                return nil, errors.New("BufferItems can't be zero")
        }
        policy := newPolicy(config.NumCounters, config.MaxCost)
        cache := &Cache{
                store:              newStore(),
                policy:             policy,
                getBuf:             newRingBuffer(policy, config.BufferItems),
                setBuf:             make(chan *Item, setBufSize),
                keyToHash:          config.KeyToHash,
                stop:               make(chan struct{}),
                cost:               config.Cost,
                ignoreInternalCost: config.IgnoreInternalCost,
                cleanupTicker:      time.NewTicker(time.Duration(bucketDurationSecs) * time.Second / 2),
        }
        cache.onExit = func(val interface{}) {
                if config.OnExit != nil && val != nil {
                        config.OnExit(val)
                }
        }
        cache.onEvict = func(item *Item) {
                if config.OnEvict != nil {
                        config.OnEvict(item)
                }
                cache.onExit(item.Value)
        }
        cache.onReject = func(item *Item) {
                if config.OnReject != nil {
                        config.OnReject(item)
                }
                cache.onExit(item.Value)
        }
        if cache.keyToHash == nil {
                cache.keyToHash = z.KeyToHash
        }
        if config.Metrics {
                cache.collectMetrics()
        }
        // NOTE: benchmarks seem to show that performance decreases the more
        //       goroutines we have running cache.processItems(), so 1 should
        //       usually be sufficient
        go cache.processItems()
        return cache, nil
}

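// Illustrative usage sketch (not part of cache.go): creating a cache with
// NewCache. The sizing values follow the guidance in the Config comments above
// (NumCounters at roughly 10x the expected item count); they are assumptions,
// not requirements.
//
//      package main
//
//      import "github.com/dgraph-io/ristretto"
//
//      func main() {
//              cache, err := ristretto.NewCache(&ristretto.Config{
//                      NumCounters: 1e7,     // track frequency for ~10M keys (10x expected items).
//                      MaxCost:     1 << 30, // total cost budget; 1 GB if costs are in bytes.
//                      BufferItems: 64,      // recommended Get buffer size.
//                      Metrics:     true,    // keep hit/miss statistics (small overhead).
//              })
//              if err != nil {
//                      panic(err)
//              }
//              defer cache.Close()
//              _ = cache
//      }
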
// Wait blocks until all buffered writes have been applied. This ensures a call to Set()
// will be visible to future calls to Get().
func (c *Cache) Wait() {
        if c == nil || c.isClosed {
                return
        }
        wg := &sync.WaitGroup{}
        wg.Add(1)
        c.setBuf <- &Item{wg: wg}
        wg.Wait()
}

// Get returns the value (if any) and a boolean representing whether the
// value was found or not. The value can be nil and the boolean can be true at
// the same time. Get will not return expired items.
func (c *Cache) Get(key interface{}) (interface{}, bool) {
        if c == nil || c.isClosed || key == nil {
                return nil, false
        }
        keyHash, conflictHash := c.keyToHash(key)
        c.getBuf.Push(keyHash)
        value, ok := c.store.Get(keyHash, conflictHash)
        if ok {
                c.Metrics.add(hit, keyHash, 1)
        } else {
                c.Metrics.add(miss, keyHash, 1)
        }
        return value, ok
}

// Set attempts to add the key-value item to the cache. If it returns false,
// then the Set was dropped and the key-value item isn't added to the cache. If
// it returns true, there's still a chance it could be dropped by the policy if
// it's determined that the key-value item isn't worth keeping, but otherwise the
// item will be added and other items will be evicted in order to make room.
//
// To dynamically evaluate the item's cost using the Config.Cost function, set
// the cost parameter to 0 and the Cost function will be run when needed in
// order to find the item's true cost.
func (c *Cache) Set(key, value interface{}, cost int64) bool {
        return c.SetWithTTL(key, value, cost, 0*time.Second)
}

// SetWithTTL works like Set but adds a key-value pair to the cache that will expire
// after the specified TTL (time to live) has passed. A zero value means the value never
// expires, which is identical to calling Set. A negative value is a no-op and the value
// is discarded.
func (c *Cache) SetWithTTL(key, value interface{}, cost int64, ttl time.Duration) bool {
        if c == nil || c.isClosed || key == nil {
                return false
        }

        var expiration time.Time
        switch {
        case ttl == 0:
                // No expiration.
                break
        case ttl < 0:
                // Treat this as a no-op.
                return false
        default:
                expiration = time.Now().Add(ttl)
        }

        keyHash, conflictHash := c.keyToHash(key)
        i := &Item{
                flag:       itemNew,
                Key:        keyHash,
                Conflict:   conflictHash,
                Value:      value,
                Cost:       cost,
                Expiration: expiration,
        }
        // cost is eventually updated. The expiration must also be immediately updated
        // to prevent items from being prematurely removed from the map.
        if prev, ok := c.store.Update(i); ok {
                c.onExit(prev)
                i.flag = itemUpdate
        }
        // Attempt to send item to policy.
        select {
        case c.setBuf <- i:
                return true
        default:
                if i.flag == itemUpdate {
                        // Return true if this was an update operation since we've already
                        // updated the store. For all the other operations (set/delete), we
                        // return false which means the item was not inserted.
                        return true
                }
                c.Metrics.add(dropSets, keyHash, 1)
                return false
        }
}

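// Illustrative usage sketch (not part of cache.go): the Set / Wait / Get flow
// described above, assuming `cache` was created as in the earlier sketch.
// Because Sets are buffered, Wait makes the write visible to the Get that
// follows; the key, value, cost, and TTL below are arbitrary examples.
//
//      ok := cache.Set("answer", 42, 1) // cost of 1; may be dropped under contention.
//      if !ok {
//              // The Set was dropped; the item is not in the cache.
//      }
//      cache.Wait() // block until buffered writes have been applied.
//      if v, found := cache.Get("answer"); found {
//              fmt.Println(v) // 42
//      }
//
//      // With a TTL: the entry expires roughly five minutes after insertion.
//      cache.SetWithTTL("session", "abc123", 1, 5*time.Minute)
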
// Del deletes the key-value item from the cache if it exists.
func (c *Cache) Del(key interface{}) {
        if c == nil || c.isClosed || key == nil {
                return
        }
        keyHash, conflictHash := c.keyToHash(key)
        // Delete immediately.
        _, prev := c.store.Del(keyHash, conflictHash)
        c.onExit(prev)
        // If we've set an item, it would be applied slightly later.
        // So we must push the same item to `setBuf` with the deletion flag.
        // This ensures that if a set is followed by a delete, it will be
        // applied in the correct order.
        c.setBuf <- &Item{
                flag:     itemDelete,
                Key:      keyHash,
                Conflict: conflictHash,
        }
}

// GetTTL returns the TTL for the specified key and a bool that is true if the
// item was found and is not expired.
func (c *Cache) GetTTL(key interface{}) (time.Duration, bool) {
        if c == nil || key == nil {
                return 0, false
        }

        keyHash, conflictHash := c.keyToHash(key)
        if _, ok := c.store.Get(keyHash, conflictHash); !ok {
                // not found
                return 0, false
        }

        expiration := c.store.Expiration(keyHash)
        if expiration.IsZero() {
                // found but no expiration
                return 0, true
        }

        if time.Now().After(expiration) {
                // found but expired
                return 0, false
        }

        return time.Until(expiration), true
}

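// Illustrative usage sketch (not part of cache.go): SetWithTTL, GetTTL and Del
// together, again assuming an existing `cache`; the key and TTL are arbitrary.
//
//      cache.SetWithTTL("token", "xyz", 1, 10*time.Second)
//      cache.Wait()
//      if ttl, ok := cache.GetTTL("token"); ok {
//              fmt.Printf("expires in %s\n", ttl) // remaining time to live.
//      }
//      cache.Del("token") // removed from the store immediately.
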
// Close stops all goroutines and closes all channels.
func (c *Cache) Close() {
        if c == nil || c.isClosed {
                return
        }
        c.Clear()

        // Block until processItems goroutine is returned.
        c.stop <- struct{}{}
        close(c.stop)
        close(c.setBuf)
        c.policy.Close()
        c.isClosed = true
}

// Clear empties the hashmap and zeroes all policy counters. Note that this is
// not an atomic operation (but that shouldn't be a problem as it's assumed that
// Set/Get calls won't be occurring until after this).
func (c *Cache) Clear() {
        if c == nil || c.isClosed {
                return
        }
        // Block until processItems goroutine is returned.
        c.stop <- struct{}{}

        // Clear out the setBuf channel.
loop:
        for {
                select {
                case i := <-c.setBuf:
                        if i.wg != nil {
                                i.wg.Done()
                                continue
                        }
                        if i.flag != itemUpdate {
                                // In itemUpdate, the value is already set in the store. So, no need to call
                                // onEvict here.
                                c.onEvict(i)
                        }
                default:
                        break loop
                }
        }

        // Clear value hashmap and policy data.
        c.policy.Clear()
        c.store.Clear(c.onEvict)
        // Only reset metrics if they're enabled.
        if c.Metrics != nil {
                c.Metrics.Clear()
        }
        // Restart processItems goroutine.
        go c.processItems()
}

// MaxCost returns the max cost of the cache.
func (c *Cache) MaxCost() int64 {
        if c == nil {
                return 0
        }
        return c.policy.MaxCost()
}

// UpdateMaxCost updates the maxCost of an existing cache.
func (c *Cache) UpdateMaxCost(maxCost int64) {
        if c == nil {
                return
        }
        c.policy.UpdateMaxCost(maxCost)
}

// processItems is run by goroutines processing the Set buffer.
func (c *Cache) processItems() {
        startTs := make(map[uint64]time.Time)
        numToKeep := 100000 // TODO: Make this configurable via options.

        trackAdmission := func(key uint64) {
                if c.Metrics == nil {
                        return
                }
                startTs[key] = time.Now()
                if len(startTs) > numToKeep {
                        for k := range startTs {
                                if len(startTs) <= numToKeep {
                                        break
                                }
                                delete(startTs, k)
                        }
                }
        }
        onEvict := func(i *Item) {
                if ts, has := startTs[i.Key]; has {
                        c.Metrics.trackEviction(int64(time.Since(ts) / time.Second))
                        delete(startTs, i.Key)
                }
                if c.onEvict != nil {
                        c.onEvict(i)
                }
        }

        for {
                select {
                case i := <-c.setBuf:
                        if i.wg != nil {
                                i.wg.Done()
                                continue
                        }
                        // Calculate item cost value if new or update.
                        if i.Cost == 0 && c.cost != nil && i.flag != itemDelete {
                                i.Cost = c.cost(i.Value)
                        }
                        if !c.ignoreInternalCost {
                                // Add the cost of internally storing the object.
                                i.Cost += itemSize
                        }

                        switch i.flag {
                        case itemNew:
                                victims, added := c.policy.Add(i.Key, i.Cost)
                                if added {
                                        c.store.Set(i)
                                        c.Metrics.add(keyAdd, i.Key, 1)
                                        trackAdmission(i.Key)
                                } else {
                                        c.onReject(i)
                                }
                                for _, victim := range victims {
                                        victim.Conflict, victim.Value = c.store.Del(victim.Key, 0)
                                        onEvict(victim)
                                }

                        case itemUpdate:
                                c.policy.Update(i.Key, i.Cost)

                        case itemDelete:
                                c.policy.Del(i.Key) // Deals with metrics updates.
                                _, val := c.store.Del(i.Key, i.Conflict)
                                c.onExit(val)
                        }
                case <-c.cleanupTicker.C:
                        c.store.Cleanup(c.policy, onEvict)
                case <-c.stop:
                        return
                }
        }
}

// collectMetrics just creates a new *Metrics instance and adds the pointers
// to the cache and policy instances.
func (c *Cache) collectMetrics() {
        c.Metrics = newMetrics()
        c.policy.CollectMetrics(c.Metrics)
}

type metricType int

const (
        // The following 2 keep track of hits and misses.
        hit = iota
        miss
        // The following 3 keep track of number of keys added, updated and evicted.
        keyAdd
        keyUpdate
        keyEvict
        // The following 2 keep track of cost of keys added and evicted.
        costAdd
        costEvict
        // The following keep track of how many sets were dropped or rejected later.
        dropSets
        rejectSets
        // The following 2 keep track of how many gets were kept and dropped on the
        // floor.
        dropGets
        keepGets
        // This should be the final enum. Other enums should be set before this.
        doNotUse
)

func stringFor(t metricType) string {
        switch t {
        case hit:
                return "hit"
        case miss:
                return "miss"
        case keyAdd:
                return "keys-added"
        case keyUpdate:
                return "keys-updated"
        case keyEvict:
                return "keys-evicted"
        case costAdd:
                return "cost-added"
        case costEvict:
                return "cost-evicted"
        case dropSets:
                return "sets-dropped"
        case rejectSets:
                return "sets-rejected" // by policy.
        case dropGets:
                return "gets-dropped"
        case keepGets:
                return "gets-kept"
        default:
                return "unidentified"
        }
}

// Metrics is a snapshot of performance statistics for the lifetime of a cache instance.
type Metrics struct {
        all [doNotUse][]*uint64

        mu   sync.RWMutex
        life *z.HistogramData // Tracks the life expectancy of a key.
}

func newMetrics() *Metrics {
        s := &Metrics{
                life: z.NewHistogramData(z.HistogramBounds(1, 16)),
        }
        for i := 0; i < doNotUse; i++ {
                s.all[i] = make([]*uint64, 256)
                slice := s.all[i]
                for j := range slice {
                        slice[j] = new(uint64)
                }
        }
        return s
}

func (p *Metrics) add(t metricType, hash, delta uint64) {
        if p == nil {
                return
        }
        valp := p.all[t]
        // Avoid false sharing by padding at least 64 bytes of space between two
        // atomic counters which would be incremented.
        idx := (hash % 25) * 10
        atomic.AddUint64(valp[idx], delta)
}

func (p *Metrics) get(t metricType) uint64 {
        if p == nil {
                return 0
        }
        valp := p.all[t]
        var total uint64
        for i := range valp {
                total += atomic.LoadUint64(valp[i])
        }
        return total
}

// Hits is the number of Get calls where a value was found for the corresponding key.
func (p *Metrics) Hits() uint64 {
        return p.get(hit)
}

// Misses is the number of Get calls where a value was not found for the corresponding key.
func (p *Metrics) Misses() uint64 {
        return p.get(miss)
}

// KeysAdded is the total number of Set calls where a new key-value item was added.
func (p *Metrics) KeysAdded() uint64 {
        return p.get(keyAdd)
}

// KeysUpdated is the total number of Set calls where the value was updated.
func (p *Metrics) KeysUpdated() uint64 {
        return p.get(keyUpdate)
}

// KeysEvicted is the total number of keys evicted.
func (p *Metrics) KeysEvicted() uint64 {
        return p.get(keyEvict)
}

// CostAdded is the sum of costs that have been added (successful Set calls).
func (p *Metrics) CostAdded() uint64 {
        return p.get(costAdd)
}

// CostEvicted is the sum of all costs that have been evicted.
func (p *Metrics) CostEvicted() uint64 {
        return p.get(costEvict)
}

// SetsDropped is the number of Set calls that don't make it into internal
// buffers (due to contention or some other reason).
func (p *Metrics) SetsDropped() uint64 {
        return p.get(dropSets)
}

// SetsRejected is the number of Set calls rejected by the policy (TinyLFU).
func (p *Metrics) SetsRejected() uint64 {
        return p.get(rejectSets)
}

// GetsDropped is the number of Get counter increments that are dropped
// internally.
func (p *Metrics) GetsDropped() uint64 {
        return p.get(dropGets)
}

// GetsKept is the number of Get counter increments that are kept.
func (p *Metrics) GetsKept() uint64 {
        return p.get(keepGets)
}

// Ratio is the number of Hits over all accesses (Hits + Misses). This is the
// ratio of successful Get calls.
func (p *Metrics) Ratio() float64 {
        if p == nil {
                return 0.0
        }
        hits, misses := p.get(hit), p.get(miss)
        if hits == 0 && misses == 0 {
                return 0.0
        }
        return float64(hits) / float64(hits+misses)
}

func (p *Metrics) trackEviction(numSeconds int64) {
        if p == nil {
                return
        }
        p.mu.Lock()
        defer p.mu.Unlock()
        p.life.Update(numSeconds)
}

// LifeExpectancySeconds returns a copy of the histogram tracking the life
// expectancy of keys, in seconds.
func (p *Metrics) LifeExpectancySeconds() *z.HistogramData {
        if p == nil {
                return nil
        }
        p.mu.RLock()
        defer p.mu.RUnlock()
        return p.life.Copy()
}

// Clear resets all the metrics.
func (p *Metrics) Clear() {
        if p == nil {
                return
        }
        for i := 0; i < doNotUse; i++ {
                for j := range p.all[i] {
                        atomic.StoreUint64(p.all[i][j], 0)
                }
        }
        p.mu.Lock()
        p.life = z.NewHistogramData(z.HistogramBounds(1, 16))
        p.mu.Unlock()
}

// String returns a string representation of the metrics.
func (p *Metrics) String() string {
        if p == nil {
                return ""
        }
        var buf bytes.Buffer
        for i := 0; i < doNotUse; i++ {
                t := metricType(i)
                fmt.Fprintf(&buf, "%s: %d ", stringFor(t), p.get(t))
        }
        fmt.Fprintf(&buf, "gets-total: %d ", p.get(hit)+p.get(miss))
        fmt.Fprintf(&buf, "hit-ratio: %.2f", p.Ratio())
        return buf.String()
}
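
// Illustrative usage sketch (not part of cache.go): reading the Metrics
// collected by the methods above. It assumes the cache was created with
// Config.Metrics set to true; the keys and resulting counts are arbitrary
// examples.
//
//      cache.Set("a", 1, 1)
//      cache.Wait()
//      cache.Get("a") // hit
//      cache.Get("b") // miss
//
//      m := cache.Metrics
//      fmt.Println(m.Hits(), m.Misses()) // 1 1
//      fmt.Printf("hit ratio: %.2f\n", m.Ratio())
//      fmt.Println(m.String()) // e.g. "hit: 1 miss: 1 keys-added: 1 ... hit-ratio: 0.50"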