• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

ben-manes / caffeine / #5371

11 Apr 2026 07:54PM UTC coverage: 99.887% (+0.001%) from 99.886%
#5371

push

github

ben-manes
additional test coverage

3875 of 3890 branches covered (99.61%)

7948 of 7957 relevant lines covered (99.89%)

1.0 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

99.82
/caffeine/src/main/java/com/github/benmanes/caffeine/cache/BoundedLocalCache.java
1
/*
2
 * Copyright 2014 Ben Manes. All Rights Reserved.
3
 *
4
 * Licensed under the Apache License, Version 2.0 (the "License");
5
 * you may not use this file except in compliance with the License.
6
 * You may obtain a copy of the License at
7
 *
8
 *     http://www.apache.org/licenses/LICENSE-2.0
9
 *
10
 * Unless required by applicable law or agreed to in writing, software
11
 * distributed under the License is distributed on an "AS IS" BASIS,
12
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
 * See the License for the specific language governing permissions and
14
 * limitations under the License.
15
 */
16
package com.github.benmanes.caffeine.cache;
17

18
import static com.github.benmanes.caffeine.cache.Async.ASYNC_EXPIRY;
19
import static com.github.benmanes.caffeine.cache.Caffeine.calculateHashMapCapacity;
20
import static com.github.benmanes.caffeine.cache.Caffeine.ceilingPowerOfTwo;
21
import static com.github.benmanes.caffeine.cache.Caffeine.requireArgument;
22
import static com.github.benmanes.caffeine.cache.Caffeine.toNanosSaturated;
23
import static com.github.benmanes.caffeine.cache.LocalLoadingCache.newBulkMappingFunction;
24
import static com.github.benmanes.caffeine.cache.LocalLoadingCache.newMappingFunction;
25
import static com.github.benmanes.caffeine.cache.Node.PROBATION;
26
import static com.github.benmanes.caffeine.cache.Node.PROTECTED;
27
import static com.github.benmanes.caffeine.cache.Node.WINDOW;
28
import static java.util.Locale.US;
29
import static java.util.Objects.requireNonNull;
30
import static java.util.Spliterator.DISTINCT;
31
import static java.util.Spliterator.IMMUTABLE;
32
import static java.util.Spliterator.NONNULL;
33
import static java.util.Spliterator.ORDERED;
34

35
import java.io.InvalidObjectException;
36
import java.io.ObjectInputStream;
37
import java.io.Serializable;
38
import java.lang.System.Logger;
39
import java.lang.System.Logger.Level;
40
import java.lang.invoke.MethodHandles;
41
import java.lang.invoke.VarHandle;
42
import java.lang.ref.Reference;
43
import java.lang.ref.ReferenceQueue;
44
import java.lang.ref.WeakReference;
45
import java.time.Duration;
46
import java.util.AbstractCollection;
47
import java.util.AbstractSet;
48
import java.util.ArrayDeque;
49
import java.util.Collection;
50
import java.util.Collections;
51
import java.util.Comparator;
52
import java.util.Deque;
53
import java.util.HashMap;
54
import java.util.IdentityHashMap;
55
import java.util.Iterator;
56
import java.util.LinkedHashMap;
57
import java.util.Map;
58
import java.util.NoSuchElementException;
59
import java.util.Objects;
60
import java.util.Optional;
61
import java.util.OptionalInt;
62
import java.util.OptionalLong;
63
import java.util.Set;
64
import java.util.Spliterator;
65
import java.util.Spliterators;
66
import java.util.concurrent.CancellationException;
67
import java.util.concurrent.CompletableFuture;
68
import java.util.concurrent.CompletionException;
69
import java.util.concurrent.ConcurrentHashMap;
70
import java.util.concurrent.ConcurrentMap;
71
import java.util.concurrent.Executor;
72
import java.util.concurrent.ForkJoinPool;
73
import java.util.concurrent.ForkJoinTask;
74
import java.util.concurrent.ThreadLocalRandom;
75
import java.util.concurrent.TimeUnit;
76
import java.util.concurrent.TimeoutException;
77
import java.util.concurrent.locks.ReentrantLock;
78
import java.util.function.BiConsumer;
79
import java.util.function.BiFunction;
80
import java.util.function.Consumer;
81
import java.util.function.Function;
82
import java.util.function.Predicate;
83
import java.util.stream.Stream;
84
import java.util.stream.StreamSupport;
85

86
import org.jspecify.annotations.NonNull;
87
import org.jspecify.annotations.Nullable;
88

89
import com.github.benmanes.caffeine.cache.Async.AsyncExpiry;
90
import com.github.benmanes.caffeine.cache.LinkedDeque.PeekingIterator;
91
import com.github.benmanes.caffeine.cache.Policy.CacheEntry;
92
import com.github.benmanes.caffeine.cache.References.InternalReference;
93
import com.github.benmanes.caffeine.cache.stats.StatsCounter;
94
import com.google.errorprone.annotations.CanIgnoreReturnValue;
95
import com.google.errorprone.annotations.Var;
96
import com.google.errorprone.annotations.concurrent.GuardedBy;
97

98
/**
99
 * An in-memory cache implementation that supports full concurrency of retrievals, a high expected
100
 * concurrency for updates, and multiple ways to bound the cache.
101
 * <p>
102
 * This class is abstract and code generated subclasses provide the complete implementation for a
103
 * particular configuration. This is to ensure that only the fields and execution paths necessary
104
 * for a given configuration are used.
105
 *
106
 * @author ben.manes@gmail.com (Ben Manes)
107
 * @param <K> the type of keys maintained by this cache
108
 * @param <V> the type of mapped values
109
 */
110
@SuppressWarnings({"RedundantSuppression", "ResultOfMethodCallIgnored", "serial", "unused"})
111
abstract class BoundedLocalCache<K, V> extends BLCHeader.DrainStatusRef
112
    implements LocalCache<K, V> {
113

114
  /*
115
   * This class performs a best-effort bounding of a ConcurrentHashMap using a page-replacement
116
   * algorithm to determine which entries to evict when the capacity is exceeded.
117
   *
118
   * Concurrency:
119
   * ------------
120
   * The page replacement algorithms are kept eventually consistent with the map. An update to the
121
   * map and recording of reads may not be immediately reflected in the policy's data structures.
122
   * These structures are guarded by a lock, and operations are applied in batches to avoid lock
123
   * contention. The penalty of applying the batches is spread across threads, so that the amortized
124
   * cost is slightly higher than performing just the ConcurrentHashMap operation [1].
125
   *
126
   * A memento of the reads and writes that were performed on the map is recorded in buffers. These
127
   * buffers are drained at the first opportunity after a write or when a read buffer is full. The
128
   * reads are offered to a buffer that will reject additions if contended on or if it is full. Due
129
   * to the concurrent nature of the read and write operations, a strict policy ordering is not
130
   * possible, but it may be observably strict when single-threaded. The buffers are drained
131
   * asynchronously to minimize the request latency and uses a state machine to determine when to
132
   * schedule this work on an executor.
133
   *
134
   * Due to a lack of a strict ordering guarantee, a task can be executed out-of-order, such as a
135
   * removal followed by its addition. The state of the entry is encoded using the key field to
136
   * avoid additional memory usage. An entry is "alive" if it is in both the hash table and the page
137
   * replacement policy. It is "retired" if it is not in the hash table and is pending removal from
138
   * the page replacement policy. Finally, an entry transitions to the "dead" state when it is
139
   * neither in the hash table nor the page replacement policy. Both the retired and dead states are
140
   * represented by a sentinel key that should not be used for map operations.
141
   *
142
   * Eviction:
143
   * ---------
144
   * Maximum size is implemented using the Window TinyLfu policy [2] due to its high hit rate, O(1)
145
   * time complexity, and small footprint. A new entry starts in the admission window and remains
146
   * there as long as it has high temporal locality (recency). Eventually an entry will slip from
147
   * the window into the main space. If the main space is already full, then a historic frequency
148
   * filter determines whether to evict the newly admitted entry or the victim entry chosen by the
149
   * eviction policy. This process ensures that the entries in the window were very recently used,
150
   * while entries in the main space are accessed very frequently and remain moderately recent. The
151
   * windowing allows the policy to have a high hit rate when entries exhibit a bursty access
152
   * pattern, while the filter ensures that popular items are retained. The admission window uses
153
   * LRU and the main space uses Segmented LRU.
154
   *
155
   * The optimal size of the window vs. main spaces is workload dependent [3]. A large admission
156
   * window is favored by recency-biased workloads, while a small one favors frequency-biased
157
   * workloads. When the window is too small, then recent arrivals are prematurely evicted, but when
158
   * it is too large, then they pollute the cache and force the eviction of more popular entries.
159
   * The optimal configuration is dynamically determined by using hill climbing to walk the hit rate
160
   * curve. This is achieved by sampling the hit rate and adjusting the window size in the direction
161
   * that is improving (making positive or negative steps). At each interval, the step size is
162
   * decreased until the hit rate climber converges at the optimal setting. The process is restarted
163
   * when the hit rate changes over a threshold, indicating that the workload altered, and a new
164
   * setting may be required.
165
   *
166
   * The historic usage is retained in a compact popularity sketch, which uses hashing to
167
   * probabilistically estimate an item's frequency. This exposes a flaw where an adversary could
168
   * use hash flooding [4] to artificially raise the frequency of the main space's victim and cause
169
   * all candidates to be rejected. In the worst case, by exploiting hash collisions, an attacker
170
   * could cause the cache to never hit and hold only worthless items, resulting in a
171
   * denial-of-service attack against the underlying resource. This is mitigated by introducing
172
   * jitter, allowing candidates that are at least moderately popular to have a small, random chance
173
   * of being admitted. This causes the victim to be evicted, but in a way that marginally impacts
174
   * the hit rate.
175
   *
176
   * Expiration:
177
   * -----------
178
   * Expiration is implemented in O(1) time complexity. The time-to-idle policy uses an access-order
179
   * queue, the time-to-live policy uses a write-order queue, and variable expiration uses a
180
   * hierarchical timer wheel [5]. The queuing policies allow for peeking at the oldest entry to
181
   * determine if it has expired. If it has not, then the younger entries must not have expired
182
   * either. If a maximum size is set, then expiration will share the queues, minimizing the
183
   * per-entry footprint. The timer wheel based policy uses hashing and cascading in a manner that
184
   * amortizes the penalty of sorting to achieve a similar algorithmic cost.
185
   *
186
   * The expiration updates are applied in a best effort fashion. The reordering of variable or
187
   * access-order expiration may be discarded by the read buffer if it is full or contended.
188
   * Similarly, the reordering of write expiration may be ignored for an entry if the last update
189
   * was within a short time window. This is done to avoid overwhelming the write buffer.
190
   *
191
   * [1] BP-Wrapper: A Framework Making Any Replacement Algorithms (Almost) Lock Contention Free
192
   * https://web.njit.edu/~dingxn/papers/BP-Wrapper.pdf
193
   * [2] TinyLFU: A Highly Efficient Cache Admission Policy
194
   * https://dl.acm.org/citation.cfm?id=3149371
195
   * [3] Adaptive Software Cache Management
196
   * https://dl.acm.org/citation.cfm?id=3274816
197
   * [4] Denial of Service via Algorithmic Complexity Attack
198
   * https://www.usenix.org/legacy/events/sec03/tech/full_papers/crosby/crosby.pdf
199
   * [5] Hashed and Hierarchical Timing Wheels
200
   * http://www.cs.columbia.edu/~nahum/w6998/papers/ton97-timing-wheels.pdf
201
   */
202

203
  static final Logger logger = System.getLogger(BoundedLocalCache.class.getName());
1✔
204

205
  /** The number of CPUs */
206
  static final int NCPU = Runtime.getRuntime().availableProcessors();
1✔
207
  /** The initial capacity of the write buffer. */
208
  static final int WRITE_BUFFER_MIN = 4;
209
  /** The maximum capacity of the write buffer. */
210
  static final int WRITE_BUFFER_MAX = 128 * ceilingPowerOfTwo(NCPU);
1✔
211
  /** The number of attempts to insert into the write buffer before yielding. */
212
  static final int WRITE_BUFFER_RETRIES = 100;
213
  /** The maximum weighted capacity of the map. */
214
  static final long MAXIMUM_CAPACITY = Long.MAX_VALUE - Integer.MAX_VALUE;
215
  /** The initial percent of the maximum weighted capacity dedicated to the main space. */
216
  static final double PERCENT_MAIN = 0.99d;
217
  /** The percent of the maximum weighted capacity dedicated to the main's protected space. */
218
  static final double PERCENT_MAIN_PROTECTED = 0.80d;
219
  /** The difference in hit rates that restarts the climber. */
220
  static final double HILL_CLIMBER_RESTART_THRESHOLD = 0.05d;
221
  /** The percent of the total size to adapt the window by. */
222
  static final double HILL_CLIMBER_STEP_PERCENT = 0.0625d;
223
  /** The rate to decrease the step size to adapt by. */
224
  static final double HILL_CLIMBER_STEP_DECAY_RATE = 0.98d;
225
  /** The minimum popularity for allowing randomized admission. */
226
  static final int ADMIT_HASHDOS_THRESHOLD = 6;
227
  /** The maximum number of entries that can be transferred between queues. */
228
  static final int QUEUE_TRANSFER_THRESHOLD = 1_000;
229
  /** The maximum time window between entry updates before the expiration must be reordered. */
230
  static final long EXPIRE_WRITE_TOLERANCE = TimeUnit.SECONDS.toNanos(1);
1✔
231
  /** The maximum duration before an entry expires. */
232
  static final long MAXIMUM_EXPIRY = (Long.MAX_VALUE >> 1); // 150 years
233
  /** The duration to wait on the eviction lock before warning of a possible misuse. */
234
  static final long WARN_AFTER_LOCK_WAIT_NANOS = TimeUnit.SECONDS.toNanos(30);
1✔
235
  /** The number of retries before computing to validate the entry's integrity; pow2 modulus. */
236
  static final int MAX_PUT_SPIN_WAIT_ATTEMPTS = 1024 - 1;
237
  /** The handle for the in-flight refresh operations. */
238
  static final VarHandle REFRESHES = findVarHandle(
1✔
239
      BoundedLocalCache.class, "refreshes", ConcurrentMap.class);
240

241
  final @Nullable RemovalListener<K, V> evictionListener;
242
  final @Nullable AsyncCacheLoader<K, V> cacheLoader;
243

244
  final MpscGrowableArrayQueue<Runnable> writeBuffer;
245
  final ConcurrentHashMap<Object, Node<K, V>> data;
246
  final PerformCleanupTask drainBuffersTask;
247
  final Consumer<Node<K, V>> accessPolicy;
248
  final Buffer<Node<K, V>> readBuffer;
249
  final NodeFactory<K, V> nodeFactory;
250
  final ReentrantLock evictionLock;
251
  final Weigher<K, V> weigher;
252
  final Executor executor;
253

254
  final boolean isWeighted;
255
  final boolean isAsync;
256

257
  @Nullable Set<K> keySet;
258
  @Nullable Collection<V> values;
259
  @Nullable Set<Entry<K, V>> entrySet;
260
  volatile @Nullable ConcurrentMap<Object, CompletableFuture<?>> refreshes;
261

262
  /**
   * Creates an instance based on the builder's configuration.
   *
   * @param builder the configuration for this cache
   * @param cacheLoader the loader used by loading cache variants, or null when not loading
   * @param isAsync whether the cache is operated through an asynchronous facade
   */
  @SuppressWarnings("GuardedBy")
  protected BoundedLocalCache(Caffeine<K, V> builder,
      @Nullable AsyncCacheLoader<K, V> cacheLoader, boolean isAsync) {
    this.isAsync = isAsync;
    this.cacheLoader = cacheLoader;
    executor = builder.getExecutor();
    isWeighted = builder.isWeighted();
    evictionLock = new ReentrantLock();
    weigher = builder.getWeigher(isAsync);
    drainBuffersTask = new PerformCleanupTask(this);
    nodeFactory = NodeFactory.newFactory(builder, isAsync);
    evictionListener = builder.getEvictionListener(isAsync);
    data = new ConcurrentHashMap<>(builder.getInitialCapacity());
    // A real read buffer is only needed when reads feed a policy (size eviction, reference
    // collection, or access expiration); otherwise recording reads is wasted work
    readBuffer = evicts() || collectKeys() || collectValues() || expiresAfterAccess()
        ? new BoundedBuffer<>()
        : Buffer.disabled();
    accessPolicy = (evicts() || expiresAfterAccess()) ? this::onAccess : e -> {};
    writeBuffer = new MpscGrowableArrayQueue<>(WRITE_BUFFER_MIN, WRITE_BUFFER_MAX);

    if (evicts()) {
      setMaximumSize(builder.getMaximum());
    }
  }
286

287
  /** Ensures that the node is alive during the map operation. */
288
  void requireIsAlive(Object key, Node<?, ?> node) {
289
    if (!node.isAlive()) {
1✔
290
      throw new IllegalStateException(brokenEqualityMessage(key, node));
1✔
291
    }
292
  }
1✔
293

294
  /** Logs if the node cannot be found in the map but is still alive. */
295
  void logIfAlive(Node<?, ?> node) {
296
    if (node.isAlive()) {
1✔
297
      String message = brokenEqualityMessage(node.getKeyReference(), node);
1✔
298
      logger.log(Level.ERROR, message, new IllegalStateException());
1✔
299
    }
300
  }
1✔
301

302
  /** Returns the formatted broken equality error message. */
303
  String brokenEqualityMessage(Object key, Node<?, ?> node) {
304
    return String.format(US, "An invalid state was detected, occurring when the key's equals or "
1✔
305
        + "hashCode was modified while residing in the cache. This violation of the Map "
306
        + "contract can lead to non-deterministic behavior (key: %s, key type: %s, "
307
        + "node type: %s, cache type: %s).", key, key.getClass().getName(),
1✔
308
        node.getClass().getSimpleName(), getClass().getSimpleName());
1✔
309
  }
310

311
  /** Throws the exception, wrapping checked exceptions in a {@link CompletionException}. */
312
  static void throwException(Throwable t) {
313
    if (t instanceof RuntimeException) {
1✔
314
      throw (RuntimeException) t;
1✔
315
    } else if (t instanceof Error) {
1!
316
      throw (Error) t;
1✔
317
    }
318
    throw new CompletionException(t);
×
319
  }
320

321
  /* --------------- Shared --------------- */
322

323
  /** Returns whether this cache is operated through an asynchronous facade. */
  @Override
  public boolean isAsync() {
    return isAsync;
  }
327

328
  /** Returns if the node's value is currently being computed asynchronously. */
  final boolean isComputingAsync(@Nullable V value) {
    // When isAsync is set the values are CompletableFutures, so the cast is safe by construction
    return isAsync && !Async.isReady((CompletableFuture<?>) value);
  }
332

333
  @GuardedBy("evictionLock")
334
  protected AccessOrderDeque<Node<K, V>> accessOrderWindowDeque() {
335
    throw new UnsupportedOperationException();
1✔
336
  }
337

338
  @GuardedBy("evictionLock")
339
  protected AccessOrderDeque<Node<K, V>> accessOrderProbationDeque() {
340
    throw new UnsupportedOperationException();
1✔
341
  }
342

343
  @GuardedBy("evictionLock")
344
  protected AccessOrderDeque<Node<K, V>> accessOrderProtectedDeque() {
345
    throw new UnsupportedOperationException();
1✔
346
  }
347

348
  @GuardedBy("evictionLock")
349
  protected WriteOrderDeque<Node<K, V>> writeOrderDeque() {
350
    throw new UnsupportedOperationException();
1✔
351
  }
352

353
  @Override
354
  public final Executor executor() {
355
    return executor;
1✔
356
  }
357

358
  @Override
  public ConcurrentMap<Object, CompletableFuture<?>> refreshes() {
    // Lazily create the map of in-flight refreshes; the CAS ensures that racing threads
    // all observe the single published instance
    @Var var pending = refreshes;
    if (pending == null) {
      pending = new ConcurrentHashMap<>();
      if (!REFRESHES.compareAndSet(this, null, pending)) {
        // Lost the race, so adopt the winner's map
        pending = requireNonNull(refreshes);
      }
    }
    return pending;
  }
369

370
  /** Invalidate the in-flight refresh. */
371
  @SuppressWarnings("RedundantCollectionOperation")
372
  void discardRefresh(Object keyReference) {
373
    var pending = refreshes;
1✔
374
    if ((pending != null) && pending.containsKey(keyReference)) {
1✔
375
      pending.remove(keyReference);
1✔
376
    }
377
  }
1✔
378

379
  /** Returns the key reference used by the backing map for lookups. */
  @Override
  public Object referenceKey(K key) {
    return nodeFactory.newLookupKey(key);
  }
383

384
  @Override
  public boolean isPendingEviction(K key) {
    Node<K, V> node = data.get(nodeFactory.newLookupKey(key));
    if (node == null) {
      return false;
    }
    // Pending eviction if the value was reclaimed by the GC or the entry has expired
    V value = node.getValue();
    return (value == null) || hasExpired(node, expirationTicker().read(), value);
  }
393

394
  /* --------------- Stats Support --------------- */
395

396
  @Override
397
  public boolean isRecordingStats() {
398
    return false;
1✔
399
  }
400

401
  @Override
402
  public StatsCounter statsCounter() {
403
    return StatsCounter.disabledStatsCounter();
1✔
404
  }
405

406
  @Override
407
  public Ticker statsTicker() {
408
    return Ticker.disabledTicker();
1✔
409
  }
410

411
  /* --------------- Removal Listener Support --------------- */
412

413
  /** Returns the removal listener to notify, or null when none was configured. */
  protected @Nullable RemovalListener<K, V> removalListener() {
    return null;
  }
416

417
  @Override
418
  public void notifyRemoval(@Nullable K key, @Nullable V value, RemovalCause cause) {
419
    var removalListener = removalListener();
1✔
420
    if (removalListener == null) {
1✔
421
      return;
1✔
422
    }
423
    Runnable task = () -> {
1✔
424
      try {
425
        removalListener.onRemoval(key, value, cause);
1✔
426
      } catch (Throwable t) {
1✔
427
        logger.log(Level.WARNING, "Exception thrown by removal listener", t);
1✔
428
      }
1✔
429
    };
1✔
430
    try {
431
      executor.execute(task);
1✔
432
    } catch (Throwable t) {
1✔
433
      logger.log(Level.ERROR, "Exception thrown when submitting removal listener", t);
1✔
434
      task.run();
1✔
435
    }
1✔
436
  }
1✔
437

438
  /* --------------- Eviction Listener Support --------------- */
439

440
  void notifyEviction(@Nullable K key, @Nullable V value, RemovalCause cause) {
441
    if (evictionListener == null) {
1✔
442
      return;
1✔
443
    }
444
    try {
445
      evictionListener.onRemoval(key, value, cause);
1✔
446
    } catch (Throwable t) {
1✔
447
      logger.log(Level.WARNING, "Exception thrown by eviction listener", t);
1✔
448
    }
1✔
449
  }
1✔
450

451
  /* --------------- Reference Support --------------- */
452

453
  /** Returns if the keys are weak reference garbage collected. */
454
  protected boolean collectKeys() {
455
    return false;
1✔
456
  }
457

458
  /** Returns if the values are weak or soft reference garbage collected. */
459
  protected boolean collectValues() {
460
    return false;
1✔
461
  }
462

463
  @SuppressWarnings({"DataFlowIssue", "NullAway"})
464
  protected ReferenceQueue<K> keyReferenceQueue() {
465
    return null;
1✔
466
  }
467

468
  @SuppressWarnings({"DataFlowIssue", "NullAway"})
469
  protected ReferenceQueue<V> valueReferenceQueue() {
470
    return null;
1✔
471
  }
472

473
  /* --------------- Expiration Support --------------- */
474

475
  /** Returns the {@link Pacer} used to schedule the maintenance task. */
476
  protected @Nullable Pacer pacer() {
477
    return null;
1✔
478
  }
479

480
  /** Returns if the cache expires entries after a variable time threshold. */
481
  protected boolean expiresVariable() {
482
    return false;
1✔
483
  }
484

485
  /** Returns if the cache expires entries after an access time threshold. */
486
  protected boolean expiresAfterAccess() {
487
    return false;
1✔
488
  }
489

490
  /** Returns how long after the last access to an entry the map will retain that entry. */
491
  protected long expiresAfterAccessNanos() {
492
    throw new UnsupportedOperationException();
1✔
493
  }
494

495
  protected void setExpiresAfterAccessNanos(long expireAfterAccessNanos) {
496
    throw new UnsupportedOperationException();
1✔
497
  }
498

499
  /** Returns if the cache expires entries after a write time threshold. */
500
  protected boolean expiresAfterWrite() {
501
    return false;
1✔
502
  }
503

504
  /** Returns how long after the last write to an entry the map will retain that entry. */
505
  protected long expiresAfterWriteNanos() {
506
    throw new UnsupportedOperationException();
1✔
507
  }
508

509
  protected void setExpiresAfterWriteNanos(long expireAfterWriteNanos) {
510
    throw new UnsupportedOperationException();
1✔
511
  }
512

513
  /** Returns if the cache refreshes entries after a write time threshold. */
514
  protected boolean refreshAfterWrite() {
515
    return false;
1✔
516
  }
517

518
  /** Returns how long after the last write an entry becomes a candidate for refresh. */
519
  protected long refreshAfterWriteNanos() {
520
    throw new UnsupportedOperationException();
1✔
521
  }
522

523
  protected void setRefreshAfterWriteNanos(long refreshAfterWriteNanos) {
524
    throw new UnsupportedOperationException();
1✔
525
  }
526

527
  @Override
528
  @SuppressWarnings({"DataFlowIssue", "NullAway"})
529
  public Expiry<K, V> expiry() {
530
    return null;
1✔
531
  }
532

533
  /** Returns the {@link Ticker} used by this cache for expiration. */
534
  public Ticker expirationTicker() {
535
    return Ticker.disabledTicker();
1✔
536
  }
537

538
  protected TimerWheel<K, V> timerWheel() {
539
    throw new UnsupportedOperationException();
1✔
540
  }
541

542
  /* --------------- Eviction Support --------------- */
543

544
  /** Returns if the cache evicts entries due to a maximum size or weight threshold. */
545
  protected boolean evicts() {
546
    return false;
1✔
547
  }
548

549
  /** Returns if entries may be assigned different weights. */
  protected boolean isWeighted() {
    // NOTE(review): distinct from the isWeighted field captured from the builder — confirm
    // the two are intended to agree for all configurations
    return (weigher != Weigher.singletonWeigher());
  }
553

554
  protected FrequencySketch frequencySketch() {
555
    throw new UnsupportedOperationException();
1✔
556
  }
557

558
  /** Returns if an access to an entry can skip notifying the eviction policy. */
559
  protected boolean fastpath() {
560
    return false;
1✔
561
  }
562

563
  /** Returns the maximum weighted size. */
564
  protected long maximum() {
565
    throw new UnsupportedOperationException();
1✔
566
  }
567

568
  /** Returns the maximum weighted size. */
569
  protected long maximumAcquire() {
570
    throw new UnsupportedOperationException();
1✔
571
  }
572

573
  /** Returns the maximum weighted size of the window space. */
574
  protected long windowMaximum() {
575
    throw new UnsupportedOperationException();
1✔
576
  }
577

578
  /** Returns the maximum weighted size of the main's protected space. */
579
  protected long mainProtectedMaximum() {
580
    throw new UnsupportedOperationException();
1✔
581
  }
582

583
  @GuardedBy("evictionLock")
584
  protected void setMaximum(long maximum) {
585
    throw new UnsupportedOperationException();
1✔
586
  }
587

588
  @GuardedBy("evictionLock")
589
  protected void setWindowMaximum(long maximum) {
590
    throw new UnsupportedOperationException();
1✔
591
  }
592

593
  @GuardedBy("evictionLock")
594
  protected void setMainProtectedMaximum(long maximum) {
595
    throw new UnsupportedOperationException();
1✔
596
  }
597

598
  /** Returns the combined weight of the values in the cache (may be negative). */
599
  protected long weightedSize() {
600
    throw new UnsupportedOperationException();
1✔
601
  }
602

603
  /** Returns the combined weight of the values in the cache (may be negative). */
604
  protected long weightedSizeAcquire() {
605
    throw new UnsupportedOperationException();
1✔
606
  }
607

608
  /** Returns the uncorrected combined weight of the values in the window space. */
609
  protected long windowWeightedSize() {
610
    throw new UnsupportedOperationException();
1✔
611
  }
612

613
  /** Returns the uncorrected combined weight of the values in the main's protected space. */
614
  protected long mainProtectedWeightedSize() {
615
    throw new UnsupportedOperationException();
1✔
616
  }
617

618
  @GuardedBy("evictionLock")
619
  protected void setWeightedSize(long weightedSize) {
620
    throw new UnsupportedOperationException();
1✔
621
  }
622

623
  @GuardedBy("evictionLock")
624
  protected void setWindowWeightedSize(long weightedSize) {
625
    throw new UnsupportedOperationException();
1✔
626
  }
627

628
  @GuardedBy("evictionLock")
629
  protected void setMainProtectedWeightedSize(long weightedSize) {
630
    throw new UnsupportedOperationException();
1✔
631
  }
632

633
  protected int hitsInSample() {
634
    throw new UnsupportedOperationException();
1✔
635
  }
636

637
  protected int missesInSample() {
638
    throw new UnsupportedOperationException();
1✔
639
  }
640

641
  protected double stepSize() {
642
    throw new UnsupportedOperationException();
1✔
643
  }
644

645
  protected double previousSampleHitRate() {
646
    throw new UnsupportedOperationException();
1✔
647
  }
648

649
  protected long adjustment() {
650
    throw new UnsupportedOperationException();
1✔
651
  }
652

653
  @GuardedBy("evictionLock")
654
  protected void setHitsInSample(int hitCount) {
655
    throw new UnsupportedOperationException();
1✔
656
  }
657

658
  @GuardedBy("evictionLock")
659
  protected void setMissesInSample(int missCount) {
660
    throw new UnsupportedOperationException();
1✔
661
  }
662

663
  @GuardedBy("evictionLock")
664
  protected void setStepSize(double stepSize) {
665
    throw new UnsupportedOperationException();
1✔
666
  }
667

668
  @GuardedBy("evictionLock")
669
  protected void setPreviousSampleHitRate(double hitRate) {
670
    throw new UnsupportedOperationException();
1✔
671
  }
672

673
  @GuardedBy("evictionLock")
674
  protected void setAdjustment(long amount) {
675
    throw new UnsupportedOperationException();
1✔
676
  }
677

678
  /**
   * Sets the maximum weighted size of the cache. The caller may need to perform a maintenance cycle
   * to eagerly evict entries until the cache shrinks to the appropriate size.
   */
  @GuardedBy("evictionLock")
  @SuppressWarnings({"ConstantValue", "Varifier"})
  void setMaximumSize(long maximum) {
    requireArgument(maximum >= 0, "maximum must not be negative");
    if (maximum == maximum()) {
      return;
    }

    // Clamp to the largest supported capacity and split the budget between the admission
    // window and the main space (whose protected region gets a fixed share)
    long max = Math.min(maximum, MAXIMUM_CAPACITY);
    long window = max - (long) (PERCENT_MAIN * max);
    long mainProtected = (long) (PERCENT_MAIN_PROTECTED * (max - window));

    setMaximum(max);
    setWindowMaximum(window);
    setMainProtectedMaximum(mainProtected);

    // Reset the hill climber's sample so adaptive sizing restarts from the new capacity
    setHitsInSample(0);
    setMissesInSample(0);
    setStepSize(-HILL_CLIMBER_STEP_PERCENT * max);

    if ((frequencySketch() != null) && !isWeighted() && (weightedSize() >= (max >>> 1))) {
      // Lazily initialize when close to the maximum size
      frequencySketch().ensureCapacity(max);
    }
  }
707

708
  /** Evicts entries if the cache exceeds the maximum. */
709
  @GuardedBy("evictionLock")
710
  void evictEntries() {
711
    if (!evicts()) {
1✔
712
      return;
1✔
713
    }
714
    var candidate = evictFromWindow();
1✔
715
    evictFromMain(candidate);
1✔
716
  }
1✔
717

718
  /**
719
   * Evicts entries from the window space into the main space while the window size exceeds a
720
   * maximum.
721
   *
722
   * @return the first candidate promoted into the probation space
723
   */
724
  @GuardedBy("evictionLock")
725
  @Nullable Node<K, V> evictFromWindow() {
726
    @Var Node<K, V> first = null;
1✔
727
    @Var Node<K, V> node = accessOrderWindowDeque().peekFirst();
1✔
728
    while (windowWeightedSize() > windowMaximum()) {
1✔
729
      // The pending operations will adjust the size to reflect the correct weight
730
      if (node == null) {
1✔
731
        break;
1✔
732
      }
733

734
      Node<K, V> next = node.getNextInAccessOrder();
1✔
735
      if (node.getPolicyWeight() != 0) {
1✔
736
        node.makeMainProbation();
1✔
737
        accessOrderWindowDeque().remove(node);
1✔
738
        accessOrderProbationDeque().offerLast(node);
1✔
739
        if (first == null) {
1✔
740
          first = node;
1✔
741
        }
742

743
        setWindowWeightedSize(windowWeightedSize() - node.getPolicyWeight());
1✔
744
      }
745
      node = next;
1✔
746
    }
1✔
747

748
    return first;
1✔
749
  }
750

751
  /**
   * Evicts entries from the main space if the cache exceeds the maximum capacity. The main space
   * determines whether admitting an entry (coming from the window space) is preferable to retaining
   * the eviction policy's victim. This decision is made using a frequency filter so that the
   * least frequently used entry is removed.
   * <p>
   * The window space's candidates were previously promoted to the probation space at its MRU
   * position and the eviction policy's victim starts at the LRU position. The candidates are
   * evaluated in promotion order while an eviction is required, and if exhausted then additional
   * entries are retrieved from the window space. Likewise, if the victim selection exhausts the
   * probation space then additional entries are retrieved from the protected space. The queues are
   * consumed in LRU order and the evicted entry is the one with a lower relative frequency, where
   * the preference is to retain the main space's victims versus the window space's candidates on a
   * tie.
   *
   * @param candidate the first candidate promoted into the probation space
   */
  @GuardedBy("evictionLock")
  void evictFromMain(@Var @Nullable Node<K, V> candidate) {
    @Var int victimQueue = PROBATION;
    @Var int candidateQueue = PROBATION;
    @Var Node<K, V> victim = accessOrderProbationDeque().peekFirst();
    while (weightedSize() > maximum()) {
      // Search the admission window for additional candidates
      if ((candidate == null) && (candidateQueue == PROBATION)) {
        candidate = accessOrderWindowDeque().peekFirst();
        candidateQueue = WINDOW;
      }

      // Try evicting from the protected and window queues
      if ((candidate == null) && (victim == null)) {
        if (victimQueue == PROBATION) {
          victim = accessOrderProtectedDeque().peekFirst();
          victimQueue = PROTECTED;
          continue;
        } else if (victimQueue == PROTECTED) {
          victim = accessOrderWindowDeque().peekFirst();
          victimQueue = WINDOW;
          continue;
        }

        // The pending operations will adjust the size to reflect the correct weight
        break;
      }

      // Skip over entries with zero weight
      if ((victim != null) && (victim.getPolicyWeight() == 0)) {
        victim = victim.getNextInAccessOrder();
        continue;
      } else if ((candidate != null) && (candidate.getPolicyWeight() == 0)) {
        candidate = candidate.getNextInAccessOrder();
        continue;
      }

      // Evict immediately if only one of the entries is present
      if (victim == null) {
        requireNonNull(candidate);
        // Advance the cursor before evicting, since eviction unlinks the node
        Node<K, V> previous = candidate.getNextInAccessOrder();
        Node<K, V> evict = candidate;
        candidate = previous;
        evictEntry(evict, RemovalCause.SIZE, 0L);
        continue;
      } else if (candidate == null) {
        Node<K, V> evict = victim;
        victim = victim.getNextInAccessOrder();
        evictEntry(evict, RemovalCause.SIZE, 0L);
        continue;
      }

      // Evict immediately if both selected the same entry
      if (candidate == victim) {
        victim = victim.getNextInAccessOrder();
        evictEntry(candidate, RemovalCause.SIZE, 0L);
        candidate = null;
        continue;
      }

      // Evict immediately if an entry was collected
      var victimKeyRef = victim.getKeyReferenceOrNull();
      var candidateKeyRef = candidate.getKeyReferenceOrNull();
      if (victimKeyRef == null) {
        Node<K, V> evict = victim;
        victim = victim.getNextInAccessOrder();
        evictEntry(evict, RemovalCause.COLLECTED, 0L);
        continue;
      } else if (candidateKeyRef == null) {
        Node<K, V> evict = candidate;
        candidate = candidate.getNextInAccessOrder();
        evictEntry(evict, RemovalCause.COLLECTED, 0L);
        continue;
      }

      // Evict immediately if an entry was removed
      if (!victim.isAlive()) {
        Node<K, V> evict = victim;
        victim = victim.getNextInAccessOrder();
        evictEntry(evict, RemovalCause.SIZE, 0L);
        continue;
      } else if (!candidate.isAlive()) {
        Node<K, V> evict = candidate;
        candidate = candidate.getNextInAccessOrder();
        evictEntry(evict, RemovalCause.SIZE, 0L);
        continue;
      }

      // Evict immediately if the candidate's weight exceeds the maximum
      if (candidate.getPolicyWeight() > maximum()) {
        Node<K, V> evict = candidate;
        candidate = candidate.getNextInAccessOrder();
        evictEntry(evict, RemovalCause.SIZE, 0L);
        continue;
      }

      // Evict the entry with the lowest frequency
      if (admit(candidateKeyRef, victimKeyRef)) {
        Node<K, V> evict = victim;
        victim = victim.getNextInAccessOrder();
        evictEntry(evict, RemovalCause.SIZE, 0L);
        candidate = candidate.getNextInAccessOrder();
      } else {
        Node<K, V> evict = candidate;
        candidate = candidate.getNextInAccessOrder();
        evictEntry(evict, RemovalCause.SIZE, 0L);
      }
    }
  }
877

878
  /**
879
   * Determines if the candidate should be accepted into the main space, as determined by its
880
   * frequency relative to the victim. A small amount of randomness is used to protect against hash
881
   * collision attacks, where the victim's frequency is artificially raised so that no new entries
882
   * are admitted.
883
   *
884
   * @param candidateKeyRef the keyRef for the entry being proposed for long term retention
885
   * @param victimKeyRef the keyRef for the entry chosen by the eviction policy for replacement
886
   * @return if the candidate should be admitted and the victim ejected
887
   */
888
  @GuardedBy("evictionLock")
889
  boolean admit(Object candidateKeyRef, Object victimKeyRef) {
890
    int candidateFreq = frequencySketch().frequency(candidateKeyRef);
1✔
891
    int victimFreq = frequencySketch().frequency(victimKeyRef);
1✔
892
    if (candidateFreq > victimFreq) {
1✔
893
      return true;
1✔
894
    } else if (candidateFreq >= ADMIT_HASHDOS_THRESHOLD) {
1✔
895
      // The maximum frequency is 15 and halved to 7 after a reset to age the history. An attack
896
      // exploits that a hot candidate is rejected in favor of a hot victim. The threshold of a warm
897
      // candidate reduces the number of random acceptances to minimize the impact on the hit rate.
898
      int random = ThreadLocalRandom.current().nextInt();
1✔
899
      return ((random & 127) == 0);
1✔
900
    }
901
    return false;
1✔
902
  }
903

904
  /** Expires entries that have expired by access, write, or variable. */
  @GuardedBy("evictionLock")
  void expireEntries() {
    long now = expirationTicker().read();
    expireAfterAccessEntries(now);
    expireAfterWriteEntries(now);
    expireVariableEntries(now);

    // Reschedule (or cancel) the pacer for the next expiration event, if one is configured
    Pacer pacer = pacer();
    if (pacer != null) {
      long delay = getExpirationDelay(now);
      if (delay == Long.MAX_VALUE) {
        // Nothing remaining to expire
        pacer.cancel();
      } else {
        pacer.schedule(executor, drainBuffersTask, now, delay);
      }
    }
  }
922

923
  /** Expires entries in the access-order queue. */
924
  @GuardedBy("evictionLock")
925
  void expireAfterAccessEntries(long now) {
926
    if (!expiresAfterAccess()) {
1✔
927
      return;
1✔
928
    }
929

930
    expireAfterAccessEntries(now, accessOrderWindowDeque());
1✔
931
    if (evicts()) {
1✔
932
      expireAfterAccessEntries(now, accessOrderProbationDeque());
1✔
933
      expireAfterAccessEntries(now, accessOrderProtectedDeque());
1✔
934
    }
935
  }
1✔
936

937
  /** Expires entries in an access-order queue. */
938
  @GuardedBy("evictionLock")
939
  void expireAfterAccessEntries(long now, AccessOrderDeque<Node<K, V>> accessOrderDeque) {
940
    long duration = expiresAfterAccessNanos();
1✔
941
    for (;;) {
942
      Node<K, V> node = accessOrderDeque.peekFirst();
1✔
943
      if ((node == null) || ((now - node.getAccessTime()) < duration)
1✔
944
          || !evictEntry(node, RemovalCause.EXPIRED, now)) {
1✔
945
        return;
1✔
946
      }
947
    }
1✔
948
  }
949

950
  /** Expires entries on the write-order queue. */
951
  @GuardedBy("evictionLock")
952
  void expireAfterWriteEntries(long now) {
953
    if (!expiresAfterWrite()) {
1✔
954
      return;
1✔
955
    }
956
    long duration = expiresAfterWriteNanos();
1✔
957
    for (;;) {
958
      Node<K, V> node = writeOrderDeque().peekFirst();
1✔
959
      if ((node == null) || ((now - node.getWriteTime()) < duration)
1✔
960
          || !evictEntry(node, RemovalCause.EXPIRED, now)) {
1✔
961
        break;
1✔
962
      }
963
    }
1✔
964
  }
1✔
965

966
  /** Expires entries in the timer wheel. */
967
  @GuardedBy("evictionLock")
968
  void expireVariableEntries(long now) {
969
    if (expiresVariable()) {
1✔
970
      timerWheel().advance(this, now);
1✔
971
    }
972
  }
1✔
973

974
  /** Returns the duration until the next item expires, or {@link Long#MAX_VALUE} if none. */
  @GuardedBy("evictionLock")
  long getExpirationDelay(long now) {
    @Var long delay = Long.MAX_VALUE;
    if (expiresAfterAccess()) {
      // The head of each access-order deque is its least recently accessed entry
      @Var Node<K, V> node = accessOrderWindowDeque().peekFirst();
      if (node != null) {
        long age = Math.max(0, now - node.getAccessTime());
        delay = Math.min(delay, expiresAfterAccessNanos() - age);
      }
      if (evicts()) {
        node = accessOrderProbationDeque().peekFirst();
        if (node != null) {
          long age = Math.max(0, now - node.getAccessTime());
          delay = Math.min(delay, expiresAfterAccessNanos() - age);
        }
        node = accessOrderProtectedDeque().peekFirst();
        if (node != null) {
          long age = Math.max(0, now - node.getAccessTime());
          delay = Math.min(delay, expiresAfterAccessNanos() - age);
        }
      }
    }
    if (expiresAfterWrite()) {
      // The head of the write-order deque is the oldest write
      Node<K, V> node = writeOrderDeque().peekFirst();
      if (node != null) {
        long age = Math.max(0, now - node.getWriteTime());
        delay = Math.min(delay, expiresAfterWriteNanos() - age);
      }
    }
    if (expiresVariable()) {
      delay = Math.min(delay, timerWheel().getExpirationDelay());
    }
    return delay;
  }
1009

1010
  /** Returns if the entry has expired. */
  @SuppressWarnings("ShortCircuitBoolean")
  boolean hasExpired(Node<K, V> node, long now, V value) {
    if (isComputingAsync(value)) {
      // An in-flight async computation is never considered expired
      return false;
    }
    // NOTE: the non-short-circuiting `|` is intentional (see the suppression above); each operand
    // is a side-effect-free read, so the result is identical to `||`
    return (expiresAfterAccess() && (now - node.getAccessTime() >= expiresAfterAccessNanos()))
        | (expiresAfterWrite() && (now - node.getWriteTime() >= expiresAfterWriteNanos()))
        | (expiresVariable() && (now - node.getVariableTime() >= 0));
  }
1020

1021
  /**
   * Attempts to evict the entry based on the given removal cause. A removal may be ignored if the
   * entry was updated and is no longer eligible for eviction.
   *
   * @param node the entry to evict
   * @param cause the reason to evict
   * @param now the current time, used only if expiring
   * @return if the entry was evicted
   */
  @GuardedBy("evictionLock")
  @SuppressWarnings({"GuardedByChecker", "SynchronizationOnLocalVariableOrMethodParameter"})
  boolean evictEntry(Node<K, V> node, RemovalCause cause, long now) {
    K key = node.getKey();
    var ctx = new EvictContext<V>();
    var keyReference = node.getKeyReference();

    // Atomically unmap the entry, re-validating its eligibility under the node's lock; an
    // ineligible entry is "resurrected" by returning it unchanged
    data.computeIfPresent(keyReference, (k, n) -> {
      if (n != node) {
        // A different mapping now occupies this key, so leave it untouched
        return n;
      }
      synchronized (node) {
        ctx.value = node.getValue();

        if ((key == null) || (ctx.value == null)) {
          // The key or value was garbage collected, so the cause is overridden
          ctx.cause = RemovalCause.COLLECTED;
        } else if (cause == RemovalCause.COLLECTED) {
          // Both references are now present, so the collection-based eviction no longer applies
          ctx.resurrect = true;
          return node;
        } else {
          ctx.cause = cause;
        }

        if (ctx.cause == RemovalCause.EXPIRED) {
          // Re-validate the expiration under the lock; an update may have extended the lifetime
          @Var boolean expired = false;
          if (expiresAfterAccess()) {
            expired |= ((now - node.getAccessTime()) >= expiresAfterAccessNanos());
          }
          if (expiresAfterWrite()) {
            expired |= ((now - node.getWriteTime()) >= expiresAfterWriteNanos());
          }
          if (expiresVariable()) {
            expired |= ((now - node.getVariableTime()) >= 0);
          }
          if (!expired) {
            ctx.resurrect = true;
            return node;
          }
        } else if (ctx.cause == RemovalCause.SIZE) {
          int weight = node.getWeight();
          if (weight == 0) {
            // Zero-weight entries do not count against the capacity, so do not evict by size
            ctx.resurrect = true;
            return node;
          }
        }

        notifyEviction(key, ctx.value, ctx.cause);
        discardRefresh(keyReference);
        ctx.removed = true;
        node.retire();
        return null;
      }
    });

    // The entry is no longer eligible for eviction
    if (ctx.resurrect) {
      return false;
    }

    // If the eviction fails due to a concurrent removal of the victim, that removal may cancel out
    // the addition that triggered this eviction. The victim is eagerly unlinked and the size
    // decremented before the removal task so that if an eviction is still required then a new
    // victim will be chosen for removal.
    if (node.inWindow() && (evicts() || expiresAfterAccess())) {
      accessOrderWindowDeque().remove(node);
    } else if (evicts()) {
      if (node.inMainProbation()) {
        accessOrderProbationDeque().remove(node);
      } else {
        accessOrderProtectedDeque().remove(node);
      }
    }
    if (expiresAfterWrite()) {
      writeOrderDeque().remove(node);
    } else if (expiresVariable()) {
      timerWheel().deschedule(node);
    }

    synchronized (node) {
      logIfAlive(node);
      makeDead(node);
    }

    if (ctx.removed) {
      var removeCause = requireNonNull(ctx.cause);
      statsCounter().recordEviction(node.getWeight(), removeCause);
      notifyRemoval(key, ctx.value, removeCause);
    }

    return true;
  }
1121

1122
  /** Adapts the eviction policy towards the optimal recency / frequency configuration. */
  @GuardedBy("evictionLock")
  @SuppressWarnings("UnnecessaryReturnStatement")
  void climb() {
    if (!evicts()) {
      return;
    }

    determineAdjustment();
    demoteFromMainProtected();
    long amount = adjustment();
    if (amount == 0) {
      return;
    } else if (amount > 0) {
      // Favor recency by growing the admission window
      increaseWindow();
    } else {
      // Favor frequency by shrinking the admission window
      decreaseWindow();
    }
  }
1141

1142
  /** Calculates the amount to adapt the window by and sets {@link #adjustment()} accordingly. */
  @GuardedBy("evictionLock")
  void determineAdjustment() {
    if (frequencySketch().isNotInitialized()) {
      // No frequency history yet, so reset the sample state
      setPreviousSampleHitRate(0.0);
      setMissesInSample(0);
      setHitsInSample(0);
      return;
    }

    // Wait until a full sample period has been observed
    long requestCount = (long) hitsInSample() + missesInSample();
    if (requestCount < frequencySketch().sampleSize) {
      return;
    }

    // Hill climbing: keep moving in the same direction when the hit rate improved, else reverse;
    // restart with a large step after a drastic change, otherwise decay the step size
    double hitRate = (double) hitsInSample() / requestCount;
    double hitRateChange = hitRate - previousSampleHitRate();
    double amount = (hitRateChange >= 0) ? stepSize() : -stepSize();
    double nextStepSize = (Math.abs(hitRateChange) >= HILL_CLIMBER_RESTART_THRESHOLD)
        ? HILL_CLIMBER_STEP_PERCENT * maximum() * (amount >= 0 ? 1 : -1)
        : HILL_CLIMBER_STEP_DECAY_RATE * amount;
    setPreviousSampleHitRate(hitRate);
    setAdjustment((long) amount);
    setStepSize(nextStepSize);
    setMissesInSample(0);
    setHitsInSample(0);
  }
1169

1170
  /**
   * Increases the size of the admission window by shrinking the portion allocated to the main
   * space. As the main space is partitioned into probation and protected regions (80% / 20%), for
   * simplicity only the protected is reduced. If the regions exceed their maximums, this may cause
   * protected items to be demoted to the probation region and probation items to be demoted to the
   * admission window.
   */
  @GuardedBy("evictionLock")
  void increaseWindow() {
    if (mainProtectedMaximum() == 0) {
      return;
    }

    // Tentatively transfer the full quota of capacity from protected to the window
    @Var long quota = Math.min(adjustment(), mainProtectedMaximum());
    setMainProtectedMaximum(mainProtectedMaximum() - quota);
    setWindowMaximum(windowMaximum() + quota);
    demoteFromMainProtected();

    // Move entries into the window, bounded by QUEUE_TRANSFER_THRESHOLD iterations
    for (int i = 0; i < QUEUE_TRANSFER_THRESHOLD; i++) {
      @Var Node<K, V> candidate = accessOrderProbationDeque().peekFirst();
      @Var boolean probation = true;
      if ((candidate == null) || (quota < candidate.getPolicyWeight())) {
        candidate = accessOrderProtectedDeque().peekFirst();
        probation = false;
      }
      if (candidate == null) {
        break;
      }

      int weight = candidate.getPolicyWeight();
      if (quota < weight) {
        break;
      }

      quota -= weight;
      if (probation) {
        accessOrderProbationDeque().remove(candidate);
      } else {
        setMainProtectedWeightedSize(mainProtectedWeightedSize() - weight);
        accessOrderProtectedDeque().remove(candidate);
      }
      setWindowWeightedSize(windowWeightedSize() + weight);
      accessOrderWindowDeque().offerLast(candidate);
      candidate.makeWindow();
    }

    // Return the unused quota and record the amount actually adapted
    setMainProtectedMaximum(mainProtectedMaximum() + quota);
    setWindowMaximum(windowMaximum() - quota);
    setAdjustment(quota);
  }
1220

1221
  /** Decreases the size of the admission window and increases the main's protected region. */
  @GuardedBy("evictionLock")
  void decreaseWindow() {
    if (windowMaximum() <= 1) {
      return;
    }

    // Tentatively transfer the full quota of capacity from the window to protected
    @Var long quota = Math.min(-adjustment(), Math.max(0, windowMaximum() - 1));
    setMainProtectedMaximum(mainProtectedMaximum() + quota);
    setWindowMaximum(windowMaximum() - quota);

    // Move entries out of the window, bounded by QUEUE_TRANSFER_THRESHOLD iterations
    for (int i = 0; i < QUEUE_TRANSFER_THRESHOLD; i++) {
      Node<K, V> candidate = accessOrderWindowDeque().peekFirst();
      if (candidate == null) {
        break;
      }

      int weight = candidate.getPolicyWeight();
      if (quota < weight) {
        break;
      }

      quota -= weight;
      setWindowWeightedSize(windowWeightedSize() - weight);
      accessOrderWindowDeque().remove(candidate);
      accessOrderProbationDeque().offerLast(candidate);
      candidate.makeMainProbation();
    }

    // Return the unused quota and record the amount actually adapted
    setMainProtectedMaximum(mainProtectedMaximum() - quota);
    setWindowMaximum(windowMaximum() + quota);
    setAdjustment(-quota);
  }
1254

1255
  /** Transfers the nodes from the protected to the probation region if it exceeds the maximum. */
  @GuardedBy("evictionLock")
  void demoteFromMainProtected() {
    long mainProtectedMaximum = mainProtectedMaximum();
    @Var long mainProtectedWeightedSize = mainProtectedWeightedSize();
    if (mainProtectedWeightedSize <= mainProtectedMaximum) {
      return;
    }

    // Bounded by QUEUE_TRANSFER_THRESHOLD iterations per invocation
    for (int i = 0; i < QUEUE_TRANSFER_THRESHOLD; i++) {
      if (mainProtectedWeightedSize <= mainProtectedMaximum) {
        break;
      }

      // Demote the protected region's LRU entry to the probation region's MRU position
      Node<K, V> demoted = accessOrderProtectedDeque().pollFirst();
      if (demoted == null) {
        break;
      }
      demoted.makeMainProbation();
      accessOrderProbationDeque().offerLast(demoted);
      mainProtectedWeightedSize -= demoted.getPolicyWeight();
    }
    setMainProtectedWeightedSize(mainProtectedWeightedSize);
  }
1279

1280
  /**
1281
   * Performs the post-processing work required after a read.
1282
   *
1283
   * @param node the entry in the page replacement policy
1284
   * @param now the current time, in nanoseconds
1285
   * @param recordHit if the hit count should be incremented
1286
   * @return the refreshed value if immediately loaded, else null
1287
   */
1288
  @Nullable V afterRead(Node<K, V> node, long now, boolean recordHit) {
1289
    if (recordHit) {
1✔
1290
      statsCounter().recordHits(1);
1✔
1291
    }
1292

1293
    boolean delayable = skipReadBuffer() || (readBuffer.offer(node) != Buffer.FULL);
1✔
1294
    if (shouldDrainBuffers(delayable)) {
1✔
1295
      scheduleDrainBuffers();
1✔
1296
    }
1297
    return refreshIfNeeded(node, now);
1✔
1298
  }
1299

1300
  /** Returns if the cache should bypass the read buffer. */
1301
  boolean skipReadBuffer() {
1302
    return fastpath() && frequencySketch().isNotInitialized();
1✔
1303
  }
1304

1305
  /**
   * Asynchronously refreshes the entry if eligible.
   *
   * @param node the entry in the cache to refresh
   * @param now the current time, in nanoseconds
   * @return the refreshed value if immediately loaded, else null
   */
  @SuppressWarnings("FutureReturnValueIgnored")
  @Nullable V refreshIfNeeded(Node<K, V> node, long now) {
    if (!refreshAfterWrite()) {
      return null;
    }

    K key;
    V oldValue;
    long writeTime = node.getWriteTime();
    long refreshWriteTime = writeTime | 1L;
    Object keyReference = node.getKeyReference();
    ConcurrentMap<Object, CompletableFuture<?>> refreshes;
    // The write time's low bit is used as an in-flight refresh marker: it is claimed via CAS
    // below and restored in the finally block
    if (((now - writeTime) > refreshAfterWriteNanos())
        && ((key = node.getKey()) != null) && ((oldValue = node.getValue()) != null)
        && !isComputingAsync(oldValue) && ((writeTime & 1L) == 0L)
        && !(refreshes = refreshes()).containsKey(keyReference)
        && node.isAlive() && node.casWriteTime(writeTime, refreshWriteTime)) {
      long[] startTime = new long[1];
      @SuppressWarnings({"rawtypes", "unchecked"})
      @Nullable CompletableFuture<? extends @Nullable V>[] refreshFuture = new CompletableFuture[1];
      try {
        // Register the reload future so concurrent readers do not start a duplicate refresh
        refreshes.computeIfAbsent(keyReference, k -> {
          try {
            startTime[0] = statsTicker().read();
            if (isAsync) {
              @SuppressWarnings("unchecked")
              var future = (CompletableFuture<V>) oldValue;
              if (Async.isReady(future)) {
                requireNonNull(cacheLoader);
                var refresh = cacheLoader.asyncReload(key, future.join(), executor);
                refreshFuture[0] = requireNonNull(refresh, "Null future");
              } else {
                // no-op if the future's completion state was modified (e.g. obtrude methods)
                return null;
              }
            } else {
              requireNonNull(cacheLoader);
              var refresh = cacheLoader.asyncReload(key, oldValue, executor);
              refreshFuture[0] = requireNonNull(refresh, "Null future");
            }
            return refreshFuture[0];
          } catch (InterruptedException e) {
            // Preserve the interrupt status for the caller
            Thread.currentThread().interrupt();
            logger.log(Level.WARNING, "Exception thrown when submitting refresh task", e);
            return null;
          } catch (Throwable e) {
            logger.log(Level.WARNING, "Exception thrown when submitting refresh task", e);
            return null;
          }
        });
      } finally {
        node.casWriteTime(refreshWriteTime, writeTime);
      }

      if (refreshFuture[0] == null) {
        return null;
      }

      var refreshed = refreshFuture[0].handle((newValue, error) -> {
        long loadTime = statsTicker().read() - startTime[0];
        if (error != null) {
          if (!(error instanceof CancellationException) && !(error instanceof TimeoutException)) {
            logger.log(Level.WARNING, "Exception thrown during refresh", error);
          }
          refreshes.remove(keyReference, refreshFuture[0]);
          statsCounter().recordLoadFailure(loadTime);
          return null;
        }

        @SuppressWarnings("unchecked")
        V value = (isAsync && (newValue != null)) ? (V) refreshFuture[0] : newValue;

        @Nullable RemovalCause[] cause = new RemovalCause[1];
        V result = compute(key, (K k, @Nullable V currentValue) -> {
          boolean removed = refreshes.remove(keyReference, refreshFuture[0]);
          if (currentValue == null) {
            // If the entry is absent then discard the refresh and maybe notifying the listener
            if (value != null) {
              cause[0] = RemovalCause.EXPLICIT;
            }
            return null;
          } else if (currentValue == value) {
            // If the reloaded value is the same instance then no-op
            return currentValue;
          } else if (isAsync &&
              (newValue == Async.getIfReady((CompletableFuture<?>) currentValue))) {
            // If the completed futures hold the same value instance then no-op
            return currentValue;
          } else if (removed && (currentValue == oldValue) && (node.getWriteTime() == writeTime)) {
            // If the entry was not modified while in-flight (no ABA) then replace
            return value;
          }
          // Otherwise, a write invalidated the refresh so discard it and notify the listener
          cause[0] = RemovalCause.REPLACED;
          return currentValue;
        }, expiry(), /* recordLoad= */ false, /* recordLoadFailure= */ true);

        if (cause[0] != null) {
          notifyRemoval(key, value, cause[0]);
        }
        if (newValue == null) {
          statsCounter().recordLoadFailure(loadTime);
        } else {
          statsCounter().recordLoadSuccess(loadTime);
        }
        return result;
      });
      return Async.getIfReady(refreshed);
    }

    return null;
  }
1424

1425
  /**
1426
   * Returns the expiration time for the entry after being created.
1427
   *
1428
   * @param key the key of the entry that was created
1429
   * @param value the value of the entry that was created
1430
   * @param expiry the calculator for the expiration time
1431
   * @param now the current time, in nanoseconds
1432
   * @return the expiration time
1433
   */
1434
  long expireAfterCreate(K key, V value, @Nullable Expiry<? super K, ? super V> expiry, long now) {
1435
    if (expiresVariable()) {
1✔
1436
      requireNonNull(expiry);
1✔
1437
      long duration = Math.max(0L, expiry.expireAfterCreate(key, value, now));
1✔
1438
      return isAsync ? (now + duration) : (now + Math.min(duration, MAXIMUM_EXPIRY));
1✔
1439
    }
1440
    return 0L;
1✔
1441
  }
1442

1443
  /**
1444
   * Returns the expiration time for the entry after being updated.
1445
   *
1446
   * @param node the entry in the page replacement policy
1447
   * @param key the key of the entry that was updated
1448
   * @param value the value of the entry that was updated
1449
   * @param expiry the calculator for the expiration time
1450
   * @param now the current time, in nanoseconds
1451
   * @return the expiration time
1452
   */
1453
  long expireAfterUpdate(Node<K, V> node, K key, V value,
1454
      @Nullable Expiry<? super K, ? super V> expiry, long now) {
1455
    if (expiresVariable()) {
1✔
1456
      requireNonNull(expiry);
1✔
1457
      long currentDuration = Math.max(1, node.getVariableTime() - now);
1✔
1458
      long duration = Math.max(0L, expiry.expireAfterUpdate(key, value, now, currentDuration));
1✔
1459
      return isAsync ? (now + duration) : (now + Math.min(duration, MAXIMUM_EXPIRY));
1✔
1460
    }
1461
    return 0L;
1✔
1462
  }
1463

1464
  /**
1465
   * Returns the access time for the entry after a read.
1466
   *
1467
   * @param node the entry in the page replacement policy
1468
   * @param key the key of the entry that was read
1469
   * @param value the value of the entry that was read
1470
   * @param expiry the calculator for the expiration time
1471
   * @param now the current time, in nanoseconds
1472
   * @return the expiration time
1473
   */
1474
  long expireAfterRead(Node<K, V> node, K key, V value, Expiry<K, V> expiry, long now) {
1475
    if (expiresVariable()) {
1✔
1476
      long currentDuration = Math.max(0L, node.getVariableTime() - now);
1✔
1477
      long duration = Math.max(0L, expiry.expireAfterRead(key, value, now, currentDuration));
1✔
1478
      return isAsync ? (now + duration) : (now + Math.min(duration, MAXIMUM_EXPIRY));
1✔
1479
    }
1480
    return 0L;
1✔
1481
  }
1482

1483
  /**
1484
   * Attempts to update the access time for the entry after a read.
1485
   *
1486
   * @param node the entry in the page replacement policy
1487
   * @param key the key of the entry that was read
1488
   * @param value the value of the entry that was read
1489
   * @param expiry the calculator for the expiration time
1490
   * @param now the current time, in nanoseconds
1491
   */
1492
  void tryExpireAfterRead(Node<K, V> node, K key, V value, Expiry<K, V> expiry, long now) {
1493
    if (!expiresVariable()) {
1✔
1494
      return;
1✔
1495
    }
1496

1497
    long variableTime = node.getVariableTime();
1✔
1498
    long currentDuration = Math.max(1, variableTime - now);
1✔
1499
    if (isAsync && (currentDuration > MAXIMUM_EXPIRY)) {
1✔
1500
      // expireAfterCreate has not yet set the duration after completion
1501
      return;
1✔
1502
    }
1503

1504
    long duration = Math.max(0L, expiry.expireAfterRead(key, value, now, currentDuration));
1✔
1505
    if (duration != currentDuration) {
1✔
1506
      long expirationTime = isAsync ? (now + duration) : (now + Math.min(duration, MAXIMUM_EXPIRY));
1✔
1507
      node.casVariableTime(variableTime, expirationTime);
1✔
1508
    }
1509
  }
1✔
1510

1511
  /** Sets the entry's variable expiration timestamp, if variable expiration is enabled. */
  void setVariableTime(Node<K, V> node, long expirationTime) {
    if (expiresVariable()) {
      node.setVariableTime(expirationTime);
    }
  }
1✔
1516

1517
  /** Sets the entry's write timestamp, if write expiration or refresh is enabled. */
  void setWriteTime(Node<K, V> node, long now) {
    if (expiresAfterWrite() || refreshAfterWrite()) {
      // The low-order bit is cleared; presumably it is reserved as a flag (e.g. an in-flight
      // refresh marker) by the write-time encoding — TODO confirm against Node's usage
      node.setWriteTime(now & ~1L);
    }
  }
1✔
1522

1523
  /** Sets the entry's access timestamp, if access expiration is enabled. */
  void setAccessTime(Node<K, V> node, long now) {
    if (expiresAfterAccess()) {
      node.setAccessTime(now);
    }
  }
1✔
1528

1529
  /** Returns if the entry's write time would exceed the minimum expiration reorder threshold. */
  boolean exceedsWriteTimeTolerance(Node<K, V> node, long varTime, long now) {
    long variableTime = node.getVariableTime();
    long tolerance = EXPIRE_WRITE_TOLERANCE;
    long writeTime = node.getWriteTime();
    // A reorder is required when any enabled write-based policy would observe a time change larger
    // than the tolerance; tiny deltas are skipped to avoid churn in the ordering structures. When
    // the configured duration is itself within the tolerance, every write must reorder.
    return
        (expiresAfterWrite()
            && ((expiresAfterWriteNanos() <= tolerance) || (Math.abs(now - writeTime) > tolerance)))
        || (refreshAfterWrite()
            && ((refreshAfterWriteNanos() <= tolerance) || (Math.abs(now - writeTime) > tolerance)))
        || (expiresVariable() && (Math.abs(varTime - variableTime) > tolerance));
  }
1541

1542
  /**
   * Performs the post-processing work required after a write. The task is offered to the write
   * buffer and, if the buffer stays full after bounded retries, this thread falls back to running
   * the maintenance work itself under the eviction lock.
   *
   * @param task the pending operation to be applied
   */
  void afterWrite(Runnable task) {
    for (int i = 0; i < WRITE_BUFFER_RETRIES; i++) {
      if (writeBuffer.offer(task)) {
        scheduleAfterWrite();
        return;
      }
      // Buffer is full: nudge the drain and spin briefly before retrying the offer
      scheduleDrainBuffers();
      Thread.onSpinWait();
    }

    // In scenarios where the writing threads cannot make progress then they attempt to provide
    // assistance by performing the eviction work directly. This can resolve cases where the
    // maintenance task is scheduled but not running. That might occur due to all of the executor's
    // threads being busy (perhaps writing into this cache), the write rate greatly exceeds the
    // consuming rate, priority inversion, or if the executor silently discarded the maintenance
    // task. Unfortunately this cannot resolve when the eviction is blocked waiting on a long-
    // running computation due to an eviction listener, the victim is being computed on by a writer,
    // or the victim residing in the same hash bin as a computing entry. In those cases a warning is
    // logged to encourage the application to decouple these computations from the map operations.
    lock();
    try {
      maintenance(task);
    } catch (RuntimeException e) {
      logger.log(Level.ERROR, "Exception thrown when performing the maintenance task", e);
    } finally {
      evictionLock.unlock();
    }
    rescheduleCleanUpIfIncomplete();
  }
1✔
1576

1577
  /**
   * Acquires the eviction lock, waiting up to {@code WARN_AFTER_LOCK_WAIT_NANOS} before logging a
   * warning and falling back to an uninterruptible acquire. The thread's interrupt status is
   * restored on exit if an interrupt occurred while waiting.
   */
  void lock() {
    @Var long remainingNanos = WARN_AFTER_LOCK_WAIT_NANOS;
    long end = System.nanoTime() + remainingNanos;
    @Var boolean interrupted = false;
    try {
      for (;;) {
        try {
          if (evictionLock.tryLock(remainingNanos, TimeUnit.NANOSECONDS)) {
            return;
          }
          // Timed out: warn that eviction appears stalled, then block until acquired
          logger.log(Level.WARNING, "The cache is experiencing excessive wait times for acquiring "
              + "the eviction lock. This may indicate that a long-running computation has halted "
              + "eviction when trying to remove the victim entry. Consider using AsyncCache to "
              + "decouple the computation from the map operation.", new TimeoutException());
          evictionLock.lock();
          return;
        } catch (InterruptedException e) {
          // Recompute the remaining wait and retry; the interrupt is re-asserted in the finally
          remainingNanos = end - System.nanoTime();
          interrupted = true;
        }
      }
    } finally {
      if (interrupted) {
        Thread.currentThread().interrupt();
      }
    }
  }
1605

1606
  /**
   * Conditionally schedules the asynchronous maintenance task after a write operation. If the
   * task status was IDLE or REQUIRED then the maintenance task is scheduled immediately. If it
   * is already processing then it is set to transition to REQUIRED upon completion so that a new
   * execution is triggered by the next operation.
   */
  void scheduleAfterWrite() {
    @Var int drainStatus = drainStatusOpaque();
    for (;;) {
      switch (drainStatus) {
        case IDLE:
          // A lost CAS is harmless here: either transition leads to scheduling the drain
          casDrainStatus(IDLE, REQUIRED);
          scheduleDrainBuffers();
          return;
        case REQUIRED:
          scheduleDrainBuffers();
          return;
        case PROCESSING_TO_IDLE:
          if (casDrainStatus(PROCESSING_TO_IDLE, PROCESSING_TO_REQUIRED)) {
            return;
          }
          // The status changed concurrently; reread and re-dispatch
          drainStatus = drainStatusAcquire();
          continue;
        case PROCESSING_TO_REQUIRED:
          // A follow-up run is already demanded; nothing more to do
          return;
        default:
          throw new IllegalStateException("Invalid drain status: " + drainStatus);
      }
    }
  }
1636

1637
  /**
   * Attempts to schedule an asynchronous task to apply the pending operations to the page
   * replacement policy. If the executor rejects the task then it is run directly.
   */
  void scheduleDrainBuffers() {
    // Already processing (or a follow-up is queued); skip without contending on the lock
    if (drainStatusOpaque() >= PROCESSING_TO_IDLE) {
      return;
    }
    if (evictionLock.tryLock()) {
      try {
        // Recheck under the lock in case another thread scheduled first
        int drainStatus = drainStatusOpaque();
        if (drainStatus >= PROCESSING_TO_IDLE) {
          return;
        }
        setDrainStatusRelease(PROCESSING_TO_IDLE);
        executor.execute(drainBuffersTask);
      } catch (Throwable t) {
        // Executor rejected or failed: fall back to draining on the caller while holding the lock
        logger.log(Level.WARNING, "Exception thrown when submitting maintenance task", t);
        maintenance(/* ignored */ null);
      } finally {
        evictionLock.unlock();
      }
    }
  }
1✔
1661

1662
  /**
   * Performs any pending maintenance operations synchronously, logging rather than propagating
   * any runtime failure so that callers are insulated from user-callback exceptions.
   */
  @Override
  public void cleanUp() {
    try {
      performCleanUp(/* ignored */ null);
    } catch (RuntimeException e) {
      logger.log(Level.ERROR, "Exception thrown when performing the maintenance task", e);
    }
  }
1✔
1670

1671
  /**
   * Performs the maintenance work, blocking until the lock is acquired.
   *
   * @param task an additional pending task to run, or {@code null} if not present
   */
  void performCleanUp(@Nullable Runnable task) {
    evictionLock.lock();
    try {
      maintenance(task);
    } finally {
      evictionLock.unlock();
    }
    // Any work left behind by the bounded maintenance pass is scheduled outside the lock
    rescheduleCleanUpIfIncomplete();
  }
1✔
1685

1686
  /**
   * If there remains pending operations that were not handled by the prior clean up then try to
   * schedule an asynchronous maintenance task. This may occur due to a concurrent write after the
   * maintenance work had started or if the amortized threshold of work per clean up was reached.
   */
  @SuppressWarnings("resource")
  void rescheduleCleanUpIfIncomplete() {
    if (drainStatusOpaque() != REQUIRED) {
      return;
    }

    // An immediate scheduling cannot be performed on a custom executor because it may use a
    // caller-runs policy. This could cause the caller's penalty to exceed the amortized threshold,
    // e.g. repeated concurrent writes could result in a retry loop.
    if (executor == ForkJoinPool.commonPool()) {
      scheduleDrainBuffers();
      return;
    }

    // If a scheduler was configured then the maintenance can be deferred onto the custom executor
    // and run in the near future. Otherwise, it will be handled due to other cache activity.
    var pacer = pacer();
    if ((pacer != null) && !pacer.isScheduled() && evictionLock.tryLock()) {
      try {
        // Recheck under the lock to avoid double-scheduling with a concurrent maintenance pass
        if ((drainStatusOpaque() == REQUIRED) && !pacer.isScheduled()) {
          pacer.schedule(executor, drainBuffersTask, expirationTicker().read(), Pacer.TOLERANCE);
        }
      } finally {
        evictionLock.unlock();
      }
    }
  }
1✔
1718

1719
  /**
   * Performs the pending maintenance work and sets the state flags during processing to avoid
   * excess scheduling attempts. The read buffer, write buffer, and reference queues are drained,
   * followed by expiration, and size-based eviction.
   *
   * @param task an additional pending task to run, or {@code null} if not present
   */
  @GuardedBy("evictionLock")
  void maintenance(@Nullable Runnable task) {
    setDrainStatusRelease(PROCESSING_TO_IDLE);

    try {
      drainReadBuffer();

      drainWriteBuffer();
      // The extra task runs after the buffered writes so it observes their policy updates
      if (task != null) {
        task.run();
      }

      drainKeyReferences();
      drainValueReferences();

      expireEntries();
      evictEntries();

      climb();
    } finally {
      // If more work arrived during processing (status advanced past PROCESSING_TO_IDLE, or the
      // CAS back to IDLE lost a race), flag that another maintenance pass is required
      if ((drainStatusOpaque() != PROCESSING_TO_IDLE)
          || !casDrainStatus(PROCESSING_TO_IDLE, IDLE)) {
        setDrainStatusOpaque(REQUIRED);
      }
    }
  }
1✔
1752

1753
  /** Drains the weak key references queue, evicting entries whose keys were garbage collected. */
  @GuardedBy("evictionLock")
  void drainKeyReferences() {
    if (!collectKeys()) {
      return;
    }
    @Var Reference<? extends K> keyRef;
    while ((keyRef = keyReferenceQueue().poll()) != null) {
      // The cleared reference is itself the map key, so it can be used for the lookup
      Node<K, V> node = data.get(keyRef);
      if (node != null) {
        evictEntry(node, RemovalCause.COLLECTED, 0L);
      }
    }
  }
1✔
1767

1768
  /** Drains the weak / soft value references queue, evicting entries whose values were collected. */
  @GuardedBy("evictionLock")
  void drainValueReferences() {
    if (!collectValues()) {
      return;
    }
    @Var Reference<? extends V> valueRef;
    while ((valueRef = valueReferenceQueue().poll()) != null) {
      @SuppressWarnings("unchecked")
      var ref = (InternalReference<V>) valueRef;
      Node<K, V> node = data.get(ref.getKeyReference());
      // Only evict if the node still holds this exact (collected) reference; otherwise the entry
      // was concurrently updated to a live value and must be kept
      if ((node != null) && (valueRef == node.getValueReference())) {
        evictEntry(node, RemovalCause.COLLECTED, 0L);
      }
    }
  }
1✔
1784

1785
  /** Drains the read buffer. */
1786
  @GuardedBy("evictionLock")
1787
  void drainReadBuffer() {
1788
    if (!skipReadBuffer()) {
1✔
1789
      readBuffer.drainTo(accessPolicy);
1✔
1790
    }
1791
  }
1✔
1792

1793
  /** Updates the node's location in the page replacement policy. */
  @GuardedBy("evictionLock")
  void onAccess(Node<K, V> node) {
    if (evicts()) {
      var keyRef = node.getKeyReferenceOrNull();
      // Skip stale events for entries that were collected or retired concurrently
      if ((keyRef == null) || !node.isAlive()) {
        return;
      }
      frequencySketch().increment(keyRef);
      // Move the node within (or between) the LRU segments that make up the policy
      if (node.inWindow()) {
        reorder(accessOrderWindowDeque(), node);
      } else if (node.inMainProbation()) {
        reorderProbation(node);
      } else {
        reorder(accessOrderProtectedDeque(), node);
      }
      // Hit statistics feed the adaptive sizing performed by climb()
      setHitsInSample(hitsInSample() + 1);
    } else if (expiresAfterAccess()) {
      reorder(accessOrderWindowDeque(), node);
    }
    if (expiresVariable()) {
      timerWheel().reschedule(node);
    }
  }
1✔
1817

1818
  /** Promote the node from probation to protected on an access. */
  @GuardedBy("evictionLock")
  void reorderProbation(Node<K, V> node) {
    if (!accessOrderProbationDeque().contains(node)) {
      // Ignore stale accesses for an entry that is no longer present
      return;
    } else if (node.getPolicyWeight() > mainProtectedMaximum()) {
      // Too heavy to ever fit in protected; just refresh its recency within probation
      reorder(accessOrderProbationDeque(), node);
      return;
    }

    // If the protected space exceeds its maximum, the LRU items are demoted to the probation space.
    // This is deferred to the adaption phase at the end of the maintenance cycle.
    setMainProtectedWeightedSize(mainProtectedWeightedSize() + node.getPolicyWeight());
    accessOrderProbationDeque().remove(node);
    accessOrderProtectedDeque().offerLast(node);
    node.makeMainProtected();
  }
1✔
1836

1837
  /** Updates the node's location in the policy's deque. */
1838
  static <K, V> void reorder(LinkedDeque<Node<K, V>> deque, Node<K, V> node) {
1839
    // An entry may be scheduled for reordering despite having been removed. This can occur when the
1840
    // entry was concurrently read while a writer was removing it. If the entry is no longer linked
1841
    // then it does not need to be processed.
1842
    if (deque.contains(node)) {
1✔
1843
      deque.moveToBack(node);
1✔
1844
    }
1845
  }
1✔
1846

1847
  /** Drains the write buffer, running at most {@code WRITE_BUFFER_MAX + 1} tasks per pass. */
  @GuardedBy("evictionLock")
  void drainWriteBuffer() {
    for (int i = 0; i <= WRITE_BUFFER_MAX; i++) {
      Runnable task = writeBuffer.poll();
      if (task == null) {
        return;
      }
      task.run();
    }
    // The amortized work limit was reached with tasks remaining; demand another maintenance pass
    setDrainStatusOpaque(PROCESSING_TO_REQUIRED);
  }
1✔
1859

1860
  /**
   * Atomically transitions the node to the <code>dead</code> state and decrements the
   * <code>weightedSize</code>.
   *
   * @param node the entry in the page replacement policy
   */
  @GuardedBy("evictionLock")
  @SuppressWarnings("SynchronizationOnLocalVariableOrMethodParameter")
  void makeDead(Node<K, V> node) {
    synchronized (node) {
      // Idempotent: a racing caller may have already killed the node
      if (node.isDead()) {
        return;
      }
      if (evicts()) {
        // The node's policy weight may be out of sync due to a pending update waiting to be
        // processed. At this point the node's weight is finalized, so the weight can be safely
        // taken from the node's perspective and the sizes will be adjusted correctly.
        if (node.inWindow()) {
          setWindowWeightedSize(windowWeightedSize() - node.getWeight());
        } else if (node.inMainProtected()) {
          setMainProtectedWeightedSize(mainProtectedWeightedSize() - node.getWeight());
        }
        setWeightedSize(weightedSize() - node.getWeight());
      }
      node.die();
    }
  }
1✔
1887

1888
  /** Adds the node to the page replacement policy. */
  final class AddTask implements Runnable {
    final Node<K, V> node;
    // The weight at insertion time; the node's own weight may have changed concurrently
    final int weight;

    AddTask(Node<K, V> node, int weight) {
      this.weight = weight;
      this.node = node;
    }

    @Override
    @GuardedBy("evictionLock")
    public void run() {
      if (evicts()) {
        setWeightedSize(weightedSize() + weight);
        setWindowWeightedSize(windowWeightedSize() + weight);
        node.setPolicyWeight(node.getPolicyWeight() + weight);

        long maximum = maximum();
        if (weightedSize() >= (maximum >>> 1)) {
          if (weightedSize() > MAXIMUM_CAPACITY) {
            evictEntries();
          } else {
            // Lazily initialize when close to the maximum
            long capacity = isWeighted() ? data.mappingCount() : maximum;
            frequencySketch().ensureCapacity(capacity);
          }
        }

        var keyRef = node.getKeyReferenceOrNull();
        if (keyRef != null) {
          frequencySketch().increment(keyRef);
        }

        setMissesInSample(missesInSample() + 1);
      }

      // ignore out-of-order write operations
      boolean isAlive;
      synchronized (node) {
        isAlive = node.isAlive();
      }
      if (isAlive) {
        if (expiresAfterWrite()) {
          writeOrderDeque().offerLast(node);
        }
        if (expiresVariable()) {
          timerWheel().schedule(node);
        }
        if (evicts()) {
          if (weight > maximum()) {
            // Cannot fit at all; evict immediately
            evictEntry(node, RemovalCause.SIZE, expirationTicker().read());
          } else if (weight > windowMaximum()) {
            // Too heavy for the window: insert at the LRU end so it is considered for the main
            // space promptly rather than pushing out the whole window
            accessOrderWindowDeque().offerFirst(node);
          } else {
            accessOrderWindowDeque().offerLast(node);
          }
        } else if (expiresAfterAccess()) {
          accessOrderWindowDeque().offerLast(node);
        }
      }
    }
  }
1951

1952
  /** Removes a node from the page replacement policy. */
  final class RemovalTask implements Runnable {
    final Node<K, V> node;

    RemovalTask(Node<K, V> node) {
      this.node = node;
    }

    @Override
    @GuardedBy("evictionLock")
    public void run() {
      // add may not have been processed yet
      if (node.inWindow() && (evicts() || expiresAfterAccess())) {
        accessOrderWindowDeque().remove(node);
      } else if (evicts()) {
        if (node.inMainProbation()) {
          accessOrderProbationDeque().remove(node);
        } else {
          accessOrderProtectedDeque().remove(node);
        }
      }
      if (expiresAfterWrite()) {
        writeOrderDeque().remove(node);
      } else if (expiresVariable()) {
        timerWheel().deschedule(node);
      }
      // Finalize the node and settle the weighted size accounting
      makeDead(node);
    }
  }
1981

1982
  /** Updates the weighted size. */
1983
  final class UpdateTask implements Runnable {
1984
    final int weightDifference;
1985
    final Node<K, V> node;
1986

1987
    public UpdateTask(Node<K, V> node, int weightDifference) {
1✔
1988
      this.weightDifference = weightDifference;
1✔
1989
      this.node = node;
1✔
1990
    }
1✔
1991

1992
    @Override
1993
    @GuardedBy("evictionLock")
1994
    public void run() {
1995
      if (expiresAfterWrite()) {
1✔
1996
        reorder(writeOrderDeque(), node);
1✔
1997
      } else if (expiresVariable()) {
1✔
1998
        timerWheel().reschedule(node);
1✔
1999
      }
2000
      if (evicts()) {
1✔
2001
        int oldWeightedSize = node.getPolicyWeight();
1✔
2002
        node.setPolicyWeight(oldWeightedSize + weightDifference);
1✔
2003
        if (node.inWindow()) {
1✔
2004
          setWindowWeightedSize(windowWeightedSize() + weightDifference);
1✔
2005
          if (node.getPolicyWeight() > maximum()) {
1✔
2006
            evictEntry(node, RemovalCause.SIZE, expirationTicker().read());
1✔
2007
          } else if (node.getPolicyWeight() <= windowMaximum()) {
1✔
2008
            onAccess(node);
1✔
2009
          } else if (accessOrderWindowDeque().contains(node)) {
1✔
2010
            accessOrderWindowDeque().moveToFront(node);
1✔
2011
          }
2012
        } else if (node.inMainProbation()) {
1✔
2013
            if (node.getPolicyWeight() <= maximum()) {
1✔
2014
              onAccess(node);
1✔
2015
            } else {
2016
              evictEntry(node, RemovalCause.SIZE, expirationTicker().read());
1✔
2017
            }
2018
        } else {
2019
          setMainProtectedWeightedSize(mainProtectedWeightedSize() + weightDifference);
1✔
2020
          if (node.getPolicyWeight() <= maximum()) {
1✔
2021
            onAccess(node);
1✔
2022
          } else {
2023
            evictEntry(node, RemovalCause.SIZE, expirationTicker().read());
1✔
2024
          }
2025
        }
2026

2027
        setWeightedSize(weightedSize() + weightDifference);
1✔
2028
        if (weightedSize() > MAXIMUM_CAPACITY) {
1✔
2029
          evictEntries();
1✔
2030
        }
2031
      } else if (expiresAfterAccess()) {
1✔
2032
        onAccess(node);
1✔
2033
      }
2034
    }
1✔
2035
  }
2036

2037
  /* --------------- Concurrent Map Support --------------- */
2038

2039
  /** Returns whether the backing map holds no entries (expired-but-unevicted entries count). */
  @Override
  public boolean isEmpty() {
    return data.isEmpty();
  }
2043

2044
  /** Returns the number of entries in the backing map, saturating at Integer.MAX_VALUE. */
  @Override
  public int size() {
    return data.size();
  }
2048

2049
  /** Returns the approximate number of entries, as a long, from the backing map. */
  @Override
  public long estimatedSize() {
    return data.mappingCount();
  }
2053

2054
  /**
   * Removes all entries. Pending reads are discarded and pending writes applied under the
   * eviction lock, then entries are removed one-by-one so that per-entry notifications fire and
   * the lock is not held for an unbounded time.
   */
  @Override
  public void clear() {
    Deque<Node<K, V>> entries;
    evictionLock.lock();
    try {
      // Discard all pending reads
      readBuffer.drainTo(e -> {});

      // Apply all pending writes
      @Var Runnable task;
      while ((task = writeBuffer.poll()) != null) {
        task.run();
      }

      // Cancel the scheduled cleanup
      Pacer pacer = pacer();
      if (pacer != null) {
        pacer.cancel();
      }

      // Discard all entries, falling back to one-by-one to avoid excessive lock hold times
      long now = expirationTicker().read();
      int threshold = (WRITE_BUFFER_MAX / 2);
      entries = new ArrayDeque<>(data.values());
      // Stop early if the write buffer is filling up, so incoming writes can be flushed
      while (!entries.isEmpty() && (writeBuffer.size() < threshold)) {
        removeNode(entries.pollFirst(), now);
      }
    } finally {
      evictionLock.unlock();
    }

    // Remove any stragglers if released early to more aggressively flush incoming writes
    @Var boolean cleanUp = false;
    for (var node : entries) {
      @Nullable K key = node.getKey();
      if (key == null) {
        // Key was garbage collected; a maintenance pass must reap the entry instead
        cleanUp = true;
      } else {
        remove(key);
      }
    }
    if (collectKeys() && cleanUp) {
      cleanUp();
    }
  }
1✔
2099

2100
  /**
   * Removes the node from the map and the page replacement policy, determining the removal cause
   * (collected, expired, or explicit) under the entry's lock and notifying listeners accordingly.
   */
  @GuardedBy("evictionLock")
  @SuppressWarnings({"GuardedByChecker", "SynchronizationOnLocalVariableOrMethodParameter"})
  void removeNode(Node<K, V> node, long now) {
    K key = node.getKey();
    var ctx = new EvictContext<V>();
    var keyReference = node.getKeyReference();

    data.computeIfPresent(keyReference, (k, n) -> {
      // The mapping may have been replaced; only remove if it still points at this node
      if (n != node) {
        return n;
      }
      synchronized (node) {
        ctx.value = node.getValue();

        // Classify why the entry is going away; collection and expiration take precedence
        if ((key == null) || (ctx.value == null)) {
          ctx.cause = RemovalCause.COLLECTED;
        } else if (hasExpired(node, now, ctx.value)) {
          ctx.cause = RemovalCause.EXPIRED;
        } else {
          ctx.cause = RemovalCause.EXPLICIT;
        }

        // Eviction listeners run synchronously, inside the computation
        if (ctx.cause.wasEvicted()) {
          notifyEviction(key, ctx.value, ctx.cause);
        }

        discardRefresh(node.getKeyReference());
        node.retire();
        return null;
      }
    });

    // Unlink the node from whichever policy structures it participates in
    if (node.inWindow() && (evicts() || expiresAfterAccess())) {
      accessOrderWindowDeque().remove(node);
    } else if (evicts()) {
      if (node.inMainProbation()) {
        accessOrderProbationDeque().remove(node);
      } else {
        accessOrderProtectedDeque().remove(node);
      }
    }
    if (expiresAfterWrite()) {
      writeOrderDeque().remove(node);
    } else if (expiresVariable()) {
      timerWheel().deschedule(node);
    }

    synchronized (node) {
      logIfAlive(node);
      makeDead(node);
    }

    // Removal listeners run after the entry is fully unlinked
    if (ctx.cause != null) {
      notifyRemoval(key, ctx.value, ctx.cause);
    }
  }
1✔
2156

2157
  @Override
2158
  public boolean containsKey(Object key) {
2159
    Node<K, V> node = data.get(nodeFactory.newLookupKey(key));
1✔
2160
    if (node == null) {
1✔
2161
      return false;
1✔
2162
    }
2163
    V value = node.getValue();
1✔
2164
    return (value != null) && !hasExpired(node, expirationTicker().read(), value);
1✔
2165
  }
2166

2167
  @Override
2168
  public boolean containsValue(Object value) {
2169
    requireNonNull(value);
1✔
2170

2171
    long now = expirationTicker().read();
1✔
2172
    for (Node<K, V> node : data.values()) {
1✔
2173
      V nodeValue = node.getValue();
1✔
2174
      if ((nodeValue != null) && node.containsValue(value)
1✔
2175
          && !hasExpired(node, now, nodeValue) && (node.getKey() != null)) {
1✔
2176
        return true;
1✔
2177
      }
2178
    }
1✔
2179
    return false;
1✔
2180
  }
2181

2182
  /** Returns the value for the key, without recording statistics for the lookup. */
  @Override
  public @Nullable V get(Object key) {
    return getIfPresent(key, /* recordStats= */ false);
  }
2186

2187
  /**
   * Returns the value if a live mapping exists, optionally recording hit/miss statistics, and
   * performs the read-side policy bookkeeping (access time, variable expiry, refresh).
   */
  @Override
  public @Nullable V getIfPresent(Object key, boolean recordStats) {
    Node<K, V> node = data.get(nodeFactory.newLookupKey(key));
    if (node == null) {
      if (recordStats) {
        statsCounter().recordMisses(1);
      }
      // Piggyback a pending drain onto this miss
      if (drainStatusOpaque() == REQUIRED) {
        scheduleDrainBuffers();
      }
      return null;
    }

    V value = node.getValue();
    long now = expirationTicker().read();
    if ((value == null) || hasExpired(node, now, value)) {
      // Collected or expired: treat as a miss and let maintenance reap the entry
      if (recordStats) {
        statsCounter().recordMisses(1);
      }
      scheduleDrainBuffers();
      return null;
    }

    // For async caches an in-flight future's expiry is set on completion, not on read
    if (!isComputingAsync(value)) {
      @SuppressWarnings("unchecked")
      var castedKey = (K) key;
      setAccessTime(node, now);
      tryExpireAfterRead(node, castedKey, value, expiry(), now);
    }
    V refreshed = afterRead(node, now, recordStats);
    return (refreshed == null) ? value : refreshed;
  }
2219

2220
  @Override
2221
  public @Nullable V getIfPresentQuietly(Object key) {
2222
    V value;
2223
    Node<K, V> node = data.get(nodeFactory.newLookupKey(key));
1✔
2224
    if ((node == null) || ((value = node.getValue()) == null)
1✔
2225
        || hasExpired(node, expirationTicker().read(), value)) {
1✔
2226
      return null;
1✔
2227
    }
2228
    return value;
1✔
2229
  }
2230

2231
  /**
   * Returns the key associated with the mapping in this cache, or {@code null} if there is none.
   *
   * @param key the key whose canonical instance is to be returned
   * @return the key used by the mapping, or {@code null} if this cache does not contain a mapping
   *         for the key
   * @throws NullPointerException if the specified key is null
   */
  public @Nullable K getKey(K key) {
    Node<K, V> node = data.get(nodeFactory.newLookupKey(key));
    if (node == null) {
      // Piggyback a pending drain onto this miss
      if (drainStatusOpaque() == REQUIRED) {
        scheduleDrainBuffers();
      }
      return null;
    }
    // Policy bookkeeping without recording a hit; now=0 skips time-based updates here
    afterRead(node, /* now= */ 0L, /* recordHit= */ false);
    return node.getKey();
  }
2250

2251
  /**
   * Returns an unmodifiable, iteration-ordered map of the live mappings for the requested keys.
   * Statistics record one hit or miss per unique key.
   */
  @Override
  public Map<K, V> getAllPresent(Iterable<? extends K> keys) {
    // Seed with nulls to deduplicate keys while preserving encounter order
    var result = new LinkedHashMap<K, @Nullable V>(calculateHashMapCapacity(keys));
    for (K key : keys) {
      result.put(key, null);
    }

    int uniqueKeys = result.size();
    long now = expirationTicker().read();
    for (var iter = result.entrySet().iterator(); iter.hasNext();) {
      V value;
      var entry = iter.next();
      Node<K, V> node = data.get(nodeFactory.newLookupKey(entry.getKey()));
      if ((node == null) || ((value = node.getValue()) == null)
          || hasExpired(node, now, value)) {
        // Absent, collected, or expired: drop from the result (counts as a miss below)
        iter.remove();
      } else {
        setAccessTime(node, now);
        tryExpireAfterRead(node, entry.getKey(), value, expiry(), now);
        V refreshed = afterRead(node, now, /* recordHit= */ false);
        entry.setValue((refreshed == null) ? value : refreshed);
      }
    }
    statsCounter().recordHits(result.size());
    statsCounter().recordMisses(uniqueKeys - result.size());

    @SuppressWarnings("NullableProblems")
    Map<K, V> unmodifiable = Collections.unmodifiableMap(result);
    return unmodifiable;
  }
2281

2282
  @Override
2283
  public void putAll(Map<? extends K, ? extends V> map) {
2284
    map.forEach(this::put);
1✔
2285
  }
1✔
2286

2287
  /** Associates the value with the key, replacing any existing mapping. */
  @Override
  public @Nullable V put(K key, V value) {
    return put(key, value, expiry(), /* onlyIfAbsent= */ false);
  }
2291

2292
  /** Associates the value with the key only if no live mapping exists, returning any prior value. */
  @Override
  public @Nullable V putIfAbsent(K key, V value) {
    return put(key, value, expiry(), /* onlyIfAbsent= */ true);
  }
2296

2297
  /**
   * Adds a node to the policy and the data store. If an existing node is found, then its value is
   * updated if allowed.
   *
   * @param key key with which the specified value is to be associated
   * @param value value to be associated with the specified key
   * @param expiry the calculator for the write expiration time
   * @param onlyIfAbsent a write is performed only if the key is not already associated with a value
   * @return the prior value, or null if no mapping was found
   */
  @Nullable V put(K key, V value, Expiry<K, V> expiry, boolean onlyIfAbsent) {
    requireNonNull(key);
    requireNonNull(value);

    @Var Node<K, V> node = null;
    long now = expirationTicker().read();
    int newWeight = weigher.weigh(key, value);
    Object lookupKey = nodeFactory.newLookupKey(key);
    for (int attempts = 1; ; attempts++) {
      @Var Node<K, V> prior = data.get(lookupKey);
      if (prior == null) {
        if (node == null) {
          // Lazily create the new node on the first absent observation and reuse it on retries
          node = nodeFactory.newNode(key, keyReferenceQueue(),
              value, valueReferenceQueue(), newWeight, now);
          long expirationTime = isComputingAsync(value) ? (now + ASYNC_EXPIRY) : now;
          setVariableTime(node, expireAfterCreate(key, value, expiry, now));
          setAccessTime(node, expirationTime);
          setWriteTime(node, expirationTime);
        }
        prior = data.putIfAbsent(node.getKeyReference(), node);
        if (prior == null) {
          afterWrite(new AddTask(node, newWeight));
          return null;
        } else if (onlyIfAbsent) {
          // An optimistic fast path to avoid unnecessary locking
          V currentValue = prior.getValue();
          if ((currentValue != null) && !hasExpired(prior, now, currentValue)) {
            if (!isComputingAsync(currentValue)) {
              tryExpireAfterRead(prior, key, currentValue, expiry, now);
              setAccessTime(prior, now);
            }
            afterRead(prior, now, /* recordHit= */ false);
            return currentValue;
          }
        }
      } else if (onlyIfAbsent) {
        // An optimistic fast path to avoid unnecessary locking
        V currentValue = prior.getValue();
        if ((currentValue != null) && !hasExpired(prior, now, currentValue)) {
          if (!isComputingAsync(currentValue)) {
            tryExpireAfterRead(prior, key, currentValue, expiry, now);
            setAccessTime(prior, now);
          }
          afterRead(prior, now, /* recordHit= */ false);
          return currentValue;
        }
      }

      // A read may race with the entry's removal, so that after the entry is acquired it may no
      // longer be usable. A retry will reread from the map and either find an absent mapping, a
      // new entry, or a stale entry.
      if (!prior.isAlive()) {
        // A reread of the stale entry may occur if the state transition occurred but the map
        // removal was delayed by a context switch, so that this thread spin waits until resolved.
        if ((attempts & MAX_PUT_SPIN_WAIT_ATTEMPTS) != 0) {
          Thread.onSpinWait();
          continue;
        }

        // If the spin wait attempts are exhausted then fallback to a map computation in order to
        // deschedule this thread until the entry's removal completes. If the key was modified
        // while in the map so that its equals or hashCode changed then the contents may be
        // corrupted, where the cache holds an evicted (dead) entry that could not be removed.
        // That is a violation of the Map contract, so we check that the mapping is in the "alive"
        // state while in the computation.
        data.computeIfPresent(lookupKey, (k, n) -> {
          requireIsAlive(key, n);
          return n;
        });
        continue;
      }

      V oldValue;
      long varTime;
      int oldWeight;
      @Var boolean expired = false;
      @Var boolean mayUpdate = true;
      @Var boolean exceedsTolerance = false;
      synchronized (prior) {
        // Recheck liveness under the lock; retry if the entry was retired concurrently
        if (!prior.isAlive()) {
          continue;
        }
        oldValue = prior.getValue();
        oldWeight = prior.getWeight();
        if (oldValue == null) {
          // The value was reference collected, so treat this write as a creation
          varTime = expireAfterCreate(key, value, expiry, now);
          notifyEviction(key, null, RemovalCause.COLLECTED);
        } else if (hasExpired(prior, now, oldValue)) {
          expired = true;
          varTime = expireAfterCreate(key, value, expiry, now);
          notifyEviction(key, oldValue, RemovalCause.EXPIRED);
        } else if (onlyIfAbsent) {
          // A live mapping exists, so only the read-based expiration is refreshed
          mayUpdate = false;
          varTime = expireAfterRead(prior, key, oldValue, expiry, now);
        } else {
          varTime = expireAfterUpdate(prior, key, value, expiry, now);
        }

        long expirationTime = isComputingAsync(mayUpdate ? value : oldValue)
            ? (now + ASYNC_EXPIRY)
            : now;
        if (mayUpdate) {
          exceedsTolerance = exceedsWriteTimeTolerance(prior, varTime, now);
          if (expired || exceedsTolerance) {
            setWriteTime(prior, isComputingAsync(value) ? (now + ASYNC_EXPIRY) : now);
          }

          prior.setValue(value, valueReferenceQueue());
          prior.setWeight(newWeight);

          discardRefresh(prior.getKeyReference());
        }

        setVariableTime(prior, varTime);
        setAccessTime(prior, expirationTime);
      }

      // Removal and replacement notifications are delivered outside of the entry's lock
      if (expired) {
        notifyRemoval(key, oldValue, RemovalCause.EXPIRED);
      } else if (oldValue == null) {
        notifyRemoval(key, /* value= */ null, RemovalCause.COLLECTED);
      } else if (mayUpdate) {
        notifyOnReplace(key, oldValue, value);
      }

      int weightedDifference = mayUpdate ? (newWeight - oldWeight) : 0;
      if ((oldValue == null) || (weightedDifference != 0) || expired) {
        afterWrite(new UpdateTask(prior, weightedDifference));
      } else if (!onlyIfAbsent && exceedsTolerance) {
        afterWrite(new UpdateTask(prior, weightedDifference));
      } else {
        afterRead(prior, now, /* recordHit= */ false);
      }

      return expired ? null : oldValue;
    }
  }
2444

2445
  @Override
  @SuppressWarnings("SynchronizationOnLocalVariableOrMethodParameter")
  public @Nullable V remove(Object key) {
    // The removal cause, node, and old entry state are passed out of the compute via this context
    var ctx = new RemoveContext<K, V>();
    Object lookupKey = nodeFactory.newLookupKey(key);
    data.computeIfPresent(lookupKey, (k, n) -> {
      synchronized (n) {
        requireIsAlive(key, n);
        ctx.oldKey = n.getKey();
        ctx.oldValue = n.getValue();
        RemovalCause actualCause;
        // A collected or expired entry is attributed to eviction rather than an explicit removal
        if ((ctx.oldKey == null) || (ctx.oldValue == null)) {
          actualCause = RemovalCause.COLLECTED;
        } else if (hasExpired(n, expirationTicker().read(), ctx.oldValue)) {
          actualCause = RemovalCause.EXPIRED;
        } else {
          actualCause = RemovalCause.EXPLICIT;
        }
        if (actualCause.wasEvicted()) {
          notifyEviction(ctx.oldKey, ctx.oldValue, actualCause);
        }
        ctx.cause = actualCause;
        discardRefresh(k);
        ctx.node = n;
        n.retire();
        // Returning null removes the mapping from the data store
        return null;
      }
    });

    if (ctx.cause != null) {
      afterWrite(new RemovalTask(requireNonNull(ctx.node)));
      notifyRemoval(ctx.oldKey, ctx.oldValue, ctx.cause);
    }
    // Only an explicit removal exposes the old value to the caller
    return (ctx.cause == RemovalCause.EXPLICIT) ? ctx.oldValue : null;
  }
2480

2481
  @Override
  @SuppressWarnings("SynchronizationOnLocalVariableOrMethodParameter")
  public boolean remove(Object key, @Nullable Object value) {
    requireNonNull(key);
    if (value == null) {
      // Per Map#remove(key, value), a null value can never match a mapping
      return false;
    }

    var ctx = new RemoveContext<K, V>();
    Object lookupKey = nodeFactory.newLookupKey(key);
    data.computeIfPresent(lookupKey, (kR, node) -> {
      synchronized (node) {
        requireIsAlive(key, node);
        ctx.oldKey = node.getKey();
        ctx.oldValue = node.getValue();
        if ((ctx.oldKey == null) || (ctx.oldValue == null)) {
          ctx.cause = RemovalCause.COLLECTED;
        } else if (hasExpired(node, expirationTicker().read(), ctx.oldValue)) {
          ctx.cause = RemovalCause.EXPIRED;
        } else if (node.containsValue(value)) {
          ctx.cause = RemovalCause.EXPLICIT;
        } else {
          // A live mapping whose value does not match is left untouched
          return node;
        }
        if (ctx.cause.wasEvicted()) {
          notifyEviction(ctx.oldKey, ctx.oldValue, ctx.cause);
        }
        discardRefresh(kR);
        ctx.node = node;
        node.retire();
        // Returning null removes the mapping from the data store
        return null;
      }
    });

    if (ctx.node == null) {
      return false;
    }
    var removeCause = requireNonNull(ctx.cause);
    afterWrite(new RemovalTask(ctx.node));
    notifyRemoval(ctx.oldKey, ctx.oldValue, removeCause);

    // Only an explicit, value-matched removal counts as a success for the caller
    return (removeCause == RemovalCause.EXPLICIT);
  }
2524

2525
  @Override
  @SuppressWarnings("SynchronizationOnLocalVariableOrMethodParameter")
  public @Nullable V replace(K key, V value) {
    requireNonNull(key);
    requireNonNull(value);
    var ctx = new ReplaceContext<K, V>();
    int weight = weigher.weigh(key, value);
    Node<K, V> node = data.computeIfPresent(nodeFactory.newLookupKey(key), (k, n) -> {
      synchronized (n) {
        requireIsAlive(key, n);
        ctx.nodeKey = n.getKey();
        ctx.oldValue = n.getValue();
        ctx.oldWeight = n.getWeight();
        // A collected or expired entry does not qualify as a present mapping; clearing
        // ctx.oldValue signals the failure to the code after the computation
        if ((ctx.nodeKey == null) || (ctx.oldValue == null)
            || hasExpired(n, ctx.now = expirationTicker().read(), ctx.oldValue)) {
          ctx.oldValue = null;
          return n;
        }

        long varTime = expireAfterUpdate(n, key, value, expiry(), ctx.now);
        n.setValue(value, valueReferenceQueue());
        n.setWeight(weight);

        long expirationTime = isComputingAsync(value) ? (ctx.now + ASYNC_EXPIRY) : ctx.now;
        ctx.exceedsTolerance = exceedsWriteTimeTolerance(n, varTime, expirationTime);
        if (ctx.exceedsTolerance) {
          setWriteTime(n, expirationTime);
        }
        setAccessTime(n, expirationTime);
        setVariableTime(n, varTime);
        discardRefresh(k);
        return n;
      }
    });

    if ((node == null) || (ctx.nodeKey == null) || (ctx.oldValue == null)) {
      if (node != null) {
        // A dead entry was observed, so prompt maintenance to clean it up
        scheduleDrainBuffers();
      }
      return null;
    }

    int weightedDifference = (weight - ctx.oldWeight);
    if (ctx.exceedsTolerance || (weightedDifference != 0)) {
      afterWrite(new UpdateTask(node, weightedDifference));
    } else {
      afterRead(node, ctx.now, /* recordHit= */ false);
    }

    notifyOnReplace(ctx.nodeKey, ctx.oldValue, value);
    return ctx.oldValue;
  }
2577

2578
  @Override
  public boolean replace(K key, V oldValue, V newValue) {
    // Delegates to the extended overload, always invalidating an in-flight refresh
    return replace(key, oldValue, newValue, /* shouldDiscardRefresh= */ true);
  }
2582

2583
  @Override
  @SuppressWarnings("SynchronizationOnLocalVariableOrMethodParameter")
  public boolean replace(K key, V oldValue, V newValue, boolean shouldDiscardRefresh) {
    requireNonNull(key);
    requireNonNull(oldValue);
    requireNonNull(newValue);
    var ctx = new ReplaceContext<K, V>();
    int weight = weigher.weigh(key, newValue);
    Node<K, V> node = data.computeIfPresent(nodeFactory.newLookupKey(key), (k, n) -> {
      synchronized (n) {
        requireIsAlive(key, n);
        ctx.nodeKey = n.getKey();
        ctx.oldValue = n.getValue();
        ctx.oldWeight = n.getWeight();
        // The replacement only applies to a live, unexpired entry holding the expected value;
        // clearing ctx.oldValue signals the failure to the code after the computation
        if ((ctx.nodeKey == null) || (ctx.oldValue == null) || !n.containsValue(oldValue)
            || hasExpired(n, ctx.now = expirationTicker().read(), ctx.oldValue)) {
          ctx.oldValue = null;
          return n;
        }

        long varTime = expireAfterUpdate(n, key, newValue, expiry(), ctx.now);
        n.setValue(newValue, valueReferenceQueue());
        n.setWeight(weight);

        long expirationTime = isComputingAsync(newValue) ? (ctx.now + ASYNC_EXPIRY) : ctx.now;
        ctx.exceedsTolerance = exceedsWriteTimeTolerance(n, varTime, expirationTime);
        if (ctx.exceedsTolerance) {
          setWriteTime(n, expirationTime);
        }
        setAccessTime(n, expirationTime);
        setVariableTime(n, varTime);

        // Callers such as refresh completion pass false to keep their own in-flight refresh
        if (shouldDiscardRefresh) {
          discardRefresh(k);
        }
      }
      return n;
    });

    if ((node == null) || (ctx.nodeKey == null) || (ctx.oldValue == null)) {
      if (node != null) {
        // A dead entry was observed, so prompt maintenance to clean it up
        scheduleDrainBuffers();
      }
      return false;
    }

    int weightedDifference = (weight - ctx.oldWeight);
    if (ctx.exceedsTolerance || (weightedDifference != 0)) {
      afterWrite(new UpdateTask(node, weightedDifference));
    } else {
      afterRead(node, ctx.now, /* recordHit= */ false);
    }

    notifyOnReplace(ctx.nodeKey, ctx.oldValue, newValue);
    return true;
  }
2639

2640
  @Override
2641
  public void replaceAll(BiFunction<? super K, ? super V, ? extends V> function) {
2642
    requireNonNull(function);
1✔
2643

2644
    BiFunction<K, V, V> remappingFunction = (key, oldValue) ->
1✔
2645
        requireNonNull(function.apply(key, oldValue));
1✔
2646
    for (K key : keySet()) {
1✔
2647
      Object lookupKey = nodeFactory.newLookupKey(key);
1✔
2648
      remap(key, lookupKey, remappingFunction, expiry(),
1✔
2649
          new ComputeContext<>(expirationTicker().read()), /* computeIfAbsent= */ false);
1✔
2650
    }
1✔
2651
  }
1✔
2652

2653
  @Override
  public @Nullable V computeIfAbsent(K key,
      @Var Function<? super K, ? extends @Nullable V> mappingFunction,
      boolean recordStats, boolean recordLoad) {
    requireNonNull(key);
    requireNonNull(mappingFunction);
    long now = expirationTicker().read();

    // An optimistic fast path to avoid unnecessary locking
    Node<K, V> node = data.get(nodeFactory.newLookupKey(key));
    if (node != null) {
      V value = node.getValue();
      if ((value != null) && !hasExpired(node, now, value)) {
        if (!isComputingAsync(value)) {
          tryExpireAfterRead(node, key, value, expiry(), now);
          setAccessTime(node, now);
        }
        // afterRead may trigger a refresh; prefer its result when one is available
        @Nullable V refreshed = afterRead(node, now, /* recordHit= */ recordStats);
        return (refreshed == null) ? value : refreshed;
      }
    }
    if (recordStats) {
      mappingFunction = statsAware(mappingFunction, recordLoad);
    }
    // Fall back to the locked computation with a retained key reference
    Object keyRef = nodeFactory.newReferenceKey(key, keyReferenceQueue());
    return doComputeIfAbsent(key, keyRef, mappingFunction,
        new ComputeContext<>(now), recordStats);
  }
2681

2682
  /**
   * Returns the current value from a computeIfAbsent invocation.
   *
   * @param key key with which the specified value is to be associated
   * @param keyRef the retained key reference to store the mapping under
   * @param mappingFunction the function to compute a value when the entry is absent
   * @param ctx the mutable context for passing state out of the map computation
   * @param recordStats if a hit should be recorded when an existing live value is returned
   * @return the current (existing or computed) value, or null if the computation returned null
   */
  @SuppressWarnings("SynchronizationOnLocalVariableOrMethodParameter")
  @Nullable V doComputeIfAbsent(K key, Object keyRef,
      Function<? super K, ? extends @Nullable V> mappingFunction,
      ComputeContext<K, V> ctx, boolean recordStats) {
    Node<K, V> node = data.compute(keyRef, (k, n) -> {
      if (n == null) {
        // Absent mapping: compute the value and create the node if one was produced
        ctx.newValue = mappingFunction.apply(key);
        if (ctx.newValue == null) {
          discardRefresh(k);
          return null;
        }
        ctx.now = expirationTicker().read();
        ctx.newWeight = weigher.weigh(key, ctx.newValue);
        var created = nodeFactory.newNode(k, ctx.newValue,
            valueReferenceQueue(), ctx.newWeight, ctx.now);
        long expirationTime = isComputingAsync(ctx.newValue)
            ? ctx.now + ASYNC_EXPIRY
            : ctx.now;
        setVariableTime(created, expireAfterCreate(key, ctx.newValue, expiry(), ctx.now));
        setAccessTime(created, expirationTime);
        setWriteTime(created, expirationTime);
        discardRefresh(k);
        return created;
      }

      synchronized (n) {
        requireIsAlive(key, n);
        ctx.nodeKey = n.getKey();
        ctx.oldValue = n.getValue();
        ctx.oldWeight = n.getWeight();
        RemovalCause actualCause;
        if ((ctx.nodeKey == null) || (ctx.oldValue == null)) {
          actualCause = RemovalCause.COLLECTED;
        } else {
          if (hasExpired(n, ctx.now, ctx.oldValue)) {
            actualCause = RemovalCause.EXPIRED;
          } else {
            // A live value is present, so no computation is needed
            return n;
          }
        }

        ctx.cause = actualCause;
        notifyEviction(ctx.nodeKey, ctx.oldValue, actualCause);

        // The dead entry is either resurrected with a newly computed value or retired
        try {
          ctx.newValue = mappingFunction.apply(key);
          if (ctx.newValue == null) {
            discardRefresh(k);
            ctx.removed = n;
            n.retire();
            return null;
          }
          ctx.now = expirationTicker().read();
          ctx.newWeight = weigher.weigh(key, ctx.newValue);
          long varTime = expireAfterCreate(key, ctx.newValue, expiry(), ctx.now);

          n.setValue(ctx.newValue, valueReferenceQueue());
          n.setWeight(ctx.newWeight);

          long expirationTime = isComputingAsync(ctx.newValue)
              ? (ctx.now + ASYNC_EXPIRY) : ctx.now;
          setAccessTime(n, expirationTime);
          setWriteTime(n, expirationTime);
          setVariableTime(n, varTime);
          discardRefresh(k);
          return n;
        } catch (Throwable e) {
          // The computation failed, so retire the entry and rethrow after the map unlocks
          ctx.newValue = null;
          discardRefresh(k);
          ctx.exception = e;
          ctx.removed = n;
          n.retire();
          return null;
        }
      }
    });

    if (ctx.cause != null) {
      statsCounter().recordEviction(ctx.oldWeight, ctx.cause);
      notifyRemoval(ctx.nodeKey, ctx.oldValue, ctx.cause);
    }
    if (node == null) {
      if (ctx.removed != null) {
        afterWrite(new RemovalTask(ctx.removed));
      }
      if (ctx.exception != null) {
        throwException(ctx.exception);
      }
      return null;
    }
    if ((ctx.oldValue != null) && (ctx.newValue == null)) {
      // An existing live value was returned, so treat this as a read
      if (!isComputingAsync(ctx.oldValue)) {
        tryExpireAfterRead(node, key, ctx.oldValue, expiry(), ctx.now);
        setAccessTime(node, ctx.now);
      }

      afterRead(node, ctx.now, /* recordHit= */ recordStats);
      return ctx.oldValue;
    }
    if ((ctx.oldValue == null) && (ctx.cause == null)) {
      afterWrite(new AddTask(node, ctx.newWeight));
    } else {
      int weightedDifference = (ctx.newWeight - ctx.oldWeight);
      afterWrite(new UpdateTask(node, weightedDifference));
    }

    return ctx.newValue;
  }
2789

2790
  @Override
  public @Nullable V computeIfPresent(K key,
      BiFunction<? super K, ? super V, ? extends V> remappingFunction) {
    requireNonNull(key);
    requireNonNull(remappingFunction);

    // An optimistic fast path to avoid unnecessary locking
    Object lookupKey = nodeFactory.newLookupKey(key);
    @Nullable Node<K, V> node = data.get(lookupKey);
    long now;
    if (node == null) {
      return null;
    }
    V value = node.getValue();
    if ((value == null) || hasExpired(node, now = expirationTicker().read(), value)) {
      // A dead entry was observed, so prompt maintenance to clean it up
      scheduleDrainBuffers();
      return null;
    }

    BiFunction<? super K, ? super V, ? extends V> statsAwareRemappingFunction =
        statsAware(remappingFunction, /* recordLoad= */ true, /* recordLoadFailure= */ true);
    return remap(key, lookupKey, statsAwareRemappingFunction,
        expiry(), new ComputeContext<>(now), /* computeIfAbsent= */ false);
  }
2814

2815
  @Override
2816
  public @Nullable V compute(K key,
2817
      BiFunction<? super K, ? super V, ? extends @Nullable V> remappingFunction,
2818
      @Nullable Expiry<? super K, ? super V> expiry, boolean recordLoad,
2819
      boolean recordLoadFailure) {
2820
    requireNonNull(key);
1✔
2821
    requireNonNull(remappingFunction);
1✔
2822

2823
    Object keyRef = nodeFactory.newReferenceKey(key, keyReferenceQueue());
1✔
2824
    BiFunction<? super K, ? super V, ? extends V> statsAwareRemappingFunction =
1✔
2825
        statsAware(remappingFunction, recordLoad, recordLoadFailure);
1✔
2826
    return remap(key, keyRef, statsAwareRemappingFunction,
1✔
2827
        expiry, new ComputeContext<>(expirationTicker().read()), /* computeIfAbsent= */ true);
1✔
2828
  }
2829

2830
  @Override
2831
  public @Nullable V merge(K key, V value,
2832
      BiFunction<? super V, ? super V, ? extends V> remappingFunction) {
2833
    requireNonNull(key);
1✔
2834
    requireNonNull(value);
1✔
2835
    requireNonNull(remappingFunction);
1✔
2836

2837
    Object keyRef = nodeFactory.newReferenceKey(key, keyReferenceQueue());
1✔
2838
    BiFunction<? super K, ? super @Nullable V, ? extends @Nullable V> mergeFunction =
1✔
2839
        (k, oldValue) -> (oldValue == null)
1✔
2840
          ? value
1✔
2841
          : statsAware(remappingFunction).apply(oldValue, value);
1✔
2842
    return remap(key, keyRef, mergeFunction, expiry(),
1✔
2843
        new ComputeContext<>(expirationTicker().read()), /* computeIfAbsent= */ true);
1✔
2844
  }
2845

2846
  /**
   * Attempts to compute a mapping for the specified key and its current mapped value (or
   * {@code null} if there is no current mapping).
   * <p>
   * An entry that has expired or been reference collected is evicted and the computation continues
   * as if the entry had not been present. This method does not pre-screen and does not wrap the
   * remappingFunction to be statistics aware.
   *
   * @param key key with which the specified value is to be associated
   * @param keyRef the key to associate with or a lookup only key if not {@code computeIfAbsent}
   * @param remappingFunction the function to compute a value
   * @param expiry the calculator for the expiration time
   * @param ctx the mutable context for passing state to and from the {@link ConcurrentHashMap}
   *        compute lambda, with {@link ComputeContext#now} set to the current ticker time
   * @param computeIfAbsent if an absent entry can be computed
   * @return the new value associated with the specified key, or null if none
   */
  @SuppressWarnings({"StatementWithEmptyBody", "SynchronizationOnLocalVariableOrMethodParameter"})
  @Nullable V remap(K key, Object keyRef,
      BiFunction<? super K, ? super V, ? extends @Nullable V> remappingFunction,
      @Nullable Expiry<? super K, ? super V> expiry,
      ComputeContext<K, V> ctx, boolean computeIfAbsent) {
    Node<K, V> node = data.compute(keyRef, (kr, n) -> {
      if (n == null) {
        // Absent mapping: compute and insert only when computeIfAbsent is allowed
        if (!computeIfAbsent) {
          return null;
        }
        ctx.newValue = remappingFunction.apply(key, null);
        if (ctx.newValue == null) {
          return null;
        }
        ctx.now = expirationTicker().read();
        ctx.newWeight = weigher.weigh(key, ctx.newValue);
        long varTime = expireAfterCreate(key, ctx.newValue, expiry, ctx.now);
        var created = nodeFactory.newNode(keyRef, ctx.newValue,
            valueReferenceQueue(), ctx.newWeight, ctx.now);

        long expirationTime = isComputingAsync(ctx.newValue)
            ? ctx.now + ASYNC_EXPIRY
            : ctx.now;
        setAccessTime(created, expirationTime);
        setWriteTime(created, expirationTime);
        setVariableTime(created, varTime);
        discardRefresh(kr);
        return created;
      }

      synchronized (n) {
        requireIsAlive(key, n);
        ctx.nodeKey = n.getKey();
        ctx.oldValue = n.getValue();
        ctx.oldWeight = n.getWeight();
        // A dead entry is evicted first; the computation then proceeds as if it was absent
        if ((ctx.nodeKey == null) || (ctx.oldValue == null)) {
          ctx.cause = RemovalCause.COLLECTED;
        } else if (hasExpired(n, expirationTicker().read(), ctx.oldValue)) {
          ctx.cause = RemovalCause.EXPIRED;
        }
        if (ctx.cause != null) {
          notifyEviction(ctx.nodeKey, ctx.oldValue, ctx.cause);
          if (!computeIfAbsent) {
            discardRefresh(kr);
            ctx.removed = n;
            n.retire();
            return null;
          }
        }

        boolean wasEvicted = (ctx.cause != null);
        try {
          // An evicted entry is presented to the function as an absent mapping
          ctx.newValue = remappingFunction.apply(ctx.nodeKey,
              (ctx.cause == null) ? ctx.oldValue : null);

          if (ctx.newValue == null) {
            if (ctx.cause == null) {
              ctx.cause = RemovalCause.EXPLICIT;
            }
            discardRefresh(kr);
            ctx.removed = n;
            n.retire();
            return null;
          }

          long varTime;
          ctx.newWeight = weigher.weigh(key, ctx.newValue);
          ctx.now = expirationTicker().read();
          if (ctx.cause == null) {
            if (ctx.newValue != ctx.oldValue) {
              ctx.cause = RemovalCause.REPLACED;
            }
            varTime = expireAfterUpdate(n, key, ctx.newValue, expiry, ctx.now);
          } else {
            varTime = expireAfterCreate(key, ctx.newValue, expiry, ctx.now);
          }

          if (ctx.newValue != ctx.oldValue) {
            n.setValue(ctx.newValue, valueReferenceQueue());
          }
          n.setWeight(ctx.newWeight);

          long expirationTime = isComputingAsync(ctx.newValue)
              ? ctx.now + ASYNC_EXPIRY
              : ctx.now;
          ctx.exceedsTolerance = exceedsWriteTimeTolerance(n, varTime, expirationTime);
          if (((ctx.cause != null) && ctx.cause.wasEvicted()) || ctx.exceedsTolerance) {
            setWriteTime(n, expirationTime);
          }
          setAccessTime(n, expirationTime);
          setVariableTime(n, varTime);
          discardRefresh(kr);
          return n;
        } catch (Throwable e) {
          // A failure on a live entry propagates immediately; on an evicted entry the
          // retirement completes first and the exception is rethrown after the map unlocks
          if (!wasEvicted) {
            throw e;
          }
          ctx.newValue = null;
          discardRefresh(kr);
          ctx.exception = e;
          ctx.removed = n;
          n.retire();
          return null;
        }
      }
    });

    if (ctx.cause != null) {
      if (ctx.cause == RemovalCause.REPLACED) {
        requireNonNull(ctx.newValue);
        notifyOnReplace(key, ctx.oldValue, ctx.newValue);
      } else {
        if (ctx.cause.wasEvicted()) {
          statsCounter().recordEviction(ctx.oldWeight, ctx.cause);
        }
        notifyRemoval(ctx.nodeKey, ctx.oldValue, ctx.cause);
      }
    }

    if (ctx.removed != null) {
      afterWrite(new RemovalTask(ctx.removed));
    } else if (node == null) {
      // absent and not computable
    } else if ((ctx.oldValue == null) && (ctx.cause == null)) {
      afterWrite(new AddTask(node, ctx.newWeight));
    } else {
      int weightedDifference = ctx.newWeight - ctx.oldWeight;
      if (ctx.exceedsTolerance || (weightedDifference != 0)) {
        afterWrite(new UpdateTask(node, weightedDifference));
      } else {
        afterRead(node, ctx.now, /* recordHit= */ false);
        if ((ctx.cause != null) && ctx.cause.wasEvicted()) {
          scheduleDrainBuffers();
        }
      }
    }

    if (ctx.exception != null) {
      throwException(ctx.exception);
    }
    return ctx.newValue;
  }
3005

3006
  @Override
3007
  public void forEach(BiConsumer<? super K, ? super V> action) {
3008
    requireNonNull(action);
1✔
3009

3010
    for (var iterator = new EntryIterator<>(this); iterator.hasNext();) {
1✔
3011
      action.accept(iterator.key, iterator.value);
1✔
3012
      iterator.advance();
1✔
3013
    }
3014
  }
1✔
3015

3016
  @Override
3017
  public Set<K> keySet() {
3018
    Set<K> ks = keySet;
1✔
3019
    return (ks == null) ? (keySet = new KeySetView<>(this)) : ks;
1✔
3020
  }
3021

3022
  @Override
3023
  public Collection<V> values() {
3024
    Collection<V> vs = values;
1✔
3025
    return (vs == null) ? (values = new ValuesView<>(this)) : vs;
1✔
3026
  }
3027

3028
  @Override
3029
  public Set<Entry<K, V>> entrySet() {
3030
    Set<Entry<K, V>> es = entrySet;
1✔
3031
    return (es == null) ? (entrySet = new EntrySetView<>(this)) : es;
1✔
3032
  }
3033

3034
  /**
   * Object equality requires reflexive, symmetric, transitive, and consistency properties. Of
   * these, symmetry and consistency require further clarification for how they are upheld.
   * <p>
   * The <i>consistency</i> property between invocations requires that the results are the same if
   * there are no modifications to the information used. Therefore, usages should expect that this
   * operation may return misleading results if either the maps or the data held by them is modified
   * during the execution of this method. This characteristic allows for comparing the map sizes and
   * assuming stable mappings, as done by {@link java.util.AbstractMap}-based maps.
   * <p>
   * The <i>symmetric</i> property requires that the result is the same for all implementations of
   * {@link Map#equals(Object)}. That contract is defined in terms of the stable mappings provided
   * by {@link #entrySet()}, meaning that the {@link #size()} optimization forces that the count is
   * consistent with the mappings when used for an equality check.
   * <p>
   * The cache's {@link #size()} method may include entries that have expired or have been reference
   * collected, but have not yet been removed from the backing map. An iteration over the map may
   * trigger the removal of these dead entries when skipped over during traversal. To ensure
   * consistency and symmetry, usages should call {@link #cleanUp()} before this method while no
   * other concurrent operations are being performed on this cache. This is not done implicitly by
   * {@link #size()} as many usages assume it to be instantaneous and lock-free.
   */
  @Override
  public boolean equals(@Nullable Object o) {
    if (o == this) {
      return true;
    } else if (!(o instanceof Map)) {
      return false;
    }

    var map = (Map<?, ?>) o;
    if (size() != map.size()) {
      return false;
    }

    long now = expirationTicker().read();
    for (var node : data.values()) {
      K key = node.getKey();
      V value = node.getValue();
      if ((key == null) || (value == null)
          || !node.isAlive() || hasExpired(node, now, value)) {
        // A dead entry means the size comparison above was unreliable; prompt cleanup and bail
        scheduleDrainBuffers();
        return false;
      } else {
        var val = map.get(key);
        // Identity check first to skip a potentially expensive equals call
        if ((val == null) || ((val != value) && !val.equals(value))) {
          return false;
        }
      }
    }
    return true;
  }
3086

3087
  @Override
3088
  public int hashCode() {
3089
    @Var int hash = 0;
1✔
3090
    @Var boolean drain = false;
1✔
3091
    long now = expirationTicker().read();
1✔
3092
    for (var node : data.values()) {
1✔
3093
      K key = node.getKey();
1✔
3094
      V value = node.getValue();
1✔
3095
      if ((key == null) || (value == null)
1✔
3096
          || !node.isAlive() || hasExpired(node, now, value)) {
1✔
3097
        drain = true;
1✔
3098
      } else {
3099
        hash += key.hashCode() ^ value.hashCode();
1✔
3100
      }
3101
    }
1✔
3102
    if (drain) {
1✔
3103
      scheduleDrainBuffers();
1✔
3104
    }
3105
    return hash;
1✔
3106
  }
3107

3108
  @Override
3109
  public String toString() {
3110
    @Var boolean drain = false;
1✔
3111
    long now = expirationTicker().read();
1✔
3112
    var result = new StringBuilder().append('{');
1✔
3113
    for (var node : data.values()) {
1✔
3114
      K key = node.getKey();
1✔
3115
      V value = node.getValue();
1✔
3116
      if ((key == null) || (value == null)
1✔
3117
          || !node.isAlive() || hasExpired(node, now, value)) {
1✔
3118
        drain = true;
1✔
3119
      } else {
3120
        if (result.length() != 1) {
1✔
3121
          result.append(',').append(' ');
1✔
3122
        }
3123
        result.append((key == this) ? "(this Map)" : key);
1✔
3124
        result.append('=');
1✔
3125
        result.append((value == this) ? "(this Map)" : value);
1✔
3126
      }
3127
    }
1✔
3128
    if (drain) {
1✔
3129
      scheduleDrainBuffers();
1✔
3130
    }
3131
    return result.append('}').toString();
1✔
3132
  }
3133

3134
  /**
   * Returns the computed result from the ordered traversal of the cache entries.
   *
   * @param hottest the coldest or hottest iteration order
   * @param transformer a function that unwraps the value
   * @param mappingFunction the mapping function to compute a value
   * @return the computed value
   */
  @SuppressWarnings("GuardedByChecker")
  <T> T evictionOrder(boolean hottest, Function<@Nullable V, @Nullable V> transformer,
      Function<Stream<CacheEntry<K, V>>, T> mappingFunction) {
    // Rank entries by their frequency sketch estimate; a reclaimed key ranks as zero
    Comparator<Node<K, V>> comparator = Comparator.comparingInt(node -> {
      var keyRef = node.getKeyReferenceOrNull();
      return (keyRef == null) ? 0 : frequencySketch().frequency(keyRef);
    });
    Iterable<Node<K, V>> iterable;
    if (hottest) {
      // Hottest first: all protected entries (in reverse access order), followed by the
      // probation and window deques merged in reverse using the frequency comparator
      iterable = () -> {
        var secondary = PeekingIterator.comparing(
            accessOrderProbationDeque().descendingIterator(),
            accessOrderWindowDeque().descendingIterator(), comparator);
        return PeekingIterator.concat(
            accessOrderProtectedDeque().descendingIterator(), secondary);
      };
    } else {
      // Coldest first: the window and probation deques merged with the reversed comparator,
      // followed by the protected entries in access order
      iterable = () -> {
        var primary = PeekingIterator.comparing(
            accessOrderWindowDeque().iterator(), accessOrderProbationDeque().iterator(),
            comparator.reversed());
        return PeekingIterator.concat(primary, accessOrderProtectedDeque().iterator());
      };
    }
    return snapshot(iterable, transformer, mappingFunction);
  }
3168

3169
  /**
   * Returns the computed result from the ordered traversal of the cache entries.
   *
   * @param oldest the youngest or oldest iteration order
   * @param transformer a function that unwraps the value
   * @param mappingFunction the mapping function to compute a value
   * @return the computed value
   */
  @SuppressWarnings("GuardedByChecker")
  <T> T expireAfterAccessOrder(boolean oldest, Function<@Nullable V, @Nullable V> transformer,
      Function<Stream<CacheEntry<K, V>>, T> mappingFunction) {
    Iterable<Node<K, V>> iterable;
    if (evicts()) {
      // With size eviction the entries are spread over three access-ordered deques,
      // so merge them by access time
      iterable = () -> {
        @Var Comparator<Node<K, V>> comparator = Comparator.comparingLong(Node::getAccessTime);
        PeekingIterator<Node<K, V>> first;
        PeekingIterator<Node<K, V>> second;
        PeekingIterator<Node<K, V>> third;
        if (oldest) {
          first = accessOrderWindowDeque().iterator();
          second = accessOrderProbationDeque().iterator();
          third = accessOrderProtectedDeque().iterator();
        } else {
          // Youngest first: walk each deque in reverse and invert the merge comparator
          comparator = comparator.reversed();
          first = accessOrderWindowDeque().descendingIterator();
          second = accessOrderProbationDeque().descendingIterator();
          third = accessOrderProtectedDeque().descendingIterator();
        }
        return PeekingIterator.comparing(
            PeekingIterator.comparing(first, second, comparator), third, comparator);
      };
    } else {
      // Without size eviction every entry resides in the window deque, already access ordered
      iterable = oldest
          ? accessOrderWindowDeque()
          : accessOrderWindowDeque()::descendingIterator;
    }
    return snapshot(iterable, transformer, mappingFunction);
  }
3207

3208
  /**
   * Returns the computed result from the ordered traversal of the cache entries.
   *
   * @param iterable the supplier of the entries in the cache
   * @param transformer a function that unwraps the value
   * @param mappingFunction the mapping function to compute a value
   * @return the computed value
   */
  <T> T snapshot(Iterable<Node<K, V>> iterable, Function<@Nullable V, @Nullable V> transformer,
      Function<Stream<CacheEntry<K, V>>, T> mappingFunction) {
    requireNonNull(mappingFunction);
    requireNonNull(transformer);
    requireNonNull(iterable);

    // Traverse under the eviction lock so the policy deques are stable, after first
    // running maintenance to flush any pending reads/writes into the policy structures
    evictionLock.lock();
    try {
      maintenance(/* ignored */ null);

      // Obtain the iterator as late as possible for modification count checking
      try (var stream = StreamSupport.stream(Spliterators.spliteratorUnknownSize(
           iterable.iterator(), DISTINCT | ORDERED | NONNULL | IMMUTABLE), /* parallel= */ false)) {
        // Dead or expired nodes map to null and are filtered out of the snapshot
        return mappingFunction.apply(stream
            .map(node -> nodeToCacheEntry(node, transformer, node.getPolicyWeight()))
            .filter(Objects::nonNull));
      }
    } finally {
      evictionLock.unlock();
      // If the maintenance work was left incomplete, make sure it is retried later
      rescheduleCleanUpIfIncomplete();
    }
  }
3238

3239
  /**
   * Returns an entry for the given node if it can be used externally, else null. The weight is
   * caller-supplied: snapshot callers hold evictionLock and read policyWeight (in sync with the
   * drain thread); unlocked readers read weight, which may be stale for a concurrent in-place
   * update (acceptable for a point-in-time CacheEntry).
   */
  @Nullable CacheEntry<K, V> nodeToCacheEntry(
      Node<K, V> node, Function<@Nullable V, @Nullable V> transformer, int weight) {
    V rawValue = node.getValue();
    if (rawValue == null) {
      return null;
    }
    V value = transformer.apply(rawValue);
    K key = node.getKey();
    long now;
    // Skip entries that were reclaimed, unwrapped to null, retired, or have expired
    if ((key == null) || (value == null) || !node.isAlive()
        || hasExpired(node, (now = expirationTicker().read()), rawValue)) {
      return null;
    }

    // The remaining lifetime is the minimum over the enabled expiration policies
    @Var long expiresAfter = Long.MAX_VALUE;
    if (expiresAfterAccess()) {
      expiresAfter = Math.min(expiresAfter,
          expiresAfterAccessNanos() - (now - node.getAccessTime()));
    }
    if (expiresAfterWrite()) {
      // The low bit of the write timestamps is masked off (appears to be reserved as a
      // flag bit — confirm against the write-time producers)
      expiresAfter = Math.min(expiresAfter,
          expiresAfterWriteNanos() - ((now & ~1L) - (node.getWriteTime() & ~1L)));
    }
    if (expiresVariable()) {
      expiresAfter = node.getVariableTime() - now;
    }

    // NOTE(review): (now + Long.MAX_VALUE) deliberately wraps; downstream comparisons
    // presumably tolerate the overflowed sentinel — confirm in SnapshotEntry
    long refreshableAt = refreshAfterWrite()
        ? (node.getWriteTime() & ~1L) + refreshAfterWriteNanos()
        : now + Long.MAX_VALUE;
    return SnapshotEntry.forEntry(key, value, now, weight, now + expiresAfter, refreshableAt);
  }
3277

3278
  /** Mutable context for passing state between a lambda and the caller. */
  static final class EvictContext<V> {
    @Nullable RemovalCause cause;  // the removal reason, once determined
    @Nullable V value;             // the value observed during the eviction attempt
    boolean resurrect;             // presumably: abandon the eviction, entry is live — confirm at use site
    boolean removed;               // whether the mapping was removed from the backing map
  }
3285

3286
  /** Mutable context for passing state between a lambda and the caller. */
  static final class RemoveContext<K, V> {
    @Nullable K oldKey;            // the key held by the removed node
    @Nullable V oldValue;          // the value held by the removed node
    @Nullable Node<K, V> node;     // the node removed from the backing map, if any
    @Nullable RemovalCause cause;  // the removal reason, once determined
  }
3293

3294
  /** Mutable context for passing state between a lambda and the caller. */
  static final class ReplaceContext<K, V> {
    @Nullable K nodeKey;       // the key held by the node being replaced
    @Nullable V oldValue;      // the value being replaced

    long now;                  // the timestamp of the operation
    int oldWeight;             // the weight prior to the replacement
    boolean exceedsTolerance;  // presumably: the time delta warrants a policy update — confirm at use site
  }
3303

3304
  /** Mutable context for passing state between a lambda and the caller. */
  static final class ComputeContext<K, V> {
    @Nullable K nodeKey;             // the key held by the node being computed on
    @Nullable V oldValue;            // the prior value, if present
    @Nullable V newValue;            // the computed value, if any
    @Nullable Node<K, V> removed;    // the node removed by the computation, if any
    @Nullable Throwable exception;   // a failure raised by the compute function, if any
    @Nullable RemovalCause cause;    // the removal reason, once determined

    long now;                  // the timestamp of the operation
    int oldWeight;             // the weight prior to the computation
    int newWeight;             // the weight after the computation
    boolean exceedsTolerance;  // presumably: the time delta warrants a policy update — confirm at use site

    ComputeContext(long now) {
      this.now = now;
    }
  }
3322

3323
  /** A function that produces an unmodifiable map up to the limit in stream order. */
3324
  static final class SizeLimiter<K, V> implements Function<Stream<CacheEntry<K, V>>, Map<K, V>> {
3325
    private final int expectedSize;
3326
    private final long limit;
3327

3328
    SizeLimiter(int expectedSize, long limit) {
1✔
3329
      requireArgument(limit >= 0);
1✔
3330
      this.expectedSize = expectedSize;
1✔
3331
      this.limit = limit;
1✔
3332
    }
1✔
3333

3334
    @Override
3335
    public Map<K, V> apply(Stream<CacheEntry<K, V>> stream) {
3336
      var map = new LinkedHashMap<K, V>(calculateHashMapCapacity(expectedSize));
1✔
3337
      stream.limit(limit).forEach(entry -> map.put(entry.getKey(), entry.getValue()));
1✔
3338
      return Collections.unmodifiableMap(map);
1✔
3339
    }
3340
  }
3341

3342
  /** A function that produces an unmodifiable map up to the weighted limit in stream order. */
3343
  static final class WeightLimiter<K, V> implements Function<Stream<CacheEntry<K, V>>, Map<K, V>> {
3344
    private final long weightLimit;
3345

3346
    private long weightedSize;
3347

3348
    WeightLimiter(long weightLimit) {
1✔
3349
      requireArgument(weightLimit >= 0);
1✔
3350
      this.weightLimit = weightLimit;
1✔
3351
    }
1✔
3352

3353
    @Override
3354
    public Map<K, V> apply(Stream<CacheEntry<K, V>> stream) {
3355
      var map = new LinkedHashMap<K, V>();
1✔
3356
      stream.takeWhile(entry -> {
1✔
3357
        weightedSize = Math.addExact(weightedSize, entry.weight());
1✔
3358
        return (weightedSize <= weightLimit);
1✔
3359
      }).forEach(entry -> map.put(entry.getKey(), entry.getValue()));
1✔
3360
      return Collections.unmodifiableMap(map);
1✔
3361
    }
3362
  }
3363

3364
  /** An adapter to safely externalize the keys. */
3365
  static final class KeySetView<K, V> extends AbstractSet<K> {
3366
    final BoundedLocalCache<K, V> cache;
3367

3368
    KeySetView(BoundedLocalCache<K, V> cache) {
1✔
3369
      this.cache = requireNonNull(cache);
1✔
3370
    }
1✔
3371

3372
    @Override
3373
    public int size() {
3374
      return cache.size();
1✔
3375
    }
3376

3377
    @Override
3378
    public void clear() {
3379
      cache.clear();
1✔
3380
    }
1✔
3381

3382
    @Override
3383
    @SuppressWarnings("SuspiciousMethodCalls")
3384
    public boolean contains(Object o) {
3385
      return cache.containsKey(o);
1✔
3386
    }
3387

3388
    @Override
3389
    public boolean removeAll(Collection<?> collection) {
3390
      requireNonNull(collection);
1✔
3391
      @Var boolean modified = false;
1✔
3392
      if ((collection instanceof Set<?>) && (collection.size() > size())) {
1✔
3393
        for (K key : this) {
1✔
3394
          if (collection.contains(key)) {
1✔
3395
            modified |= remove(key);
1✔
3396
          }
3397
        }
1✔
3398
      } else {
3399
        for (var item : collection) {
1✔
3400
          modified |= (item != null) && remove(item);
1✔
3401
        }
1✔
3402
      }
3403
      return modified;
1✔
3404
    }
3405

3406
    @Override
3407
    public boolean remove(Object o) {
3408
      return (cache.remove(o) != null);
1✔
3409
    }
3410

3411
    @Override
3412
    public boolean removeIf(Predicate<? super K> filter) {
3413
      requireNonNull(filter);
1✔
3414
      @Var boolean modified = false;
1✔
3415
      for (K key : this) {
1✔
3416
        if (filter.test(key) && remove(key)) {
1✔
3417
          modified = true;
1✔
3418
        }
3419
      }
1✔
3420
      return modified;
1✔
3421
    }
3422

3423
    @Override
3424
    public boolean retainAll(Collection<?> collection) {
3425
      requireNonNull(collection);
1✔
3426
      @Var boolean modified = false;
1✔
3427
      for (K key : this) {
1✔
3428
        if (!collection.contains(key) && remove(key)) {
1✔
3429
          modified = true;
1✔
3430
        }
3431
      }
1✔
3432
      return modified;
1✔
3433
    }
3434

3435
    @Override
3436
    public Iterator<K> iterator() {
3437
      return new KeyIterator<>(cache);
1✔
3438
    }
3439

3440
    @Override
3441
    public Spliterator<K> spliterator() {
3442
      return new KeySpliterator<>(cache);
1✔
3443
    }
3444
  }
3445

3446
  /** An adapter to safely externalize the key iterator. */
  static final class KeyIterator<K, V> implements Iterator<K> {
    // All traversal, liveness filtering, and removal logic is delegated to EntryIterator
    final EntryIterator<K, V> iterator;

    KeyIterator(BoundedLocalCache<K, V> cache) {
      this.iterator = new EntryIterator<>(cache);
    }

    @Override
    public boolean hasNext() {
      return iterator.hasNext();
    }

    @Override
    public K next() {
      return iterator.nextKey();
    }

    @Override
    public void remove() {
      iterator.remove();
    }
  }
3469

3470
  /** An adapter to safely externalize the key spliterator. */
  static final class KeySpliterator<K, V> implements Spliterator<K> {
    final Spliterator<Node<K, V>> spliterator;  // the underlying traversal of the node table
    final BoundedLocalCache<K, V> cache;

    KeySpliterator(BoundedLocalCache<K, V> cache) {
      this(cache, cache.data.values().spliterator());
    }

    KeySpliterator(BoundedLocalCache<K, V> cache, Spliterator<Node<K, V>> spliterator) {
      this.spliterator = requireNonNull(spliterator);
      this.cache = requireNonNull(cache);
    }

    @Override
    public void forEachRemaining(Consumer<? super K> action) {
      requireNonNull(action);
      // Only surface the keys of live, unexpired entries
      Consumer<Node<K, V>> consumer = node -> {
        K key = node.getKey();
        V value = node.getValue();
        long now = cache.expirationTicker().read();
        if ((key != null) && (value != null) && node.isAlive()
            && !cache.hasExpired(node, now, value)) {
          action.accept(key);
        }
      };
      spliterator.forEachRemaining(consumer);
    }

    @Override
    public boolean tryAdvance(Consumer<? super K> action) {
      requireNonNull(action);
      // The consumer sets the flag when a live entry was emitted; dead entries are skipped
      boolean[] advanced = { false };
      Consumer<Node<K, V>> consumer = node -> {
        K key = node.getKey();
        V value = node.getValue();
        long now = cache.expirationTicker().read();
        if ((key != null) && (value != null) && node.isAlive()
            && !cache.hasExpired(node, now, value)) {
          action.accept(key);
          advanced[0] = true;
        }
      };
      // Keep pulling nodes until an element is emitted or the source is exhausted
      while (spliterator.tryAdvance(consumer)) {
        if (advanced[0]) {
          return true;
        }
      }
      return false;
    }

    @Override
    public @Nullable Spliterator<K> trySplit() {
      Spliterator<Node<K, V>> split = spliterator.trySplit();
      return (split == null) ? null : new KeySpliterator<>(cache, split);
    }

    @Override
    public long estimateSize() {
      return spliterator.estimateSize();
    }

    @Override
    public int characteristics() {
      return DISTINCT | CONCURRENT | NONNULL;
    }
  }
3537

3538
  /** An adapter to safely externalize the values. */
  static final class ValuesView<K, V> extends AbstractCollection<V> {
    final BoundedLocalCache<K, V> cache;

    ValuesView(BoundedLocalCache<K, V> cache) {
      this.cache = requireNonNull(cache);
    }

    @Override
    public int size() {
      return cache.size();
    }

    @Override
    public void clear() {
      cache.clear();
    }

    @Override
    @SuppressWarnings("SuspiciousMethodCalls")
    public boolean contains(Object o) {
      return cache.containsValue(o);
    }

    @Override
    public boolean removeAll(Collection<?> collection) {
      requireNonNull(collection);
      @Var boolean modified = false;
      // While positioned on a live entry the iterator's key/value fields are non-null;
      // advance() must be called on every pass to move to the next entry
      for (var iterator = new EntryIterator<>(cache); iterator.hasNext();) {
        var key = requireNonNull(iterator.key);
        var value = requireNonNull(iterator.value);
        // remove(key, value) only succeeds if the mapping is still current
        if (collection.contains(value) && cache.remove(key, value)) {
          modified = true;
        }
        iterator.advance();
      }
      return modified;
    }

    @Override
    public boolean remove(@Nullable Object o) {
      if (o == null) {
        return false;
      }
      for (var iterator = new EntryIterator<>(cache); iterator.hasNext();) {
        var key = requireNonNull(iterator.key);
        var value = requireNonNull(iterator.value);
        if (o.equals(value) && cache.remove(key, value)) {
          return true;
        }
        iterator.advance();
      }
      return false;
    }

    @Override
    public boolean removeIf(Predicate<? super V> filter) {
      requireNonNull(filter);
      @Var boolean modified = false;
      for (var iterator = new EntryIterator<>(cache); iterator.hasNext();) {
        var value = requireNonNull(iterator.value);
        if (filter.test(value)) {
          var key = requireNonNull(iterator.key);
          modified |= cache.remove(key, value);
        }
        iterator.advance();
      }
      return modified;
    }

    @Override
    public boolean retainAll(Collection<?> collection) {
      requireNonNull(collection);
      @Var boolean modified = false;
      for (var iterator = new EntryIterator<>(cache); iterator.hasNext();) {
        var key = requireNonNull(iterator.key);
        var value = requireNonNull(iterator.value);
        if (!collection.contains(value) && cache.remove(key, value)) {
          modified = true;
        }
        iterator.advance();
      }
      return modified;
    }

    @Override
    public Iterator<V> iterator() {
      return new ValueIterator<>(cache);
    }

    @Override
    public Spliterator<V> spliterator() {
      return new ValueSpliterator<>(cache);
    }
  }
3633

3634
  /** An adapter to safely externalize the value iterator. */
  static final class ValueIterator<K, V> implements Iterator<V> {
    // All traversal, liveness filtering, and removal logic is delegated to EntryIterator
    final EntryIterator<K, V> iterator;

    ValueIterator(BoundedLocalCache<K, V> cache) {
      this.iterator = new EntryIterator<>(cache);
    }

    @Override
    public boolean hasNext() {
      return iterator.hasNext();
    }

    @Override
    public V next() {
      return iterator.nextValue();
    }

    @Override
    public void remove() {
      iterator.remove();
    }
  }
3657

3658
  /** An adapter to safely externalize the value spliterator. */
  static final class ValueSpliterator<K, V> implements Spliterator<V> {
    final Spliterator<Node<K, V>> spliterator;  // the underlying traversal of the node table
    final BoundedLocalCache<K, V> cache;

    ValueSpliterator(BoundedLocalCache<K, V> cache) {
      this(cache, cache.data.values().spliterator());
    }

    ValueSpliterator(BoundedLocalCache<K, V> cache, Spliterator<Node<K, V>> spliterator) {
      this.spliterator = requireNonNull(spliterator);
      this.cache = requireNonNull(cache);
    }

    @Override
    public void forEachRemaining(Consumer<? super V> action) {
      requireNonNull(action);
      // Only surface the values of live, unexpired entries
      Consumer<Node<K, V>> consumer = node -> {
        K key = node.getKey();
        V value = node.getValue();
        long now = cache.expirationTicker().read();
        if ((key != null) && (value != null) && node.isAlive()
            && !cache.hasExpired(node, now, value)) {
          action.accept(value);
        }
      };
      spliterator.forEachRemaining(consumer);
    }

    @Override
    public boolean tryAdvance(Consumer<? super V> action) {
      requireNonNull(action);
      // The consumer sets the flag when a live entry was emitted; dead entries are skipped
      boolean[] advanced = { false };
      Consumer<Node<K, V>> consumer = node -> {
        K key = node.getKey();
        V value = node.getValue();
        long now = cache.expirationTicker().read();
        if ((key != null) && (value != null) && node.isAlive()
            && !cache.hasExpired(node, now, value)) {
          action.accept(value);
          advanced[0] = true;
        }
      };
      // Keep pulling nodes until an element is emitted or the source is exhausted
      while (spliterator.tryAdvance(consumer)) {
        if (advanced[0]) {
          return true;
        }
      }
      return false;
    }

    @Override
    public @Nullable Spliterator<V> trySplit() {
      Spliterator<Node<K, V>> split = spliterator.trySplit();
      return (split == null) ? null : new ValueSpliterator<>(cache, split);
    }

    @Override
    public long estimateSize() {
      return spliterator.estimateSize();
    }

    @Override
    public int characteristics() {
      return CONCURRENT | NONNULL;
    }
  }
3725

3726
  /** An adapter to safely externalize the entries. */
  static final class EntrySetView<K, V> extends AbstractSet<Entry<K, V>> {
    final BoundedLocalCache<K, V> cache;

    EntrySetView(BoundedLocalCache<K, V> cache) {
      this.cache = requireNonNull(cache);
    }

    @Override
    public int size() {
      return cache.size();
    }

    @Override
    public void clear() {
      cache.clear();
    }

    @Override
    public boolean contains(Object o) {
      if (!(o instanceof Entry<?, ?>)) {
        return false;
      }
      var entry = (Entry<?, ?>) o;
      var key = entry.getKey();
      var value = entry.getValue();
      if ((key == null) || (value == null)) {
        return false;
      }
      // A lookup key avoids materializing the cache's internal key representation
      Node<K, V> node = cache.data.get(cache.nodeFactory.newLookupKey(key));
      if (node == null) {
        return false;
      }
      // The entry must match and must not have lapsed via expiration
      V nodeValue = node.getValue();
      return (nodeValue != null) && node.containsValue(value)
          && !cache.hasExpired(node, cache.expirationTicker().read(), nodeValue);
    }

    @Override
    public boolean removeAll(Collection<?> collection) {
      requireNonNull(collection);
      @Var boolean modified = false;
      // Iterate over whichever side is expected to be smaller
      if ((collection instanceof Set<?>) && (collection.size() > size())) {
        for (var entry : this) {
          if (collection.contains(entry)) {
            modified |= remove(entry);
          }
        }
      } else {
        for (var item : collection) {
          modified |= (item != null) && remove(item);
        }
      }
      return modified;
    }

    @Override
    @SuppressWarnings("SuspiciousMethodCalls")
    public boolean remove(Object o) {
      if (!(o instanceof Entry<?, ?>)) {
        return false;
      }
      var entry = (Entry<?, ?>) o;
      var key = entry.getKey();
      // Conditional removal so only the exact key->value mapping is discarded
      return (key != null) && cache.remove(key, entry.getValue());
    }

    @Override
    public boolean removeIf(Predicate<? super Entry<K, V>> filter) {
      requireNonNull(filter);
      @Var boolean modified = false;
      for (Entry<K, V> entry : this) {
        if (filter.test(entry)) {
          modified |= cache.remove(entry.getKey(), entry.getValue());
        }
      }
      return modified;
    }

    @Override
    public boolean retainAll(Collection<?> collection) {
      requireNonNull(collection);
      @Var boolean modified = false;
      for (var entry : this) {
        if (!collection.contains(entry) && remove(entry)) {
          modified = true;
        }
      }
      return modified;
    }

    @Override
    public Iterator<Entry<K, V>> iterator() {
      return new EntryIterator<>(cache);
    }

    @Override
    public Spliterator<Entry<K, V>> spliterator() {
      return new EntrySpliterator<>(cache);
    }
  }
3827

3828
  /** An adapter to safely externalize the entry iterator. */
  static final class EntryIterator<K, V> implements Iterator<Entry<K, V>> {
    final BoundedLocalCache<K, V> cache;
    final Iterator<Node<K, V>> iterator;  // the underlying traversal of the node table

    @Nullable K key;                // key of the current (pre-fetched) entry
    @Nullable V value;              // value of the current (pre-fetched) entry
    @Nullable K removalKey;         // key of the last returned entry, for remove()
    @Nullable Node<K, V> next;      // the pre-fetched node, or null if not yet positioned

    EntryIterator(BoundedLocalCache<K, V> cache) {
      this.iterator = cache.data.values().iterator();
      this.cache = cache;
    }

    @Override
    public boolean hasNext() {
      // Already positioned on an unreturned live entry
      if (next != null) {
        return true;
      }

      long now = cache.expirationTicker().read();
      while (iterator.hasNext()) {
        next = iterator.next();
        value = next.getValue();
        key = next.getKey();

        // Skip reclaimed, expired, or retired entries; expired/reclaimed entries also
        // trigger a cleanup so that they are eventually removed from the map
        boolean evictable = (key == null) || (value == null) || cache.hasExpired(next, now, value);
        if (evictable || !next.isAlive()) {
          if (evictable) {
            cache.scheduleDrainBuffers();
          }
          advance();
          continue;
        }
        return true;
      }
      return false;
    }

    /** Invalidates the current position so that the iterator may compute the next position. */
    void advance() {
      value = null;
      next = null;
      key = null;
    }

    K nextKey() {
      if (!hasNext()) {
        throw new NoSuchElementException();
      }
      removalKey = key;
      advance();
      return requireNonNull(removalKey);
    }

    V nextValue() {
      if (!hasNext()) {
        throw new NoSuchElementException();
      }
      removalKey = key;
      V val = value;
      advance();
      return requireNonNull(val);
    }

    @Override
    public Entry<K, V> next() {
      if (!hasNext()) {
        throw new NoSuchElementException();
      }
      // A write-through entry propagates setValue() back to the cache
      var entry = new WriteThroughEntry<K, @NonNull V>(
          cache, requireNonNull(key), requireNonNull(value));
      removalKey = key;
      advance();
      return entry;
    }

    @Override
    public void remove() {
      // Per the Iterator contract, remove() is only valid after a next*() call
      if (removalKey == null) {
        throw new IllegalStateException();
      }
      cache.remove(removalKey);
      removalKey = null;
    }
  }
3915

3916
  /** An adapter to safely externalize the entry spliterator. */
  static final class EntrySpliterator<K, V> implements Spliterator<Entry<K, V>> {
    final Spliterator<Node<K, V>> spliterator;  // the underlying traversal of the node table
    final BoundedLocalCache<K, V> cache;

    EntrySpliterator(BoundedLocalCache<K, V> cache) {
      this(cache, cache.data.values().spliterator());
    }

    EntrySpliterator(BoundedLocalCache<K, V> cache, Spliterator<Node<K, V>> spliterator) {
      this.spliterator = requireNonNull(spliterator);
      this.cache = requireNonNull(cache);
    }

    @Override
    public void forEachRemaining(Consumer<? super Entry<K, V>> action) {
      requireNonNull(action);
      // Only surface live, unexpired entries, wrapped so setValue() writes through
      Consumer<Node<K, V>> consumer = node -> {
        K key = node.getKey();
        V value = node.getValue();
        long now = cache.expirationTicker().read();
        if ((key != null) && (value != null) && node.isAlive()
            && !cache.hasExpired(node, now, value)) {
          action.accept(new WriteThroughEntry<>(cache, key, value));
        }
      };
      spliterator.forEachRemaining(consumer);
    }

    @Override
    public boolean tryAdvance(Consumer<? super Entry<K, V>> action) {
      requireNonNull(action);
      // The consumer sets the flag when a live entry was emitted; dead entries are skipped
      boolean[] advanced = { false };
      Consumer<Node<K, V>> consumer = node -> {
        K key = node.getKey();
        V value = node.getValue();
        long now = cache.expirationTicker().read();
        if ((key != null) && (value != null) && node.isAlive()
            && !cache.hasExpired(node, now, value)) {
          action.accept(new WriteThroughEntry<>(cache, key, value));
          advanced[0] = true;
        }
      };
      // Keep pulling nodes until an element is emitted or the source is exhausted
      while (spliterator.tryAdvance(consumer)) {
        if (advanced[0]) {
          return true;
        }
      }
      return false;
    }

    @Override
    public @Nullable Spliterator<Entry<K, V>> trySplit() {
      Spliterator<Node<K, V>> split = spliterator.trySplit();
      return (split == null) ? null : new EntrySpliterator<>(cache, split);
    }

    @Override
    public long estimateSize() {
      return spliterator.estimateSize();
    }

    @Override
    public int characteristics() {
      return DISTINCT | CONCURRENT | NONNULL;
    }
  }
3983

3984
  /** A reusable task that performs the maintenance work; used to avoid wrapping by ForkJoinPool. */
  static final class PerformCleanupTask extends ForkJoinTask<@Nullable Void> implements Runnable {
    private static final long serialVersionUID = 1L;

    // A weak reference so this long-lived, resubmittable task does not pin the cache in memory
    final WeakReference<BoundedLocalCache<?, ?>> reference;

    PerformCleanupTask(BoundedLocalCache<?, ?> cache) {
      reference = new WeakReference<>(cache);
    }

    @Override
    public boolean exec() {
      try {
        run();
      } catch (Throwable t) {
        // Never propagate: an escaping throwable would poison the reusable task
        logger.log(Level.ERROR, "Exception thrown when performing the maintenance task", t);
      }

      // Indicates that the task has not completed to allow subsequent submissions to execute
      return false;
    }

    @Override
    public void run() {
      // The cache may have been garbage collected; in that case there is nothing to clean up
      BoundedLocalCache<?, ?> cache = reference.get();
      if (cache != null) {
        cache.performCleanUp(/* ignored */ null);
      }
    }

    /**
     * This method cannot be ignored due to being final, so a hostile user supplied Executor could
     * forcibly complete the task and halt future executions. There are easier ways to intentionally
     * harm a system, so this is assumed to not happen in practice.
     */
    // public final void quietlyComplete() {}

    // The completion methods are neutered so that external callers cannot transition this reusable
    // task into a terminal state, which would silently stop all future maintenance runs
    @Override public void complete(@Nullable Void value) {}
    @Override public void setRawResult(@Nullable Void value) {}
    @Override public @Nullable Void getRawResult() { return null; }
    @Override public void completeExceptionally(@Nullable Throwable t) {}
    @Override public boolean cancel(boolean mayInterruptIfRunning) { return false; }
  }
4027

4028
  /** Creates a serialization proxy based on the common configuration shared by all cache types. */
4029
  static <K, V> SerializationProxy<K, V> makeSerializationProxy(BoundedLocalCache<?, ?> cache) {
4030
    var proxy = new SerializationProxy<K, V>();
1✔
4031
    proxy.weakKeys = cache.collectKeys();
1✔
4032
    proxy.weakValues = cache.nodeFactory.weakValues();
1✔
4033
    proxy.softValues = cache.nodeFactory.softValues();
1✔
4034
    proxy.isRecordingStats = cache.isRecordingStats();
1✔
4035
    proxy.evictionListener = cache.evictionListener;
1✔
4036
    proxy.removalListener = cache.removalListener();
1✔
4037
    proxy.ticker = cache.expirationTicker();
1✔
4038
    if (cache.expiresAfterAccess()) {
1✔
4039
      proxy.expiresAfterAccessNanos = cache.expiresAfterAccessNanos();
1✔
4040
    }
4041
    if (cache.expiresAfterWrite()) {
1✔
4042
      proxy.expiresAfterWriteNanos = cache.expiresAfterWriteNanos();
1✔
4043
    }
4044
    if (cache.expiresVariable()) {
1✔
4045
      proxy.expiry = cache.expiry();
1✔
4046
    }
4047
    if (cache.refreshAfterWrite()) {
1✔
4048
      proxy.refreshAfterWriteNanos = cache.refreshAfterWriteNanos();
1✔
4049
    }
4050
    if (cache.evicts()) {
1✔
4051
      if (cache.isWeighted) {
1✔
4052
        proxy.weigher = cache.weigher;
1✔
4053
        proxy.maximumWeight = cache.maximum();
1✔
4054
      } else {
4055
        proxy.maximumSize = cache.maximum();
1✔
4056
      }
4057
    }
4058
    proxy.cacheLoader = cache.cacheLoader;
1✔
4059
    proxy.async = cache.isAsync;
1✔
4060
    return proxy;
1✔
4061
  }
4062

4063
  /* --------------- Manual Cache --------------- */
4064

4065
  /** A manual cache backed by a bounded local cache. */
  static class BoundedLocalManualCache<K, V> implements LocalManualCache<K, V>, Serializable {
    private static final long serialVersionUID = 1;

    final BoundedLocalCache<K, V> cache;

    // Lazily created policy view; see policy() for the publication caveat
    @Nullable Policy<K, V> policy;

    BoundedLocalManualCache(Caffeine<K, V> builder) {
      this(builder, null);
    }

    BoundedLocalManualCache(Caffeine<K, V> builder, @Nullable CacheLoader<? super K, V> loader) {
      cache = LocalCacheFactory.newBoundedLocalCache(builder, loader, /* isAsync= */ false);
    }

    @Override
    public final BoundedLocalCache<K, V> cache() {
      return cache;
    }

    @Override
    public final Policy<K, V> policy() {
      // Unsynchronized lazy initialization: concurrent callers may each construct an
      // instance, which appears acceptable since the views are equivalent wrappers
      // (NOTE(review): confirm this benign-race assumption is intended)
      if (policy == null) {
        Function<@Nullable V, @Nullable V> identity = v -> v;
        policy = new BoundedPolicy<>(cache, identity, cache.isWeighted);
      }
      return policy;
    }

    // Serialization goes through the proxy; direct deserialization is rejected
    private void readObject(ObjectInputStream stream) throws InvalidObjectException {
      throw new InvalidObjectException("Proxy required");
    }

    private Object writeReplace() {
      return makeSerializationProxy(cache);
    }
  }
4102

4103
  /** The policy inspection and configuration view over a bounded local cache. */
  @SuppressWarnings({"NullableOptional",
    "OptionalAssignedToNull", "OptionalUsedAsFieldOrParameterType"})
  static final class BoundedPolicy<K, V> implements Policy<K, V> {
    // Unwraps stored values for external exposure (e.g. extracts from a future in async caches)
    final Function<@Nullable V, @Nullable V> transformer;
    final BoundedLocalCache<K, V> cache;
    final boolean isWeighted;

    // Lazily created, cached policy sub-views; null until first requested
    @Nullable Optional<Eviction<K, V>> eviction;
    @Nullable Optional<FixedRefresh<K, V>> refreshes;
    @Nullable Optional<FixedExpiration<K, V>> afterWrite;
    @Nullable Optional<FixedExpiration<K, V>> afterAccess;
    @Nullable Optional<VarExpiration<K, V>> variable;

    BoundedPolicy(BoundedLocalCache<K, V> cache,
        Function<@Nullable V, @Nullable V> transformer, boolean isWeighted) {
      this.transformer = transformer;
      this.isWeighted = isWeighted;
      this.cache = cache;
    }

    @Override public boolean isRecordingStats() {
      return cache.isRecordingStats();
    }
    @Override public @Nullable V getIfPresentQuietly(K key) {
      return transformer.apply(cache.getIfPresentQuietly(key));
    }
    @SuppressWarnings("GuardedByChecker")
    @Override public @Nullable CacheEntry<K, V> getEntryIfPresentQuietly(K key) {
      Node<K, V> node = cache.data.get(cache.nodeFactory.newLookupKey(key));
      return (node == null) ? null : cache.nodeToCacheEntry(node, transformer, node.getWeight());
    }
    @SuppressWarnings("Java9CollectionFactory")
    @Override public Map<K, CompletableFuture<V>> refreshes() {
      var refreshes = cache.refreshes;
      if ((refreshes == null) || refreshes.isEmpty()) {
        @SuppressWarnings({"ImmutableMapOf", "RedundantUnmodifiable"})
        Map<K, CompletableFuture<V>> emptyMap = Collections.unmodifiableMap(Collections.emptyMap());
        return emptyMap;
      } else if (cache.collectKeys()) {
        // Weak keys compare by identity, so the snapshot must preserve identity semantics
        // and skip entries whose key has been garbage collected
        var inFlight = new IdentityHashMap<K, CompletableFuture<V>>(refreshes.size());
        for (var entry : refreshes.entrySet()) {
          @SuppressWarnings("unchecked")
          @Nullable K key = ((InternalReference<K>) entry.getKey()).get();
          @SuppressWarnings("unchecked")
          var future = (CompletableFuture<V>) entry.getValue();
          if (key != null) {
            inFlight.put(key, future);
          }
        }
        return Collections.unmodifiableMap(inFlight);
      }
      @SuppressWarnings("unchecked")
      var castedRefreshes = (Map<K, CompletableFuture<V>>) (Object) refreshes;
      return Collections.unmodifiableMap(new HashMap<>(castedRefreshes));
    }
    @Override public Optional<Eviction<K, V>> eviction() {
      return cache.evicts()
          ? (eviction == null) ? (eviction = Optional.of(new BoundedEviction())) : eviction
          : Optional.empty();
    }
    @Override public Optional<FixedExpiration<K, V>> expireAfterAccess() {
      if (!cache.expiresAfterAccess()) {
        return Optional.empty();
      }
      return (afterAccess == null)
          ? (afterAccess = Optional.of(new BoundedExpireAfterAccess()))
          : afterAccess;
    }
    @Override public Optional<FixedExpiration<K, V>> expireAfterWrite() {
      if (!cache.expiresAfterWrite()) {
        return Optional.empty();
      }
      return (afterWrite == null)
          ? (afterWrite = Optional.of(new BoundedExpireAfterWrite()))
          : afterWrite;
    }
    @Override public Optional<VarExpiration<K, V>> expireVariably() {
      if (!cache.expiresVariable()) {
        return Optional.empty();
      }
      return (variable == null)
          ? (variable = Optional.of(new BoundedVarExpiration()))
          : variable;
    }
    @Override public Optional<FixedRefresh<K, V>> refreshAfterWrite() {
      if (!cache.refreshAfterWrite()) {
        return Optional.empty();
      }
      return (refreshes == null)
          ? (refreshes = Optional.of(new BoundedRefreshAfterWrite()))
          : refreshes;
    }

    /** The size- or weight-based eviction policy view. */
    final class BoundedEviction implements Eviction<K, V> {
      @Override public boolean isWeighted() {
        return isWeighted;
      }
      @Override public OptionalInt weightOf(K key) {
        requireNonNull(key);
        if (!isWeighted) {
          return OptionalInt.empty();
        }
        Node<K, V> node = cache.data.get(cache.nodeFactory.newLookupKey(key));
        if (node == null) {
          return OptionalInt.empty();
        }
        V value = node.getValue();
        if ((value == null) || cache.hasExpired(node, cache.expirationTicker().read(), value)) {
          return OptionalInt.empty();
        }
        // Lock the node so the liveness check and weight read are consistent
        synchronized (node) {
          return node.isAlive() ? OptionalInt.of(node.getWeight()) : OptionalInt.empty();
        }
      }
      @Override public OptionalLong weightedSize() {
        return isWeighted
            ? OptionalLong.of(Math.max(0, cache.weightedSizeAcquire()))
            : OptionalLong.empty();
      }
      @Override public long getMaximum() {
        return cache.maximumAcquire();
      }
      @Override public void setMaximum(long maximum) {
        // Resizing may trigger evictions, so perform maintenance under the eviction lock
        cache.evictionLock.lock();
        try {
          cache.setMaximumSize(maximum);
          cache.maintenance(/* ignored */ null);
        } finally {
          cache.evictionLock.unlock();
          cache.rescheduleCleanUpIfIncomplete();
        }
      }
      @Override public Map<K, V> coldest(int limit) {
        int expectedSize = Math.min(limit, cache.size());
        var limiter = new SizeLimiter<K, V>(expectedSize, limit);
        return cache.evictionOrder(/* hottest= */ false, transformer, limiter);
      }
      @Override public Map<K, V> coldestWeighted(long weightLimit) {
        // When unweighted, every entry weighs one, so a weight limit degenerates to a size limit
        var limiter = isWeighted()
            ? new WeightLimiter<K, V>(weightLimit)
            : new SizeLimiter<K, V>((int) Math.min(weightLimit, cache.size()), weightLimit);
        return cache.evictionOrder(/* hottest= */ false, transformer, limiter);
      }
      @Override
      public <T> T coldest(Function<Stream<CacheEntry<K, V>>, T> mappingFunction) {
        requireNonNull(mappingFunction);
        return cache.evictionOrder(/* hottest= */ false, transformer, mappingFunction);
      }
      @Override public Map<K, V> hottest(int limit) {
        int expectedSize = Math.min(limit, cache.size());
        var limiter = new SizeLimiter<K, V>(expectedSize, limit);
        return cache.evictionOrder(/* hottest= */ true, transformer, limiter);
      }
      @Override public Map<K, V> hottestWeighted(long weightLimit) {
        var limiter = isWeighted()
            ? new WeightLimiter<K, V>(weightLimit)
            : new SizeLimiter<K, V>((int) Math.min(weightLimit, cache.size()), weightLimit);
        return cache.evictionOrder(/* hottest= */ true, transformer, limiter);
      }
      @Override
      public <T> T hottest(Function<Stream<CacheEntry<K, V>>, T> mappingFunction) {
        requireNonNull(mappingFunction);
        return cache.evictionOrder(/* hottest= */ true, transformer, mappingFunction);
      }
    }

    /** The fixed expire-after-access policy view. */
    @SuppressWarnings("PreferJavaTimeOverload")
    final class BoundedExpireAfterAccess implements FixedExpiration<K, V> {
      @Override public OptionalLong ageOf(K key, TimeUnit unit) {
        requireNonNull(key);
        requireNonNull(unit);
        Object lookupKey = cache.nodeFactory.newLookupKey(key);
        Node<K, V> node = cache.data.get(lookupKey);
        if (node == null) {
          return OptionalLong.empty();
        }
        V value = node.getValue();
        if (value == null) {
          return OptionalLong.empty();
        }
        long now = cache.expirationTicker().read();
        return cache.hasExpired(node, now, value)
            ? OptionalLong.empty()
            : OptionalLong.of(unit.convert(now - node.getAccessTime(), TimeUnit.NANOSECONDS));
      }
      @Override public long getExpiresAfter(TimeUnit unit) {
        return unit.convert(cache.expiresAfterAccessNanos(), TimeUnit.NANOSECONDS);
      }
      @Override public void setExpiresAfter(long duration, TimeUnit unit) {
        requireArgument(duration >= 0);
        cache.setExpiresAfterAccessNanos(unit.toNanos(duration));
        // Trigger maintenance so entries newly eligible for expiration are processed
        cache.scheduleAfterWrite();
      }
      @Override public Map<K, V> oldest(int limit) {
        return oldest(new SizeLimiter<>(Math.min(limit, cache.size()), limit));
      }
      @Override public <T> T oldest(Function<Stream<CacheEntry<K, V>>, T> mappingFunction) {
        return cache.expireAfterAccessOrder(/* oldest= */ true, transformer, mappingFunction);
      }
      @Override public Map<K, V> youngest(int limit) {
        return youngest(new SizeLimiter<>(Math.min(limit, cache.size()), limit));
      }
      @Override public <T> T youngest(Function<Stream<CacheEntry<K, V>>, T> mappingFunction) {
        return cache.expireAfterAccessOrder(/* oldest= */ false, transformer, mappingFunction);
      }
    }

    /** The fixed expire-after-write policy view. */
    @SuppressWarnings("PreferJavaTimeOverload")
    final class BoundedExpireAfterWrite implements FixedExpiration<K, V> {
      @Override public OptionalLong ageOf(K key, TimeUnit unit) {
        requireNonNull(key);
        requireNonNull(unit);
        Object lookupKey = cache.nodeFactory.newLookupKey(key);
        Node<K, V> node = cache.data.get(lookupKey);
        if (node == null) {
          return OptionalLong.empty();
        }
        V value = node.getValue();
        if (value == null) {
          return OptionalLong.empty();
        }
        long now = cache.expirationTicker().read();
        // NOTE(review): the low bit of the timestamps is masked out before subtracting;
        // it appears to be reserved as a flag on write times — confirm against Node
        return cache.hasExpired(node, now, value)
            ? OptionalLong.empty()
            : OptionalLong.of(unit.convert(
                (now & ~1L) - (node.getWriteTime() & ~1L), TimeUnit.NANOSECONDS));
      }
      @Override public long getExpiresAfter(TimeUnit unit) {
        return unit.convert(cache.expiresAfterWriteNanos(), TimeUnit.NANOSECONDS);
      }
      @Override public void setExpiresAfter(long duration, TimeUnit unit) {
        requireArgument(duration >= 0);
        cache.setExpiresAfterWriteNanos(unit.toNanos(duration));
        cache.scheduleAfterWrite();
      }
      @Override public Map<K, V> oldest(int limit) {
        return oldest(new SizeLimiter<>(Math.min(limit, cache.size()), limit));
      }
      @SuppressWarnings("GuardedByChecker")
      @Override public <T> T oldest(Function<Stream<CacheEntry<K, V>>, T> mappingFunction) {
        return cache.snapshot(cache.writeOrderDeque(), transformer, mappingFunction);
      }
      @Override public Map<K, V> youngest(int limit) {
        return youngest(new SizeLimiter<>(Math.min(limit, cache.size()), limit));
      }
      @SuppressWarnings("GuardedByChecker")
      @Override public <T> T youngest(Function<Stream<CacheEntry<K, V>>, T> mappingFunction) {
        return cache.snapshot(cache.writeOrderDeque()::descendingIterator,
            transformer, mappingFunction);
      }
    }

    /** The variable (per-entry) expiration policy view. */
    @SuppressWarnings("PreferJavaTimeOverload")
    final class BoundedVarExpiration implements VarExpiration<K, V> {
      @Override public OptionalLong getExpiresAfter(K key, TimeUnit unit) {
        requireNonNull(key);
        requireNonNull(unit);
        Object lookupKey = cache.nodeFactory.newLookupKey(key);
        Node<K, V> node = cache.data.get(lookupKey);
        if (node == null) {
          return OptionalLong.empty();
        }
        V value = node.getValue();
        if (value == null) {
          return OptionalLong.empty();
        }
        long now = cache.expirationTicker().read();
        return cache.hasExpired(node, now, value)
            ? OptionalLong.empty()
            : OptionalLong.of(unit.convert(node.getVariableTime() - now, TimeUnit.NANOSECONDS));
      }
      @Override public void setExpiresAfter(K key, long duration, TimeUnit unit) {
        requireNonNull(key);
        requireNonNull(unit);
        requireArgument(duration >= 0);
        Object lookupKey = cache.nodeFactory.newLookupKey(key);
        Node<K, V> node = cache.data.get(lookupKey);
        if (node != null) {
          long now;
          long durationNanos = TimeUnit.NANOSECONDS.convert(duration, unit);
          // Lock the node so the expiration update cannot race a concurrent removal
          synchronized (node) {
            now = cache.expirationTicker().read();
            V value = node.getValue();
            if ((value == null) || cache.isComputingAsync(value)
                || cache.hasExpired(node, now, value)) {
              return;
            }
            node.setVariableTime(now + Math.min(durationNanos, MAXIMUM_EXPIRY));
          }
          // Reorder in the timer wheel without counting this as a cache hit
          cache.afterRead(node, now, /* recordHit= */ false);
        }
      }
      @Override public @Nullable V put(K key, V value, long duration, TimeUnit unit) {
        requireNonNull(unit);
        requireNonNull(value);
        requireArgument(duration >= 0);
        return cache.isAsync
            ? putAsync(key, value, duration, unit)
            : putSync(key, value, duration, unit, /* onlyIfAbsent= */ false);
      }
      @Override public @Nullable V putIfAbsent(K key, V value, long duration, TimeUnit unit) {
        requireNonNull(unit);
        requireNonNull(value);
        requireArgument(duration >= 0);
        return cache.isAsync
            ? putIfAbsentAsync(key, value, duration, unit)
            : putSync(key, value, duration, unit, /* onlyIfAbsent= */ true);
      }
      @Nullable V putSync(K key, V value, long duration, TimeUnit unit, boolean onlyIfAbsent) {
        var expiry = new FixedExpireAfterWrite<K, V>(duration, unit);
        return cache.put(key, value, expiry, onlyIfAbsent);
      }
      @SuppressWarnings("unchecked")
      @Nullable V putIfAbsentAsync(K key, V value, long duration, TimeUnit unit) {
        // Keep in sync with LocalAsyncCache.AsMapView#putIfAbsent(key, value)
        var expiry = (Expiry<K, V>) new AsyncExpiry<>(new FixedExpireAfterWrite<>(duration, unit));
        var asyncValue = (V) CompletableFuture.completedFuture(value);

        // Retry until a settled prior value is observed or this value is inserted
        for (;;) {
          var priorFuture = (CompletableFuture<V>) cache.getIfPresent(
              key, /* recordStats= */ false);
          if (priorFuture != null) {
            if (!priorFuture.isDone()) {
              // Wait for the in-flight computation, then re-check the mapping
              Async.getWhenSuccessful(priorFuture);
              continue;
            }

            V prior = Async.getWhenSuccessful(priorFuture);
            if (prior != null) {
              return prior;
            }
          }

          boolean[] added = { false };
          var computed = (CompletableFuture<V>) cache.compute(key, (K k, @Nullable V oldValue) -> {
            var oldValueFuture = (CompletableFuture<V>) oldValue;
            // Absent, or present but completed with null/failure, counts as absent
            added[0] = (oldValueFuture == null)
                || (oldValueFuture.isDone() && (Async.getIfReady(oldValueFuture) == null));
            return added[0] ? asyncValue : oldValue;
          }, expiry, /* recordLoad= */ false, /* recordLoadFailure= */ false);

          if (added[0]) {
            return null;
          } else {
            V prior = Async.getWhenSuccessful(computed);
            if (prior != null) {
              return prior;
            }
          }
        }
      }
      @SuppressWarnings("unchecked")
      @Nullable V putAsync(K key, V value, long duration, TimeUnit unit) {
        var expiry = (Expiry<K, V>) new AsyncExpiry<>(new FixedExpireAfterWrite<>(duration, unit));
        var asyncValue = (V) CompletableFuture.completedFuture(value);

        var oldValueFuture = (CompletableFuture<V>) cache.put(
            key, asyncValue, expiry, /* onlyIfAbsent= */ false);
        return Async.getWhenSuccessful(oldValueFuture);
      }
      @Override public @Nullable V compute(K key,
          BiFunction<? super K, ? super V, ? extends V> remappingFunction,
          Duration duration) {
        requireNonNull(key);
        requireNonNull(duration);
        requireNonNull(remappingFunction);
        requireArgument(!duration.isNegative(), "duration cannot be negative: %s", duration);
        var expiry = new FixedExpireAfterWrite<K, V>(
            toNanosSaturated(duration), TimeUnit.NANOSECONDS);

        return cache.isAsync
            ? computeAsync(key, remappingFunction, expiry)
            : cache.compute(key, remappingFunction, expiry,
                /* recordLoad= */ true, /* recordLoadFailure= */ true);
      }
      @Nullable V computeAsync(K key,
          BiFunction<? super K, ? super V, ? extends V> remappingFunction,
          Expiry<? super K, ? super V> expiry) {
        // Keep in sync with LocalAsyncCache.AsMapView#compute(key, remappingFunction)
        @SuppressWarnings("unchecked")
        var delegate = (LocalCache<K, CompletableFuture<V>>) cache;

        // Single-element array lets the lambda communicate the computed value outward
        @SuppressWarnings({"rawtypes", "unchecked", "Varifier"})
        @Nullable V[] newValue = (@Nullable V[]) new Object[1];
        for (;;) {
          // Wait for any in-flight future so the remapping sees a settled old value
          Async.getWhenSuccessful(delegate.getIfPresentQuietly(key));

          CompletableFuture<V> valueFuture = delegate.compute(
              key, (K k, @Nullable CompletableFuture<V> oldValueFuture) -> {
                if ((oldValueFuture != null) && !oldValueFuture.isDone()) {
                  // Still computing; keep the mapping and retry in the outer loop
                  return oldValueFuture;
                }

                V oldValue = Async.getIfReady(oldValueFuture);
                BiFunction<? super K, ? super V, ? extends @Nullable V> function =
                    delegate.statsAware(remappingFunction,
                        /* recordLoad= */ true, /* recordLoadFailure= */ true);
                newValue[0] = function.apply(key, oldValue);
                return (newValue[0] == null) ? null
                    : CompletableFuture.completedFuture(newValue[0]);
              }, new AsyncExpiry<>(expiry), /* recordLoad= */ false,
              /* recordLoadFailure= */ false);

          if (newValue[0] != null) {
            return newValue[0];
          } else if (valueFuture == null) {
            return null;
          }
        }
      }
      @Override public Map<K, V> oldest(int limit) {
        return oldest(new SizeLimiter<>(Math.min(limit, cache.size()), limit));
      }
      @Override public <T> T oldest(Function<Stream<CacheEntry<K, V>>, T> mappingFunction) {
        return cache.snapshot(cache.timerWheel(), transformer, mappingFunction);
      }
      @Override public Map<K, V> youngest(int limit) {
        return youngest(new SizeLimiter<>(Math.min(limit, cache.size()), limit));
      }
      @Override public <T> T youngest(Function<Stream<CacheEntry<K, V>>, T> mappingFunction) {
        return cache.snapshot(cache.timerWheel()::descendingIterator, transformer, mappingFunction);
      }
    }

    /** An expiry that applies a fixed write duration and preserves the duration on reads. */
    static final class FixedExpireAfterWrite<K, V> implements Expiry<K, V> {
      final long duration;
      final TimeUnit unit;

      FixedExpireAfterWrite(long duration, TimeUnit unit) {
        this.duration = duration;
        this.unit = unit;
      }
      @Override public long expireAfterCreate(K key, V value, long currentTime) {
        return unit.toNanos(duration);
      }
      @Override public long expireAfterUpdate(
          K key, V value, long currentTime, long currentDuration) {
        return unit.toNanos(duration);
      }
      @CanIgnoreReturnValue
      @Override public long expireAfterRead(
          K key, V value, long currentTime, long currentDuration) {
        // Reads do not extend the entry's lifetime
        return currentDuration;
      }
    }

    /** The fixed refresh-after-write policy view. */
    @SuppressWarnings("PreferJavaTimeOverload")
    final class BoundedRefreshAfterWrite implements FixedRefresh<K, V> {
      @Override public OptionalLong ageOf(K key, TimeUnit unit) {
        requireNonNull(key);
        requireNonNull(unit);
        Object lookupKey = cache.nodeFactory.newLookupKey(key);
        Node<K, V> node = cache.data.get(lookupKey);
        if (node == null) {
          return OptionalLong.empty();
        }
        V value = node.getValue();
        if (value == null) {
          return OptionalLong.empty();
        }
        long now = cache.expirationTicker().read();
        // NOTE(review): low bit masked as in BoundedExpireAfterWrite.ageOf — confirm semantics
        return cache.hasExpired(node, now, value)
            ? OptionalLong.empty()
            : OptionalLong.of(unit.convert(
                (now & ~1L) - (node.getWriteTime() & ~1L), TimeUnit.NANOSECONDS));
      }
      @Override public long getRefreshesAfter(TimeUnit unit) {
        return unit.convert(cache.refreshAfterWriteNanos(), TimeUnit.NANOSECONDS);
      }
      @Override public void setRefreshesAfter(long duration, TimeUnit unit) {
        requireArgument(duration >= 0);
        cache.setRefreshAfterWriteNanos(unit.toNanos(duration));
        cache.scheduleAfterWrite();
      }
    }
  }
4579

4580
  /* --------------- Loading Cache --------------- */
4581

4582
  /** A loading cache backed by a bounded local cache. */
  static final class BoundedLocalLoadingCache<K, V>
      extends BoundedLocalManualCache<K, V> implements LocalLoadingCache<K, V> {
    private static final long serialVersionUID = 1;

    // Loader adapters precomputed once so lookups avoid per-call wrapping
    final Function<K, @Nullable V> mappingFunction;
    final @Nullable Function<Set<? extends K>, Map<K, V>> bulkMappingFunction;

    BoundedLocalLoadingCache(Caffeine<K, V> builder, CacheLoader<? super K, V> loader) {
      super(builder, loader);
      requireNonNull(loader);
      mappingFunction = newMappingFunction(loader);
      // Null when the loader does not override loadAll-style bulk loading
      bulkMappingFunction = newBulkMappingFunction(loader);
    }

    @Override
    @SuppressWarnings({"DataFlowIssue", "NullAway"})
    public AsyncCacheLoader<? super K, V> cacheLoader() {
      return cache.cacheLoader;
    }

    @Override
    public Function<K, @Nullable V> mappingFunction() {
      return mappingFunction;
    }

    @Override
    public @Nullable Function<Set<? extends K>, Map<K, V>> bulkMappingFunction() {
      return bulkMappingFunction;
    }

    // Serialization goes through the proxy; direct deserialization is rejected
    private void readObject(ObjectInputStream stream) throws InvalidObjectException {
      throw new InvalidObjectException("Proxy required");
    }

    private Object writeReplace() {
      return makeSerializationProxy(cache);
    }
  }
4620

4621
  /* --------------- Async Cache --------------- */
4622

4623
  /** An asynchronous cache backed by a bounded local cache that stores futures of values. */
  static final class BoundedLocalAsyncCache<K, V> implements LocalAsyncCache<K, V>, Serializable {
    private static final long serialVersionUID = 1;

    final BoundedLocalCache<K, CompletableFuture<V>> cache;
    final boolean isWeighted;

    // Lazily created adapter views; unsynchronized lazy init, concurrent callers may
    // each create an equivalent instance (NOTE(review): confirm benign-race intent)
    @Nullable ConcurrentMap<K, CompletableFuture<V>> mapView;
    @Nullable CacheView<K, V> cacheView;
    @Nullable Policy<K, V> policy;

    @SuppressWarnings("unchecked")
    BoundedLocalAsyncCache(Caffeine<K, V> builder) {
      cache = (BoundedLocalCache<K, CompletableFuture<V>>) LocalCacheFactory
          .newBoundedLocalCache(builder, /* cacheLoader= */ null, /* isAsync= */ true);
      isWeighted = builder.isWeighted();
    }

    @Override
    public BoundedLocalCache<K, CompletableFuture<V>> cache() {
      return cache;
    }

    @Override
    public ConcurrentMap<K, CompletableFuture<V>> asMap() {
      return (mapView == null) ? (mapView = new AsyncAsMapView<>(this)) : mapView;
    }

    @Override
    public Cache<K, V> synchronous() {
      return (cacheView == null) ? (cacheView = new CacheView<>(this)) : cacheView;
    }

    @Override
    public Policy<K, V> policy() {
      if (policy == null) {
        // The policy exposes plain values, so unwrap completed futures on the way out
        @SuppressWarnings("unchecked")
        var castCache = (BoundedLocalCache<K, V>) cache;
        Function<CompletableFuture<V>, @Nullable V> transformer = Async::getIfReady;
        @SuppressWarnings("unchecked")
        var castTransformer = (Function<@Nullable V, @Nullable V>) transformer;
        policy = new BoundedPolicy<>(castCache, castTransformer, isWeighted);
      }
      return policy;
    }

    // Serialization goes through the proxy; direct deserialization is rejected
    private void readObject(ObjectInputStream stream) throws InvalidObjectException {
      throw new InvalidObjectException("Proxy required");
    }

    private Object writeReplace() {
      return makeSerializationProxy(cache);
    }
  }
4676

4677
  /* --------------- Async Loading Cache --------------- */
4678

4679
  /** An asynchronous loading cache backed by a bounded local cache that stores futures. */
  static final class BoundedLocalAsyncLoadingCache<K, V>
      extends LocalAsyncLoadingCache<K, V> implements Serializable {
    private static final long serialVersionUID = 1;

    final BoundedLocalCache<K, CompletableFuture<V>> cache;
    final boolean isWeighted;

    // Lazily created adapter views; unsynchronized lazy init, concurrent callers may
    // each create an equivalent instance (NOTE(review): confirm benign-race intent)
    @Nullable ConcurrentMap<K, CompletableFuture<V>> mapView;
    @Nullable Policy<K, V> policy;

    @SuppressWarnings("unchecked")
    BoundedLocalAsyncLoadingCache(Caffeine<K, V> builder, AsyncCacheLoader<? super K, V> loader) {
      super(loader);
      isWeighted = builder.isWeighted();
      cache = (BoundedLocalCache<K, CompletableFuture<V>>) LocalCacheFactory
          .newBoundedLocalCache(builder, loader, /* isAsync= */ true);
    }

    @Override
    public BoundedLocalCache<K, CompletableFuture<V>> cache() {
      return cache;
    }

    @Override
    public ConcurrentMap<K, CompletableFuture<V>> asMap() {
      return (mapView == null) ? (mapView = new AsyncAsMapView<>(this)) : mapView;
    }

    @Override
    public Policy<K, V> policy() {
      if (policy == null) {
        // The policy exposes plain values, so unwrap completed futures on the way out
        @SuppressWarnings("unchecked")
        var castCache = (BoundedLocalCache<K, V>) cache;
        Function<CompletableFuture<V>, @Nullable V> transformer = Async::getIfReady;
        @SuppressWarnings("unchecked")
        var castTransformer = (Function<@Nullable V, @Nullable V>) transformer;
        policy = new BoundedPolicy<>(castCache, castTransformer, isWeighted);
      }
      return policy;
    }

    // Serialization goes through the proxy; direct deserialization is rejected
    private void readObject(ObjectInputStream stream) throws InvalidObjectException {
      throw new InvalidObjectException("Proxy required");
    }

    private Object writeReplace() {
      return makeSerializationProxy(cache);
    }
  }
4728
}
4729

4730
/** The namespace for field padding through inheritance. */
@SuppressWarnings({"IdentifierName", "MultiVariableDeclaration"})
final class BLCHeader {

  private BLCHeader() {}

  /**
   * 120 bytes of unused padding inherited by {@link DrainStatusRef} so that its
   * {@code drainStatus} field does not share a cache line with preceding object state.
   */
  @SuppressWarnings("unused")
  static class PadDrainStatus {
    byte p000, p001, p002, p003, p004, p005, p006, p007;
    byte p008, p009, p010, p011, p012, p013, p014, p015;
    byte p016, p017, p018, p019, p020, p021, p022, p023;
    byte p024, p025, p026, p027, p028, p029, p030, p031;
    byte p032, p033, p034, p035, p036, p037, p038, p039;
    byte p040, p041, p042, p043, p044, p045, p046, p047;
    byte p048, p049, p050, p051, p052, p053, p054, p055;
    byte p056, p057, p058, p059, p060, p061, p062, p063;
    byte p064, p065, p066, p067, p068, p069, p070, p071;
    byte p072, p073, p074, p075, p076, p077, p078, p079;
    byte p080, p081, p082, p083, p084, p085, p086, p087;
    byte p088, p089, p090, p091, p092, p093, p094, p095;
    byte p096, p097, p098, p099, p100, p101, p102, p103;
    byte p104, p105, p106, p107, p108, p109, p110, p111;
    byte p112, p113, p114, p115, p116, p117, p118, p119;
  }

  /** Enforces a memory layout to avoid false sharing by padding the drain status. */
  abstract static class DrainStatusRef extends PadDrainStatus {
    // Handle for atomic and relaxed accesses to the drainStatus field below.
    static final VarHandle DRAIN_STATUS = findVarHandle(
        DrainStatusRef.class, "drainStatus", int.class);

    /** A drain is not taking place. */
    static final int IDLE = 0;
    /** A drain is required due to a pending write modification. */
    static final int REQUIRED = 1;
    /** A drain is in progress and will transition to idle. */
    static final int PROCESSING_TO_IDLE = 2;
    /** A drain is in progress and will transition to required. */
    static final int PROCESSING_TO_REQUIRED = 3;

    /** The draining status of the buffers. */
    volatile int drainStatus = IDLE;

    /**
     * Returns whether maintenance work is needed.
     *
     * @param delayable if draining the read buffer can be delayed
     */
    @SuppressWarnings("StatementSwitchToExpressionSwitch")
    boolean shouldDrainBuffers(boolean delayable) {
      switch (drainStatusOpaque()) {
        case IDLE:
          // No pending writes; drain only if the caller cannot tolerate a delay.
          return !delayable;
        case REQUIRED:
          return true;
        case PROCESSING_TO_IDLE:
        case PROCESSING_TO_REQUIRED:
          // Another thread is already draining; piggyback on its work.
          return false;
        default:
          throw new IllegalStateException("Invalid drain status: " + drainStatus);
      }
    }

    /** Returns the drain status using opaque (unordered, per-variable coherent) semantics. */
    int drainStatusOpaque() {
      return (int) DRAIN_STATUS.getOpaque(this);
    }

    /** Returns the drain status using acquire semantics. */
    int drainStatusAcquire() {
      return (int) DRAIN_STATUS.getAcquire(this);
    }

    /** Sets the drain status using opaque semantics. */
    void setDrainStatusOpaque(int drainStatus) {
      DRAIN_STATUS.setOpaque(this, drainStatus);
    }

    /** Sets the drain status using release semantics. */
    void setDrainStatusRelease(int drainStatus) {
      DRAIN_STATUS.setRelease(this, drainStatus);
    }

    /** Atomically transitions the drain status from the expected value to the update. */
    boolean casDrainStatus(int expect, int update) {
      return DRAIN_STATUS.compareAndSet(this, expect, update);
    }

    /**
     * Resolves the {@link VarHandle} for the named field declared on the receiver class,
     * rethrowing reflective failures as an initializer error (this runs during class init).
     */
    static VarHandle findVarHandle(Class<?> recv, String name, Class<?> type) {
      try {
        return MethodHandles.lookup().findVarHandle(recv, name, type);
      } catch (ReflectiveOperationException e) {
        throw new ExceptionInInitializerError(e);
      }
    }
  }
}
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc