• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

ben-manes / caffeine / #5336

16 Mar 2026 05:38PM UTC coverage: 99.823% (-0.01%) from 99.836%
#5336

push

github

ben-manes
Add coverage gap tests for expiry exceptions, async completion, and bulk load

Test scenarios that were previously uncovered:
- Expiry.expireAfterCreate throwing on expired entry during compute (catch-
  commit-rethrow pattern with variable expiration and weighted eviction stats)
- Mapping function throwing on expired entry during compute (eviction committed
  with correct old weight in stats, EXPIRED removal cause)
- Compute on expired entry where remapping returns same object instance
  (setValue skip optimization, EXPIRED notification, entry survives)
- AsyncExpiry delegating to user's expireAfterCreate on future completion when
  the user callback throws (entry removed, load failure logged)
- Bulk loadAll with mid-iteration weigher failure (partial put, stats)
- replace() on expired entry scheduling cleanup for deferred eviction

3857 of 3872 branches covered (99.61%)

7898 of 7912 relevant lines covered (99.82%)

1.0 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

99.68
/caffeine/src/main/java/com/github/benmanes/caffeine/cache/BoundedLocalCache.java
1
/*
2
 * Copyright 2014 Ben Manes. All Rights Reserved.
3
 *
4
 * Licensed under the Apache License, Version 2.0 (the "License");
5
 * you may not use this file except in compliance with the License.
6
 * You may obtain a copy of the License at
7
 *
8
 *     http://www.apache.org/licenses/LICENSE-2.0
9
 *
10
 * Unless required by applicable law or agreed to in writing, software
11
 * distributed under the License is distributed on an "AS IS" BASIS,
12
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
 * See the License for the specific language governing permissions and
14
 * limitations under the License.
15
 */
16
package com.github.benmanes.caffeine.cache;
17

18
import static com.github.benmanes.caffeine.cache.Async.ASYNC_EXPIRY;
19
import static com.github.benmanes.caffeine.cache.Caffeine.calculateHashMapCapacity;
20
import static com.github.benmanes.caffeine.cache.Caffeine.ceilingPowerOfTwo;
21
import static com.github.benmanes.caffeine.cache.Caffeine.requireArgument;
22
import static com.github.benmanes.caffeine.cache.Caffeine.toNanosSaturated;
23
import static com.github.benmanes.caffeine.cache.LocalLoadingCache.newBulkMappingFunction;
24
import static com.github.benmanes.caffeine.cache.LocalLoadingCache.newMappingFunction;
25
import static com.github.benmanes.caffeine.cache.Node.PROBATION;
26
import static com.github.benmanes.caffeine.cache.Node.PROTECTED;
27
import static com.github.benmanes.caffeine.cache.Node.WINDOW;
28
import static java.util.Locale.US;
29
import static java.util.Objects.requireNonNull;
30
import static java.util.Spliterator.DISTINCT;
31
import static java.util.Spliterator.IMMUTABLE;
32
import static java.util.Spliterator.NONNULL;
33
import static java.util.Spliterator.ORDERED;
34

35
import java.io.InvalidObjectException;
36
import java.io.ObjectInputStream;
37
import java.io.Serializable;
38
import java.lang.System.Logger;
39
import java.lang.System.Logger.Level;
40
import java.lang.invoke.MethodHandles;
41
import java.lang.invoke.VarHandle;
42
import java.lang.ref.Reference;
43
import java.lang.ref.ReferenceQueue;
44
import java.lang.ref.WeakReference;
45
import java.time.Duration;
46
import java.util.AbstractCollection;
47
import java.util.AbstractSet;
48
import java.util.ArrayDeque;
49
import java.util.Collection;
50
import java.util.Collections;
51
import java.util.Comparator;
52
import java.util.Deque;
53
import java.util.HashMap;
54
import java.util.IdentityHashMap;
55
import java.util.Iterator;
56
import java.util.LinkedHashMap;
57
import java.util.Map;
58
import java.util.NoSuchElementException;
59
import java.util.Objects;
60
import java.util.Optional;
61
import java.util.OptionalInt;
62
import java.util.OptionalLong;
63
import java.util.Set;
64
import java.util.Spliterator;
65
import java.util.Spliterators;
66
import java.util.concurrent.CancellationException;
67
import java.util.concurrent.CompletableFuture;
68
import java.util.concurrent.CompletionException;
69
import java.util.concurrent.ConcurrentHashMap;
70
import java.util.concurrent.ConcurrentMap;
71
import java.util.concurrent.Executor;
72
import java.util.concurrent.ForkJoinPool;
73
import java.util.concurrent.ForkJoinTask;
74
import java.util.concurrent.ThreadLocalRandom;
75
import java.util.concurrent.TimeUnit;
76
import java.util.concurrent.TimeoutException;
77
import java.util.concurrent.locks.ReentrantLock;
78
import java.util.function.BiConsumer;
79
import java.util.function.BiFunction;
80
import java.util.function.Consumer;
81
import java.util.function.Function;
82
import java.util.function.Predicate;
83
import java.util.stream.Stream;
84
import java.util.stream.StreamSupport;
85

86
import org.jspecify.annotations.NonNull;
87
import org.jspecify.annotations.Nullable;
88

89
import com.github.benmanes.caffeine.cache.Async.AsyncExpiry;
90
import com.github.benmanes.caffeine.cache.LinkedDeque.PeekingIterator;
91
import com.github.benmanes.caffeine.cache.Policy.CacheEntry;
92
import com.github.benmanes.caffeine.cache.References.InternalReference;
93
import com.github.benmanes.caffeine.cache.stats.StatsCounter;
94
import com.google.errorprone.annotations.CanIgnoreReturnValue;
95
import com.google.errorprone.annotations.Var;
96
import com.google.errorprone.annotations.concurrent.GuardedBy;
97

98
/**
99
 * An in-memory cache implementation that supports full concurrency of retrievals, a high expected
100
 * concurrency for updates, and multiple ways to bound the cache.
101
 * <p>
102
 * This class is abstract and code generated subclasses provide the complete implementation for a
103
 * particular configuration. This is to ensure that only the fields and execution paths necessary
104
 * for a given configuration are used.
105
 *
106
 * @author ben.manes@gmail.com (Ben Manes)
107
 * @param <K> the type of keys maintained by this cache
108
 * @param <V> the type of mapped values
109
 */
110
@SuppressWarnings({"RedundantSuppression", "ResultOfMethodCallIgnored", "serial", "unused"})
111
abstract class BoundedLocalCache<K, V> extends BLCHeader.DrainStatusRef
112
    implements LocalCache<K, V> {
113

114
  /*
115
   * This class performs a best-effort bounding of a ConcurrentHashMap using a page-replacement
116
   * algorithm to determine which entries to evict when the capacity is exceeded.
117
   *
118
   * Concurrency:
119
   * ------------
120
   * The page replacement algorithms are kept eventually consistent with the map. An update to the
121
   * map and recording of reads may not be immediately reflected in the policy's data structures.
122
   * These structures are guarded by a lock, and operations are applied in batches to avoid lock
123
   * contention. The penalty of applying the batches is spread across threads, so that the amortized
124
   * cost is slightly higher than performing just the ConcurrentHashMap operation [1].
125
   *
126
   * A memento of the reads and writes that were performed on the map is recorded in buffers. These
127
   * buffers are drained at the first opportunity after a write or when a read buffer is full. The
128
   * reads are offered to a buffer that will reject additions if contended on or if it is full. Due
129
   * to the concurrent nature of the read and write operations, a strict policy ordering is not
130
   * possible, but it may be observably strict when single-threaded. The buffers are drained
131
   * asynchronously to minimize the request latency and uses a state machine to determine when to
132
   * schedule this work on an executor.
133
   *
134
   * Due to a lack of a strict ordering guarantee, a task can be executed out-of-order, such as a
135
   * removal followed by its addition. The state of the entry is encoded using the key field to
136
   * avoid additional memory usage. An entry is "alive" if it is in both the hash table and the page
137
   * replacement policy. It is "retired" if it is not in the hash table and is pending removal from
138
   * the page replacement policy. Finally, an entry transitions to the "dead" state when it is
139
   * neither in the hash table nor the page replacement policy. Both the retired and dead states are
140
   * represented by a sentinel key that should not be used for map operations.
141
   *
142
   * Eviction:
143
   * ---------
144
   * Maximum size is implemented using the Window TinyLfu policy [2] due to its high hit rate, O(1)
145
   * time complexity, and small footprint. A new entry starts in the admission window and remains
146
   * there as long as it has high temporal locality (recency). Eventually an entry will slip from
147
   * the window into the main space. If the main space is already full, then a historic frequency
148
   * filter determines whether to evict the newly admitted entry or the victim entry chosen by the
149
   * eviction policy. This process ensures that the entries in the window were very recently used,
150
   * while entries in the main space are accessed very frequently and remain moderately recent. The
151
   * windowing allows the policy to have a high hit rate when entries exhibit a bursty access
152
   * pattern, while the filter ensures that popular items are retained. The admission window uses
153
   * LRU and the main space uses Segmented LRU.
154
   *
155
   * The optimal size of the window vs. main spaces is workload dependent [3]. A large admission
156
   * window is favored by recency-biased workloads, while a small one favors frequency-biased
157
   * workloads. When the window is too small, then recent arrivals are prematurely evicted, but when
158
   * it is too large, then they pollute the cache and force the eviction of more popular entries.
159
   * The optimal configuration is dynamically determined by using hill climbing to walk the hit rate
160
   * curve. This is achieved by sampling the hit rate and adjusting the window size in the direction
161
   * that is improving (making positive or negative steps). At each interval, the step size is
162
   * decreased until the hit rate climber converges at the optimal setting. The process is restarted
163
   * when the hit rate changes over a threshold, indicating that the workload altered, and a new
164
   * setting may be required.
165
   *
166
   * The historic usage is retained in a compact popularity sketch, which uses hashing to
167
   * probabilistically estimate an item's frequency. This exposes a flaw where an adversary could
168
   * use hash flooding [4] to artificially raise the frequency of the main space's victim and cause
169
   * all candidates to be rejected. In the worst case, by exploiting hash collisions, an attacker
170
   * could cause the cache to never hit and hold only worthless items, resulting in a
171
   * denial-of-service attack against the underlying resource. This is mitigated by introducing
172
   * jitter, allowing candidates that are at least moderately popular to have a small, random chance
173
   * of being admitted. This causes the victim to be evicted, but in a way that marginally impacts
174
   * the hit rate.
175
   *
176
   * Expiration:
177
   * -----------
178
   * Expiration is implemented in O(1) time complexity. The time-to-idle policy uses an access-order
179
   * queue, the time-to-live policy uses a write-order queue, and variable expiration uses a
180
   * hierarchical timer wheel [5]. The queuing policies allow for peeking at the oldest entry to
181
   * determine if it has expired. If it has not, then the younger entries must not have expired
182
   * either. If a maximum size is set, then expiration will share the queues, minimizing the
183
   * per-entry footprint. The timer wheel based policy uses hashing and cascading in a manner that
184
   * amortizes the penalty of sorting to achieve a similar algorithmic cost.
185
   *
186
   * The expiration updates are applied in a best effort fashion. The reordering of variable or
187
   * access-order expiration may be discarded by the read buffer if it is full or contended.
188
   * Similarly, the reordering of write expiration may be ignored for an entry if the last update
189
   * was within a short time window. This is done to avoid overwhelming the write buffer.
190
   *
191
   * [1] BP-Wrapper: A Framework Making Any Replacement Algorithms (Almost) Lock Contention Free
192
   * https://web.njit.edu/~dingxn/papers/BP-Wrapper.pdf
193
   * [2] TinyLFU: A Highly Efficient Cache Admission Policy
194
   * https://dl.acm.org/citation.cfm?id=3149371
195
   * [3] Adaptive Software Cache Management
196
   * https://dl.acm.org/citation.cfm?id=3274816
197
   * [4] Denial of Service via Algorithmic Complexity Attack
198
   * https://www.usenix.org/legacy/events/sec03/tech/full_papers/crosby/crosby.pdf
199
   * [5] Hashed and Hierarchical Timing Wheels
200
   * http://www.cs.columbia.edu/~nahum/w6998/papers/ton97-timing-wheels.pdf
201
   */
202

203
  static final Logger logger = System.getLogger(BoundedLocalCache.class.getName());
1✔
204

205
  /** The number of CPUs */
206
  static final int NCPU = Runtime.getRuntime().availableProcessors();
1✔
207
  /** The initial capacity of the write buffer. */
208
  static final int WRITE_BUFFER_MIN = 4;
209
  /** The maximum capacity of the write buffer. */
210
  static final int WRITE_BUFFER_MAX = 128 * ceilingPowerOfTwo(NCPU);
1✔
211
  /** The number of attempts to insert into the write buffer before yielding. */
212
  static final int WRITE_BUFFER_RETRIES = 100;
213
  /** The maximum weighted capacity of the map. */
214
  static final long MAXIMUM_CAPACITY = Long.MAX_VALUE - Integer.MAX_VALUE;
215
  /** The initial percent of the maximum weighted capacity dedicated to the main space. */
216
  static final double PERCENT_MAIN = 0.99d;
217
  /** The percent of the maximum weighted capacity dedicated to the main's protected space. */
218
  static final double PERCENT_MAIN_PROTECTED = 0.80d;
219
  /** The difference in hit rates that restarts the climber. */
220
  static final double HILL_CLIMBER_RESTART_THRESHOLD = 0.05d;
221
  /** The percent of the total size to adapt the window by. */
222
  static final double HILL_CLIMBER_STEP_PERCENT = 0.0625d;
223
  /** The rate to decrease the step size to adapt by. */
224
  static final double HILL_CLIMBER_STEP_DECAY_RATE = 0.98d;
225
  /** The minimum popularity for allowing randomized admission. */
226
  static final int ADMIT_HASHDOS_THRESHOLD = 6;
227
  /** The maximum number of entries that can be transferred between queues. */
228
  static final int QUEUE_TRANSFER_THRESHOLD = 1_000;
229
  /** The maximum time window between entry updates before the expiration must be reordered. */
230
  static final long EXPIRE_WRITE_TOLERANCE = TimeUnit.SECONDS.toNanos(1);
1✔
231
  /** The maximum duration before an entry expires. */
232
  static final long MAXIMUM_EXPIRY = (Long.MAX_VALUE >> 1); // 150 years
233
  /** The duration to wait on the eviction lock before warning of a possible misuse. */
234
  static final long WARN_AFTER_LOCK_WAIT_NANOS = TimeUnit.SECONDS.toNanos(30);
1✔
235
  /** The number of retries before computing to validate the entry's integrity; pow2 modulus. */
236
  static final int MAX_PUT_SPIN_WAIT_ATTEMPTS = 1024 - 1;
237
  /** The handle for the in-flight refresh operations. */
238
  static final VarHandle REFRESHES = findVarHandle(
1✔
239
      BoundedLocalCache.class, "refreshes", ConcurrentMap.class);
240

241
  final @Nullable RemovalListener<K, V> evictionListener;
242
  final @Nullable AsyncCacheLoader<K, V> cacheLoader;
243

244
  final MpscGrowableArrayQueue<Runnable> writeBuffer;
245
  final ConcurrentHashMap<Object, Node<K, V>> data;
246
  final PerformCleanupTask drainBuffersTask;
247
  final Consumer<Node<K, V>> accessPolicy;
248
  final Buffer<Node<K, V>> readBuffer;
249
  final NodeFactory<K, V> nodeFactory;
250
  final ReentrantLock evictionLock;
251
  final Weigher<K, V> weigher;
252
  final Executor executor;
253

254
  final boolean isWeighted;
255
  final boolean isAsync;
256

257
  @Nullable Set<K> keySet;
258
  @Nullable Collection<V> values;
259
  @Nullable Set<Entry<K, V>> entrySet;
260
  volatile @Nullable ConcurrentMap<Object, CompletableFuture<?>> refreshes;
261

262
  /** Creates an instance based on the builder's configuration. */
  @SuppressWarnings("GuardedBy")
  protected BoundedLocalCache(Caffeine<K, V> builder,
      @Nullable AsyncCacheLoader<K, V> cacheLoader, boolean isAsync) {
    this.isAsync = isAsync;
    this.cacheLoader = cacheLoader;
    executor = builder.getExecutor();
    isWeighted = builder.isWeighted();
    evictionLock = new ReentrantLock();
    weigher = builder.getWeigher(isAsync);
    drainBuffersTask = new PerformCleanupTask(this);
    nodeFactory = NodeFactory.newFactory(builder, isAsync);
    evictionListener = builder.getEvictionListener(isAsync);
    data = new ConcurrentHashMap<>(builder.getInitialCapacity());
    // The read buffer records policy-relevant accesses; it is only needed when reads can
    // influence the cache state (size eviction, reference collection, or time-to-idle)
    readBuffer = evicts() || collectKeys() || collectValues() || expiresAfterAccess()
        ? new BoundedBuffer<>()
        : Buffer.disabled();
    // Reads notify the eviction policy only when a size bound or access-order expiry is used
    accessPolicy = (evicts() || expiresAfterAccess()) ? this::onAccess : e -> {};
    writeBuffer = new MpscGrowableArrayQueue<>(WRITE_BUFFER_MIN, WRITE_BUFFER_MAX);

    if (evicts()) {
      setMaximumSize(builder.getMaximum());
    }
  }
1✔
286

287
  /** Ensures that the node is alive during the map operation. */
288
  void requireIsAlive(Object key, Node<?, ?> node) {
289
    if (!node.isAlive()) {
1✔
290
      throw new IllegalStateException(brokenEqualityMessage(key, node));
1✔
291
    }
292
  }
1✔
293

294
  /** Logs if the node cannot be found in the map but is still alive. */
295
  void logIfAlive(Node<?, ?> node) {
296
    if (node.isAlive()) {
1✔
297
      String message = brokenEqualityMessage(node.getKeyReference(), node);
1✔
298
      logger.log(Level.ERROR, message, new IllegalStateException());
1✔
299
    }
300
  }
1✔
301

302
  /** Returns the formatted broken equality error message. */
303
  String brokenEqualityMessage(Object key, Node<?, ?> node) {
304
    return String.format(US, "An invalid state was detected, occurring when the key's equals or "
1✔
305
        + "hashCode was modified while residing in the cache. This violation of the Map "
306
        + "contract can lead to non-deterministic behavior (key: %s, key type: %s, "
307
        + "node type: %s, cache type: %s).", key, key.getClass().getName(),
1✔
308
        node.getClass().getSimpleName(), getClass().getSimpleName());
1✔
309
  }
310

311
  /** Throws the exception, wrapping checked exceptions in a {@link CompletionException}. */
312
  static void throwException(Throwable t) {
313
    if (t instanceof RuntimeException) {
1!
314
      throw (RuntimeException) t;
1✔
315
    } else if (t instanceof Error) {
×
316
      throw (Error) t;
×
317
    }
318
    throw new CompletionException(t);
×
319
  }
320

321
  /* --------------- Shared --------------- */
322

323
  @Override
  public boolean isAsync() {
    // Set at construction; when true the cache stores CompletableFuture values on behalf
    // of an async facade (see isComputingAsync)
    return isAsync;
  }
327

328
  /** Returns if the node's value is currently being computed asynchronously. */
  final boolean isComputingAsync(@Nullable V value) {
    // Only async caches hold futures as values, so the cast is safe when isAsync is true;
    // an in-flight (not yet ready) future means the value is still being computed
    return isAsync && !Async.isReady((CompletableFuture<?>) value);
  }
332

333
  @GuardedBy("evictionLock")
334
  protected AccessOrderDeque<Node<K, V>> accessOrderWindowDeque() {
335
    throw new UnsupportedOperationException();
1✔
336
  }
337

338
  @GuardedBy("evictionLock")
339
  protected AccessOrderDeque<Node<K, V>> accessOrderProbationDeque() {
340
    throw new UnsupportedOperationException();
1✔
341
  }
342

343
  @GuardedBy("evictionLock")
344
  protected AccessOrderDeque<Node<K, V>> accessOrderProtectedDeque() {
345
    throw new UnsupportedOperationException();
1✔
346
  }
347

348
  @GuardedBy("evictionLock")
349
  protected WriteOrderDeque<Node<K, V>> writeOrderDeque() {
350
    throw new UnsupportedOperationException();
1✔
351
  }
352

353
  @Override
354
  public final Executor executor() {
355
    return executor;
1✔
356
  }
357

358
  @Override
359
  public ConcurrentMap<Object, CompletableFuture<?>> refreshes() {
360
    @Var var pending = refreshes;
1✔
361
    if (pending == null) {
1✔
362
      pending = new ConcurrentHashMap<>();
1✔
363
      if (!REFRESHES.compareAndSet(this, null, pending)) {
1✔
364
        pending = requireNonNull(refreshes);
×
365
      }
366
    }
367
    return pending;
1✔
368
  }
369

370
  /** Invalidate the in-flight refresh. */
  @SuppressWarnings("RedundantCollectionOperation")
  void discardRefresh(Object keyReference) {
    var pending = refreshes;
    // NOTE(review): the containsKey guard before remove is deliberate (hence the
    // suppression above) — presumably to avoid the cost of a mutating remove call in the
    // common case where no refresh is in flight for this key; confirm against
    // ConcurrentHashMap's remove behavior before "simplifying" it away.
    if ((pending != null) && pending.containsKey(keyReference)) {
      pending.remove(keyReference);
    }
  }
1✔
378

379
  @Override
  public Object referenceKey(K key) {
    // Delegates to the node factory so the lookup key matches the configured key
    // reference strength (e.g. a reference wrapper when keys are collected)
    return nodeFactory.newLookupKey(key);
  }
383

384
  @Override
385
  public boolean isPendingEviction(K key) {
386
    Node<K, V> node = data.get(nodeFactory.newLookupKey(key));
1✔
387
    return (node != null)
1✔
388
        && ((node.getValue() == null) || hasExpired(node, expirationTicker().read()));
1✔
389
  }
390

391
  /* --------------- Stats Support --------------- */
392

393
  @Override
394
  public boolean isRecordingStats() {
395
    return false;
1✔
396
  }
397

398
  @Override
399
  public StatsCounter statsCounter() {
400
    return StatsCounter.disabledStatsCounter();
1✔
401
  }
402

403
  @Override
404
  public Ticker statsTicker() {
405
    return Ticker.disabledTicker();
1✔
406
  }
407

408
  /* --------------- Removal Listener Support --------------- */
409

410
  protected @Nullable RemovalListener<K, V> removalListener() {
411
    return null;
1✔
412
  }
413

414
  @Override
415
  public void notifyRemoval(@Nullable K key, @Nullable V value, RemovalCause cause) {
416
    var removalListener = removalListener();
1✔
417
    if (removalListener == null) {
1✔
418
      return;
1✔
419
    }
420
    Runnable task = () -> {
1✔
421
      try {
422
        removalListener.onRemoval(key, value, cause);
1✔
423
      } catch (Throwable t) {
1✔
424
        logger.log(Level.WARNING, "Exception thrown by removal listener", t);
1✔
425
      }
1✔
426
    };
1✔
427
    try {
428
      executor.execute(task);
1✔
429
    } catch (Throwable t) {
1✔
430
      logger.log(Level.ERROR, "Exception thrown when submitting removal listener", t);
1✔
431
      task.run();
1✔
432
    }
1✔
433
  }
1✔
434

435
  /* --------------- Eviction Listener Support --------------- */
436

437
  void notifyEviction(@Nullable K key, @Nullable V value, RemovalCause cause) {
438
    if (evictionListener == null) {
1✔
439
      return;
1✔
440
    }
441
    try {
442
      evictionListener.onRemoval(key, value, cause);
1✔
443
    } catch (Throwable t) {
1✔
444
      logger.log(Level.WARNING, "Exception thrown by eviction listener", t);
1✔
445
    }
1✔
446
  }
1✔
447

448
  /* --------------- Reference Support --------------- */
449

450
  /** Returns if the keys are weak reference garbage collected. */
451
  protected boolean collectKeys() {
452
    return false;
1✔
453
  }
454

455
  /** Returns if the values are weak or soft reference garbage collected. */
456
  protected boolean collectValues() {
457
    return false;
1✔
458
  }
459

460
  @SuppressWarnings({"DataFlowIssue", "NullAway"})
461
  protected ReferenceQueue<K> keyReferenceQueue() {
462
    return null;
1✔
463
  }
464

465
  @SuppressWarnings({"DataFlowIssue", "NullAway"})
466
  protected ReferenceQueue<V> valueReferenceQueue() {
467
    return null;
1✔
468
  }
469

470
  /* --------------- Expiration Support --------------- */
471

472
  /** Returns the {@link Pacer} used to schedule the maintenance task. */
473
  protected @Nullable Pacer pacer() {
474
    return null;
1✔
475
  }
476

477
  /** Returns if the cache expires entries after a variable time threshold. */
478
  protected boolean expiresVariable() {
479
    return false;
1✔
480
  }
481

482
  /** Returns if the cache expires entries after an access time threshold. */
483
  protected boolean expiresAfterAccess() {
484
    return false;
1✔
485
  }
486

487
  /** Returns how long after the last access to an entry the map will retain that entry. */
488
  protected long expiresAfterAccessNanos() {
489
    throw new UnsupportedOperationException();
1✔
490
  }
491

492
  protected void setExpiresAfterAccessNanos(long expireAfterAccessNanos) {
493
    throw new UnsupportedOperationException();
1✔
494
  }
495

496
  /** Returns if the cache expires entries after a write time threshold. */
497
  protected boolean expiresAfterWrite() {
498
    return false;
1✔
499
  }
500

501
  /** Returns how long after the last write to an entry the map will retain that entry. */
502
  protected long expiresAfterWriteNanos() {
503
    throw new UnsupportedOperationException();
1✔
504
  }
505

506
  protected void setExpiresAfterWriteNanos(long expireAfterWriteNanos) {
507
    throw new UnsupportedOperationException();
1✔
508
  }
509

510
  /** Returns if the cache refreshes entries after a write time threshold. */
511
  protected boolean refreshAfterWrite() {
512
    return false;
1✔
513
  }
514

515
  /** Returns how long after the last write an entry becomes a candidate for refresh. */
516
  protected long refreshAfterWriteNanos() {
517
    throw new UnsupportedOperationException();
1✔
518
  }
519

520
  protected void setRefreshAfterWriteNanos(long refreshAfterWriteNanos) {
521
    throw new UnsupportedOperationException();
1✔
522
  }
523

524
  @Override
  @SuppressWarnings({"DataFlowIssue", "NullAway"})
  public Expiry<K, V> expiry() {
    // Intentionally null in this base class; a code generated subclass overrides this when
    // variable expiration is configured (the suppressions silence the nullness checkers)
    return null;
  }
529

530
  /** Returns the {@link Ticker} used by this cache for expiration. */
531
  public Ticker expirationTicker() {
532
    return Ticker.disabledTicker();
1✔
533
  }
534

535
  protected TimerWheel<K, V> timerWheel() {
536
    throw new UnsupportedOperationException();
1✔
537
  }
538

539
  /* --------------- Eviction Support --------------- */
540

541
  /** Returns if the cache evicts entries due to a maximum size or weight threshold. */
542
  protected boolean evicts() {
543
    return false;
1✔
544
  }
545

546
  /** Returns if entries may be assigned different weights. */
  protected boolean isWeighted() {
    // Identity comparison: the singleton weigher gives every entry the same unit weight,
    // so any other weigher implies per-entry weights
    return (weigher != Weigher.singletonWeigher());
  }
550

551
  protected FrequencySketch frequencySketch() {
552
    throw new UnsupportedOperationException();
1✔
553
  }
554

555
  /** Returns if an access to an entry can skip notifying the eviction policy. */
556
  protected boolean fastpath() {
557
    return false;
1✔
558
  }
559

560
  /** Returns the maximum weighted size. */
561
  protected long maximum() {
562
    throw new UnsupportedOperationException();
1✔
563
  }
564

565
  /** Returns the maximum weighted size. */
566
  protected long maximumAcquire() {
567
    throw new UnsupportedOperationException();
1✔
568
  }
569

570
  /** Returns the maximum weighted size of the window space. */
571
  protected long windowMaximum() {
572
    throw new UnsupportedOperationException();
1✔
573
  }
574

575
  /** Returns the maximum weighted size of the main's protected space. */
576
  protected long mainProtectedMaximum() {
577
    throw new UnsupportedOperationException();
1✔
578
  }
579

580
  @GuardedBy("evictionLock")
581
  protected void setMaximum(long maximum) {
582
    throw new UnsupportedOperationException();
1✔
583
  }
584

585
  @GuardedBy("evictionLock")
586
  protected void setWindowMaximum(long maximum) {
587
    throw new UnsupportedOperationException();
1✔
588
  }
589

590
  @GuardedBy("evictionLock")
591
  protected void setMainProtectedMaximum(long maximum) {
592
    throw new UnsupportedOperationException();
1✔
593
  }
594

595
  /** Returns the combined weight of the values in the cache (may be negative). */
596
  protected long weightedSize() {
597
    throw new UnsupportedOperationException();
1✔
598
  }
599

600
  /** Returns the combined weight of the values in the cache (may be negative). */
601
  protected long weightedSizeAcquire() {
602
    throw new UnsupportedOperationException();
1✔
603
  }
604

605
  /** Returns the uncorrected combined weight of the values in the window space. */
606
  protected long windowWeightedSize() {
607
    throw new UnsupportedOperationException();
1✔
608
  }
609

610
  /** Returns the uncorrected combined weight of the values in the main's protected space. */
611
  protected long mainProtectedWeightedSize() {
612
    throw new UnsupportedOperationException();
1✔
613
  }
614

615
  @GuardedBy("evictionLock")
616
  protected void setWeightedSize(long weightedSize) {
617
    throw new UnsupportedOperationException();
1✔
618
  }
619

620
  @GuardedBy("evictionLock")
621
  protected void setWindowWeightedSize(long weightedSize) {
622
    throw new UnsupportedOperationException();
1✔
623
  }
624

625
  @GuardedBy("evictionLock")
626
  protected void setMainProtectedWeightedSize(long weightedSize) {
627
    throw new UnsupportedOperationException();
1✔
628
  }
629

630
  protected int hitsInSample() {
631
    throw new UnsupportedOperationException();
1✔
632
  }
633

634
  protected int missesInSample() {
635
    throw new UnsupportedOperationException();
1✔
636
  }
637

638
  protected double stepSize() {
639
    throw new UnsupportedOperationException();
1✔
640
  }
641

642
  protected double previousSampleHitRate() {
643
    throw new UnsupportedOperationException();
1✔
644
  }
645

646
  protected long adjustment() {
647
    throw new UnsupportedOperationException();
1✔
648
  }
649

650
  @GuardedBy("evictionLock")
651
  protected void setHitsInSample(int hitCount) {
652
    throw new UnsupportedOperationException();
1✔
653
  }
654

655
  @GuardedBy("evictionLock")
656
  protected void setMissesInSample(int missCount) {
657
    throw new UnsupportedOperationException();
1✔
658
  }
659

660
  @GuardedBy("evictionLock")
661
  protected void setStepSize(double stepSize) {
662
    throw new UnsupportedOperationException();
1✔
663
  }
664

665
  @GuardedBy("evictionLock")
666
  protected void setPreviousSampleHitRate(double hitRate) {
667
    throw new UnsupportedOperationException();
1✔
668
  }
669

670
  @GuardedBy("evictionLock")
671
  protected void setAdjustment(long amount) {
672
    throw new UnsupportedOperationException();
1✔
673
  }
674

675
  /**
   * Sets the maximum weighted size of the cache. The caller may need to perform a maintenance cycle
   * to eagerly evict entries until the cache shrinks to the appropriate size.
   */
  @GuardedBy("evictionLock")
  @SuppressWarnings({"ConstantValue", "Varifier"})
  void setMaximumSize(long maximum) {
    requireArgument(maximum >= 0, "maximum must not be negative");
    if (maximum == maximum()) {
      return;
    }

    // Partition the capacity between the admission window and the main space using the
    // initial Window TinyLFU proportions; the hill climber adapts these at runtime
    long max = Math.min(maximum, MAXIMUM_CAPACITY);
    long window = max - (long) (PERCENT_MAIN * max);
    long mainProtected = (long) (PERCENT_MAIN_PROTECTED * (max - window));

    setMaximum(max);
    setWindowMaximum(window);
    setMainProtectedMaximum(mainProtected);

    // Reset the hill climber's sample so adaptation restarts against the new capacity
    setHitsInSample(0);
    setMissesInSample(0);
    setStepSize(-HILL_CLIMBER_STEP_PERCENT * max);

    if ((frequencySketch() != null) && !isWeighted() && (weightedSize() >= (max >>> 1))) {
      // Lazily initialize when close to the maximum size
      frequencySketch().ensureCapacity(max);
    }
  }
1✔
704

705
  /** Evicts entries if the cache exceeds the maximum. */
706
  @GuardedBy("evictionLock")
707
  void evictEntries() {
708
    if (!evicts()) {
1✔
709
      return;
1✔
710
    }
711
    var candidate = evictFromWindow();
1✔
712
    evictFromMain(candidate);
1✔
713
  }
1✔
714

715
  /**
   * Evicts entries from the window space into the main space while the window size exceeds a
   * maximum.
   *
   * @return the first candidate promoted into the probation space
   */
  @GuardedBy("evictionLock")
  @Nullable Node<K, V> evictFromWindow() {
    @Var Node<K, V> first = null;
    @Var Node<K, V> node = accessOrderWindowDeque().peekFirst();
    while (windowWeightedSize() > windowMaximum()) {
      // The pending operations will adjust the size to reflect the correct weight
      if (node == null) {
        break;
      }

      // Capture the successor before the node is relinked into the probation queue
      Node<K, V> next = node.getNextInAccessOrder();
      if (node.getPolicyWeight() != 0) {
        node.makeMainProbation();
        accessOrderWindowDeque().remove(node);
        accessOrderProbationDeque().offerLast(node);
        if (first == null) {
          // Remember the first promotion so evictFromMain can start scanning from it
          first = node;
        }

        setWindowWeightedSize(windowWeightedSize() - node.getPolicyWeight());
      }
      node = next;
    }

    return first;
  }
  /**
   * Evicts entries from the main space if the cache exceeds the maximum capacity. The main space
   * determines whether admitting an entry (coming from the window space) is preferable to retaining
   * the eviction policy's victim. This decision is made using a frequency filter so that the
   * least frequently used entry is removed.
   * <p>
   * The window space's candidates were previously promoted to the probation space at its MRU
   * position and the eviction policy's victim starts at the LRU position. The candidates are
   * evaluated in promotion order while an eviction is required, and if exhausted then additional
   * entries are retrieved from the window space. Likewise, if the victim selection exhausts the
   * probation space then additional entries are retrieved from the protected space. The queues are
   * consumed in LRU order and the evicted entry is the one with a lower relative frequency, where
   * the preference is to retain the main space's victims versus the window space's candidates on a
   * tie.
   *
   * @param candidate the first candidate promoted into the probation space
   */
  @GuardedBy("evictionLock")
  void evictFromMain(@Var @Nullable Node<K, V> candidate) {
    @Var int victimQueue = PROBATION;
    @Var int candidateQueue = PROBATION;
    @Var Node<K, V> victim = accessOrderProbationDeque().peekFirst();
    while (weightedSize() > maximum()) {
      // Search the admission window for additional candidates
      if ((candidate == null) && (candidateQueue == PROBATION)) {
        candidate = accessOrderWindowDeque().peekFirst();
        candidateQueue = WINDOW;
      }

      // Try evicting from the protected and window queues
      if ((candidate == null) && (victim == null)) {
        if (victimQueue == PROBATION) {
          victim = accessOrderProtectedDeque().peekFirst();
          victimQueue = PROTECTED;
          continue;
        } else if (victimQueue == PROTECTED) {
          victim = accessOrderWindowDeque().peekFirst();
          victimQueue = WINDOW;
          continue;
        }

        // The pending operations will adjust the size to reflect the correct weight
        break;
      }

      // Skip over entries with zero weight
      if ((victim != null) && (victim.getPolicyWeight() == 0)) {
        victim = victim.getNextInAccessOrder();
        continue;
      } else if ((candidate != null) && (candidate.getPolicyWeight() == 0)) {
        candidate = candidate.getNextInAccessOrder();
        continue;
      }

      // Evict immediately if only one of the entries is present
      if (victim == null) {
        requireNonNull(candidate);
        // Advance past the candidate before it is unlinked by the eviction
        Node<K, V> previous = candidate.getNextInAccessOrder();
        Node<K, V> evict = candidate;
        candidate = previous;
        evictEntry(evict, RemovalCause.SIZE, 0L);
        continue;
      } else if (candidate == null) {
        Node<K, V> evict = victim;
        victim = victim.getNextInAccessOrder();
        evictEntry(evict, RemovalCause.SIZE, 0L);
        continue;
      }

      // Evict immediately if both selected the same entry
      if (candidate == victim) {
        victim = victim.getNextInAccessOrder();
        evictEntry(candidate, RemovalCause.SIZE, 0L);
        candidate = null;
        continue;
      }

      // Evict immediately if an entry was collected (its key was garbage collected)
      var victimKeyRef = victim.getKeyReferenceOrNull();
      var candidateKeyRef = candidate.getKeyReferenceOrNull();
      if (victimKeyRef == null) {
        Node<K, V> evict = victim;
        victim = victim.getNextInAccessOrder();
        evictEntry(evict, RemovalCause.COLLECTED, 0L);
        continue;
      } else if (candidateKeyRef == null) {
        Node<K, V> evict = candidate;
        candidate = candidate.getNextInAccessOrder();
        evictEntry(evict, RemovalCause.COLLECTED, 0L);
        continue;
      }

      // Evict immediately if an entry was removed
      if (!victim.isAlive()) {
        Node<K, V> evict = victim;
        victim = victim.getNextInAccessOrder();
        evictEntry(evict, RemovalCause.SIZE, 0L);
        continue;
      } else if (!candidate.isAlive()) {
        Node<K, V> evict = candidate;
        candidate = candidate.getNextInAccessOrder();
        evictEntry(evict, RemovalCause.SIZE, 0L);
        continue;
      }

      // Evict immediately if the candidate's weight exceeds the maximum
      if (candidate.getPolicyWeight() > maximum()) {
        Node<K, V> evict = candidate;
        candidate = candidate.getNextInAccessOrder();
        evictEntry(evict, RemovalCause.SIZE, 0L);
        continue;
      }

      // Evict the entry with the lowest frequency
      if (admit(candidateKeyRef, victimKeyRef)) {
        Node<K, V> evict = victim;
        victim = victim.getNextInAccessOrder();
        evictEntry(evict, RemovalCause.SIZE, 0L);
        candidate = candidate.getNextInAccessOrder();
      } else {
        Node<K, V> evict = candidate;
        candidate = candidate.getNextInAccessOrder();
        evictEntry(evict, RemovalCause.SIZE, 0L);
      }
    }
  }
  /**
876
   * Determines if the candidate should be accepted into the main space, as determined by its
877
   * frequency relative to the victim. A small amount of randomness is used to protect against hash
878
   * collision attacks, where the victim's frequency is artificially raised so that no new entries
879
   * are admitted.
880
   *
881
   * @param candidateKeyRef the keyRef for the entry being proposed for long term retention
882
   * @param victimKeyRef the keyRef for the entry chosen by the eviction policy for replacement
883
   * @return if the candidate should be admitted and the victim ejected
884
   */
885
  @GuardedBy("evictionLock")
886
  boolean admit(Object candidateKeyRef, Object victimKeyRef) {
887
    int candidateFreq = frequencySketch().frequency(candidateKeyRef);
1✔
888
    int victimFreq = frequencySketch().frequency(victimKeyRef);
1✔
889
    if (candidateFreq > victimFreq) {
1✔
890
      return true;
1✔
891
    } else if (candidateFreq >= ADMIT_HASHDOS_THRESHOLD) {
1✔
892
      // The maximum frequency is 15 and halved to 7 after a reset to age the history. An attack
893
      // exploits that a hot candidate is rejected in favor of a hot victim. The threshold of a warm
894
      // candidate reduces the number of random acceptances to minimize the impact on the hit rate.
895
      int random = ThreadLocalRandom.current().nextInt();
1✔
896
      return ((random & 127) == 0);
1✔
897
    }
898
    return false;
1✔
899
  }
900

901
  /** Expires entries that have expired by access, write, or variable. */
902
  @GuardedBy("evictionLock")
903
  void expireEntries() {
904
    long now = expirationTicker().read();
1✔
905
    expireAfterAccessEntries(now);
1✔
906
    expireAfterWriteEntries(now);
1✔
907
    expireVariableEntries(now);
1✔
908

909
    Pacer pacer = pacer();
1✔
910
    if (pacer != null) {
1✔
911
      long delay = getExpirationDelay(now);
1✔
912
      if (delay == Long.MAX_VALUE) {
1✔
913
        pacer.cancel();
1✔
914
      } else {
915
        pacer.schedule(executor, drainBuffersTask, now, delay);
1✔
916
      }
917
    }
918
  }
1✔
919

920
  /** Expires entries in the access-order queue. */
921
  @GuardedBy("evictionLock")
922
  void expireAfterAccessEntries(long now) {
923
    if (!expiresAfterAccess()) {
1✔
924
      return;
1✔
925
    }
926

927
    expireAfterAccessEntries(now, accessOrderWindowDeque());
1✔
928
    if (evicts()) {
1✔
929
      expireAfterAccessEntries(now, accessOrderProbationDeque());
1✔
930
      expireAfterAccessEntries(now, accessOrderProtectedDeque());
1✔
931
    }
932
  }
1✔
933

934
  /** Expires entries in an access-order queue. */
935
  @GuardedBy("evictionLock")
936
  void expireAfterAccessEntries(long now, AccessOrderDeque<Node<K, V>> accessOrderDeque) {
937
    long duration = expiresAfterAccessNanos();
1✔
938
    for (;;) {
939
      Node<K, V> node = accessOrderDeque.peekFirst();
1✔
940
      if ((node == null) || ((now - node.getAccessTime()) < duration)
1✔
941
          || !evictEntry(node, RemovalCause.EXPIRED, now)) {
1✔
942
        return;
1✔
943
      }
944
    }
1✔
945
  }
946

947
  /** Expires entries on the write-order queue. */
948
  @GuardedBy("evictionLock")
949
  void expireAfterWriteEntries(long now) {
950
    if (!expiresAfterWrite()) {
1✔
951
      return;
1✔
952
    }
953
    long duration = expiresAfterWriteNanos();
1✔
954
    for (;;) {
955
      Node<K, V> node = writeOrderDeque().peekFirst();
1✔
956
      if ((node == null) || ((now - node.getWriteTime()) < duration)
1✔
957
          || !evictEntry(node, RemovalCause.EXPIRED, now)) {
1✔
958
        break;
1✔
959
      }
960
    }
1✔
961
  }
1✔
962

963
  /** Expires entries in the timer wheel. */
964
  @GuardedBy("evictionLock")
965
  void expireVariableEntries(long now) {
966
    if (expiresVariable()) {
1✔
967
      timerWheel().advance(this, now);
1✔
968
    }
969
  }
1✔
970

971
  /** Returns the duration until the next item expires, or {@link Long#MAX_VALUE} if none. */
  @GuardedBy("evictionLock")
  long getExpirationDelay(long now) {
    @Var long delay = Long.MAX_VALUE;
    if (expiresAfterAccess()) {
      // The head of each access-order queue is its least recently used entry, so it
      // holds that queue's soonest access-based expiration
      @Var Node<K, V> node = accessOrderWindowDeque().peekFirst();
      if (node != null) {
        delay = Math.min(delay, expiresAfterAccessNanos() - (now - node.getAccessTime()));
      }
      if (evicts()) {
        node = accessOrderProbationDeque().peekFirst();
        if (node != null) {
          delay = Math.min(delay, expiresAfterAccessNanos() - (now - node.getAccessTime()));
        }
        node = accessOrderProtectedDeque().peekFirst();
        if (node != null) {
          delay = Math.min(delay, expiresAfterAccessNanos() - (now - node.getAccessTime()));
        }
      }
    }
    if (expiresAfterWrite()) {
      // The head of the write-order queue holds the oldest write
      Node<K, V> node = writeOrderDeque().peekFirst();
      if (node != null) {
        delay = Math.min(delay, expiresAfterWriteNanos() - (now - node.getWriteTime()));
      }
    }
    if (expiresVariable()) {
      delay = Math.min(delay, timerWheel().getExpirationDelay());
    }
    return delay;
  }
  /** Returns if the entry has expired. */
  @SuppressWarnings("ShortCircuitBoolean")
  boolean hasExpired(Node<K, V> node, long now) {
    // An in-flight async computation is never considered expired
    if (isComputingAsync(node.getValue())) {
      return false;
    }
    // Deliberately uses non-short-circuiting `|` (per the ShortCircuitBoolean
    // suppression) so every enabled timestamp is evaluated unconditionally
    return (expiresAfterAccess() && (now - node.getAccessTime() >= expiresAfterAccessNanos()))
        | (expiresAfterWrite() && (now - node.getWriteTime() >= expiresAfterWriteNanos()))
        | (expiresVariable() && (now - node.getVariableTime() >= 0));
  }
  /**
   * Attempts to evict the entry based on the given removal cause. A removal may be ignored if the
   * entry was updated and is no longer eligible for eviction.
   *
   * @param node the entry to evict
   * @param cause the reason to evict
   * @param now the current time, used only if expiring
   * @return if the entry was evicted
   */
  @GuardedBy("evictionLock")
  @SuppressWarnings({"GuardedByChecker", "SynchronizationOnLocalVariableOrMethodParameter"})
  boolean evictEntry(Node<K, V> node, RemovalCause cause, long now) {
    K key = node.getKey();
    var ctx = new EvictContext<V>();
    var keyReference = node.getKeyReference();

    // Atomically validate the eviction against the live map entry; returning the node
    // from the remapping function retains it ("resurrect"), returning null removes it
    data.computeIfPresent(keyReference, (k, n) -> {
      if (n != node) {
        // A different node was concurrently mapped to this key; leave it untouched
        return n;
      }
      synchronized (node) {
        ctx.value = node.getValue();

        if ((key == null) || (ctx.value == null)) {
          // The key or value was garbage collected, so downgrade the cause
          ctx.cause = RemovalCause.COLLECTED;
        } else if (cause == RemovalCause.COLLECTED) {
          // Asked to evict as collected but both references are live; keep the entry
          ctx.resurrect = true;
          return node;
        } else {
          ctx.cause = cause;
        }

        if (ctx.cause == RemovalCause.EXPIRED) {
          // Re-check expiration under the lock in case a concurrent write refreshed it
          @Var boolean expired = false;
          if (expiresAfterAccess()) {
            expired |= ((now - node.getAccessTime()) >= expiresAfterAccessNanos());
          }
          if (expiresAfterWrite()) {
            expired |= ((now - node.getWriteTime()) >= expiresAfterWriteNanos());
          }
          if (expiresVariable()) {
            expired |= ((now - node.getVariableTime()) >= 0);
          }
          if (!expired) {
            ctx.resurrect = true;
            return node;
          }
        } else if (ctx.cause == RemovalCause.SIZE) {
          // Zero-weight entries never contribute to the size bound, so keep them
          int weight = node.getWeight();
          if (weight == 0) {
            ctx.resurrect = true;
            return node;
          }
        }

        notifyEviction(key, ctx.value, ctx.cause);
        discardRefresh(keyReference);
        ctx.removed = true;
        node.retire();
        return null;
      }
    });

    // The entry is no longer eligible for eviction
    if (ctx.resurrect) {
      return false;
    }

    // If the eviction fails due to a concurrent removal of the victim, that removal may cancel out
    // the addition that triggered this eviction. The victim is eagerly unlinked and the size
    // decremented before the removal task so that if an eviction is still required then a new
    // victim will be chosen for removal.
    if (node.inWindow() && (evicts() || expiresAfterAccess())) {
      accessOrderWindowDeque().remove(node);
    } else if (evicts()) {
      if (node.inMainProbation()) {
        accessOrderProbationDeque().remove(node);
      } else {
        accessOrderProtectedDeque().remove(node);
      }
    }
    if (expiresAfterWrite()) {
      writeOrderDeque().remove(node);
    } else if (expiresVariable()) {
      timerWheel().deschedule(node);
    }

    synchronized (node) {
      logIfAlive(node);
      makeDead(node);
    }

    if (ctx.removed) {
      var removeCause = requireNonNull(ctx.cause);
      statsCounter().recordEviction(node.getWeight(), removeCause);
      notifyRemoval(key, ctx.value, removeCause);
    }

    return true;
  }
  /** Adapts the eviction policy to towards the optimal recency / frequency configuration. */
1116
  @GuardedBy("evictionLock")
1117
  @SuppressWarnings("UnnecessaryReturnStatement")
1118
  void climb() {
1119
    if (!evicts()) {
1✔
1120
      return;
1✔
1121
    }
1122

1123
    determineAdjustment();
1✔
1124
    demoteFromMainProtected();
1✔
1125
    long amount = adjustment();
1✔
1126
    if (amount == 0) {
1✔
1127
      return;
1✔
1128
    } else if (amount > 0) {
1✔
1129
      increaseWindow();
1✔
1130
    } else {
1131
      decreaseWindow();
1✔
1132
    }
1133
  }
1✔
1134

1135
  /** Calculates the amount to adapt the window by and sets {@link #adjustment()} accordingly. */
  @GuardedBy("evictionLock")
  void determineAdjustment() {
    if (frequencySketch().isNotInitialized()) {
      // No history yet, so reset the sampling state and wait for the sketch
      setPreviousSampleHitRate(0.0);
      setMissesInSample(0);
      setHitsInSample(0);
      return;
    }

    // Only adapt once a full sample period of requests has been observed
    long requestCount = (long) hitsInSample() + missesInSample();
    if (requestCount < frequencySketch().sampleSize) {
      return;
    }

    // Hill climbing: keep moving in the same direction while the hit rate improves,
    // reverse when it degrades, and restart with a large step on a big swing
    double hitRate = (double) hitsInSample() / requestCount;
    double hitRateChange = hitRate - previousSampleHitRate();
    double amount = (hitRateChange >= 0) ? stepSize() : -stepSize();
    double nextStepSize = (Math.abs(hitRateChange) >= HILL_CLIMBER_RESTART_THRESHOLD)
        ? HILL_CLIMBER_STEP_PERCENT * maximum() * (amount >= 0 ? 1 : -1)
        : HILL_CLIMBER_STEP_DECAY_RATE * amount;
    setPreviousSampleHitRate(hitRate);
    setAdjustment((long) amount);
    setStepSize(nextStepSize);
    setMissesInSample(0);
    setHitsInSample(0);
  }
  /**
   * Increases the size of the admission window by shrinking the portion allocated to the main
   * space. As the main space is partitioned into probation and protected regions (80% / 20%), for
   * simplicity only the protected is reduced. If the regions exceed their maximums, this may cause
   * protected items to be demoted to the probation region and probation items to be demoted to the
   * admission window.
   */
  @GuardedBy("evictionLock")
  void increaseWindow() {
    if (mainProtectedMaximum() == 0) {
      return;
    }

    // Tentatively move the full quota from the protected region to the window
    @Var long quota = Math.min(adjustment(), mainProtectedMaximum());
    setMainProtectedMaximum(mainProtectedMaximum() - quota);
    setWindowMaximum(windowMaximum() + quota);
    demoteFromMainProtected();

    // Transfer entries whose weight fits within the quota, preferring probation entries
    for (int i = 0; i < QUEUE_TRANSFER_THRESHOLD; i++) {
      @Var Node<K, V> candidate = accessOrderProbationDeque().peekFirst();
      @Var boolean probation = true;
      if ((candidate == null) || (quota < candidate.getPolicyWeight())) {
        candidate = accessOrderProtectedDeque().peekFirst();
        probation = false;
      }
      if (candidate == null) {
        break;
      }

      int weight = candidate.getPolicyWeight();
      if (quota < weight) {
        break;
      }

      quota -= weight;
      if (probation) {
        accessOrderProbationDeque().remove(candidate);
      } else {
        setMainProtectedWeightedSize(mainProtectedWeightedSize() - weight);
        accessOrderProtectedDeque().remove(candidate);
      }
      setWindowWeightedSize(windowWeightedSize() + weight);
      accessOrderWindowDeque().offerLast(candidate);
      candidate.makeWindow();
    }

    // Return the unused portion of the quota and record the applied adjustment
    setMainProtectedMaximum(mainProtectedMaximum() + quota);
    setWindowMaximum(windowMaximum() - quota);
    setAdjustment(quota);
  }
  /** Decreases the size of the admission window and increases the main's protected region. */
  @GuardedBy("evictionLock")
  void decreaseWindow() {
    if (windowMaximum() <= 1) {
      return;
    }

    // Tentatively move the full quota from the window to the protected region,
    // always leaving the window with at least a capacity of one
    @Var long quota = Math.min(-adjustment(), Math.max(0, windowMaximum() - 1));
    setMainProtectedMaximum(mainProtectedMaximum() + quota);
    setWindowMaximum(windowMaximum() - quota);

    // Transfer window entries whose weight fits within the quota into probation
    for (int i = 0; i < QUEUE_TRANSFER_THRESHOLD; i++) {
      Node<K, V> candidate = accessOrderWindowDeque().peekFirst();
      if (candidate == null) {
        break;
      }

      int weight = candidate.getPolicyWeight();
      if (quota < weight) {
        break;
      }

      quota -= weight;
      setWindowWeightedSize(windowWeightedSize() - weight);
      accessOrderWindowDeque().remove(candidate);
      accessOrderProbationDeque().offerLast(candidate);
      candidate.makeMainProbation();
    }

    // Return the unused portion of the quota and record the applied adjustment
    setMainProtectedMaximum(mainProtectedMaximum() - quota);
    setWindowMaximum(windowMaximum() + quota);
    setAdjustment(-quota);
  }
  /** Transfers the nodes from the protected to the probation region if it exceeds the maximum. */
  @GuardedBy("evictionLock")
  void demoteFromMainProtected() {
    long mainProtectedMaximum = mainProtectedMaximum();
    @Var long mainProtectedWeightedSize = mainProtectedWeightedSize();
    if (mainProtectedWeightedSize <= mainProtectedMaximum) {
      return;
    }

    // Bounded by QUEUE_TRANSFER_THRESHOLD per pass so a single maintenance cycle
    // does not monopolize the eviction lock
    for (int i = 0; i < QUEUE_TRANSFER_THRESHOLD; i++) {
      if (mainProtectedWeightedSize <= mainProtectedMaximum) {
        break;
      }

      Node<K, V> demoted = accessOrderProtectedDeque().pollFirst();
      if (demoted == null) {
        break;
      }
      demoted.makeMainProbation();
      accessOrderProbationDeque().offerLast(demoted);
      mainProtectedWeightedSize -= demoted.getPolicyWeight();
    }
    setMainProtectedWeightedSize(mainProtectedWeightedSize);
  }
  /**
1274
   * Performs the post-processing work required after a read.
1275
   *
1276
   * @param node the entry in the page replacement policy
1277
   * @param now the current time, in nanoseconds
1278
   * @param recordHit if the hit count should be incremented
1279
   * @return the refreshed value if immediately loaded, else null
1280
   */
1281
  @Nullable V afterRead(Node<K, V> node, long now, boolean recordHit) {
1282
    if (recordHit) {
1✔
1283
      statsCounter().recordHits(1);
1✔
1284
    }
1285

1286
    boolean delayable = skipReadBuffer() || (readBuffer.offer(node) != Buffer.FULL);
1✔
1287
    if (shouldDrainBuffers(delayable)) {
1✔
1288
      scheduleDrainBuffers();
1✔
1289
    }
1290
    return refreshIfNeeded(node, now);
1✔
1291
  }
1292

1293
  /** Returns if the cache should bypass the read buffer. */
1294
  boolean skipReadBuffer() {
1295
    return fastpath() && frequencySketch().isNotInitialized();
1✔
1296
  }
1297

1298
  /**
   * Asynchronously refreshes the entry if eligible.
   *
   * @param node the entry in the cache to refresh
   * @param now the current time, in nanoseconds
   * @return the refreshed value if immediately loaded, else null
   */
  @SuppressWarnings("FutureReturnValueIgnored")
  @Nullable V refreshIfNeeded(Node<K, V> node, long now) {
    if (!refreshAfterWrite()) {
      return null;
    }

    K key;
    V oldValue;
    long writeTime = node.getWriteTime();
    // The low bit of the write time is used as a claim flag while submitting the
    // refresh; an odd write time means another thread is already submitting one
    long refreshWriteTime = writeTime | 1L;
    Object keyReference = node.getKeyReference();
    ConcurrentMap<Object, CompletableFuture<?>> refreshes;
    if (((now - writeTime) > refreshAfterWriteNanos())
        && ((key = node.getKey()) != null) && ((oldValue = node.getValue()) != null)
        && !isComputingAsync(oldValue) && ((writeTime & 1L) == 0L)
        && !(refreshes = refreshes()).containsKey(keyReference)
        && node.isAlive() && node.casWriteTime(writeTime, refreshWriteTime)) {
      // Single-element arrays let the lambdas below write results back out
      long[] startTime = new long[1];
      @SuppressWarnings({"rawtypes", "unchecked"})
      @Nullable CompletableFuture<? extends @Nullable V>[] refreshFuture = new CompletableFuture[1];
      try {
        // computeIfAbsent guarantees at most one in-flight refresh per key
        refreshes.computeIfAbsent(keyReference, k -> {
          try {
            startTime[0] = statsTicker().read();
            if (isAsync) {
              @SuppressWarnings("unchecked")
              var future = (CompletableFuture<V>) oldValue;
              if (Async.isReady(future)) {
                requireNonNull(cacheLoader);
                var refresh = cacheLoader.asyncReload(key, future.join(), executor);
                refreshFuture[0] = requireNonNull(refresh, "Null future");
              } else {
                // no-op if the future's completion state was modified (e.g. obtrude methods)
                return null;
              }
            } else {
              requireNonNull(cacheLoader);
              var refresh = cacheLoader.asyncReload(key, oldValue, executor);
              refreshFuture[0] = requireNonNull(refresh, "Null future");
            }
            return refreshFuture[0];
          } catch (InterruptedException e) {
            // Restore the interrupt status rather than swallowing it
            Thread.currentThread().interrupt();
            logger.log(Level.WARNING, "Exception thrown when submitting refresh task", e);
            return null;
          } catch (Throwable e) {
            logger.log(Level.WARNING, "Exception thrown when submitting refresh task", e);
            return null;
          }
        });
      } finally {
        // Release the claim flag regardless of whether submission succeeded
        node.casWriteTime(refreshWriteTime, writeTime);
      }

      if (refreshFuture[0] == null) {
        return null;
      }

      var refreshed = refreshFuture[0].handle((newValue, error) -> {
        long loadTime = statsTicker().read() - startTime[0];
        if (error != null) {
          if (!(error instanceof CancellationException) && !(error instanceof TimeoutException)) {
            logger.log(Level.WARNING, "Exception thrown during refresh", error);
          }
          refreshes.remove(keyReference, refreshFuture[0]);
          statsCounter().recordLoadFailure(loadTime);
          return null;
        }

        @SuppressWarnings("unchecked")
        V value = (isAsync && (newValue != null)) ? (V) refreshFuture[0] : newValue;

        @Nullable RemovalCause[] cause = new RemovalCause[1];
        V result = compute(key, (K k, @Nullable V currentValue) -> {
          boolean removed = refreshes.remove(keyReference, refreshFuture[0]);
          if (currentValue == null) {
            // If the entry is absent then discard the refresh and maybe notifying the listener
            if (value != null) {
              cause[0] = RemovalCause.EXPLICIT;
            }
            return null;
          } else if (currentValue == value) {
            // If the reloaded value is the same instance then no-op
            return currentValue;
          } else if (isAsync &&
              (newValue == Async.getIfReady((CompletableFuture<?>) currentValue))) {
            // If the completed futures hold the same value instance then no-op
            return currentValue;
          } else if (removed && (currentValue == oldValue) && (node.getWriteTime() == writeTime)) {
            // If the entry was not modified while in-flight (no ABA) then replace
            return value;
          }
          // Otherwise, a write invalidated the refresh so discard it and notify the listener
          cause[0] = RemovalCause.REPLACED;
          return currentValue;
        }, expiry(), /* recordLoad= */ false, /* recordLoadFailure= */ true);

        if (cause[0] != null) {
          notifyRemoval(key, value, cause[0]);
        }
        if (newValue == null) {
          statsCounter().recordLoadFailure(loadTime);
        } else {
          statsCounter().recordLoadSuccess(loadTime);
        }
        return result;
      });
      return Async.getIfReady(refreshed);
    }

    return null;
  }
  /**
1419
   * Returns the expiration time for the entry after being created.
1420
   *
1421
   * @param key the key of the entry that was created
1422
   * @param value the value of the entry that was created
1423
   * @param expiry the calculator for the expiration time
1424
   * @param now the current time, in nanoseconds
1425
   * @return the expiration time
1426
   */
1427
  long expireAfterCreate(K key, V value, @Nullable Expiry<? super K, ? super V> expiry, long now) {
1428
    if (expiresVariable()) {
1✔
1429
      requireNonNull(expiry);
1✔
1430
      long duration = Math.max(0L, expiry.expireAfterCreate(key, value, now));
1✔
1431
      return isAsync ? (now + duration) : (now + Math.min(duration, MAXIMUM_EXPIRY));
1✔
1432
    }
1433
    return 0L;
1✔
1434
  }
1435

1436
  /**
1437
   * Returns the expiration time for the entry after being updated.
1438
   *
1439
   * @param node the entry in the page replacement policy
1440
   * @param key the key of the entry that was updated
1441
   * @param value the value of the entry that was updated
1442
   * @param expiry the calculator for the expiration time
1443
   * @param now the current time, in nanoseconds
1444
   * @return the expiration time
1445
   */
1446
  long expireAfterUpdate(Node<K, V> node, K key, V value,
1447
      @Nullable Expiry<? super K, ? super V> expiry, long now) {
1448
    if (expiresVariable()) {
1✔
1449
      requireNonNull(expiry);
1✔
1450
      long currentDuration = Math.max(1, node.getVariableTime() - now);
1✔
1451
      long duration = Math.max(0L, expiry.expireAfterUpdate(key, value, now, currentDuration));
1✔
1452
      return isAsync ? (now + duration) : (now + Math.min(duration, MAXIMUM_EXPIRY));
1✔
1453
    }
1454
    return 0L;
1✔
1455
  }
1456

1457
  /**
1458
   * Returns the access time for the entry after a read.
1459
   *
1460
   * @param node the entry in the page replacement policy
1461
   * @param key the key of the entry that was read
1462
   * @param value the value of the entry that was read
1463
   * @param expiry the calculator for the expiration time
1464
   * @param now the current time, in nanoseconds
1465
   * @return the expiration time
1466
   */
1467
  long expireAfterRead(Node<K, V> node, K key, V value, Expiry<K, V> expiry, long now) {
1468
    if (expiresVariable()) {
1✔
1469
      long currentDuration = Math.max(0L, node.getVariableTime() - now);
1✔
1470
      long duration = Math.max(0L, expiry.expireAfterRead(key, value, now, currentDuration));
1✔
1471
      return isAsync ? (now + duration) : (now + Math.min(duration, MAXIMUM_EXPIRY));
1✔
1472
    }
1473
    return 0L;
1✔
1474
  }
1475

1476
  /**
1477
   * Attempts to update the access time for the entry after a read.
1478
   *
1479
   * @param node the entry in the page replacement policy
1480
   * @param key the key of the entry that was read
1481
   * @param value the value of the entry that was read
1482
   * @param expiry the calculator for the expiration time
1483
   * @param now the current time, in nanoseconds
1484
   */
1485
  void tryExpireAfterRead(Node<K, V> node, K key, V value, Expiry<K, V> expiry, long now) {
1486
    if (!expiresVariable()) {
1✔
1487
      return;
1✔
1488
    }
1489

1490
    long variableTime = node.getVariableTime();
1✔
1491
    long currentDuration = Math.max(1, variableTime - now);
1✔
1492
    if (isAsync && (currentDuration > MAXIMUM_EXPIRY)) {
1✔
1493
      // expireAfterCreate has not yet set the duration after completion
1494
      return;
1✔
1495
    }
1496

1497
    long duration = Math.max(0L, expiry.expireAfterRead(key, value, now, currentDuration));
1✔
1498
    if (duration != currentDuration) {
1✔
1499
      long expirationTime = isAsync ? (now + duration) : (now + Math.min(duration, MAXIMUM_EXPIRY));
1✔
1500
      node.casVariableTime(variableTime, expirationTime);
1✔
1501
    }
1502
  }
1✔
1503

1504
  void setVariableTime(Node<K, V> node, long expirationTime) {
1505
    if (expiresVariable()) {
1✔
1506
      node.setVariableTime(expirationTime);
1✔
1507
    }
1508
  }
1✔
1509

1510
  void setWriteTime(Node<K, V> node, long now) {
1511
    if (expiresAfterWrite() || refreshAfterWrite()) {
1✔
1512
      node.setWriteTime(now & ~1L);
1✔
1513
    }
1514
  }
1✔
1515

1516
  void setAccessTime(Node<K, V> node, long now) {
1517
    if (expiresAfterAccess()) {
1✔
1518
      node.setAccessTime(now);
1✔
1519
    }
1520
  }
1✔
1521

1522
  /** Returns if the entry's write time would exceed the minimum expiration reorder threshold. */
1523
  boolean exceedsWriteTimeTolerance(Node<K, V> node, long varTime, long now) {
1524
    long variableTime = node.getVariableTime();
1✔
1525
    long tolerance = EXPIRE_WRITE_TOLERANCE;
1✔
1526
    long writeTime = node.getWriteTime();
1✔
1527
    return
1✔
1528
        (expiresAfterWrite()
1✔
1529
            && ((expiresAfterWriteNanos() <= tolerance) || (Math.abs(now - writeTime) > tolerance)))
1✔
1530
        || (refreshAfterWrite()
1✔
1531
            && ((refreshAfterWriteNanos() <= tolerance) || (Math.abs(now - writeTime) > tolerance)))
1✔
1532
        || (expiresVariable() && (Math.abs(varTime - variableTime) > tolerance));
1✔
1533
  }
1534

1535
  /**
1536
   * Performs the post-processing work required after a write.
1537
   *
1538
   * @param task the pending operation to be applied
1539
   */
1540
  void afterWrite(Runnable task) {
1541
    for (int i = 0; i < WRITE_BUFFER_RETRIES; i++) {
1✔
1542
      if (writeBuffer.offer(task)) {
1✔
1543
        scheduleAfterWrite();
1✔
1544
        return;
1✔
1545
      }
1546
      scheduleDrainBuffers();
1✔
1547
      Thread.onSpinWait();
1✔
1548
    }
1549

1550
    // In scenarios where the writing threads cannot make progress then they attempt to provide
1551
    // assistance by performing the eviction work directly. This can resolve cases where the
1552
    // maintenance task is scheduled but not running. That might occur due to all of the executor's
1553
    // threads being busy (perhaps writing into this cache), the write rate greatly exceeds the
1554
    // consuming rate, priority inversion, or if the executor silently discarded the maintenance
1555
    // task. Unfortunately this cannot resolve when the eviction is blocked waiting on a long-
1556
    // running computation due to an eviction listener, the victim is being computed on by a writer,
1557
    // or the victim residing in the same hash bin as a computing entry. In those cases a warning is
1558
    // logged to encourage the application to decouple these computations from the map operations.
1559
    lock();
1✔
1560
    try {
1561
      maintenance(task);
1✔
1562
    } catch (RuntimeException e) {
1✔
1563
      logger.log(Level.ERROR, "Exception thrown when performing the maintenance task", e);
1✔
1564
    } finally {
1565
      evictionLock.unlock();
1✔
1566
    }
1567
    rescheduleCleanUpIfIncomplete();
1✔
1568
  }
1✔
1569

1570
  /** Acquires the eviction lock. */
1571
  void lock() {
1572
    @Var long remainingNanos = WARN_AFTER_LOCK_WAIT_NANOS;
1✔
1573
    long end = System.nanoTime() + remainingNanos;
1✔
1574
    @Var boolean interrupted = false;
1✔
1575
    try {
1576
      for (;;) {
1577
        try {
1578
          if (evictionLock.tryLock(remainingNanos, TimeUnit.NANOSECONDS)) {
1✔
1579
            return;
1✔
1580
          }
1581
          logger.log(Level.WARNING, "The cache is experiencing excessive wait times for acquiring "
1✔
1582
              + "the eviction lock. This may indicate that a long-running computation has halted "
1583
              + "eviction when trying to remove the victim entry. Consider using AsyncCache to "
1584
              + "decouple the computation from the map operation.", new TimeoutException());
1585
          evictionLock.lock();
1✔
1586
          return;
1✔
1587
        } catch (InterruptedException e) {
1✔
1588
          remainingNanos = end - System.nanoTime();
1✔
1589
          interrupted = true;
1✔
1590
        }
1✔
1591
      }
1592
    } finally {
1593
      if (interrupted) {
1✔
1594
        Thread.currentThread().interrupt();
1✔
1595
      }
1596
    }
1597
  }
1598

1599
  /**
1600
   * Conditionally schedules the asynchronous maintenance task after a write operation. If the
1601
   * task status was IDLE or REQUIRED then the maintenance task is scheduled immediately. If it
1602
   * is already processing then it is set to transition to REQUIRED upon completion so that a new
1603
   * execution is triggered by the next operation.
1604
   */
1605
  void scheduleAfterWrite() {
1606
    @Var int drainStatus = drainStatusOpaque();
1✔
1607
    for (;;) {
1608
      switch (drainStatus) {
1✔
1609
        case IDLE:
1610
          casDrainStatus(IDLE, REQUIRED);
1✔
1611
          scheduleDrainBuffers();
1✔
1612
          return;
1✔
1613
        case REQUIRED:
1614
          scheduleDrainBuffers();
1✔
1615
          return;
1✔
1616
        case PROCESSING_TO_IDLE:
1617
          if (casDrainStatus(PROCESSING_TO_IDLE, PROCESSING_TO_REQUIRED)) {
1✔
1618
            return;
1✔
1619
          }
1620
          drainStatus = drainStatusAcquire();
1✔
1621
          continue;
1✔
1622
        case PROCESSING_TO_REQUIRED:
1623
          return;
1✔
1624
        default:
1625
          throw new IllegalStateException("Invalid drain status: " + drainStatus);
1✔
1626
      }
1627
    }
1628
  }
1629

1630
  /**
1631
   * Attempts to schedule an asynchronous task to apply the pending operations to the page
1632
   * replacement policy. If the executor rejects the task then it is run directly.
1633
   */
1634
  void scheduleDrainBuffers() {
1635
    if (drainStatusOpaque() >= PROCESSING_TO_IDLE) {
1✔
1636
      return;
1✔
1637
    }
1638
    if (evictionLock.tryLock()) {
1✔
1639
      try {
1640
        int drainStatus = drainStatusOpaque();
1✔
1641
        if (drainStatus >= PROCESSING_TO_IDLE) {
1✔
1642
          return;
1✔
1643
        }
1644
        setDrainStatusRelease(PROCESSING_TO_IDLE);
1✔
1645
        executor.execute(drainBuffersTask);
1✔
1646
      } catch (Throwable t) {
1✔
1647
        logger.log(Level.WARNING, "Exception thrown when submitting maintenance task", t);
1✔
1648
        maintenance(/* ignored */ null);
1✔
1649
      } finally {
1650
        evictionLock.unlock();
1✔
1651
      }
1652
    }
1653
  }
1✔
1654

1655
  @Override
1656
  public void cleanUp() {
1657
    try {
1658
      performCleanUp(/* ignored */ null);
1✔
1659
    } catch (RuntimeException e) {
1✔
1660
      logger.log(Level.ERROR, "Exception thrown when performing the maintenance task", e);
1✔
1661
    }
1✔
1662
  }
1✔
1663

1664
  /**
1665
   * Performs the maintenance work, blocking until the lock is acquired.
1666
   *
1667
   * @param task an additional pending task to run, or {@code null} if not present
1668
   */
1669
  void performCleanUp(@Nullable Runnable task) {
1670
    evictionLock.lock();
1✔
1671
    try {
1672
      maintenance(task);
1✔
1673
    } finally {
1674
      evictionLock.unlock();
1✔
1675
    }
1676
    rescheduleCleanUpIfIncomplete();
1✔
1677
  }
1✔
1678

1679
  /**
1680
   * If there remains pending operations that were not handled by the prior clean up then try to
1681
   * schedule an asynchronous maintenance task. This may occur due to a concurrent write after the
1682
   * maintenance work had started or if the amortized threshold of work per clean up was reached.
1683
   */
1684
  @SuppressWarnings("resource")
1685
  void rescheduleCleanUpIfIncomplete() {
1686
    if (drainStatusOpaque() != REQUIRED) {
1✔
1687
      return;
1✔
1688
    }
1689

1690
    // An immediate scheduling cannot be performed on a custom executor because it may use a
1691
    // caller-runs policy. This could cause the caller's penalty to exceed the amortized threshold,
1692
    // e.g. repeated concurrent writes could result in a retry loop.
1693
    if (executor == ForkJoinPool.commonPool()) {
1✔
1694
      scheduleDrainBuffers();
1✔
1695
      return;
1✔
1696
    }
1697

1698
    // If a scheduler was configured then the maintenance can be deferred onto the custom executor
1699
    // and run in the near future. Otherwise, it will be handled due to other cache activity.
1700
    var pacer = pacer();
1✔
1701
    if ((pacer != null) && !pacer.isScheduled() && evictionLock.tryLock()) {
1✔
1702
      try {
1703
        if ((drainStatusOpaque() == REQUIRED) && !pacer.isScheduled()) {
1✔
1704
          pacer.schedule(executor, drainBuffersTask, expirationTicker().read(), Pacer.TOLERANCE);
1✔
1705
        }
1706
      } finally {
1707
        evictionLock.unlock();
1✔
1708
      }
1709
    }
1710
  }
1✔
1711

1712
  /**
1713
   * Performs the pending maintenance work and sets the state flags during processing to avoid
1714
   * excess scheduling attempts. The read buffer, write buffer, and reference queues are drained,
1715
   * followed by expiration, and size-based eviction.
1716
   *
1717
   * @param task an additional pending task to run, or {@code null} if not present
1718
   */
1719
  @GuardedBy("evictionLock")
1720
  void maintenance(@Nullable Runnable task) {
1721
    setDrainStatusRelease(PROCESSING_TO_IDLE);
1✔
1722

1723
    try {
1724
      drainReadBuffer();
1✔
1725

1726
      drainWriteBuffer();
1✔
1727
      if (task != null) {
1✔
1728
        task.run();
1✔
1729
      }
1730

1731
      drainKeyReferences();
1✔
1732
      drainValueReferences();
1✔
1733

1734
      expireEntries();
1✔
1735
      evictEntries();
1✔
1736

1737
      climb();
1✔
1738
    } finally {
1739
      if ((drainStatusOpaque() != PROCESSING_TO_IDLE)
1✔
1740
          || !casDrainStatus(PROCESSING_TO_IDLE, IDLE)) {
1✔
1741
        setDrainStatusOpaque(REQUIRED);
1✔
1742
      }
1743
    }
1744
  }
1✔
1745

1746
  /** Drains the weak key references queue. */
1747
  @GuardedBy("evictionLock")
1748
  void drainKeyReferences() {
1749
    if (!collectKeys()) {
1✔
1750
      return;
1✔
1751
    }
1752
    @Var Reference<? extends K> keyRef;
1753
    while ((keyRef = keyReferenceQueue().poll()) != null) {
1✔
1754
      Node<K, V> node = data.get(keyRef);
1✔
1755
      if (node != null) {
1✔
1756
        evictEntry(node, RemovalCause.COLLECTED, 0L);
1✔
1757
      }
1758
    }
1✔
1759
  }
1✔
1760

1761
  /** Drains the weak / soft value references queue. */
1762
  @GuardedBy("evictionLock")
1763
  void drainValueReferences() {
1764
    if (!collectValues()) {
1✔
1765
      return;
1✔
1766
    }
1767
    @Var Reference<? extends V> valueRef;
1768
    while ((valueRef = valueReferenceQueue().poll()) != null) {
1✔
1769
      @SuppressWarnings("unchecked")
1770
      var ref = (InternalReference<V>) valueRef;
1✔
1771
      Node<K, V> node = data.get(ref.getKeyReference());
1✔
1772
      if ((node != null) && (valueRef == node.getValueReference())) {
1✔
1773
        evictEntry(node, RemovalCause.COLLECTED, 0L);
1✔
1774
      }
1775
    }
1✔
1776
  }
1✔
1777

1778
  /** Drains the read buffer. */
1779
  @GuardedBy("evictionLock")
1780
  void drainReadBuffer() {
1781
    if (!skipReadBuffer()) {
1✔
1782
      readBuffer.drainTo(accessPolicy);
1✔
1783
    }
1784
  }
1✔
1785

1786
  /** Updates the node's location in the page replacement policy. */
1787
  @GuardedBy("evictionLock")
1788
  void onAccess(Node<K, V> node) {
1789
    if (evicts()) {
1✔
1790
      var keyRef = node.getKeyReferenceOrNull();
1✔
1791
      if ((keyRef == null) || !node.isAlive()) {
1✔
1792
        return;
1✔
1793
      }
1794
      frequencySketch().increment(keyRef);
1✔
1795
      if (node.inWindow()) {
1✔
1796
        reorder(accessOrderWindowDeque(), node);
1✔
1797
      } else if (node.inMainProbation()) {
1✔
1798
        reorderProbation(node);
1✔
1799
      } else {
1800
        reorder(accessOrderProtectedDeque(), node);
1✔
1801
      }
1802
      setHitsInSample(hitsInSample() + 1);
1✔
1803
    } else if (expiresAfterAccess()) {
1✔
1804
      reorder(accessOrderWindowDeque(), node);
1✔
1805
    }
1806
    if (expiresVariable()) {
1✔
1807
      timerWheel().reschedule(node);
1✔
1808
    }
1809
  }
1✔
1810

1811
  /** Promote the node from probation to protected on an access. */
1812
  @GuardedBy("evictionLock")
1813
  void reorderProbation(Node<K, V> node) {
1814
    if (!accessOrderProbationDeque().contains(node)) {
1!
1815
      // Ignore stale accesses for an entry that is no longer present
1816
      return;
×
1817
    } else if (node.getPolicyWeight() > mainProtectedMaximum()) {
1✔
1818
      reorder(accessOrderProbationDeque(), node);
1✔
1819
      return;
1✔
1820
    }
1821

1822
    // If the protected space exceeds its maximum, the LRU items are demoted to the probation space.
1823
    // This is deferred to the adaption phase at the end of the maintenance cycle.
1824
    setMainProtectedWeightedSize(mainProtectedWeightedSize() + node.getPolicyWeight());
1✔
1825
    accessOrderProbationDeque().remove(node);
1✔
1826
    accessOrderProtectedDeque().offerLast(node);
1✔
1827
    node.makeMainProtected();
1✔
1828
  }
1✔
1829

1830
  /** Updates the node's location in the policy's deque. */
1831
  static <K, V> void reorder(LinkedDeque<Node<K, V>> deque, Node<K, V> node) {
1832
    // An entry may be scheduled for reordering despite having been removed. This can occur when the
1833
    // entry was concurrently read while a writer was removing it. If the entry is no longer linked
1834
    // then it does not need to be processed.
1835
    if (deque.contains(node)) {
1✔
1836
      deque.moveToBack(node);
1✔
1837
    }
1838
  }
1✔
1839

1840
  /** Drains the write buffer. */
1841
  @GuardedBy("evictionLock")
1842
  void drainWriteBuffer() {
1843
    for (int i = 0; i <= WRITE_BUFFER_MAX; i++) {
1✔
1844
      Runnable task = writeBuffer.poll();
1✔
1845
      if (task == null) {
1✔
1846
        return;
1✔
1847
      }
1848
      task.run();
1✔
1849
    }
1850
    setDrainStatusOpaque(PROCESSING_TO_REQUIRED);
1✔
1851
  }
1✔
1852

1853
  /**
1854
   * Atomically transitions the node to the <code>dead</code> state and decrements the
1855
   * <code>weightedSize</code>.
1856
   *
1857
   * @param node the entry in the page replacement policy
1858
   */
1859
  @GuardedBy("evictionLock")
1860
  @SuppressWarnings("SynchronizationOnLocalVariableOrMethodParameter")
1861
  void makeDead(Node<K, V> node) {
1862
    synchronized (node) {
1✔
1863
      if (node.isDead()) {
1✔
1864
        return;
1✔
1865
      }
1866
      if (evicts()) {
1✔
1867
        // The node's policy weight may be out of sync due to a pending update waiting to be
1868
        // processed. At this point the node's weight is finalized, so the weight can be safely
1869
        // taken from the node's perspective and the sizes will be adjusted correctly.
1870
        if (node.inWindow()) {
1✔
1871
          setWindowWeightedSize(windowWeightedSize() - node.getWeight());
1✔
1872
        } else if (node.inMainProtected()) {
1✔
1873
          setMainProtectedWeightedSize(mainProtectedWeightedSize() - node.getWeight());
1✔
1874
        }
1875
        setWeightedSize(weightedSize() - node.getWeight());
1✔
1876
      }
1877
      node.die();
1✔
1878
    }
1✔
1879
  }
1✔
1880

1881
  /** Adds the node to the page replacement policy. */
1882
  final class AddTask implements Runnable {
1883
    final Node<K, V> node;
1884
    final int weight;
1885

1886
    AddTask(Node<K, V> node, int weight) {
1✔
1887
      this.weight = weight;
1✔
1888
      this.node = node;
1✔
1889
    }
1✔
1890

1891
    @Override
1892
    @GuardedBy("evictionLock")
1893
    public void run() {
1894
      if (evicts()) {
1✔
1895
        setWeightedSize(weightedSize() + weight);
1✔
1896
        setWindowWeightedSize(windowWeightedSize() + weight);
1✔
1897
        node.setPolicyWeight(node.getPolicyWeight() + weight);
1✔
1898

1899
        long maximum = maximum();
1✔
1900
        if (weightedSize() >= (maximum >>> 1)) {
1✔
1901
          if (weightedSize() > MAXIMUM_CAPACITY) {
1✔
1902
            evictEntries();
1✔
1903
          } else {
1904
            // Lazily initialize when close to the maximum
1905
            long capacity = isWeighted() ? data.mappingCount() : maximum;
1✔
1906
            frequencySketch().ensureCapacity(capacity);
1✔
1907
          }
1908
        }
1909

1910
        var keyRef = node.getKeyReferenceOrNull();
1✔
1911
        if (keyRef != null) {
1✔
1912
          frequencySketch().increment(keyRef);
1✔
1913
        }
1914

1915
        setMissesInSample(missesInSample() + 1);
1✔
1916
      }
1917

1918
      // ignore out-of-order write operations
1919
      boolean isAlive;
1920
      synchronized (node) {
1✔
1921
        isAlive = node.isAlive();
1✔
1922
      }
1✔
1923
      if (isAlive) {
1✔
1924
        if (expiresAfterWrite()) {
1✔
1925
          writeOrderDeque().offerLast(node);
1✔
1926
        }
1927
        if (expiresVariable()) {
1✔
1928
          timerWheel().schedule(node);
1✔
1929
        }
1930
        if (evicts()) {
1✔
1931
          if (weight > maximum()) {
1✔
1932
            evictEntry(node, RemovalCause.SIZE, expirationTicker().read());
1✔
1933
          } else if (weight > windowMaximum()) {
1✔
1934
            accessOrderWindowDeque().offerFirst(node);
1✔
1935
          } else {
1936
            accessOrderWindowDeque().offerLast(node);
1✔
1937
          }
1938
        } else if (expiresAfterAccess()) {
1✔
1939
          accessOrderWindowDeque().offerLast(node);
1✔
1940
        }
1941
      }
1942
    }
1✔
1943
  }
1944

1945
  /** Removes a node from the page replacement policy. */
1946
  final class RemovalTask implements Runnable {
1947
    final Node<K, V> node;
1948

1949
    RemovalTask(Node<K, V> node) {
1✔
1950
      this.node = node;
1✔
1951
    }
1✔
1952

1953
    @Override
1954
    @GuardedBy("evictionLock")
1955
    public void run() {
1956
      // add may not have been processed yet
1957
      if (node.inWindow() && (evicts() || expiresAfterAccess())) {
1✔
1958
        accessOrderWindowDeque().remove(node);
1✔
1959
      } else if (evicts()) {
1✔
1960
        if (node.inMainProbation()) {
1✔
1961
          accessOrderProbationDeque().remove(node);
1✔
1962
        } else {
1963
          accessOrderProtectedDeque().remove(node);
1✔
1964
        }
1965
      }
1966
      if (expiresAfterWrite()) {
1✔
1967
        writeOrderDeque().remove(node);
1✔
1968
      } else if (expiresVariable()) {
1✔
1969
        timerWheel().deschedule(node);
1✔
1970
      }
1971
      makeDead(node);
1✔
1972
    }
1✔
1973
  }
1974

1975
  /** Updates the weighted size. */
1976
  final class UpdateTask implements Runnable {
1977
    final int weightDifference;
1978
    final Node<K, V> node;
1979

1980
    public UpdateTask(Node<K, V> node, int weightDifference) {
1✔
1981
      this.weightDifference = weightDifference;
1✔
1982
      this.node = node;
1✔
1983
    }
1✔
1984

1985
    @Override
1986
    @GuardedBy("evictionLock")
1987
    public void run() {
1988
      if (expiresAfterWrite()) {
1✔
1989
        reorder(writeOrderDeque(), node);
1✔
1990
      } else if (expiresVariable()) {
1✔
1991
        timerWheel().reschedule(node);
1✔
1992
      }
1993
      if (evicts()) {
1✔
1994
        int oldWeightedSize = node.getPolicyWeight();
1✔
1995
        node.setPolicyWeight(oldWeightedSize + weightDifference);
1✔
1996
        if (node.inWindow()) {
1✔
1997
          setWindowWeightedSize(windowWeightedSize() + weightDifference);
1✔
1998
          if (node.getPolicyWeight() > maximum()) {
1✔
1999
            evictEntry(node, RemovalCause.SIZE, expirationTicker().read());
1✔
2000
          } else if (node.getPolicyWeight() <= windowMaximum()) {
1✔
2001
            onAccess(node);
1✔
2002
          } else if (accessOrderWindowDeque().contains(node)) {
1✔
2003
            accessOrderWindowDeque().moveToFront(node);
1✔
2004
          }
2005
        } else if (node.inMainProbation()) {
1✔
2006
            if (node.getPolicyWeight() <= maximum()) {
1✔
2007
              onAccess(node);
1✔
2008
            } else {
2009
              evictEntry(node, RemovalCause.SIZE, expirationTicker().read());
1✔
2010
            }
2011
        } else {
2012
          setMainProtectedWeightedSize(mainProtectedWeightedSize() + weightDifference);
1✔
2013
          if (node.getPolicyWeight() <= maximum()) {
1✔
2014
            onAccess(node);
1✔
2015
          } else {
2016
            evictEntry(node, RemovalCause.SIZE, expirationTicker().read());
1✔
2017
          }
2018
        }
2019

2020
        setWeightedSize(weightedSize() + weightDifference);
1✔
2021
        if (weightedSize() > MAXIMUM_CAPACITY) {
1✔
2022
          evictEntries();
1✔
2023
        }
2024
      } else if (expiresAfterAccess()) {
1✔
2025
        onAccess(node);
1✔
2026
      }
2027
    }
1✔
2028
  }
2029

  /* --------------- Concurrent Map Support --------------- */

2032
  @Override
2033
  public boolean isEmpty() {
2034
    return data.isEmpty();
1✔
2035
  }
2036

2037
  @Override
2038
  public int size() {
2039
    return data.size();
1✔
2040
  }
2041

2042
  @Override
2043
  public long estimatedSize() {
2044
    return data.mappingCount();
1✔
2045
  }
2046

2047
  @Override
2048
  public void clear() {
2049
    Deque<Node<K, V>> entries;
2050
    evictionLock.lock();
1✔
2051
    try {
2052
      // Discard all pending reads
2053
      readBuffer.drainTo(e -> {});
1✔
2054

2055
      // Apply all pending writes
2056
      @Var Runnable task;
2057
      while ((task = writeBuffer.poll()) != null) {
1✔
2058
        task.run();
1✔
2059
      }
2060

2061
      // Cancel the scheduled cleanup
2062
      Pacer pacer = pacer();
1✔
2063
      if (pacer != null) {
1✔
2064
        pacer.cancel();
1✔
2065
      }
2066

2067
      // Discard all entries, falling back to one-by-one to avoid excessive lock hold times
2068
      long now = expirationTicker().read();
1✔
2069
      int threshold = (WRITE_BUFFER_MAX / 2);
1✔
2070
      entries = new ArrayDeque<>(data.values());
1✔
2071
      while (!entries.isEmpty() && (writeBuffer.size() < threshold)) {
1✔
2072
        removeNode(entries.pollFirst(), now);
1✔
2073
      }
2074
    } finally {
2075
      evictionLock.unlock();
1✔
2076
    }
2077

2078
    // Remove any stragglers if released early to more aggressively flush incoming writes
2079
    @Var boolean cleanUp = false;
1✔
2080
    for (var node : entries) {
1✔
2081
      @Nullable K key = node.getKey();
1✔
2082
      if (key == null) {
1✔
2083
        cleanUp = true;
1✔
2084
      } else {
2085
        remove(key);
1✔
2086
      }
2087
    }
1✔
2088
    if (collectKeys() && cleanUp) {
1✔
2089
      cleanUp();
1✔
2090
    }
2091
  }
1✔
2092

2093
  @GuardedBy("evictionLock")
2094
  @SuppressWarnings({"GuardedByChecker", "SynchronizationOnLocalVariableOrMethodParameter"})
2095
  void removeNode(Node<K, V> node, long now) {
2096
    K key = node.getKey();
1✔
2097
    var ctx = new EvictContext<V>();
1✔
2098
    var keyReference = node.getKeyReference();
1✔
2099

2100
    data.computeIfPresent(keyReference, (k, n) -> {
1✔
2101
      if (n != node) {
1✔
2102
        return n;
1✔
2103
      }
2104
      synchronized (node) {
1✔
2105
        ctx.value = node.getValue();
1✔
2106

2107
        if ((key == null) || (ctx.value == null)) {
1✔
2108
          ctx.cause = RemovalCause.COLLECTED;
1✔
2109
        } else if (hasExpired(node, now)) {
1✔
2110
          ctx.cause = RemovalCause.EXPIRED;
1✔
2111
        } else {
2112
          ctx.cause = RemovalCause.EXPLICIT;
1✔
2113
        }
2114

2115
        if (ctx.cause.wasEvicted()) {
1✔
2116
          notifyEviction(key, ctx.value, ctx.cause);
1✔
2117
        }
2118

2119
        discardRefresh(node.getKeyReference());
1✔
2120
        node.retire();
1✔
2121
        return null;
1✔
2122
      }
2123
    });
2124

2125
    if (node.inWindow() && (evicts() || expiresAfterAccess())) {
1✔
2126
      accessOrderWindowDeque().remove(node);
1✔
2127
    } else if (evicts()) {
1✔
2128
      if (node.inMainProbation()) {
1✔
2129
        accessOrderProbationDeque().remove(node);
1✔
2130
      } else {
2131
        accessOrderProtectedDeque().remove(node);
1✔
2132
      }
2133
    }
2134
    if (expiresAfterWrite()) {
1✔
2135
      writeOrderDeque().remove(node);
1✔
2136
    } else if (expiresVariable()) {
1✔
2137
      timerWheel().deschedule(node);
1✔
2138
    }
2139

2140
    synchronized (node) {
1✔
2141
      logIfAlive(node);
1✔
2142
      makeDead(node);
1✔
2143
    }
1✔
2144

2145
    if (ctx.cause != null) {
1✔
2146
      notifyRemoval(key, ctx.value, ctx.cause);
1✔
2147
    }
2148
  }
1✔
2149

2150
  @Override
2151
  public boolean containsKey(Object key) {
2152
    Node<K, V> node = data.get(nodeFactory.newLookupKey(key));
1✔
2153
    return (node != null) && (node.getValue() != null)
1✔
2154
        && !hasExpired(node, expirationTicker().read());
1✔
2155
  }
2156

2157
  @Override
2158
  public boolean containsValue(Object value) {
2159
    requireNonNull(value);
1✔
2160

2161
    long now = expirationTicker().read();
1✔
2162
    for (Node<K, V> node : data.values()) {
1✔
2163
      if (node.containsValue(value) && !hasExpired(node, now) && (node.getKey() != null)) {
1✔
2164
        return true;
1✔
2165
      }
2166
    }
1✔
2167
    return false;
1✔
2168
  }
2169

2170
  @Override
2171
  public @Nullable V get(Object key) {
2172
    return getIfPresent(key, /* recordStats= */ false);
1✔
2173
  }
2174

2175
  @Override
2176
  public @Nullable V getIfPresent(Object key, boolean recordStats) {
2177
    Node<K, V> node = data.get(nodeFactory.newLookupKey(key));
1✔
2178
    if (node == null) {
1✔
2179
      if (recordStats) {
1✔
2180
        statsCounter().recordMisses(1);
1✔
2181
      }
2182
      if (drainStatusOpaque() == REQUIRED) {
1✔
2183
        scheduleDrainBuffers();
1✔
2184
      }
2185
      return null;
1✔
2186
    }
2187

2188
    V value = node.getValue();
1✔
2189
    long now = expirationTicker().read();
1✔
2190
    if (hasExpired(node, now) || (collectValues() && (value == null))) {
1✔
2191
      if (recordStats) {
1✔
2192
        statsCounter().recordMisses(1);
1✔
2193
      }
2194
      scheduleDrainBuffers();
1✔
2195
      return null;
1✔
2196
    }
2197

2198
    if ((value != null) && !isComputingAsync(value)) {
1✔
2199
      @SuppressWarnings("unchecked")
2200
      var castedKey = (K) key;
1✔
2201
      setAccessTime(node, now);
1✔
2202
      tryExpireAfterRead(node, castedKey, value, expiry(), now);
1✔
2203
    }
2204
    V refreshed = afterRead(node, now, recordStats);
1✔
2205
    return (refreshed == null) ? value : refreshed;
1✔
2206
  }
2207

2208
  @Override
2209
  public @Nullable V getIfPresentQuietly(Object key) {
2210
    V value;
2211
    Node<K, V> node = data.get(nodeFactory.newLookupKey(key));
1✔
2212
    if ((node == null) || ((value = node.getValue()) == null)
1✔
2213
        || hasExpired(node, expirationTicker().read())) {
1✔
2214
      return null;
1✔
2215
    }
2216
    return value;
1✔
2217
  }
2218

2219
  /**
2220
   * Returns the key associated with the mapping in this cache, or {@code null} if there is none.
2221
   *
2222
   * @param key the key whose canonical instance is to be returned
2223
   * @return the key used by the mapping, or {@code null} if this cache does not contain a mapping
2224
   *         for the key
2225
   * @throws NullPointerException if the specified key is null
2226
   */
2227
  public @Nullable K getKey(K key) {
2228
    Node<K, V> node = data.get(nodeFactory.newLookupKey(key));
1✔
2229
    if (node == null) {
1✔
2230
      if (drainStatusOpaque() == REQUIRED) {
1✔
2231
        scheduleDrainBuffers();
1✔
2232
      }
2233
      return null;
1✔
2234
    }
2235
    afterRead(node, /* now= */ 0L, /* recordHit= */ false);
1✔
2236
    return node.getKey();
1✔
2237
  }
2238

2239
  @Override
2240
  public Map<K, V> getAllPresent(Iterable<? extends K> keys) {
2241
    var result = new LinkedHashMap<K, @Nullable V>(calculateHashMapCapacity(keys));
1✔
2242
    for (K key : keys) {
1✔
2243
      result.put(key, null);
1✔
2244
    }
1✔
2245

2246
    int uniqueKeys = result.size();
1✔
2247
    long now = expirationTicker().read();
1✔
2248
    for (var iter = result.entrySet().iterator(); iter.hasNext();) {
1✔
2249
      V value;
2250
      var entry = iter.next();
1✔
2251
      Node<K, V> node = data.get(nodeFactory.newLookupKey(entry.getKey()));
1✔
2252
      if ((node == null) || ((value = node.getValue()) == null) || hasExpired(node, now)) {
1✔
2253
        iter.remove();
1✔
2254
      } else {
2255
        setAccessTime(node, now);
1✔
2256
        tryExpireAfterRead(node, entry.getKey(), value, expiry(), now);
1✔
2257
        V refreshed = afterRead(node, now, /* recordHit= */ false);
1✔
2258
        entry.setValue((refreshed == null) ? value : refreshed);
1✔
2259
      }
2260
    }
1✔
2261
    statsCounter().recordHits(result.size());
1✔
2262
    statsCounter().recordMisses(uniqueKeys - result.size());
1✔
2263

2264
    @SuppressWarnings("NullableProblems")
2265
    Map<K, V> unmodifiable = Collections.unmodifiableMap(result);
1✔
2266
    return unmodifiable;
1✔
2267
  }
2268

2269
  @Override
2270
  public void putAll(Map<? extends K, ? extends V> map) {
2271
    map.forEach(this::put);
1✔
2272
  }
1✔
2273

2274
  @Override
2275
  public @Nullable V put(K key, V value) {
2276
    return put(key, value, expiry(), /* onlyIfAbsent= */ false);
1✔
2277
  }
2278

2279
  @Override
2280
  public @Nullable V putIfAbsent(K key, V value) {
2281
    return put(key, value, expiry(), /* onlyIfAbsent= */ true);
1✔
2282
  }
2283

  /**
   * Adds a node to the policy and the data store. If an existing node is found, then its value is
   * updated if allowed.
   *
   * @param key key with which the specified value is to be associated
   * @param value value to be associated with the specified key
   * @param expiry the calculator for the write expiration time
   * @param onlyIfAbsent a write is performed only if the key is not already associated with a value
   * @return the prior value in the data store, or null if no mapping was found
   */
  @Nullable V put(K key, V value, Expiry<K, V> expiry, boolean onlyIfAbsent) {
2295
    requireNonNull(key);
1✔
2296
    requireNonNull(value);
1✔
2297

2298
    @Var Node<K, V> node = null;
1✔
2299
    long now = expirationTicker().read();
1✔
2300
    int newWeight = weigher.weigh(key, value);
1✔
2301
    Object lookupKey = nodeFactory.newLookupKey(key);
1✔
2302
    for (int attempts = 1; ; attempts++) {
1✔
2303
      @Var Node<K, V> prior = data.get(lookupKey);
1✔
2304
      if (prior == null) {
1✔
2305
        if (node == null) {
1✔
2306
          node = nodeFactory.newNode(key, keyReferenceQueue(),
1✔
2307
              value, valueReferenceQueue(), newWeight, now);
1✔
2308
          long expirationTime = isComputingAsync(value) ? (now + ASYNC_EXPIRY) : now;
1✔
2309
          setVariableTime(node, expireAfterCreate(key, value, expiry, now));
1✔
2310
          setAccessTime(node, expirationTime);
1✔
2311
          setWriteTime(node, expirationTime);
1✔
2312
        }
2313
        prior = data.putIfAbsent(node.getKeyReference(), node);
1✔
2314
        if (prior == null) {
1✔
2315
          afterWrite(new AddTask(node, newWeight));
1✔
2316
          return null;
1✔
2317
        } else if (onlyIfAbsent) {
1✔
2318
          // An optimistic fast path to avoid unnecessary locking
2319
          V currentValue = prior.getValue();
1✔
2320
          if ((currentValue != null) && !hasExpired(prior, now)) {
1✔
2321
            if (!isComputingAsync(currentValue)) {
1✔
2322
              tryExpireAfterRead(prior, key, currentValue, expiry(), now);
1✔
2323
              setAccessTime(prior, now);
1✔
2324
            }
2325
            afterRead(prior, now, /* recordHit= */ false);
1✔
2326
            return currentValue;
1✔
2327
          }
2328
        }
1✔
2329
      } else if (onlyIfAbsent) {
1✔
2330
        // An optimistic fast path to avoid unnecessary locking
2331
        V currentValue = prior.getValue();
1✔
2332
        if ((currentValue != null) && !hasExpired(prior, now)) {
1✔
2333
          if (!isComputingAsync(currentValue)) {
1✔
2334
            tryExpireAfterRead(prior, key, currentValue, expiry(), now);
1✔
2335
            setAccessTime(prior, now);
1✔
2336
          }
2337
          afterRead(prior, now, /* recordHit= */ false);
1✔
2338
          return currentValue;
1✔
2339
        }
2340
      }
2341

2342
      // A read may race with the entry's removal, so that after the entry is acquired it may no
2343
      // longer be usable. A retry will reread from the map and either find an absent mapping, a
2344
      // new entry, or a stale entry.
2345
      if (!prior.isAlive()) {
1✔
2346
        // A reread of the stale entry may occur if the state transition occurred but the map
2347
        // removal was delayed by a context switch, so that this thread spin waits until resolved.
2348
        if ((attempts & MAX_PUT_SPIN_WAIT_ATTEMPTS) != 0) {
1✔
2349
          Thread.onSpinWait();
1✔
2350
          continue;
1✔
2351
        }
2352

2353
        // If the spin wait attempts are exhausted then fallback to a map computation in order to
2354
        // deschedule this thread until the entry's removal completes. If the key was modified
2355
        // while in the map so that its equals or hashCode changed then the contents may be
2356
        // corrupted, where the cache holds an evicted (dead) entry that could not be removed.
2357
        // That is a violation of the Map contract, so we check that the mapping is in the "alive"
2358
        // state while in the computation.
2359
        data.computeIfPresent(lookupKey, (k, n) -> {
1✔
2360
          requireIsAlive(key, n);
1✔
2361
          return n;
1✔
2362
        });
2363
        continue;
1✔
2364
      }
2365

2366
      V oldValue;
2367
      long varTime;
2368
      int oldWeight;
2369
      @Var boolean expired = false;
1✔
2370
      @Var boolean mayUpdate = true;
1✔
2371
      @Var boolean exceedsTolerance = false;
1✔
2372
      synchronized (prior) {
1✔
2373
        if (!prior.isAlive()) {
1✔
2374
          continue;
1✔
2375
        }
2376
        oldValue = prior.getValue();
1✔
2377
        oldWeight = prior.getWeight();
1✔
2378
        if (oldValue == null) {
1✔
2379
          varTime = expireAfterCreate(key, value, expiry, now);
1✔
2380
          notifyEviction(key, null, RemovalCause.COLLECTED);
1✔
2381
        } else if (hasExpired(prior, now)) {
1✔
2382
          expired = true;
1✔
2383
          varTime = expireAfterCreate(key, value, expiry, now);
1✔
2384
          notifyEviction(key, oldValue, RemovalCause.EXPIRED);
1✔
2385
        } else if (onlyIfAbsent) {
1✔
2386
          mayUpdate = false;
1✔
2387
          varTime = expireAfterRead(prior, key, oldValue, expiry, now);
1✔
2388
        } else {
2389
          varTime = expireAfterUpdate(prior, key, value, expiry, now);
1✔
2390
        }
2391

2392
        long expirationTime = isComputingAsync(mayUpdate ? value : oldValue)
1✔
2393
            ? (now + ASYNC_EXPIRY)
1✔
2394
            : now;
1✔
2395
        if (mayUpdate) {
1✔
2396
          exceedsTolerance = exceedsWriteTimeTolerance(prior, varTime, now);
1✔
2397
          if (expired || exceedsTolerance) {
1✔
2398
            setWriteTime(prior, isComputingAsync(value) ? (now + ASYNC_EXPIRY) : now);
1✔
2399
          }
2400

2401
          prior.setValue(value, valueReferenceQueue());
1✔
2402
          prior.setWeight(newWeight);
1✔
2403

2404
          discardRefresh(prior.getKeyReference());
1✔
2405
        }
2406

2407
        setVariableTime(prior, varTime);
1✔
2408
        setAccessTime(prior, expirationTime);
1✔
2409
      }
1✔
2410

2411
      if (expired) {
1✔
2412
        notifyRemoval(key, oldValue, RemovalCause.EXPIRED);
1✔
2413
      } else if (oldValue == null) {
1✔
2414
        notifyRemoval(key, /* value= */ null, RemovalCause.COLLECTED);
1✔
2415
      } else if (mayUpdate) {
1✔
2416
        notifyOnReplace(key, oldValue, value);
1✔
2417
      }
2418

2419
      int weightedDifference = mayUpdate ? (newWeight - oldWeight) : 0;
1✔
2420
      if ((oldValue == null) || (weightedDifference != 0) || expired) {
1✔
2421
        afterWrite(new UpdateTask(prior, weightedDifference));
1✔
2422
      } else if (!onlyIfAbsent && exceedsTolerance) {
1✔
2423
        afterWrite(new UpdateTask(prior, weightedDifference));
1✔
2424
      } else {
2425
        afterRead(prior, now, /* recordHit= */ false);
1✔
2426
      }
2427

2428
      return expired ? null : oldValue;
1✔
2429
    }
2430
  }
2431

2432
  @Override
  @SuppressWarnings("SynchronizationOnLocalVariableOrMethodParameter")
  public @Nullable V remove(Object key) {
    // The removal is performed inside the map's per-bin computation while also
    // holding the node lock, so that the retire transition, refresh discard,
    // and cause classification are atomic with the map removal.
    var ctx = new RemoveContext<K, V>();
    Object lookupKey = nodeFactory.newLookupKey(key);
    data.computeIfPresent(lookupKey, (k, n) -> {
      synchronized (n) {
        requireIsAlive(key, n);
        ctx.oldKey = n.getKey();
        ctx.oldValue = n.getValue();
        // Classify why the mapping is going away: a collected key/value or an
        // expired entry is an eviction; otherwise it is an explicit removal.
        RemovalCause actualCause;
        if ((ctx.oldKey == null) || (ctx.oldValue == null)) {
          actualCause = RemovalCause.COLLECTED;
        } else if (hasExpired(n, expirationTicker().read())) {
          actualCause = RemovalCause.EXPIRED;
        } else {
          actualCause = RemovalCause.EXPLICIT;
        }
        if (actualCause.wasEvicted()) {
          notifyEviction(ctx.oldKey, ctx.oldValue, actualCause);
        }
        ctx.cause = actualCause;
        discardRefresh(k);
        ctx.node = n;
        n.retire();
        // Returning null removes the mapping from the data store.
        return null;
      }
    });

    if (ctx.cause != null) {
      afterWrite(new RemovalTask(requireNonNull(ctx.node)));
      notifyRemoval(ctx.oldKey, ctx.oldValue, ctx.cause);
    }
    // Per the Map contract, only an explicit removal exposes the old value;
    // evicted (expired/collected) entries behave as if absent.
    return (ctx.cause == RemovalCause.EXPLICIT) ? ctx.oldValue : null;
  }
2467

2468
  @Override
  @SuppressWarnings("SynchronizationOnLocalVariableOrMethodParameter")
  public boolean remove(Object key, @Nullable Object value) {
    requireNonNull(key);
    if (value == null) {
      // A null can never match a stored value, so nothing to remove.
      return false;
    }

    var ctx = new RemoveContext<K, V>();
    Object lookupKey = nodeFactory.newLookupKey(key);
    data.computeIfPresent(lookupKey, (kR, node) -> {
      synchronized (node) {
        requireIsAlive(key, node);
        ctx.oldKey = node.getKey();
        ctx.oldValue = node.getValue();
        // Dead entries are evicted opportunistically even when the value does
        // not match; only a live, matching value is an explicit removal.
        if ((ctx.oldKey == null) || (ctx.oldValue == null)) {
          ctx.cause = RemovalCause.COLLECTED;
        } else if (hasExpired(node, expirationTicker().read())) {
          ctx.cause = RemovalCause.EXPIRED;
        } else if (node.containsValue(value)) {
          ctx.cause = RemovalCause.EXPLICIT;
        } else {
          // Live entry with a different value; keep the mapping untouched.
          return node;
        }
        if (ctx.cause.wasEvicted()) {
          notifyEviction(ctx.oldKey, ctx.oldValue, ctx.cause);
        }
        discardRefresh(kR);
        ctx.node = node;
        node.retire();
        // Returning null removes the mapping from the data store.
        return null;
      }
    });

    if (ctx.node == null) {
      return false;
    }
    var removeCause = requireNonNull(ctx.cause);
    afterWrite(new RemovalTask(ctx.node));
    notifyRemoval(ctx.oldKey, ctx.oldValue, removeCause);

    // Only a value-matched explicit removal counts as success.
    return (removeCause == RemovalCause.EXPLICIT);
  }
2511

2512
  @Override
  @SuppressWarnings("SynchronizationOnLocalVariableOrMethodParameter")
  public @Nullable V replace(K key, V value) {
    requireNonNull(key);
    requireNonNull(value);
    var ctx = new ReplaceContext<K, V>();
    int weight = weigher.weigh(key, value);
    Node<K, V> node = data.computeIfPresent(nodeFactory.newLookupKey(key), (k, n) -> {
      synchronized (n) {
        requireIsAlive(key, n);
        ctx.nodeKey = n.getKey();
        ctx.oldValue = n.getValue();
        ctx.oldWeight = n.getWeight();
        // A collected or expired entry is not replaced; clearing oldValue
        // signals the caller path below to skip the update handling.
        if ((ctx.nodeKey == null) || (ctx.oldValue == null)
            || hasExpired(n, ctx.now = expirationTicker().read())) {
          ctx.oldValue = null;
          return n;
        }

        long varTime = expireAfterUpdate(n, key, value, expiry(), ctx.now);
        n.setValue(value, valueReferenceQueue());
        n.setWeight(weight);

        long expirationTime = isComputingAsync(value) ? (ctx.now + ASYNC_EXPIRY) : ctx.now;
        ctx.exceedsTolerance = exceedsWriteTimeTolerance(n, varTime, expirationTime);
        if (ctx.exceedsTolerance) {
          setWriteTime(n, expirationTime);
        }
        setAccessTime(n, expirationTime);
        setVariableTime(n, varTime);
        discardRefresh(k);
        return n;
      }
    });

    if ((node == null) || (ctx.nodeKey == null) || (ctx.oldValue == null)) {
      if (node != null) {
        // A dead entry was observed; schedule maintenance to evict it.
        scheduleDrainBuffers();
      }
      return null;
    }

    // A weight change or a write-time change beyond the tolerance requires
    // write maintenance; otherwise record a cheaper read.
    int weightedDifference = (weight - ctx.oldWeight);
    if (ctx.exceedsTolerance || (weightedDifference != 0)) {
      afterWrite(new UpdateTask(node, weightedDifference));
    } else {
      afterRead(node, ctx.now, /* recordHit= */ false);
    }

    notifyOnReplace(ctx.nodeKey, ctx.oldValue, value);
    return ctx.oldValue;
  }
2564

2565
  @Override
  public boolean replace(K key, V oldValue, V newValue) {
    // External callers always invalidate a pending refresh on replacement;
    // the internal variant may opt out (used by the refresh machinery).
    return replace(key, oldValue, newValue, /* shouldDiscardRefresh= */ true);
  }
2569

2570
  @Override
  @SuppressWarnings("SynchronizationOnLocalVariableOrMethodParameter")
  public boolean replace(K key, V oldValue, V newValue, boolean shouldDiscardRefresh) {
    requireNonNull(key);
    requireNonNull(oldValue);
    requireNonNull(newValue);
    var ctx = new ReplaceContext<K, V>();
    int weight = weigher.weigh(key, newValue);
    Node<K, V> node = data.computeIfPresent(nodeFactory.newLookupKey(key), (k, n) -> {
      synchronized (n) {
        requireIsAlive(key, n);
        ctx.nodeKey = n.getKey();
        ctx.oldValue = n.getValue();
        ctx.oldWeight = n.getWeight();
        // Only a live entry whose current value matches oldValue is replaced;
        // clearing oldValue signals the failure path below.
        if ((ctx.nodeKey == null) || (ctx.oldValue == null) || !n.containsValue(oldValue)
            || hasExpired(n, ctx.now = expirationTicker().read())) {
          ctx.oldValue = null;
          return n;
        }

        long varTime = expireAfterUpdate(n, key, newValue, expiry(), ctx.now);
        n.setValue(newValue, valueReferenceQueue());
        n.setWeight(weight);

        long expirationTime = isComputingAsync(newValue) ? (ctx.now + ASYNC_EXPIRY) : ctx.now;
        ctx.exceedsTolerance = exceedsWriteTimeTolerance(n, varTime, expirationTime);
        if (ctx.exceedsTolerance) {
          setWriteTime(n, expirationTime);
        }
        setAccessTime(n, expirationTime);
        setVariableTime(n, varTime);

        // The refresh machinery passes false so that its own in-flight reload
        // is not discarded by its completion.
        if (shouldDiscardRefresh) {
          discardRefresh(k);
        }
      }
      return n;
    });

    if ((node == null) || (ctx.nodeKey == null) || (ctx.oldValue == null)) {
      if (node != null) {
        // A dead entry was observed; schedule maintenance to evict it.
        scheduleDrainBuffers();
      }
      return false;
    }

    // A weight change or a write-time change beyond the tolerance requires
    // write maintenance; otherwise record a cheaper read.
    int weightedDifference = (weight - ctx.oldWeight);
    if (ctx.exceedsTolerance || (weightedDifference != 0)) {
      afterWrite(new UpdateTask(node, weightedDifference));
    } else {
      afterRead(node, ctx.now, /* recordHit= */ false);
    }

    notifyOnReplace(ctx.nodeKey, ctx.oldValue, newValue);
    return true;
  }
2626

2627
  @Override
  public void replaceAll(BiFunction<? super K, ? super V, ? extends V> function) {
    requireNonNull(function);

    // Each present entry is remapped individually (computeIfAbsent=false so
    // absent keys are skipped); the function must not return null.
    BiFunction<K, V, V> remappingFunction = (key, oldValue) ->
        requireNonNull(function.apply(key, oldValue));
    for (K key : keySet()) {
      Object lookupKey = nodeFactory.newLookupKey(key);
      remap(key, lookupKey, remappingFunction, expiry(),
          new ComputeContext<>(expirationTicker().read()), /* computeIfAbsent= */ false);
    }
  }
2639

2640
  @Override
  public @Nullable V computeIfAbsent(K key,
      @Var Function<? super K, ? extends @Nullable V> mappingFunction,
      boolean recordStats, boolean recordLoad) {
    requireNonNull(key);
    requireNonNull(mappingFunction);
    long now = expirationTicker().read();

    // An optimistic fast path to avoid unnecessary locking
    Node<K, V> node = data.get(nodeFactory.newLookupKey(key));
    if (node != null) {
      V value = node.getValue();
      if ((value != null) && !hasExpired(node, now)) {
        // An in-flight async computation is not touched by read expiration;
        // its timestamps are finalized when the future completes.
        if (!isComputingAsync(value)) {
          tryExpireAfterRead(node, key, value, expiry(), now);
          setAccessTime(node, now);
        }
        // afterRead may hand back a refreshed value to return instead.
        @Nullable V refreshed = afterRead(node, now, /* recordHit= */ recordStats);
        return (refreshed == null) ? value : refreshed;
      }
    }
    if (recordStats) {
      mappingFunction = statsAware(mappingFunction, recordLoad);
    }
    // Fall back to the locked computation; a reference key is created so the
    // mapping can be inserted if absent.
    Object keyRef = nodeFactory.newReferenceKey(key, keyReferenceQueue());
    return doComputeIfAbsent(key, keyRef, mappingFunction,
        new ComputeContext<>(now), recordStats);
  }
2668

2669
  /** Returns the current value from a computeIfAbsent invocation. */
  @SuppressWarnings("SynchronizationOnLocalVariableOrMethodParameter")
  @Nullable V doComputeIfAbsent(K key, Object keyRef,
      Function<? super K, ? extends @Nullable V> mappingFunction,
      ComputeContext<K, V> ctx, boolean recordStats) {
    Node<K, V> node = data.compute(keyRef, (k, n) -> {
      if (n == null) {
        // Absent mapping: invoke the loader and insert a new node on success.
        ctx.newValue = mappingFunction.apply(key);
        if (ctx.newValue == null) {
          discardRefresh(k);
          return null;
        }
        ctx.now = expirationTicker().read();
        ctx.newWeight = weigher.weigh(key, ctx.newValue);
        var created = nodeFactory.newNode(k, ctx.newValue,
            valueReferenceQueue(), ctx.newWeight, ctx.now);
        long expirationTime = isComputingAsync(ctx.newValue)
            ? ctx.now + ASYNC_EXPIRY
            : ctx.now;
        setVariableTime(created, expireAfterCreate(key, ctx.newValue, expiry(), ctx.now));
        setAccessTime(created, expirationTime);
        setWriteTime(created, expirationTime);
        discardRefresh(k);
        return created;
      }

      synchronized (n) {
        requireIsAlive(key, n);
        ctx.nodeKey = n.getKey();
        ctx.oldValue = n.getValue();
        ctx.oldWeight = n.getWeight();
        // A live entry short-circuits; a dead (collected/expired) entry is
        // evicted and then reloaded in place.
        RemovalCause actualCause;
        if ((ctx.nodeKey == null) || (ctx.oldValue == null)) {
          actualCause = RemovalCause.COLLECTED;
        } else if (hasExpired(n, ctx.now)) {
          actualCause = RemovalCause.EXPIRED;
        } else {
          return n;
        }

        ctx.cause = actualCause;
        notifyEviction(ctx.nodeKey, ctx.oldValue, actualCause);

        try {
          ctx.newValue = mappingFunction.apply(key);
          if (ctx.newValue == null) {
            // The eviction still stands; retire the node and drop the mapping.
            discardRefresh(k);
            ctx.removed = n;
            n.retire();
            return null;
          }
          ctx.now = expirationTicker().read();
          ctx.newWeight = weigher.weigh(key, ctx.newValue);
          long varTime = expireAfterCreate(key, ctx.newValue, expiry(), ctx.now);

          n.setValue(ctx.newValue, valueReferenceQueue());
          n.setWeight(ctx.newWeight);

          long expirationTime = isComputingAsync(ctx.newValue)
              ? (ctx.now + ASYNC_EXPIRY) : ctx.now;
          setAccessTime(n, expirationTime);
          setWriteTime(n, expirationTime);
          setVariableTime(n, varTime);
          discardRefresh(k);
          return n;
        } catch (Throwable e) {
          // Commit the eviction and remember the failure; the exception is
          // rethrown after the map computation completes so that the removal
          // bookkeeping still runs.
          ctx.newValue = null;
          discardRefresh(k);
          ctx.exception = e;
          ctx.removed = n;
          n.retire();
          return null;
        }
      }
    });

    // Post-compute bookkeeping runs outside the map's bin lock.
    if (ctx.cause != null) {
      statsCounter().recordEviction(ctx.oldWeight, ctx.cause);
      notifyRemoval(ctx.nodeKey, ctx.oldValue, ctx.cause);
    }
    if (node == null) {
      if (ctx.removed != null) {
        afterWrite(new RemovalTask(ctx.removed));
      }
      if (ctx.exception != null) {
        throwException(ctx.exception);
      }
      return null;
    }
    if ((ctx.oldValue != null) && (ctx.newValue == null)) {
      // A live entry was found, so this was effectively a read.
      if (!isComputingAsync(ctx.oldValue)) {
        tryExpireAfterRead(node, key, ctx.oldValue, expiry(), ctx.now);
        setAccessTime(node, ctx.now);
      }

      afterRead(node, ctx.now, /* recordHit= */ recordStats);
      return ctx.oldValue;
    }
    if ((ctx.oldValue == null) && (ctx.cause == null)) {
      afterWrite(new AddTask(node, ctx.newWeight));
    } else {
      int weightedDifference = (ctx.newWeight - ctx.oldWeight);
      afterWrite(new UpdateTask(node, weightedDifference));
    }

    return ctx.newValue;
  }
2776

2777
  @Override
  public @Nullable V computeIfPresent(K key,
      BiFunction<? super K, ? super V, ? extends V> remappingFunction) {
    requireNonNull(key);
    requireNonNull(remappingFunction);

    // An optimistic fast path to avoid unnecessary locking
    Object lookupKey = nodeFactory.newLookupKey(key);
    @Nullable Node<K, V> node = data.get(lookupKey);
    long now;
    if (node == null) {
      return null;
    } else if ((node.getValue() == null) || hasExpired(node, (now = expirationTicker().read()))) {
      // A dead entry was observed; schedule maintenance to evict it.
      scheduleDrainBuffers();
      return null;
    }

    BiFunction<? super K, ? super V, ? extends V> statsAwareRemappingFunction =
        statsAware(remappingFunction, /* recordLoad= */ true, /* recordLoadFailure= */ true);
    // computeIfAbsent=false: a lookup-only key is used and absent mappings
    // are not created by the remap.
    return remap(key, lookupKey, statsAwareRemappingFunction,
        expiry(), new ComputeContext<>(now), /* computeIfAbsent= */ false);
  }
2799

2800
  @Override
  public @Nullable V compute(K key,
      BiFunction<? super K, ? super V, ? extends @Nullable V> remappingFunction,
      @Nullable Expiry<? super K, ? super V> expiry, boolean recordLoad,
      boolean recordLoadFailure) {
    requireNonNull(key);
    requireNonNull(remappingFunction);

    // A reference key is created so the remap can insert when absent.
    Object keyRef = nodeFactory.newReferenceKey(key, keyReferenceQueue());
    BiFunction<? super K, ? super V, ? extends V> statsAwareRemappingFunction =
        statsAware(remappingFunction, recordLoad, recordLoadFailure);
    return remap(key, keyRef, statsAwareRemappingFunction,
        expiry, new ComputeContext<>(expirationTicker().read()), /* computeIfAbsent= */ true);
  }
2814

2815
  @Override
  public @Nullable V merge(K key, V value,
      BiFunction<? super V, ? super V, ? extends V> remappingFunction) {
    requireNonNull(key);
    requireNonNull(value);
    requireNonNull(remappingFunction);

    Object keyRef = nodeFactory.newReferenceKey(key, keyReferenceQueue());
    // Per Map.merge: absent mapping takes the given value directly; otherwise
    // the (stats-aware) remapping function combines old and new values.
    BiFunction<? super K, ? super @Nullable V, ? extends @Nullable V> mergeFunction =
        (k, oldValue) -> (oldValue == null)
          ? value
          : statsAware(remappingFunction).apply(oldValue, value);
    return remap(key, keyRef, mergeFunction, expiry(),
        new ComputeContext<>(expirationTicker().read()), /* computeIfAbsent= */ true);
  }
2830

2831
  /**
   * Attempts to compute a mapping for the specified key and its current mapped value (or
   * {@code null} if there is no current mapping).
   * <p>
   * An entry that has expired or been reference collected is evicted and the computation continues
   * as if the entry had not been present. This method does not pre-screen and does not wrap the
   * remappingFunction to be statistics aware.
   *
   * @param key key with which the specified value is to be associated
   * @param keyRef the key to associate with or a lookup only key if not {@code computeIfAbsent}
   * @param remappingFunction the function to compute a value
   * @param expiry the calculator for the expiration time
   * @param ctx the mutable context for passing state to and from the {@link ConcurrentHashMap}
   *        compute lambda, with {@link ComputeContext#now} set to the current ticker time
   * @param computeIfAbsent if an absent entry can be computed
   * @return the new value associated with the specified key, or null if none
   */
  @SuppressWarnings({"StatementWithEmptyBody", "SynchronizationOnLocalVariableOrMethodParameter"})
  @Nullable V remap(K key, Object keyRef,
      BiFunction<? super K, ? super V, ? extends @Nullable V> remappingFunction,
      @Nullable Expiry<? super K, ? super V> expiry,
      ComputeContext<K, V> ctx, boolean computeIfAbsent) {
    Node<K, V> node = data.compute(keyRef, (kr, n) -> {
      if (n == null) {
        // Absent mapping: only computeIfAbsent callers may create an entry.
        if (!computeIfAbsent) {
          return null;
        }
        ctx.newValue = remappingFunction.apply(key, null);
        if (ctx.newValue == null) {
          return null;
        }
        ctx.now = expirationTicker().read();
        ctx.newWeight = weigher.weigh(key, ctx.newValue);
        long varTime = expireAfterCreate(key, ctx.newValue, expiry, ctx.now);
        var created = nodeFactory.newNode(keyRef, ctx.newValue,
            valueReferenceQueue(), ctx.newWeight, ctx.now);

        long expirationTime = isComputingAsync(ctx.newValue)
            ? ctx.now + ASYNC_EXPIRY
            : ctx.now;
        setAccessTime(created, expirationTime);
        setWriteTime(created, expirationTime);
        setVariableTime(created, varTime);
        discardRefresh(kr);
        return created;
      }

      synchronized (n) {
        requireIsAlive(key, n);
        ctx.nodeKey = n.getKey();
        ctx.oldValue = n.getValue();
        ctx.oldWeight = n.getWeight();
        // Classify a dead entry so the function is invoked as if absent.
        if ((ctx.nodeKey == null) || (ctx.oldValue == null)) {
          ctx.cause = RemovalCause.COLLECTED;
        } else if (hasExpired(n, expirationTicker().read())) {
          ctx.cause = RemovalCause.EXPIRED;
        }
        if (ctx.cause != null) {
          notifyEviction(ctx.nodeKey, ctx.oldValue, ctx.cause);
          if (!computeIfAbsent) {
            // Evict and stop: this caller may not recreate the mapping.
            discardRefresh(kr);
            ctx.removed = n;
            n.retire();
            return null;
          }
        }

        boolean wasEvicted = (ctx.cause != null);
        try {
          // A dead entry presents null to the function, as if absent.
          ctx.newValue = remappingFunction.apply(ctx.nodeKey,
              (ctx.cause == null) ? ctx.oldValue : null);

          if (ctx.newValue == null) {
            if (ctx.cause == null) {
              ctx.cause = RemovalCause.EXPLICIT;
            }
            discardRefresh(kr);
            ctx.removed = n;
            n.retire();
            return null;
          }

          long varTime;
          ctx.newWeight = weigher.weigh(key, ctx.newValue);
          ctx.now = expirationTicker().read();
          if (ctx.cause == null) {
            // Live entry: this is an update (REPLACED only if the instance
            // actually changed).
            if (ctx.newValue != ctx.oldValue) {
              ctx.cause = RemovalCause.REPLACED;
            }
            varTime = expireAfterUpdate(n, key, ctx.newValue, expiry, ctx.now);
          } else {
            // Evicted entry being recreated in place: treat as a creation.
            varTime = expireAfterCreate(key, ctx.newValue, expiry, ctx.now);
          }

          // Skip setValue when the same instance is returned (no reference
          // churn); the weight may still have changed.
          if (ctx.newValue != ctx.oldValue) {
            n.setValue(ctx.newValue, valueReferenceQueue());
          }
          n.setWeight(ctx.newWeight);

          long expirationTime = isComputingAsync(ctx.newValue)
              ? ctx.now + ASYNC_EXPIRY
              : ctx.now;
          ctx.exceedsTolerance = exceedsWriteTimeTolerance(n, varTime, expirationTime);
          if (((ctx.cause != null) && ctx.cause.wasEvicted()) || ctx.exceedsTolerance) {
            setWriteTime(n, expirationTime);
          }
          setAccessTime(n, expirationTime);
          setVariableTime(n, varTime);
          discardRefresh(kr);
          return n;
        } catch (Throwable e) {
          // For a live entry the user's exception propagates unchanged; for
          // an already-evicted entry the eviction is committed first and the
          // exception is rethrown after the map computation completes.
          if (!wasEvicted) {
            throw e;
          }
          ctx.newValue = null;
          discardRefresh(kr);
          ctx.exception = e;
          ctx.removed = n;
          n.retire();
          return null;
        }
      }
    });

    // Deliver notifications and record stats outside the map's bin lock.
    if (ctx.cause != null) {
      if (ctx.cause == RemovalCause.REPLACED) {
        requireNonNull(ctx.newValue);
        notifyOnReplace(key, ctx.oldValue, ctx.newValue);
      } else {
        if (ctx.cause.wasEvicted()) {
          statsCounter().recordEviction(ctx.oldWeight, ctx.cause);
        }
        notifyRemoval(ctx.nodeKey, ctx.oldValue, ctx.cause);
      }
    }

    if (ctx.removed != null) {
      afterWrite(new RemovalTask(ctx.removed));
    } else if (node == null) {
      // absent and not computable
    } else if ((ctx.oldValue == null) && (ctx.cause == null)) {
      afterWrite(new AddTask(node, ctx.newWeight));
    } else {
      int weightedDifference = ctx.newWeight - ctx.oldWeight;
      if (ctx.exceedsTolerance || (weightedDifference != 0)) {
        afterWrite(new UpdateTask(node, weightedDifference));
      } else {
        afterRead(node, ctx.now, /* recordHit= */ false);
        if ((ctx.cause != null) && ctx.cause.wasEvicted()) {
          // The eviction was absorbed in place; still trigger maintenance.
          scheduleDrainBuffers();
        }
      }
    }

    if (ctx.exception != null) {
      throwException(ctx.exception);
    }
    return ctx.newValue;
  }
2990

2991
  @Override
  public void forEach(BiConsumer<? super K, ? super V> action) {
    requireNonNull(action);

    // The iterator prefetches the key/value pair so that the action observes
    // the same mapping that hasNext() validated.
    for (var iterator = new EntryIterator<>(this); iterator.hasNext();) {
      action.accept(iterator.key, iterator.value);
      iterator.advance();
    }
  }
3000

3001
  @Override
3002
  public Set<K> keySet() {
3003
    Set<K> ks = keySet;
1✔
3004
    return (ks == null) ? (keySet = new KeySetView<>(this)) : ks;
1✔
3005
  }
3006

3007
  @Override
3008
  public Collection<V> values() {
3009
    Collection<V> vs = values;
1✔
3010
    return (vs == null) ? (values = new ValuesView<>(this)) : vs;
1✔
3011
  }
3012

3013
  @Override
3014
  public Set<Entry<K, V>> entrySet() {
3015
    Set<Entry<K, V>> es = entrySet;
1✔
3016
    return (es == null) ? (entrySet = new EntrySetView<>(this)) : es;
1✔
3017
  }
3018

3019
  /**
   * Object equality requires reflexive, symmetric, transitive, and consistency properties. Of
   * these, symmetry and consistency require further clarification for how they are upheld.
   * <p>
   * The <i>consistency</i> property between invocations requires that the results are the same if
   * there are no modifications to the information used. Therefore, usages should expect that this
   * operation may return misleading results if either the maps or the data held by them is modified
   * during the execution of this method. This characteristic allows for comparing the map sizes and
   * assuming stable mappings, as done by {@link java.util.AbstractMap}-based maps.
   * <p>
   * The <i>symmetric</i> property requires that the result is the same for all implementations of
   * {@link Map#equals(Object)}. That contract is defined in terms of the stable mappings provided
   * by {@link #entrySet()}, meaning that the {@link #size()} optimization forces that the count is
   * consistent with the mappings when used for an equality check.
   * <p>
   * The cache's {@link #size()} method may include entries that have expired or have been reference
   * collected, but have not yet been removed from the backing map. An iteration over the map may
   * trigger the removal of these dead entries when skipped over during traversal. To ensure
   * consistency and symmetry, usages should call {@link #cleanUp()} before this method while no
   * other concurrent operations are being performed on this cache. This is not done implicitly by
   * {@link #size()} as many usages assume it to be instantaneous and lock-free.
   */
  @Override
  public boolean equals(@Nullable Object o) {
    if (o == this) {
      return true;
    } else if (!(o instanceof Map)) {
      return false;
    }

    var map = (Map<?, ?>) o;
    if (size() != map.size()) {
      return false;
    }

    long now = expirationTicker().read();
    for (var node : data.values()) {
      K key = node.getKey();
      V value = node.getValue();
      // A dead entry makes the size() comparison above unreliable, so report
      // inequality and request maintenance rather than give a wrong answer.
      if ((key == null) || (value == null)
          || !node.isAlive() || hasExpired(node, now)) {
        scheduleDrainBuffers();
        return false;
      } else {
        var val = map.get(key);
        // Identity check first as a cheap fast path before equals().
        if ((val == null) || ((val != value) && !val.equals(value))) {
          return false;
        }
      }
    }
    return true;
  }
3071

3072
  @Override
3073
  public int hashCode() {
3074
    @Var int hash = 0;
1✔
3075
    long now = expirationTicker().read();
1✔
3076
    for (var node : data.values()) {
1✔
3077
      K key = node.getKey();
1✔
3078
      V value = node.getValue();
1✔
3079
      if ((key == null) || (value == null)
1✔
3080
          || !node.isAlive() || hasExpired(node, now)) {
1✔
3081
        scheduleDrainBuffers();
1✔
3082
      } else {
3083
        hash += key.hashCode() ^ value.hashCode();
1✔
3084
      }
3085
    }
1✔
3086
    return hash;
1✔
3087
  }
3088

3089
  @Override
3090
  public String toString() {
3091
    var result = new StringBuilder().append('{');
1✔
3092
    long now = expirationTicker().read();
1✔
3093
    for (var node : data.values()) {
1✔
3094
      K key = node.getKey();
1✔
3095
      V value = node.getValue();
1✔
3096
      if ((key == null) || (value == null)
1✔
3097
          || !node.isAlive() || hasExpired(node, now)) {
1✔
3098
        scheduleDrainBuffers();
1✔
3099
      } else {
3100
        if (result.length() != 1) {
1✔
3101
          result.append(',').append(' ');
1✔
3102
        }
3103
        result.append((key == this) ? "(this Map)" : key);
1✔
3104
        result.append('=');
1✔
3105
        result.append((value == this) ? "(this Map)" : value);
1✔
3106
      }
3107
    }
1✔
3108
    return result.append('}').toString();
1✔
3109
  }
3110

3111
  /**
   * Returns the computed result from the ordered traversal of the cache entries.
   *
   * @param hottest the coldest or hottest iteration order
   * @param transformer a function that unwraps the value
   * @param mappingFunction the mapping function to compute a value
   * @return the computed value
   */
  @SuppressWarnings("GuardedByChecker")
  <T> T evictionOrder(boolean hottest, Function<@Nullable V, @Nullable V> transformer,
      Function<Stream<CacheEntry<K, V>>, T> mappingFunction) {
    // Orders nodes by their estimated usage frequency; a collected key reference counts as zero
    Comparator<Node<K, V>> comparator = Comparator.comparingInt(node -> {
      var keyRef = node.getKeyReferenceOrNull();
      return (keyRef == null) ? 0 : frequencySketch().frequency(keyRef);
    });
    Iterable<Node<K, V>> iterable;
    if (hottest) {
      // Hottest first: emit the protected deque in descending order, then merge the probation
      // and window deques (both descending) by choosing the higher-frequency head at each step
      iterable = () -> {
        var secondary = PeekingIterator.comparing(
            accessOrderProbationDeque().descendingIterator(),
            accessOrderWindowDeque().descendingIterator(), comparator);
        return PeekingIterator.concat(
            accessOrderProtectedDeque().descendingIterator(), secondary);
      };
    } else {
      // Coldest first: merge the window and probation deques in ascending order by the reversed
      // comparator (lower frequency wins), then emit the protected deque in ascending order
      iterable = () -> {
        var primary = PeekingIterator.comparing(
            accessOrderWindowDeque().iterator(), accessOrderProbationDeque().iterator(),
            comparator.reversed());
        return PeekingIterator.concat(primary, accessOrderProtectedDeque().iterator());
      };
    }
    // The iterable is materialized lazily under the eviction lock by snapshot()
    return snapshot(iterable, transformer, mappingFunction);
  }
3145

3146
  /**
   * Returns the computed result from the ordered traversal of the cache entries.
   *
   * @param oldest the youngest or oldest iteration order
   * @param transformer a function that unwraps the value
   * @param mappingFunction the mapping function to compute a value
   * @return the computed value
   */
  @SuppressWarnings("GuardedByChecker")
  <T> T expireAfterAccessOrder(boolean oldest, Function<@Nullable V, @Nullable V> transformer,
      Function<Stream<CacheEntry<K, V>>, T> mappingFunction) {
    Iterable<Node<K, V>> iterable;
    if (evicts()) {
      // When size eviction is enabled, entries are spread across the window, probation, and
      // protected deques, so a three-way merge by access time is required
      iterable = () -> {
        @Var Comparator<Node<K, V>> comparator = Comparator.comparingLong(Node::getAccessTime);
        PeekingIterator<Node<K, V>> first;
        PeekingIterator<Node<K, V>> second;
        PeekingIterator<Node<K, V>> third;
        if (oldest) {
          first = accessOrderWindowDeque().iterator();
          second = accessOrderProbationDeque().iterator();
          third = accessOrderProtectedDeque().iterator();
        } else {
          // Youngest first: walk each deque backwards and prefer the larger access time
          comparator = comparator.reversed();
          first = accessOrderWindowDeque().descendingIterator();
          second = accessOrderProbationDeque().descendingIterator();
          third = accessOrderProtectedDeque().descendingIterator();
        }
        return PeekingIterator.comparing(
            PeekingIterator.comparing(first, second, comparator), third, comparator);
      };
    } else {
      // Without size eviction all entries reside in the window deque, already in access order
      iterable = oldest
          ? accessOrderWindowDeque()
          : accessOrderWindowDeque()::descendingIterator;
    }
    // The iterable is materialized lazily under the eviction lock by snapshot()
    return snapshot(iterable, transformer, mappingFunction);
  }
3184

3185
  /**
3186
   * Returns the computed result from the ordered traversal of the cache entries.
3187
   *
3188
   * @param iterable the supplier of the entries in the cache
3189
   * @param transformer a function that unwraps the value
3190
   * @param mappingFunction the mapping function to compute a value
3191
   * @return the computed value
3192
   */
3193
  <T> T snapshot(Iterable<Node<K, V>> iterable, Function<@Nullable V, @Nullable V> transformer,
3194
      Function<Stream<CacheEntry<K, V>>, T> mappingFunction) {
3195
    requireNonNull(mappingFunction);
1✔
3196
    requireNonNull(transformer);
1✔
3197
    requireNonNull(iterable);
1✔
3198

3199
    evictionLock.lock();
1✔
3200
    try {
3201
      maintenance(/* ignored */ null);
1✔
3202

3203
      // Obtain the iterator as late as possible for modification count checking
3204
      try (var stream = StreamSupport.stream(Spliterators.spliteratorUnknownSize(
1✔
3205
           iterable.iterator(), DISTINCT | ORDERED | NONNULL | IMMUTABLE), /* parallel= */ false)) {
1✔
3206
        return mappingFunction.apply(stream
1✔
3207
            .map(node -> nodeToCacheEntry(node, transformer))
1✔
3208
            .filter(Objects::nonNull));
1✔
3209
      }
3210
    } finally {
3211
      evictionLock.unlock();
1✔
3212
      rescheduleCleanUpIfIncomplete();
1✔
3213
    }
3214
  }
3215

3216
  /**
   * Returns an entry for the given node if it can be used externally, else null.
   * <p>
   * The entry snapshots the key, value, weight, and the absolute times at which the entry
   * expires and becomes refreshable, all relative to a single ticker read.
   */
  @Nullable CacheEntry<K, V> nodeToCacheEntry(
      Node<K, V> node, Function<@Nullable V, @Nullable V> transformer) {
    V value = transformer.apply(node.getValue());
    K key = node.getKey();
    long now;
    // Dead, collected, or expired entries are not visible externally
    if ((key == null) || (value == null) || !node.isAlive()
        || hasExpired(node, (now = expirationTicker().read()))) {
      return null;
    }

    // Compute the remaining duration as the minimum across the enabled expiration policies
    @Var long expiresAfter = Long.MAX_VALUE;
    if (expiresAfterAccess()) {
      expiresAfter = Math.min(expiresAfter,
          expiresAfterAccessNanos() - (now - node.getAccessTime()));
    }
    if (expiresAfterWrite()) {
      // The low bit of the write time is masked off on both operands before subtracting;
      // presumably it is reserved as a status flag rather than time data — confirm against
      // the write-time encoding used elsewhere in this class
      expiresAfter = Math.min(expiresAfter,
          expiresAfterWriteNanos() - ((now & ~1L) - (node.getWriteTime() & ~1L)));
    }
    if (expiresVariable()) {
      // Variable expiration overrides the fixed policies rather than taking the minimum
      expiresAfter = node.getVariableTime() - now;
    }

    // When refresh is disabled, (now + Long.MAX_VALUE) intentionally relies on wrap-around to
    // produce a time that is never considered reached
    long refreshableAt = refreshAfterWrite()
        ? (node.getWriteTime() & ~1L) + refreshAfterWriteNanos()
        : now + Long.MAX_VALUE;
    int weight = node.getPolicyWeight();
    return SnapshotEntry.forEntry(key, value, now, weight, now + expiresAfter, refreshableAt);
  }
3246

3247
  /** Mutable context for passing state between a lambda and the caller. */
  static final class EvictContext<V> {
    // The removal cause recorded by the lambda, if the entry was discarded
    @Nullable RemovalCause cause;
    // The discarded value, if the entry was discarded
    @Nullable V value;
    // NOTE(review): appears to signal that the eviction should be abandoned and the entry
    // kept alive — confirm at the evictEntry() call site
    boolean resurrect;
    // Whether the entry was removed from the backing data map
    boolean removed;
  }
3254

3255
  /** Mutable context for passing state between a lambda and the caller. */
  static final class RemoveContext<K, V> {
    // The key of the removed mapping, captured inside the compute lambda
    @Nullable K oldKey;
    // The value of the removed mapping, captured inside the compute lambda
    @Nullable V oldValue;
    // The node that was removed, if any
    @Nullable Node<K, V> node;
    // Why the mapping was removed (e.g. explicit vs. already expired)
    @Nullable RemovalCause cause;
  }
3262

3263
  /** Mutable context for passing state between a lambda and the caller. */
  static final class ReplaceContext<K, V> {
    // The key instance held by the node (may differ in identity from the caller's key)
    @Nullable K nodeKey;
    // The value that was replaced
    @Nullable V oldValue;

    // The ticker timestamp at which the replacement was performed
    long now;
    // The weight of the replaced value
    int oldWeight;
    // NOTE(review): presumably whether the expiration change exceeds the update tolerance,
    // forcing a write-order update — confirm at the replace() call site
    boolean exceedsTolerance;
  }
3272

3273
  /** Mutable context for passing state between a lambda and the caller. */
  static final class ComputeContext<K, V> {
    // The key instance held by the node (may differ in identity from the caller's key)
    @Nullable K nodeKey;
    // The mapping's previous value, if one was present
    @Nullable V oldValue;
    // The mapping's computed value, if one was produced
    @Nullable V newValue;
    // The node that was removed by the computation, if any
    @Nullable Node<K, V> removed;
    // Why a previous mapping was discarded (e.g. replaced, explicit, expired)
    @Nullable RemovalCause cause;
    // An exception thrown by the user's function, captured for rethrow after commit
    @Nullable Throwable exception;

    // The ticker timestamp at which the computation was performed
    long now;
    // The weight of the previous value
    int oldWeight;
    // The weight of the computed value
    int newWeight;
    // NOTE(review): presumably whether the expiration change exceeds the update tolerance,
    // forcing a write-order update — confirm at the compute() call sites
    boolean exceedsTolerance;

    ComputeContext(long now) {
      this.now = now;
    }
  }
3291

3292
  /** A function that produces an unmodifiable map up to the limit in stream order. */
3293
  static final class SizeLimiter<K, V> implements Function<Stream<CacheEntry<K, V>>, Map<K, V>> {
3294
    private final int expectedSize;
3295
    private final long limit;
3296

3297
    SizeLimiter(int expectedSize, long limit) {
1✔
3298
      requireArgument(limit >= 0);
1✔
3299
      this.expectedSize = expectedSize;
1✔
3300
      this.limit = limit;
1✔
3301
    }
1✔
3302

3303
    @Override
3304
    public Map<K, V> apply(Stream<CacheEntry<K, V>> stream) {
3305
      var map = new LinkedHashMap<K, V>(calculateHashMapCapacity(expectedSize));
1✔
3306
      stream.limit(limit).forEach(entry -> map.put(entry.getKey(), entry.getValue()));
1✔
3307
      return Collections.unmodifiableMap(map);
1✔
3308
    }
3309
  }
3310

3311
  /** A function that produces an unmodifiable map up to the weighted limit in stream order. */
  static final class WeightLimiter<K, V> implements Function<Stream<CacheEntry<K, V>>, Map<K, V>> {
    private final long weightLimit;

    // Running total of the accepted entries' weights; mutated by the takeWhile predicate, so
    // this function relies on the caller supplying a sequential, ordered stream (as snapshot()
    // does under the eviction lock)
    private long weightedSize;

    WeightLimiter(long weightLimit) {
      requireArgument(weightLimit >= 0);
      this.weightLimit = weightLimit;
    }

    @Override
    public Map<K, V> apply(Stream<CacheEntry<K, V>> stream) {
      var map = new LinkedHashMap<K, V>();
      // Stop at the first entry that would push the running total past the limit; that entry
      // is excluded. addExact guards against silent overflow of the accumulated weight.
      stream.takeWhile(entry -> {
        weightedSize = Math.addExact(weightedSize, entry.weight());
        return (weightedSize <= weightLimit);
      }).forEach(entry -> map.put(entry.getKey(), entry.getValue()));
      return Collections.unmodifiableMap(map);
    }
  }
3332

3333
  /** An adapter to safely externalize the keys. */
3334
  static final class KeySetView<K, V> extends AbstractSet<K> {
3335
    final BoundedLocalCache<K, V> cache;
3336

3337
    KeySetView(BoundedLocalCache<K, V> cache) {
1✔
3338
      this.cache = requireNonNull(cache);
1✔
3339
    }
1✔
3340

3341
    @Override
3342
    public int size() {
3343
      return cache.size();
1✔
3344
    }
3345

3346
    @Override
3347
    public void clear() {
3348
      cache.clear();
1✔
3349
    }
1✔
3350

3351
    @Override
3352
    @SuppressWarnings("SuspiciousMethodCalls")
3353
    public boolean contains(Object o) {
3354
      return cache.containsKey(o);
1✔
3355
    }
3356

3357
    @Override
3358
    public boolean removeAll(Collection<?> collection) {
3359
      requireNonNull(collection);
1✔
3360
      @Var boolean modified = false;
1✔
3361
      if ((collection instanceof Set<?>) && (collection.size() > size())) {
1✔
3362
        for (K key : this) {
1✔
3363
          if (collection.contains(key)) {
1✔
3364
            modified |= remove(key);
1✔
3365
          }
3366
        }
1✔
3367
      } else {
3368
        for (var item : collection) {
1✔
3369
          modified |= (item != null) && remove(item);
1✔
3370
        }
1✔
3371
      }
3372
      return modified;
1✔
3373
    }
3374

3375
    @Override
3376
    public boolean remove(Object o) {
3377
      return (cache.remove(o) != null);
1✔
3378
    }
3379

3380
    @Override
3381
    public boolean removeIf(Predicate<? super K> filter) {
3382
      requireNonNull(filter);
1✔
3383
      @Var boolean modified = false;
1✔
3384
      for (K key : this) {
1✔
3385
        if (filter.test(key) && remove(key)) {
1✔
3386
          modified = true;
1✔
3387
        }
3388
      }
1✔
3389
      return modified;
1✔
3390
    }
3391

3392
    @Override
3393
    public boolean retainAll(Collection<?> collection) {
3394
      requireNonNull(collection);
1✔
3395
      @Var boolean modified = false;
1✔
3396
      for (K key : this) {
1✔
3397
        if (!collection.contains(key) && remove(key)) {
1✔
3398
          modified = true;
1✔
3399
        }
3400
      }
1✔
3401
      return modified;
1✔
3402
    }
3403

3404
    @Override
3405
    public Iterator<K> iterator() {
3406
      return new KeyIterator<>(cache);
1✔
3407
    }
3408

3409
    @Override
3410
    public Spliterator<K> spliterator() {
3411
      return new KeySpliterator<>(cache);
1✔
3412
    }
3413
  }
3414

3415
  /** An adapter to safely externalize the key iterator. */
  static final class KeyIterator<K, V> implements Iterator<K> {
    // Delegates to the entry iterator, which filters out dead and expired entries
    final EntryIterator<K, V> iterator;

    KeyIterator(BoundedLocalCache<K, V> cache) {
      this.iterator = new EntryIterator<>(cache);
    }

    @Override
    public boolean hasNext() {
      return iterator.hasNext();
    }

    @Override
    public K next() {
      return iterator.nextKey();
    }

    @Override
    public void remove() {
      iterator.remove();
    }
  }
3438

3439
  /** An adapter to safely externalize the key spliterator. */
3440
  static final class KeySpliterator<K, V> implements Spliterator<K> {
3441
    final Spliterator<Node<K, V>> spliterator;
3442
    final BoundedLocalCache<K, V> cache;
3443

3444
    KeySpliterator(BoundedLocalCache<K, V> cache) {
3445
      this(cache, cache.data.values().spliterator());
1✔
3446
    }
1✔
3447

3448
    KeySpliterator(BoundedLocalCache<K, V> cache, Spliterator<Node<K, V>> spliterator) {
1✔
3449
      this.spliterator = requireNonNull(spliterator);
1✔
3450
      this.cache = requireNonNull(cache);
1✔
3451
    }
1✔
3452

3453
    @Override
3454
    public void forEachRemaining(Consumer<? super K> action) {
3455
      requireNonNull(action);
1✔
3456
      Consumer<Node<K, V>> consumer = node -> {
1✔
3457
        K key = node.getKey();
1✔
3458
        V value = node.getValue();
1✔
3459
        long now = cache.expirationTicker().read();
1✔
3460
        if ((key != null) && (value != null) && node.isAlive() && !cache.hasExpired(node, now)) {
1✔
3461
          action.accept(key);
1✔
3462
        }
3463
      };
1✔
3464
      spliterator.forEachRemaining(consumer);
1✔
3465
    }
1✔
3466

3467
    @Override
3468
    public boolean tryAdvance(Consumer<? super K> action) {
3469
      requireNonNull(action);
1✔
3470
      boolean[] advanced = { false };
1✔
3471
      Consumer<Node<K, V>> consumer = node -> {
1✔
3472
        K key = node.getKey();
1✔
3473
        V value = node.getValue();
1✔
3474
        long now = cache.expirationTicker().read();
1✔
3475
        if ((key != null) && (value != null) && node.isAlive() && !cache.hasExpired(node, now)) {
1✔
3476
          action.accept(key);
1✔
3477
          advanced[0] = true;
1✔
3478
        }
3479
      };
1✔
3480
      while (spliterator.tryAdvance(consumer)) {
1✔
3481
        if (advanced[0]) {
1✔
3482
          return true;
1✔
3483
        }
3484
      }
3485
      return false;
1✔
3486
    }
3487

3488
    @Override
3489
    public @Nullable Spliterator<K> trySplit() {
3490
      Spliterator<Node<K, V>> split = spliterator.trySplit();
1✔
3491
      return (split == null) ? null : new KeySpliterator<>(cache, split);
1✔
3492
    }
3493

3494
    @Override
3495
    public long estimateSize() {
3496
      return spliterator.estimateSize();
1✔
3497
    }
3498

3499
    @Override
3500
    public int characteristics() {
3501
      return DISTINCT | CONCURRENT | NONNULL;
1✔
3502
    }
3503
  }
3504

3505
  /** An adapter to safely externalize the values. */
3506
  static final class ValuesView<K, V> extends AbstractCollection<V> {
3507
    final BoundedLocalCache<K, V> cache;
3508

3509
    ValuesView(BoundedLocalCache<K, V> cache) {
1✔
3510
      this.cache = requireNonNull(cache);
1✔
3511
    }
1✔
3512

3513
    @Override
3514
    public int size() {
3515
      return cache.size();
1✔
3516
    }
3517

3518
    @Override
3519
    public void clear() {
3520
      cache.clear();
1✔
3521
    }
1✔
3522

3523
    @Override
3524
    @SuppressWarnings("SuspiciousMethodCalls")
3525
    public boolean contains(Object o) {
3526
      return cache.containsValue(o);
1✔
3527
    }
3528

3529
    @Override
3530
    public boolean removeAll(Collection<?> collection) {
3531
      requireNonNull(collection);
1✔
3532
      @Var boolean modified = false;
1✔
3533
      for (var iterator = new EntryIterator<>(cache); iterator.hasNext();) {
1✔
3534
        var key = requireNonNull(iterator.key);
1✔
3535
        var value = requireNonNull(iterator.value);
1✔
3536
        if (collection.contains(value) && cache.remove(key, value)) {
1✔
3537
          modified = true;
1✔
3538
        }
3539
        iterator.advance();
1✔
3540
      }
1✔
3541
      return modified;
1✔
3542
    }
3543

3544
    @Override
3545
    public boolean remove(@Nullable Object o) {
3546
      if (o == null) {
1✔
3547
        return false;
1✔
3548
      }
3549
      for (var iterator = new EntryIterator<>(cache); iterator.hasNext();) {
1✔
3550
        var key = requireNonNull(iterator.key);
1✔
3551
        var value = requireNonNull(iterator.value);
1✔
3552
        if (o.equals(value) && cache.remove(key, value)) {
1✔
3553
          return true;
1✔
3554
        }
3555
        iterator.advance();
1✔
3556
      }
1✔
3557
      return false;
1✔
3558
    }
3559

3560
    @Override
3561
    public boolean removeIf(Predicate<? super V> filter) {
3562
      requireNonNull(filter);
1✔
3563
      @Var boolean modified = false;
1✔
3564
      for (var iterator = new EntryIterator<>(cache); iterator.hasNext();) {
1✔
3565
        var value = requireNonNull(iterator.value);
1✔
3566
        if (filter.test(value)) {
1✔
3567
          var key = requireNonNull(iterator.key);
1✔
3568
          modified |= cache.remove(key, value);
1✔
3569
        }
3570
        iterator.advance();
1✔
3571
      }
1✔
3572
      return modified;
1✔
3573
    }
3574

3575
    @Override
3576
    public boolean retainAll(Collection<?> collection) {
3577
      requireNonNull(collection);
1✔
3578
      @Var boolean modified = false;
1✔
3579
      for (var iterator = new EntryIterator<>(cache); iterator.hasNext();) {
1✔
3580
        var key = requireNonNull(iterator.key);
1✔
3581
        var value = requireNonNull(iterator.value);
1✔
3582
        if (!collection.contains(value) && cache.remove(key, value)) {
1✔
3583
          modified = true;
1✔
3584
        }
3585
        iterator.advance();
1✔
3586
      }
1✔
3587
      return modified;
1✔
3588
    }
3589

3590
    @Override
3591
    public Iterator<V> iterator() {
3592
      return new ValueIterator<>(cache);
1✔
3593
    }
3594

3595
    @Override
3596
    public Spliterator<V> spliterator() {
3597
      return new ValueSpliterator<>(cache);
1✔
3598
    }
3599
  }
3600

3601
  /** An adapter to safely externalize the value iterator. */
  static final class ValueIterator<K, V> implements Iterator<V> {
    // Delegates to the entry iterator, which filters out dead and expired entries
    final EntryIterator<K, V> iterator;

    ValueIterator(BoundedLocalCache<K, V> cache) {
      this.iterator = new EntryIterator<>(cache);
    }

    @Override
    public boolean hasNext() {
      return iterator.hasNext();
    }

    @Override
    public V next() {
      return iterator.nextValue();
    }

    @Override
    public void remove() {
      iterator.remove();
    }
  }
3624

3625
  /** An adapter to safely externalize the value spliterator. */
3626
  static final class ValueSpliterator<K, V> implements Spliterator<V> {
3627
    final Spliterator<Node<K, V>> spliterator;
3628
    final BoundedLocalCache<K, V> cache;
3629

3630
    ValueSpliterator(BoundedLocalCache<K, V> cache) {
3631
      this(cache, cache.data.values().spliterator());
1✔
3632
    }
1✔
3633

3634
    ValueSpliterator(BoundedLocalCache<K, V> cache, Spliterator<Node<K, V>> spliterator) {
1✔
3635
      this.spliterator = requireNonNull(spliterator);
1✔
3636
      this.cache = requireNonNull(cache);
1✔
3637
    }
1✔
3638

3639
    @Override
3640
    public void forEachRemaining(Consumer<? super V> action) {
3641
      requireNonNull(action);
1✔
3642
      Consumer<Node<K, V>> consumer = node -> {
1✔
3643
        K key = node.getKey();
1✔
3644
        V value = node.getValue();
1✔
3645
        long now = cache.expirationTicker().read();
1✔
3646
        if ((key != null) && (value != null) && node.isAlive() && !cache.hasExpired(node, now)) {
1✔
3647
          action.accept(value);
1✔
3648
        }
3649
      };
1✔
3650
      spliterator.forEachRemaining(consumer);
1✔
3651
    }
1✔
3652

3653
    @Override
3654
    public boolean tryAdvance(Consumer<? super V> action) {
3655
      requireNonNull(action);
1✔
3656
      boolean[] advanced = { false };
1✔
3657
      Consumer<Node<K, V>> consumer = node -> {
1✔
3658
        K key = node.getKey();
1✔
3659
        V value = node.getValue();
1✔
3660
        long now = cache.expirationTicker().read();
1✔
3661
        if ((key != null) && (value != null) && node.isAlive() && !cache.hasExpired(node, now)) {
1✔
3662
          action.accept(value);
1✔
3663
          advanced[0] = true;
1✔
3664
        }
3665
      };
1✔
3666
      while (spliterator.tryAdvance(consumer)) {
1✔
3667
        if (advanced[0]) {
1✔
3668
          return true;
1✔
3669
        }
3670
      }
3671
      return false;
1✔
3672
    }
3673

3674
    @Override
3675
    public @Nullable Spliterator<V> trySplit() {
3676
      Spliterator<Node<K, V>> split = spliterator.trySplit();
1✔
3677
      return (split == null) ? null : new ValueSpliterator<>(cache, split);
1✔
3678
    }
3679

3680
    @Override
3681
    public long estimateSize() {
3682
      return spliterator.estimateSize();
1✔
3683
    }
3684

3685
    @Override
3686
    public int characteristics() {
3687
      return CONCURRENT | NONNULL;
1✔
3688
    }
3689
  }
3690

3691
  /** An adapter to safely externalize the entries. */
3692
  static final class EntrySetView<K, V> extends AbstractSet<Entry<K, V>> {
3693
    final BoundedLocalCache<K, V> cache;
3694

3695
    EntrySetView(BoundedLocalCache<K, V> cache) {
1✔
3696
      this.cache = requireNonNull(cache);
1✔
3697
    }
1✔
3698

3699
    @Override
3700
    public int size() {
3701
      return cache.size();
1✔
3702
    }
3703

3704
    @Override
3705
    public void clear() {
3706
      cache.clear();
1✔
3707
    }
1✔
3708

3709
    @Override
3710
    public boolean contains(Object o) {
3711
      if (!(o instanceof Entry<?, ?>)) {
1✔
3712
        return false;
1✔
3713
      }
3714
      var entry = (Entry<?, ?>) o;
1✔
3715
      var key = entry.getKey();
1✔
3716
      var value = entry.getValue();
1✔
3717
      if ((key == null) || (value == null)) {
1✔
3718
        return false;
1✔
3719
      }
3720
      Node<K, V> node = cache.data.get(cache.nodeFactory.newLookupKey(key));
1✔
3721
      return (node != null) && node.containsValue(value)
1✔
3722
           && !cache.hasExpired(node, cache.expirationTicker().read());
1✔
3723
    }
3724

3725
    @Override
3726
    public boolean removeAll(Collection<?> collection) {
3727
      requireNonNull(collection);
1✔
3728
      @Var boolean modified = false;
1✔
3729
      if ((collection instanceof Set<?>) && (collection.size() > size())) {
1✔
3730
        for (var entry : this) {
1✔
3731
          if (collection.contains(entry)) {
1✔
3732
            modified |= remove(entry);
1✔
3733
          }
3734
        }
1✔
3735
      } else {
3736
        for (var item : collection) {
1✔
3737
          modified |= (item != null) && remove(item);
1✔
3738
        }
1✔
3739
      }
3740
      return modified;
1✔
3741
    }
3742

3743
    @Override
3744
    @SuppressWarnings("SuspiciousMethodCalls")
3745
    public boolean remove(Object o) {
3746
      if (!(o instanceof Entry<?, ?>)) {
1✔
3747
        return false;
1✔
3748
      }
3749
      var entry = (Entry<?, ?>) o;
1✔
3750
      var key = entry.getKey();
1✔
3751
      return (key != null) && cache.remove(key, entry.getValue());
1✔
3752
    }
3753

3754
    @Override
3755
    public boolean removeIf(Predicate<? super Entry<K, V>> filter) {
3756
      requireNonNull(filter);
1✔
3757
      @Var boolean modified = false;
1✔
3758
      for (Entry<K, V> entry : this) {
1✔
3759
        if (filter.test(entry)) {
1✔
3760
          modified |= cache.remove(entry.getKey(), entry.getValue());
1✔
3761
        }
3762
      }
1✔
3763
      return modified;
1✔
3764
    }
3765

3766
    @Override
3767
    public boolean retainAll(Collection<?> collection) {
3768
      requireNonNull(collection);
1✔
3769
      @Var boolean modified = false;
1✔
3770
      for (var entry : this) {
1✔
3771
        if (!collection.contains(entry) && remove(entry)) {
1✔
3772
          modified = true;
1✔
3773
        }
3774
      }
1✔
3775
      return modified;
1✔
3776
    }
3777

3778
    @Override
3779
    public Iterator<Entry<K, V>> iterator() {
3780
      return new EntryIterator<>(cache);
1✔
3781
    }
3782

3783
    @Override
3784
    public Spliterator<Entry<K, V>> spliterator() {
3785
      return new EntrySpliterator<>(cache);
1✔
3786
    }
3787
  }
3788

3789
  /** An adapter to safely externalize the entry iterator. */
  static final class EntryIterator<K, V> implements Iterator<Entry<K, V>> {
    final BoundedLocalCache<K, V> cache;
    final Iterator<Node<K, V>> iterator;

    // The current position's key and value, populated by hasNext() and cleared by advance()
    @Nullable K key;
    @Nullable V value;
    // The key of the last element handed out, retained so that remove() can delete it
    @Nullable K removalKey;
    // The node at the current position, or null when the next position must be computed
    @Nullable Node<K, V> next;

    EntryIterator(BoundedLocalCache<K, V> cache) {
      this.iterator = cache.data.values().iterator();
      this.cache = cache;
    }

    @Override
    public boolean hasNext() {
      // A previously discovered position is still pending consumption
      if (next != null) {
        return true;
      }

      long now = cache.expirationTicker().read();
      while (iterator.hasNext()) {
        next = iterator.next();
        value = next.getValue();
        key = next.getKey();

        // An entry with a collected key/value or that has expired is evictable, so request an
        // asynchronous cleanup; a retired or dead node is skipped silently since its removal
        // is already in progress
        boolean evictable = (key == null) || (value == null) || cache.hasExpired(next, now);
        if (evictable || !next.isAlive()) {
          if (evictable) {
            cache.scheduleDrainBuffers();
          }
          advance();
          continue;
        }
        return true;
      }
      return false;
    }

    /** Invalidates the current position so that the iterator may compute the next position. */
    void advance() {
      value = null;
      next = null;
      key = null;
    }

    // Returns the next key; used by KeyIterator
    K nextKey() {
      if (!hasNext()) {
        throw new NoSuchElementException();
      }
      removalKey = key;
      advance();
      return requireNonNull(removalKey);
    }

    // Returns the next value; used by ValueIterator
    V nextValue() {
      if (!hasNext()) {
        throw new NoSuchElementException();
      }
      removalKey = key;
      V val = value;
      advance();
      return requireNonNull(val);
    }

    @Override
    public Entry<K, V> next() {
      if (!hasNext()) {
        throw new NoSuchElementException();
      }
      // A write-through entry so that Entry.setValue mutates the cache
      var entry = new WriteThroughEntry<K, @NonNull V>(
          cache, requireNonNull(key), requireNonNull(value));
      removalKey = key;
      advance();
      return entry;
    }

    @Override
    public void remove() {
      // Per the Iterator contract, remove() may only follow a successful next()
      if (removalKey == null) {
        throw new IllegalStateException();
      }
      cache.remove(removalKey);
      removalKey = null;
    }
  }
3876

3877
  /** An adapter to safely externalize the entry spliterator. */
3878
  static final class EntrySpliterator<K, V> implements Spliterator<Entry<K, V>> {
3879
    final Spliterator<Node<K, V>> spliterator;
3880
    final BoundedLocalCache<K, V> cache;
3881

3882
    EntrySpliterator(BoundedLocalCache<K, V> cache) {
3883
      this(cache, cache.data.values().spliterator());
1✔
3884
    }
1✔
3885

3886
    EntrySpliterator(BoundedLocalCache<K, V> cache, Spliterator<Node<K, V>> spliterator) {
1✔
3887
      this.spliterator = requireNonNull(spliterator);
1✔
3888
      this.cache = requireNonNull(cache);
1✔
3889
    }
1✔
3890

3891
    @Override
3892
    public void forEachRemaining(Consumer<? super Entry<K, V>> action) {
3893
      requireNonNull(action);
1✔
3894
      Consumer<Node<K, V>> consumer = node -> {
1✔
3895
        K key = node.getKey();
1✔
3896
        V value = node.getValue();
1✔
3897
        long now = cache.expirationTicker().read();
1✔
3898
        if ((key != null) && (value != null) && node.isAlive() && !cache.hasExpired(node, now)) {
1✔
3899
          action.accept(new WriteThroughEntry<>(cache, key, value));
1✔
3900
        }
3901
      };
1✔
3902
      spliterator.forEachRemaining(consumer);
1✔
3903
    }
1✔
3904

3905
    @Override
3906
    public boolean tryAdvance(Consumer<? super Entry<K, V>> action) {
3907
      requireNonNull(action);
1✔
3908
      boolean[] advanced = { false };
1✔
3909
      Consumer<Node<K, V>> consumer = node -> {
1✔
3910
        K key = node.getKey();
1✔
3911
        V value = node.getValue();
1✔
3912
        long now = cache.expirationTicker().read();
1✔
3913
        if ((key != null) && (value != null) && node.isAlive() && !cache.hasExpired(node, now)) {
1✔
3914
          action.accept(new WriteThroughEntry<>(cache, key, value));
1✔
3915
          advanced[0] = true;
1✔
3916
        }
3917
      };
1✔
3918
      while (spliterator.tryAdvance(consumer)) {
1✔
3919
        if (advanced[0]) {
1✔
3920
          return true;
1✔
3921
        }
3922
      }
3923
      return false;
1✔
3924
    }
3925

3926
    @Override
3927
    public @Nullable Spliterator<Entry<K, V>> trySplit() {
3928
      Spliterator<Node<K, V>> split = spliterator.trySplit();
1✔
3929
      return (split == null) ? null : new EntrySpliterator<>(cache, split);
1✔
3930
    }
3931

3932
    @Override
3933
    public long estimateSize() {
3934
      return spliterator.estimateSize();
1✔
3935
    }
3936

3937
    @Override
3938
    public int characteristics() {
3939
      return DISTINCT | CONCURRENT | NONNULL;
1✔
3940
    }
3941
  }
3942

3943
  /** A reusable task that performs the maintenance work; used to avoid wrapping by ForkJoinPool. */
3944
  static final class PerformCleanupTask extends ForkJoinTask<@Nullable Void> implements Runnable {
3945
    private static final long serialVersionUID = 1L;
3946

3947
    final WeakReference<BoundedLocalCache<?, ?>> reference;
3948

3949
    PerformCleanupTask(BoundedLocalCache<?, ?> cache) {
1✔
3950
      reference = new WeakReference<>(cache);
1✔
3951
    }
1✔
3952

3953
    @Override
3954
    public boolean exec() {
3955
      try {
3956
        run();
1✔
3957
      } catch (Throwable t) {
1✔
3958
        logger.log(Level.ERROR, "Exception thrown when performing the maintenance task", t);
1✔
3959
      }
1✔
3960

3961
      // Indicates that the task has not completed to allow subsequent submissions to execute
3962
      return false;
1✔
3963
    }
3964

3965
    @Override
3966
    public void run() {
3967
      BoundedLocalCache<?, ?> cache = reference.get();
1✔
3968
      if (cache != null) {
1✔
3969
        cache.performCleanUp(/* ignored */ null);
1✔
3970
      }
3971
    }
1✔
3972

3973
    /**
3974
     * This method cannot be ignored due to being final, so a hostile user supplied Executor could
3975
     * forcibly complete the task and halt future executions. There are easier ways to intentionally
3976
     * harm a system, so this is assumed to not happen in practice.
3977
     */
3978
    // public final void quietlyComplete() {}
3979

3980
    @Override public void complete(@Nullable Void value) {}
1✔
3981
    @Override public void setRawResult(@Nullable Void value) {}
1✔
3982
    @Override public @Nullable Void getRawResult() { return null; }
1✔
3983
    @Override public void completeExceptionally(@Nullable Throwable t) {}
1✔
3984
    @Override public boolean cancel(boolean mayInterruptIfRunning) { return false; }
1✔
3985
  }
3986

3987
  /** Creates a serialization proxy based on the common configuration shared by all cache types. */
3988
  static <K, V> SerializationProxy<K, V> makeSerializationProxy(BoundedLocalCache<?, ?> cache) {
3989
    var proxy = new SerializationProxy<K, V>();
1✔
3990
    proxy.weakKeys = cache.collectKeys();
1✔
3991
    proxy.weakValues = cache.nodeFactory.weakValues();
1✔
3992
    proxy.softValues = cache.nodeFactory.softValues();
1✔
3993
    proxy.isRecordingStats = cache.isRecordingStats();
1✔
3994
    proxy.evictionListener = cache.evictionListener;
1✔
3995
    proxy.removalListener = cache.removalListener();
1✔
3996
    proxy.ticker = cache.expirationTicker();
1✔
3997
    if (cache.expiresAfterAccess()) {
1✔
3998
      proxy.expiresAfterAccessNanos = cache.expiresAfterAccessNanos();
1✔
3999
    }
4000
    if (cache.expiresAfterWrite()) {
1✔
4001
      proxy.expiresAfterWriteNanos = cache.expiresAfterWriteNanos();
1✔
4002
    }
4003
    if (cache.expiresVariable()) {
1✔
4004
      proxy.expiry = cache.expiry();
1✔
4005
    }
4006
    if (cache.refreshAfterWrite()) {
1✔
4007
      proxy.refreshAfterWriteNanos = cache.refreshAfterWriteNanos();
1✔
4008
    }
4009
    if (cache.evicts()) {
1✔
4010
      if (cache.isWeighted) {
1✔
4011
        proxy.weigher = cache.weigher;
1✔
4012
        proxy.maximumWeight = cache.maximum();
1✔
4013
      } else {
4014
        proxy.maximumSize = cache.maximum();
1✔
4015
      }
4016
    }
4017
    proxy.cacheLoader = cache.cacheLoader;
1✔
4018
    proxy.async = cache.isAsync;
1✔
4019
    return proxy;
1✔
4020
  }
4021

4022
  /* --------------- Manual Cache --------------- */
4023

4024
  static class BoundedLocalManualCache<K, V> implements LocalManualCache<K, V>, Serializable {
4025
    private static final long serialVersionUID = 1;
4026

4027
    final BoundedLocalCache<K, V> cache;
4028

4029
    @Nullable Policy<K, V> policy;
4030

4031
    BoundedLocalManualCache(Caffeine<K, V> builder) {
4032
      this(builder, null);
1✔
4033
    }
1✔
4034

4035
    BoundedLocalManualCache(Caffeine<K, V> builder, @Nullable CacheLoader<? super K, V> loader) {
1✔
4036
      cache = LocalCacheFactory.newBoundedLocalCache(builder, loader, /* isAsync= */ false);
1✔
4037
    }
1✔
4038

4039
    @Override
4040
    public final BoundedLocalCache<K, V> cache() {
4041
      return cache;
1✔
4042
    }
4043

4044
    @Override
4045
    public final Policy<K, V> policy() {
4046
      if (policy == null) {
1✔
4047
        Function<@Nullable V, @Nullable V> identity = v -> v;
1✔
4048
        policy = new BoundedPolicy<>(cache, identity, cache.isWeighted);
1✔
4049
      }
4050
      return policy;
1✔
4051
    }
4052

4053
    private void readObject(ObjectInputStream stream) throws InvalidObjectException {
4054
      throw new InvalidObjectException("Proxy required");
1✔
4055
    }
4056

4057
    private Object writeReplace() {
4058
      return makeSerializationProxy(cache);
1✔
4059
    }
4060
  }
4061

4062
  /**
   * The {@link Policy} view over a {@link BoundedLocalCache}, exposing the optional inspection and
   * tuning facilities (eviction, fixed/variable expiration, refresh) that the cache was configured
   * with. The nested accessor views are created lazily and memoized.
   */
  @SuppressWarnings({"NullableOptional",
    "OptionalAssignedToNull", "OptionalUsedAsFieldOrParameterType"})
  static final class BoundedPolicy<K, V> implements Policy<K, V> {
    // decorates values read from the nodes; e.g. unwraps completed futures for async caches
    // (see BoundedLocalAsyncCache#policy which passes Async::getIfReady)
    final Function<@Nullable V, @Nullable V> transformer;
    final BoundedLocalCache<K, V> cache;
    final boolean isWeighted;

    // memoized accessor views; null until first requested
    @Nullable Optional<Eviction<K, V>> eviction;
    @Nullable Optional<FixedRefresh<K, V>> refreshes;
    @Nullable Optional<FixedExpiration<K, V>> afterWrite;
    @Nullable Optional<FixedExpiration<K, V>> afterAccess;
    @Nullable Optional<VarExpiration<K, V>> variable;

    BoundedPolicy(BoundedLocalCache<K, V> cache,
        Function<@Nullable V, @Nullable V> transformer, boolean isWeighted) {
      this.transformer = transformer;
      this.isWeighted = isWeighted;
      this.cache = cache;
    }

    @Override public boolean isRecordingStats() {
      return cache.isRecordingStats();
    }
    @Override public @Nullable V getIfPresentQuietly(K key) {
      return transformer.apply(cache.getIfPresentQuietly(key));
    }
    @Override public @Nullable CacheEntry<K, V> getEntryIfPresentQuietly(K key) {
      Node<K, V> node = cache.data.get(cache.nodeFactory.newLookupKey(key));
      return (node == null) ? null : cache.nodeToCacheEntry(node, transformer);
    }
    @SuppressWarnings("Java9CollectionFactory")
    @Override public Map<K, CompletableFuture<V>> refreshes() {
      // Note: the local shadows the Optional<FixedRefresh> field of the same name
      var refreshes = cache.refreshes;
      if ((refreshes == null) || refreshes.isEmpty()) {
        @SuppressWarnings({"ImmutableMapOf", "RedundantUnmodifiable"})
        Map<K, CompletableFuture<V>> emptyMap = Collections.unmodifiableMap(Collections.emptyMap());
        return emptyMap;
      } else if (cache.collectKeys()) {
        // weakly referenced keys require an identity-based view, resolving each reference to its
        // key and skipping those that were garbage collected
        var inFlight = new IdentityHashMap<K, CompletableFuture<V>>(refreshes.size());
        for (var entry : refreshes.entrySet()) {
          @SuppressWarnings("unchecked")
          @Nullable K key = ((InternalReference<K>) entry.getKey()).get();
          @SuppressWarnings("unchecked")
          var future = (CompletableFuture<V>) entry.getValue();
          if (key != null) {
            inFlight.put(key, future);
          }
        }
        return Collections.unmodifiableMap(inFlight);
      }
      @SuppressWarnings("unchecked")
      var castedRefreshes = (Map<K, CompletableFuture<V>>) (Object) refreshes;
      // snapshot so that the returned view is stable against concurrent refreshes
      return Collections.unmodifiableMap(new HashMap<>(castedRefreshes));
    }
    @Override public Optional<Eviction<K, V>> eviction() {
      // lazily create and memoize the view when the feature is enabled
      return cache.evicts()
          ? (eviction == null) ? (eviction = Optional.of(new BoundedEviction())) : eviction
          : Optional.empty();
    }
    @Override public Optional<FixedExpiration<K, V>> expireAfterAccess() {
      if (!cache.expiresAfterAccess()) {
        return Optional.empty();
      }
      return (afterAccess == null)
          ? (afterAccess = Optional.of(new BoundedExpireAfterAccess()))
          : afterAccess;
    }
    @Override public Optional<FixedExpiration<K, V>> expireAfterWrite() {
      if (!cache.expiresAfterWrite()) {
        return Optional.empty();
      }
      return (afterWrite == null)
          ? (afterWrite = Optional.of(new BoundedExpireAfterWrite()))
          : afterWrite;
    }
    @Override public Optional<VarExpiration<K, V>> expireVariably() {
      if (!cache.expiresVariable()) {
        return Optional.empty();
      }
      return (variable == null)
          ? (variable = Optional.of(new BoundedVarExpiration()))
          : variable;
    }
    @Override public Optional<FixedRefresh<K, V>> refreshAfterWrite() {
      if (!cache.refreshAfterWrite()) {
        return Optional.empty();
      }
      return (refreshes == null)
          ? (refreshes = Optional.of(new BoundedRefreshAfterWrite()))
          : refreshes;
    }

    /** The size/weight-based eviction accessors. */
    final class BoundedEviction implements Eviction<K, V> {
      @Override public boolean isWeighted() {
        return isWeighted;
      }
      @Override public OptionalInt weightOf(K key) {
        requireNonNull(key);
        if (!isWeighted) {
          return OptionalInt.empty();
        }
        Node<K, V> node = cache.data.get(cache.nodeFactory.newLookupKey(key));
        if ((node == null) || cache.hasExpired(node, cache.expirationTicker().read())) {
          return OptionalInt.empty();
        }
        // synchronized so that the liveness check and weight read are consistent
        synchronized (node) {
          return node.isAlive() ? OptionalInt.of(node.getWeight()) : OptionalInt.empty();
        }
      }
      @Override public OptionalLong weightedSize() {
        return isWeighted
            ? OptionalLong.of(Math.max(0, cache.weightedSizeAcquire()))
            : OptionalLong.empty();
      }
      @Override public long getMaximum() {
        return cache.maximumAcquire();
      }
      @Override public void setMaximum(long maximum) {
        // resize under the eviction lock and run maintenance to enforce the new bound eagerly
        cache.evictionLock.lock();
        try {
          cache.setMaximumSize(maximum);
          cache.maintenance(/* ignored */ null);
        } finally {
          cache.evictionLock.unlock();
          cache.rescheduleCleanUpIfIncomplete();
        }
      }
      @Override public Map<K, V> coldest(int limit) {
        int expectedSize = Math.min(limit, cache.size());
        var limiter = new SizeLimiter<K, V>(expectedSize, limit);
        return cache.evictionOrder(/* hottest= */ false, transformer, limiter);
      }
      @Override public Map<K, V> coldestWeighted(long weightLimit) {
        // an unweighted cache treats each entry as weight 1, so a size limit is equivalent
        var limiter = isWeighted()
            ? new WeightLimiter<K, V>(weightLimit)
            : new SizeLimiter<K, V>((int) Math.min(weightLimit, cache.size()), weightLimit);
        return cache.evictionOrder(/* hottest= */ false, transformer, limiter);
      }
      @Override
      public <T> T coldest(Function<Stream<CacheEntry<K, V>>, T> mappingFunction) {
        requireNonNull(mappingFunction);
        return cache.evictionOrder(/* hottest= */ false, transformer, mappingFunction);
      }
      @Override public Map<K, V> hottest(int limit) {
        int expectedSize = Math.min(limit, cache.size());
        var limiter = new SizeLimiter<K, V>(expectedSize, limit);
        return cache.evictionOrder(/* hottest= */ true, transformer, limiter);
      }
      @Override public Map<K, V> hottestWeighted(long weightLimit) {
        var limiter = isWeighted()
            ? new WeightLimiter<K, V>(weightLimit)
            : new SizeLimiter<K, V>((int) Math.min(weightLimit, cache.size()), weightLimit);
        return cache.evictionOrder(/* hottest= */ true, transformer, limiter);
      }
      @Override
      public <T> T hottest(Function<Stream<CacheEntry<K, V>>, T> mappingFunction) {
        requireNonNull(mappingFunction);
        return cache.evictionOrder(/* hottest= */ true, transformer, mappingFunction);
      }
    }

    /** The expire-after-access accessors. */
    @SuppressWarnings("PreferJavaTimeOverload")
    final class BoundedExpireAfterAccess implements FixedExpiration<K, V> {
      @Override public OptionalLong ageOf(K key, TimeUnit unit) {
        requireNonNull(key);
        requireNonNull(unit);
        Object lookupKey = cache.nodeFactory.newLookupKey(key);
        Node<K, V> node = cache.data.get(lookupKey);
        if (node == null) {
          return OptionalLong.empty();
        }
        long now = cache.expirationTicker().read();
        return cache.hasExpired(node, now)
            ? OptionalLong.empty()
            : OptionalLong.of(unit.convert(now - node.getAccessTime(), TimeUnit.NANOSECONDS));
      }
      @Override public long getExpiresAfter(TimeUnit unit) {
        return unit.convert(cache.expiresAfterAccessNanos(), TimeUnit.NANOSECONDS);
      }
      @Override public void setExpiresAfter(long duration, TimeUnit unit) {
        requireArgument(duration >= 0);
        cache.setExpiresAfterAccessNanos(unit.toNanos(duration));
        // schedule maintenance so entries affected by the new duration are processed
        cache.scheduleAfterWrite();
      }
      @Override public Map<K, V> oldest(int limit) {
        return oldest(new SizeLimiter<>(Math.min(limit, cache.size()), limit));
      }
      @Override public <T> T oldest(Function<Stream<CacheEntry<K, V>>, T> mappingFunction) {
        return cache.expireAfterAccessOrder(/* oldest= */ true, transformer, mappingFunction);
      }
      @Override public Map<K, V> youngest(int limit) {
        return youngest(new SizeLimiter<>(Math.min(limit, cache.size()), limit));
      }
      @Override public <T> T youngest(Function<Stream<CacheEntry<K, V>>, T> mappingFunction) {
        return cache.expireAfterAccessOrder(/* oldest= */ false, transformer, mappingFunction);
      }
    }

    /** The expire-after-write accessors. */
    @SuppressWarnings("PreferJavaTimeOverload")
    final class BoundedExpireAfterWrite implements FixedExpiration<K, V> {
      @Override public OptionalLong ageOf(K key, TimeUnit unit) {
        requireNonNull(key);
        requireNonNull(unit);
        Object lookupKey = cache.nodeFactory.newLookupKey(key);
        Node<K, V> node = cache.data.get(lookupKey);
        if (node == null) {
          return OptionalLong.empty();
        }
        long now = cache.expirationTicker().read();
        return cache.hasExpired(node, now)
            ? OptionalLong.empty()
            : OptionalLong.of(unit.convert(now - node.getWriteTime(), TimeUnit.NANOSECONDS));
      }
      @Override public long getExpiresAfter(TimeUnit unit) {
        return unit.convert(cache.expiresAfterWriteNanos(), TimeUnit.NANOSECONDS);
      }
      @Override public void setExpiresAfter(long duration, TimeUnit unit) {
        requireArgument(duration >= 0);
        cache.setExpiresAfterWriteNanos(unit.toNanos(duration));
        cache.scheduleAfterWrite();
      }
      @Override public Map<K, V> oldest(int limit) {
        return oldest(new SizeLimiter<>(Math.min(limit, cache.size()), limit));
      }
      @SuppressWarnings("GuardedByChecker")
      @Override public <T> T oldest(Function<Stream<CacheEntry<K, V>>, T> mappingFunction) {
        return cache.snapshot(cache.writeOrderDeque(), transformer, mappingFunction);
      }
      @Override public Map<K, V> youngest(int limit) {
        return youngest(new SizeLimiter<>(Math.min(limit, cache.size()), limit));
      }
      @SuppressWarnings("GuardedByChecker")
      @Override public <T> T youngest(Function<Stream<CacheEntry<K, V>>, T> mappingFunction) {
        return cache.snapshot(cache.writeOrderDeque()::descendingIterator,
            transformer, mappingFunction);
      }
    }

    /** The variable (per-entry) expiration accessors. */
    @SuppressWarnings("PreferJavaTimeOverload")
    final class BoundedVarExpiration implements VarExpiration<K, V> {
      @Override public OptionalLong getExpiresAfter(K key, TimeUnit unit) {
        requireNonNull(key);
        requireNonNull(unit);
        Object lookupKey = cache.nodeFactory.newLookupKey(key);
        Node<K, V> node = cache.data.get(lookupKey);
        if (node == null) {
          return OptionalLong.empty();
        }
        long now = cache.expirationTicker().read();
        return cache.hasExpired(node, now)
            ? OptionalLong.empty()
            : OptionalLong.of(unit.convert(node.getVariableTime() - now, TimeUnit.NANOSECONDS));
      }
      @Override public void setExpiresAfter(K key, long duration, TimeUnit unit) {
        requireNonNull(key);
        requireNonNull(unit);
        requireArgument(duration >= 0);
        Object lookupKey = cache.nodeFactory.newLookupKey(key);
        Node<K, V> node = cache.data.get(lookupKey);
        if (node != null) {
          long now;
          long durationNanos = TimeUnit.NANOSECONDS.convert(duration, unit);
          synchronized (node) {
            now = cache.expirationTicker().read();
            // skip if racing with an in-flight async computation or an already expired entry
            if (cache.isComputingAsync(node.getValue()) || cache.hasExpired(node, now)) {
              return;
            }
            node.setVariableTime(now + Math.min(durationNanos, MAXIMUM_EXPIRY));
          }
          // reschedule in the timer wheel without recording a statistics hit
          cache.afterRead(node, now, /* recordHit= */ false);
        }
      }
      @Override public @Nullable V put(K key, V value, long duration, TimeUnit unit) {
        requireNonNull(unit);
        requireNonNull(value);
        requireArgument(duration >= 0);
        return cache.isAsync
            ? putAsync(key, value, duration, unit)
            : putSync(key, value, duration, unit, /* onlyIfAbsent= */ false);
      }
      @Override public @Nullable V putIfAbsent(K key, V value, long duration, TimeUnit unit) {
        requireNonNull(unit);
        requireNonNull(value);
        requireArgument(duration >= 0);
        return cache.isAsync
            ? putIfAbsentAsync(key, value, duration, unit)
            : putSync(key, value, duration, unit, /* onlyIfAbsent= */ true);
      }
      @Nullable V putSync(K key, V value, long duration, TimeUnit unit, boolean onlyIfAbsent) {
        var expiry = new FixedExpireAfterWrite<K, V>(duration, unit);
        return cache.put(key, value, expiry, onlyIfAbsent);
      }
      @SuppressWarnings("unchecked")
      @Nullable V putIfAbsentAsync(K key, V value, long duration, TimeUnit unit) {
        // Keep in sync with LocalAsyncCache.AsMapView#putIfAbsent(key, value)
        var expiry = (Expiry<K, V>) new AsyncExpiry<>(new FixedExpireAfterWrite<>(duration, unit));
        var asyncValue = (V) CompletableFuture.completedFuture(value);

        // retry until a settled outcome is observed: an in-flight future is awaited first, a
        // successful prior value wins, and otherwise the new completed future is installed
        for (;;) {
          var priorFuture = (CompletableFuture<V>) cache.getIfPresent(
              key, /* recordStats= */ false);
          if (priorFuture != null) {
            if (!priorFuture.isDone()) {
              Async.getWhenSuccessful(priorFuture);
              continue;
            }

            V prior = Async.getWhenSuccessful(priorFuture);
            if (prior != null) {
              return prior;
            }
          }

          // added[0] reports whether the compute installed the new value or kept a live mapping
          boolean[] added = { false };
          var computed = (CompletableFuture<V>) cache.compute(key, (K k, @Nullable V oldValue) -> {
            var oldValueFuture = (CompletableFuture<V>) oldValue;
            added[0] = (oldValueFuture == null)
                || (oldValueFuture.isDone() && (Async.getIfReady(oldValueFuture) == null));
            return added[0] ? asyncValue : oldValue;
          }, expiry, /* recordLoad= */ false, /* recordLoadFailure= */ false);

          if (added[0]) {
            return null;
          } else {
            V prior = Async.getWhenSuccessful(computed);
            if (prior != null) {
              return prior;
            }
          }
        }
      }
      @SuppressWarnings("unchecked")
      @Nullable V putAsync(K key, V value, long duration, TimeUnit unit) {
        var expiry = (Expiry<K, V>) new AsyncExpiry<>(new FixedExpireAfterWrite<>(duration, unit));
        var asyncValue = (V) CompletableFuture.completedFuture(value);

        var oldValueFuture = (CompletableFuture<V>) cache.put(
            key, asyncValue, expiry, /* onlyIfAbsent= */ false);
        return Async.getWhenSuccessful(oldValueFuture);
      }
      @Override public @Nullable V compute(K key,
          BiFunction<? super K, ? super V, ? extends V> remappingFunction,
          Duration duration) {
        requireNonNull(key);
        requireNonNull(duration);
        requireNonNull(remappingFunction);
        requireArgument(!duration.isNegative(), "duration cannot be negative: %s", duration);
        var expiry = new FixedExpireAfterWrite<K, V>(
            toNanosSaturated(duration), TimeUnit.NANOSECONDS);

        return cache.isAsync
            ? computeAsync(key, remappingFunction, expiry)
            : cache.compute(key, remappingFunction, expiry,
                /* recordLoad= */ true, /* recordLoadFailure= */ true);
      }
      @Nullable V computeAsync(K key,
          BiFunction<? super K, ? super V, ? extends V> remappingFunction,
          Expiry<? super K, ? super V> expiry) {
        // Keep in sync with LocalAsyncCache.AsMapView#compute(key, remappingFunction)
        @SuppressWarnings("unchecked")
        var delegate = (LocalCache<K, CompletableFuture<V>>) cache;

        // newValue[0] captures the user's result so it can be returned after the compute
        @SuppressWarnings({"rawtypes", "unchecked", "Varifier"})
        @Nullable V[] newValue = (@Nullable V[]) new Object[1];
        for (;;) {
          // wait for any in-flight future before attempting the compute, then retry if another
          // in-flight future was installed concurrently (newValue[0] stays null in that case)
          Async.getWhenSuccessful(delegate.getIfPresentQuietly(key));

          CompletableFuture<V> valueFuture = delegate.compute(
              key, (K k, @Nullable CompletableFuture<V> oldValueFuture) -> {
                if ((oldValueFuture != null) && !oldValueFuture.isDone()) {
                  return oldValueFuture;
                }

                V oldValue = Async.getIfReady(oldValueFuture);
                BiFunction<? super K, ? super V, ? extends @Nullable V> function =
                    delegate.statsAware(remappingFunction,
                        /* recordLoad= */ true, /* recordLoadFailure= */ true);
                newValue[0] = function.apply(key, oldValue);
                return (newValue[0] == null) ? null
                    : CompletableFuture.completedFuture(newValue[0]);
              }, new AsyncExpiry<>(expiry), /* recordLoad= */ false,
              /* recordLoadFailure= */ false);

          if (newValue[0] != null) {
            return newValue[0];
          } else if (valueFuture == null) {
            return null;
          }
        }
      }
      @Override public Map<K, V> oldest(int limit) {
        return oldest(new SizeLimiter<>(Math.min(limit, cache.size()), limit));
      }
      @Override public <T> T oldest(Function<Stream<CacheEntry<K, V>>, T> mappingFunction) {
        return cache.snapshot(cache.timerWheel(), transformer, mappingFunction);
      }
      @Override public Map<K, V> youngest(int limit) {
        return youngest(new SizeLimiter<>(Math.min(limit, cache.size()), limit));
      }
      @Override public <T> T youngest(Function<Stream<CacheEntry<K, V>>, T> mappingFunction) {
        return cache.snapshot(cache.timerWheel()::descendingIterator, transformer, mappingFunction);
      }
    }

    /**
     * An {@link Expiry} that applies a fixed duration on create and update, and leaves the
     * remaining duration unchanged on read.
     */
    static final class FixedExpireAfterWrite<K, V> implements Expiry<K, V> {
      final long duration;
      final TimeUnit unit;

      FixedExpireAfterWrite(long duration, TimeUnit unit) {
        this.duration = duration;
        this.unit = unit;
      }
      @Override public long expireAfterCreate(K key, V value, long currentTime) {
        return unit.toNanos(duration);
      }
      @Override public long expireAfterUpdate(
          K key, V value, long currentTime, long currentDuration) {
        return unit.toNanos(duration);
      }
      @CanIgnoreReturnValue
      @Override public long expireAfterRead(
          K key, V value, long currentTime, long currentDuration) {
        return currentDuration;
      }
    }

    /** The refresh-after-write accessors. */
    @SuppressWarnings("PreferJavaTimeOverload")
    final class BoundedRefreshAfterWrite implements FixedRefresh<K, V> {
      @Override public OptionalLong ageOf(K key, TimeUnit unit) {
        requireNonNull(key);
        requireNonNull(unit);
        Object lookupKey = cache.nodeFactory.newLookupKey(key);
        Node<K, V> node = cache.data.get(lookupKey);
        if (node == null) {
          return OptionalLong.empty();
        }
        long now = cache.expirationTicker().read();
        return cache.hasExpired(node, now)
            ? OptionalLong.empty()
            : OptionalLong.of(unit.convert(now - node.getWriteTime(), TimeUnit.NANOSECONDS));
      }
      @Override public long getRefreshesAfter(TimeUnit unit) {
        return unit.convert(cache.refreshAfterWriteNanos(), TimeUnit.NANOSECONDS);
      }
      @Override public void setRefreshesAfter(long duration, TimeUnit unit) {
        requireArgument(duration >= 0);
        cache.setRefreshAfterWriteNanos(unit.toNanos(duration));
        cache.scheduleAfterWrite();
      }
    }
  }
4513

4514
  /* --------------- Loading Cache --------------- */
4515

4516
  static final class BoundedLocalLoadingCache<K, V>
4517
      extends BoundedLocalManualCache<K, V> implements LocalLoadingCache<K, V> {
4518
    private static final long serialVersionUID = 1;
4519

4520
    final Function<K, @Nullable V> mappingFunction;
4521
    final @Nullable Function<Set<? extends K>, Map<K, V>> bulkMappingFunction;
4522

4523
    BoundedLocalLoadingCache(Caffeine<K, V> builder, CacheLoader<? super K, V> loader) {
4524
      super(builder, loader);
1✔
4525
      requireNonNull(loader);
1✔
4526
      mappingFunction = newMappingFunction(loader);
1✔
4527
      bulkMappingFunction = newBulkMappingFunction(loader);
1✔
4528
    }
1✔
4529

4530
    @Override
4531
    @SuppressWarnings({"DataFlowIssue", "NullAway"})
4532
    public AsyncCacheLoader<? super K, V> cacheLoader() {
4533
      return cache.cacheLoader;
1✔
4534
    }
4535

4536
    @Override
4537
    public Function<K, @Nullable V> mappingFunction() {
4538
      return mappingFunction;
1✔
4539
    }
4540

4541
    @Override
4542
    public @Nullable Function<Set<? extends K>, Map<K, V>> bulkMappingFunction() {
4543
      return bulkMappingFunction;
1✔
4544
    }
4545

4546
    private void readObject(ObjectInputStream stream) throws InvalidObjectException {
4547
      throw new InvalidObjectException("Proxy required");
1✔
4548
    }
4549

4550
    private Object writeReplace() {
4551
      return makeSerializationProxy(cache);
1✔
4552
    }
4553
  }
4554

4555
  /* --------------- Async Cache --------------- */
4556

4557
  static final class BoundedLocalAsyncCache<K, V> implements LocalAsyncCache<K, V>, Serializable {
4558
    private static final long serialVersionUID = 1;
4559

4560
    final BoundedLocalCache<K, CompletableFuture<V>> cache;
4561
    final boolean isWeighted;
4562

4563
    @Nullable ConcurrentMap<K, CompletableFuture<V>> mapView;
4564
    @Nullable CacheView<K, V> cacheView;
4565
    @Nullable Policy<K, V> policy;
4566

4567
    @SuppressWarnings("unchecked")
4568
    BoundedLocalAsyncCache(Caffeine<K, V> builder) {
1✔
4569
      cache = (BoundedLocalCache<K, CompletableFuture<V>>) LocalCacheFactory
1✔
4570
          .newBoundedLocalCache(builder, /* cacheLoader= */ null, /* isAsync= */ true);
1✔
4571
      isWeighted = builder.isWeighted();
1✔
4572
    }
1✔
4573

4574
    @Override
4575
    public BoundedLocalCache<K, CompletableFuture<V>> cache() {
4576
      return cache;
1✔
4577
    }
4578

4579
    @Override
4580
    public ConcurrentMap<K, CompletableFuture<V>> asMap() {
4581
      return (mapView == null) ? (mapView = new AsyncAsMapView<>(this)) : mapView;
1✔
4582
    }
4583

4584
    @Override
4585
    public Cache<K, V> synchronous() {
4586
      return (cacheView == null) ? (cacheView = new CacheView<>(this)) : cacheView;
1✔
4587
    }
4588

4589
    @Override
4590
    public Policy<K, V> policy() {
4591
      if (policy == null) {
1✔
4592
        @SuppressWarnings("unchecked")
4593
        var castCache = (BoundedLocalCache<K, V>) cache;
1✔
4594
        Function<CompletableFuture<V>, @Nullable V> transformer = Async::getIfReady;
1✔
4595
        @SuppressWarnings("unchecked")
4596
        var castTransformer = (Function<@Nullable V, @Nullable V>) transformer;
1✔
4597
        policy = new BoundedPolicy<>(castCache, castTransformer, isWeighted);
1✔
4598
      }
4599
      return policy;
1✔
4600
    }
4601

4602
    private void readObject(ObjectInputStream stream) throws InvalidObjectException {
4603
      throw new InvalidObjectException("Proxy required");
1✔
4604
    }
4605

4606
    private Object writeReplace() {
4607
      return makeSerializationProxy(cache);
1✔
4608
    }
4609
  }
4610

4611
  /* --------------- Async Loading Cache --------------- */
4612

4613
  static final class BoundedLocalAsyncLoadingCache<K, V>
4614
      extends LocalAsyncLoadingCache<K, V> implements Serializable {
4615
    private static final long serialVersionUID = 1;
4616

4617
    final BoundedLocalCache<K, CompletableFuture<V>> cache;
4618
    final boolean isWeighted;
4619

4620
    @Nullable ConcurrentMap<K, CompletableFuture<V>> mapView;
4621
    @Nullable Policy<K, V> policy;
4622

4623
    @SuppressWarnings("unchecked")
4624
    BoundedLocalAsyncLoadingCache(Caffeine<K, V> builder, AsyncCacheLoader<? super K, V> loader) {
4625
      super(loader);
1✔
4626
      isWeighted = builder.isWeighted();
1✔
4627
      cache = (BoundedLocalCache<K, CompletableFuture<V>>) LocalCacheFactory
1✔
4628
          .newBoundedLocalCache(builder, loader, /* isAsync= */ true);
1✔
4629
    }
1✔
4630

4631
    @Override
4632
    public BoundedLocalCache<K, CompletableFuture<V>> cache() {
4633
      return cache;
1✔
4634
    }
4635

4636
    @Override
4637
    public ConcurrentMap<K, CompletableFuture<V>> asMap() {
4638
      return (mapView == null) ? (mapView = new AsyncAsMapView<>(this)) : mapView;
1✔
4639
    }
4640

4641
    @Override
4642
    public Policy<K, V> policy() {
4643
      if (policy == null) {
1✔
4644
        @SuppressWarnings("unchecked")
4645
        var castCache = (BoundedLocalCache<K, V>) cache;
1✔
4646
        Function<CompletableFuture<V>, @Nullable V> transformer = Async::getIfReady;
1✔
4647
        @SuppressWarnings("unchecked")
4648
        var castTransformer = (Function<@Nullable V, @Nullable V>) transformer;
1✔
4649
        policy = new BoundedPolicy<>(castCache, castTransformer, isWeighted);
1✔
4650
      }
4651
      return policy;
1✔
4652
    }
4653

4654
    private void readObject(ObjectInputStream stream) throws InvalidObjectException {
4655
      throw new InvalidObjectException("Proxy required");
1✔
4656
    }
4657

4658
    private Object writeReplace() {
4659
      return makeSerializationProxy(cache);
1✔
4660
    }
4661
  }
4662
}
4663

4664
/** The namespace for field padding through inheritance. */
@SuppressWarnings({"IdentifierName", "MultiVariableDeclaration"})
final class BLCHeader {

  private BLCHeader() {}

  /** Pads the cache-line(s) preceding the drain status to isolate it from foreign fields. */
  @SuppressWarnings("unused")
  static class PadDrainStatus {
    byte p000, p001, p002, p003, p004, p005, p006, p007;
    byte p008, p009, p010, p011, p012, p013, p014, p015;
    byte p016, p017, p018, p019, p020, p021, p022, p023;
    byte p024, p025, p026, p027, p028, p029, p030, p031;
    byte p032, p033, p034, p035, p036, p037, p038, p039;
    byte p040, p041, p042, p043, p044, p045, p046, p047;
    byte p048, p049, p050, p051, p052, p053, p054, p055;
    byte p056, p057, p058, p059, p060, p061, p062, p063;
    byte p064, p065, p066, p067, p068, p069, p070, p071;
    byte p072, p073, p074, p075, p076, p077, p078, p079;
    byte p080, p081, p082, p083, p084, p085, p086, p087;
    byte p088, p089, p090, p091, p092, p093, p094, p095;
    byte p096, p097, p098, p099, p100, p101, p102, p103;
    byte p104, p105, p106, p107, p108, p109, p110, p111;
    byte p112, p113, p114, p115, p116, p117, p118, p119;
  }

  /** Enforces a memory layout to avoid false sharing by padding the drain status. */
  abstract static class DrainStatusRef extends PadDrainStatus {
    static final VarHandle DRAIN_STATUS = findVarHandle(
        DrainStatusRef.class, "drainStatus", int.class);

    /** A drain is not taking place. */
    static final int IDLE = 0;
    /** A drain is required due to a pending write modification. */
    static final int REQUIRED = 1;
    /** A drain is in progress and will transition to idle. */
    static final int PROCESSING_TO_IDLE = 2;
    /** A drain is in progress and will transition to required. */
    static final int PROCESSING_TO_REQUIRED = 3;

    /** The draining status of the buffers. */
    volatile int drainStatus = IDLE;

    /**
     * Returns whether maintenance work is needed.
     *
     * @param delayable if draining the read buffer can be delayed
     */
    boolean shouldDrainBuffers(boolean delayable) {
      int status = drainStatusOpaque();
      if (status == IDLE) {
        // Nothing is pending; only a mandatory (non-delayable) probe triggers a drain
        return !delayable;
      }
      if (status == REQUIRED) {
        return true;
      }
      if ((status == PROCESSING_TO_IDLE) || (status == PROCESSING_TO_REQUIRED)) {
        // A drain is already running; piggyback on it rather than starting another
        return false;
      }
      throw new IllegalStateException("Invalid drain status: " + drainStatus);
    }

    /** Reads the status with opaque (no ordering) semantics. */
    int drainStatusOpaque() {
      return (int) DRAIN_STATUS.getOpaque(this);
    }

    /** Reads the status with acquire semantics. */
    int drainStatusAcquire() {
      return (int) DRAIN_STATUS.getAcquire(this);
    }

    /** Writes the status with opaque (no ordering) semantics. */
    void setDrainStatusOpaque(int status) {
      DRAIN_STATUS.setOpaque(this, status);
    }

    /** Writes the status with release semantics. */
    void setDrainStatusRelease(int status) {
      DRAIN_STATUS.setRelease(this, status);
    }

    /** Atomically transitions the status from the expected to the updated value. */
    boolean casDrainStatus(int expect, int update) {
      return DRAIN_STATUS.compareAndSet(this, expect, update);
    }

    /** Resolves a VarHandle for the named field, failing class initialization if absent. */
    static VarHandle findVarHandle(Class<?> recv, String name, Class<?> type) {
      try {
        return MethodHandles.lookup().findVarHandle(recv, name, type);
      } catch (ReflectiveOperationException e) {
        throw new ExceptionInInitializerError(e);
      }
    }
  }
}
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc