• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

ben-manes / caffeine / #4778

08 Apr 2025 05:01PM UTC coverage: 99.044% (-0.01%) from 99.057%
#4778

push

github

ben-manes
resolve build properties lazily during configuration time

7670 of 7744 relevant lines covered (99.04%)

0.99 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

99.35
/caffeine/src/main/java/com/github/benmanes/caffeine/cache/BoundedLocalCache.java
1
/*
2
 * Copyright 2014 Ben Manes. All Rights Reserved.
3
 *
4
 * Licensed under the Apache License, Version 2.0 (the "License");
5
 * you may not use this file except in compliance with the License.
6
 * You may obtain a copy of the License at
7
 *
8
 *     http://www.apache.org/licenses/LICENSE-2.0
9
 *
10
 * Unless required by applicable law or agreed to in writing, software
11
 * distributed under the License is distributed on an "AS IS" BASIS,
12
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
 * See the License for the specific language governing permissions and
14
 * limitations under the License.
15
 */
16
package com.github.benmanes.caffeine.cache;
17

18
import static com.github.benmanes.caffeine.cache.Async.ASYNC_EXPIRY;
19
import static com.github.benmanes.caffeine.cache.Caffeine.calculateHashMapCapacity;
20
import static com.github.benmanes.caffeine.cache.Caffeine.ceilingPowerOfTwo;
21
import static com.github.benmanes.caffeine.cache.Caffeine.requireArgument;
22
import static com.github.benmanes.caffeine.cache.Caffeine.toNanosSaturated;
23
import static com.github.benmanes.caffeine.cache.LocalLoadingCache.newBulkMappingFunction;
24
import static com.github.benmanes.caffeine.cache.LocalLoadingCache.newMappingFunction;
25
import static com.github.benmanes.caffeine.cache.Node.PROBATION;
26
import static com.github.benmanes.caffeine.cache.Node.PROTECTED;
27
import static com.github.benmanes.caffeine.cache.Node.WINDOW;
28
import static java.util.Locale.US;
29
import static java.util.Objects.requireNonNull;
30
import static java.util.Spliterator.DISTINCT;
31
import static java.util.Spliterator.IMMUTABLE;
32
import static java.util.Spliterator.NONNULL;
33
import static java.util.Spliterator.ORDERED;
34
import static java.util.function.Function.identity;
35

36
import java.io.InvalidObjectException;
37
import java.io.ObjectInputStream;
38
import java.io.Serializable;
39
import java.lang.System.Logger;
40
import java.lang.System.Logger.Level;
41
import java.lang.invoke.MethodHandles;
42
import java.lang.invoke.VarHandle;
43
import java.lang.ref.Reference;
44
import java.lang.ref.ReferenceQueue;
45
import java.lang.ref.WeakReference;
46
import java.time.Duration;
47
import java.util.AbstractCollection;
48
import java.util.AbstractSet;
49
import java.util.ArrayDeque;
50
import java.util.Collection;
51
import java.util.Collections;
52
import java.util.Comparator;
53
import java.util.Deque;
54
import java.util.HashMap;
55
import java.util.IdentityHashMap;
56
import java.util.Iterator;
57
import java.util.LinkedHashMap;
58
import java.util.Map;
59
import java.util.NoSuchElementException;
60
import java.util.Objects;
61
import java.util.Optional;
62
import java.util.OptionalInt;
63
import java.util.OptionalLong;
64
import java.util.Set;
65
import java.util.Spliterator;
66
import java.util.Spliterators;
67
import java.util.concurrent.CancellationException;
68
import java.util.concurrent.CompletableFuture;
69
import java.util.concurrent.ConcurrentHashMap;
70
import java.util.concurrent.ConcurrentMap;
71
import java.util.concurrent.Executor;
72
import java.util.concurrent.ForkJoinPool;
73
import java.util.concurrent.ForkJoinTask;
74
import java.util.concurrent.ThreadLocalRandom;
75
import java.util.concurrent.TimeUnit;
76
import java.util.concurrent.TimeoutException;
77
import java.util.concurrent.locks.ReentrantLock;
78
import java.util.function.BiConsumer;
79
import java.util.function.BiFunction;
80
import java.util.function.Consumer;
81
import java.util.function.Function;
82
import java.util.function.Predicate;
83
import java.util.stream.Stream;
84
import java.util.stream.StreamSupport;
85

86
import org.jspecify.annotations.Nullable;
87

88
import com.github.benmanes.caffeine.cache.Async.AsyncExpiry;
89
import com.github.benmanes.caffeine.cache.LinkedDeque.PeekingIterator;
90
import com.github.benmanes.caffeine.cache.Policy.CacheEntry;
91
import com.github.benmanes.caffeine.cache.References.InternalReference;
92
import com.github.benmanes.caffeine.cache.stats.StatsCounter;
93
import com.google.errorprone.annotations.CanIgnoreReturnValue;
94
import com.google.errorprone.annotations.Var;
95
import com.google.errorprone.annotations.concurrent.GuardedBy;
96

97
/**
98
 * An in-memory cache implementation that supports full concurrency of retrievals, a high expected
99
 * concurrency for updates, and multiple ways to bound the cache.
100
 * <p>
101
 * This class is abstract and code generated subclasses provide the complete implementation for a
102
 * particular configuration. This is to ensure that only the fields and execution paths necessary
103
 * for a given configuration are used.
104
 *
105
 * @author ben.manes@gmail.com (Ben Manes)
106
 * @param <K> the type of keys maintained by this cache
107
 * @param <V> the type of mapped values
108
 */
109
@SuppressWarnings("serial")
110
abstract class BoundedLocalCache<K, V> extends BLCHeader.DrainStatusRef
111
    implements LocalCache<K, V> {
112

113
  /*
114
   * This class performs a best-effort bounding of a ConcurrentHashMap using a page-replacement
115
   * algorithm to determine which entries to evict when the capacity is exceeded.
116
   *
117
   * Concurrency:
118
   * ------------
119
   * The page replacement algorithms are kept eventually consistent with the map. An update to the
120
   * map and recording of reads may not be immediately reflected in the policy's data structures.
121
   * These structures are guarded by a lock, and operations are applied in batches to avoid lock
122
   * contention. The penalty of applying the batches is spread across threads, so that the amortized
123
   * cost is slightly higher than performing just the ConcurrentHashMap operation [1].
124
   *
125
   * A memento of the reads and writes that were performed on the map is recorded in buffers. These
126
   * buffers are drained at the first opportunity after a write or when a read buffer is full. The
127
   * reads are offered to a buffer that will reject additions if contended on or if it is full. Due
128
   * to the concurrent nature of the read and write operations, a strict policy ordering is not
129
   * possible, but it may be observably strict when single-threaded. The buffers are drained
130
   * asynchronously to minimize the request latency and uses a state machine to determine when to
131
   * schedule this work on an executor.
132
   *
133
   * Due to a lack of a strict ordering guarantee, a task can be executed out-of-order, such as a
134
   * removal followed by its addition. The state of the entry is encoded using the key field to
135
   * avoid additional memory usage. An entry is "alive" if it is in both the hash table and the page
136
   * replacement policy. It is "retired" if it is not in the hash table and is pending removal from
137
   * the page replacement policy. Finally, an entry transitions to the "dead" state when it is
138
   * neither in the hash table nor the page replacement policy. Both the retired and dead states are
139
   * represented by a sentinel key that should not be used for map operations.
140
   *
141
   * Eviction:
142
   * ---------
143
   * Maximum size is implemented using the Window TinyLfu policy [2] due to its high hit rate, O(1)
144
   * time complexity, and small footprint. A new entry starts in the admission window and remains
145
   * there as long as it has high temporal locality (recency). Eventually an entry will slip from
146
   * the window into the main space. If the main space is already full, then a historic frequency
147
   * filter determines whether to evict the newly admitted entry or the victim entry chosen by the
148
   * eviction policy. This process ensures that the entries in the window were very recently used,
149
   * while entries in the main space are accessed very frequently and remain moderately recent. The
150
   * windowing allows the policy to have a high hit rate when entries exhibit a bursty access
151
   * pattern, while the filter ensures that popular items are retained. The admission window uses
152
   * LRU and the main space uses Segmented LRU.
153
   *
154
   * The optimal size of the window vs. main spaces is workload dependent [3]. A large admission
155
   * window is favored by recency-biased workloads, while a small one favors frequency-biased
156
   * workloads. When the window is too small, then recent arrivals are prematurely evicted, but when
157
   * it is too large, then they pollute the cache and force the eviction of more popular entries.
158
   * The optimal configuration is dynamically determined by using hill climbing to walk the hit rate
159
   * curve. This is achieved by sampling the hit rate and adjusting the window size in the direction
160
   * that is improving (making positive or negative steps). At each interval, the step size is
161
   * decreased until the hit rate climber converges at the optimal setting. The process is restarted
162
   * when the hit rate changes over a threshold, indicating that the workload altered, and a new
163
   * setting may be required.
164
   *
165
   * The historic usage is retained in a compact popularity sketch, which uses hashing to
166
   * probabilistically estimate an item's frequency. This exposes a flaw where an adversary could
167
   * use hash flooding [4] to artificially raise the frequency of the main space's victim and cause
168
   * all candidates to be rejected. In the worst case, by exploiting hash collisions, an attacker
169
   * could cause the cache to never hit and hold only worthless items, resulting in a
170
   * denial-of-service attack against the underlying resource. This is mitigated by introducing
171
   * jitter, allowing candidates that are at least moderately popular to have a small, random chance
172
   * of being admitted. This causes the victim to be evicted, but in a way that marginally impacts
173
   * the hit rate.
174
   *
175
   * Expiration:
176
   * -----------
177
   * Expiration is implemented in O(1) time complexity. The time-to-idle policy uses an access-order
178
   * queue, the time-to-live policy uses a write-order queue, and variable expiration uses a
179
   * hierarchical timer wheel [5]. The queuing policies allow for peeking at the oldest entry to
180
   * determine if it has expired. If it has not, then the younger entries must not have expired
181
   * either. If a maximum size is set, then expiration will share the queues, minimizing the
182
   * per-entry footprint. The timer wheel based policy uses hashing and cascading in a manner that
183
   * amortizes the penalty of sorting to achieve a similar algorithmic cost.
184
   *
185
   * The expiration updates are applied in a best effort fashion. The reordering of variable or
186
   * access-order expiration may be discarded by the read buffer if it is full or contended.
187
   * Similarly, the reordering of write expiration may be ignored for an entry if the last update
188
   * was within a short time window. This is done to avoid overwhelming the write buffer.
189
   *
190
   * [1] BP-Wrapper: A Framework Making Any Replacement Algorithms (Almost) Lock Contention Free
191
   * http://web.cse.ohio-state.edu/hpcs/WWW/HTML/publications/papers/TR-09-1.pdf
192
   * [2] TinyLFU: A Highly Efficient Cache Admission Policy
193
   * https://dl.acm.org/citation.cfm?id=3149371
194
   * [3] Adaptive Software Cache Management
195
   * https://dl.acm.org/citation.cfm?id=3274816
196
   * [4] Denial of Service via Algorithmic Complexity Attack
197
   * https://www.usenix.org/legacy/events/sec03/tech/full_papers/crosby/crosby.pdf
198
   * [5] Hashed and Hierarchical Timing Wheels
199
   * http://www.cs.columbia.edu/~nahum/w6998/papers/ton97-timing-wheels.pdf
200
   */
201

202
  static final Logger logger = System.getLogger(BoundedLocalCache.class.getName());
1✔
203

204
  /** The number of CPUs */
205
  static final int NCPU = Runtime.getRuntime().availableProcessors();
1✔
206
  /** The initial capacity of the write buffer. */
207
  static final int WRITE_BUFFER_MIN = 4;
208
  /** The maximum capacity of the write buffer. */
209
  static final int WRITE_BUFFER_MAX = 128 * ceilingPowerOfTwo(NCPU);
1✔
210
  /** The number of attempts to insert into the write buffer before yielding. */
211
  static final int WRITE_BUFFER_RETRIES = 100;
212
  /** The maximum weighted capacity of the map. */
213
  static final long MAXIMUM_CAPACITY = Long.MAX_VALUE - Integer.MAX_VALUE;
214
  /** The initial percent of the maximum weighted capacity dedicated to the main space. */
215
  static final double PERCENT_MAIN = 0.99d;
216
  /** The percent of the maximum weighted capacity dedicated to the main's protected space. */
217
  static final double PERCENT_MAIN_PROTECTED = 0.80d;
218
  /** The difference in hit rates that restarts the climber. */
219
  static final double HILL_CLIMBER_RESTART_THRESHOLD = 0.05d;
220
  /** The percent of the total size to adapt the window by. */
221
  static final double HILL_CLIMBER_STEP_PERCENT = 0.0625d;
222
  /** The rate to decrease the step size to adapt by. */
223
  static final double HILL_CLIMBER_STEP_DECAY_RATE = 0.98d;
224
  /** The minimum popularity for allowing randomized admission. */
225
  static final int ADMIT_HASHDOS_THRESHOLD = 6;
226
  /** The maximum number of entries that can be transferred between queues. */
227
  static final int QUEUE_TRANSFER_THRESHOLD = 1_000;
228
  /** The maximum time window between entry updates before the expiration must be reordered. */
229
  static final long EXPIRE_WRITE_TOLERANCE = TimeUnit.SECONDS.toNanos(1);
1✔
230
  /** The maximum duration before an entry expires. */
231
  static final long MAXIMUM_EXPIRY = (Long.MAX_VALUE >> 1); // 150 years
232
  /** The duration to wait on the eviction lock before warning of a possible misuse. */
233
  static final long WARN_AFTER_LOCK_WAIT_NANOS = TimeUnit.SECONDS.toNanos(30);
1✔
234
  /** The number of retries before computing to validate the entry's integrity; pow2 modulus. */
235
  static final int MAX_PUT_SPIN_WAIT_ATTEMPTS = 1024 - 1;
236
  /** The handle for the in-flight refresh operations. */
237
  static final VarHandle REFRESHES;
238

239
  final @Nullable RemovalListener<K, V> evictionListener;
240
  final @Nullable AsyncCacheLoader<K, V> cacheLoader;
241

242
  final MpscGrowableArrayQueue<Runnable> writeBuffer;
243
  final ConcurrentHashMap<Object, Node<K, V>> data;
244
  final PerformCleanupTask drainBuffersTask;
245
  final Consumer<Node<K, V>> accessPolicy;
246
  final Buffer<Node<K, V>> readBuffer;
247
  final NodeFactory<K, V> nodeFactory;
248
  final ReentrantLock evictionLock;
249
  final Weigher<K, V> weigher;
250
  final Executor executor;
251

252
  final boolean isWeighted;
253
  final boolean isAsync;
254

255
  @Nullable Set<K> keySet;
256
  @Nullable Collection<V> values;
257
  @Nullable Set<Entry<K, V>> entrySet;
258
  volatile @Nullable ConcurrentMap<Object, CompletableFuture<?>> refreshes;
259

260
  /** Creates an instance based on the builder's configuration. */
261
  @SuppressWarnings("GuardedBy")
262
  protected BoundedLocalCache(Caffeine<K, V> builder,
263
      @Nullable AsyncCacheLoader<K, V> cacheLoader, boolean isAsync) {
1✔
264
    this.isAsync = isAsync;
1✔
265
    this.cacheLoader = cacheLoader;
1✔
266
    executor = builder.getExecutor();
1✔
267
    isWeighted = builder.isWeighted();
1✔
268
    evictionLock = new ReentrantLock();
1✔
269
    weigher = builder.getWeigher(isAsync);
1✔
270
    drainBuffersTask = new PerformCleanupTask(this);
1✔
271
    nodeFactory = NodeFactory.newFactory(builder, isAsync);
1✔
272
    evictionListener = builder.getEvictionListener(isAsync);
1✔
273
    data = new ConcurrentHashMap<>(builder.getInitialCapacity());
1✔
274
    readBuffer = evicts() || collectKeys() || collectValues() || expiresAfterAccess()
1✔
275
        ? new BoundedBuffer<>()
1✔
276
        : Buffer.disabled();
1✔
277
    accessPolicy = (evicts() || expiresAfterAccess()) ? this::onAccess : e -> {};
1✔
278
    writeBuffer = new MpscGrowableArrayQueue<>(WRITE_BUFFER_MIN, WRITE_BUFFER_MAX);
1✔
279

280
    if (evicts()) {
1✔
281
      setMaximumSize(builder.getMaximum());
1✔
282
    }
283
  }
1✔
284

285
  static {
286
    try {
287
      REFRESHES = MethodHandles.lookup()
1✔
288
          .findVarHandle(BoundedLocalCache.class, "refreshes", ConcurrentMap.class);
1✔
289
    } catch (ReflectiveOperationException e) {
×
290
      throw new ExceptionInInitializerError(e);
×
291
    }
1✔
292
  }
1✔
293

294
  /** Ensures that the node is alive during the map operation. */
295
  void requireIsAlive(Object key, Node<?, ?> node) {
296
    if (!node.isAlive()) {
1✔
297
      throw new IllegalStateException(brokenEqualityMessage(key, node));
1✔
298
    }
299
  }
1✔
300

301
  /** Logs if the node cannot be found in the map but is still alive. */
302
  void logIfAlive(Node<?, ?> node) {
303
    if (node.isAlive()) {
1✔
304
      String message = brokenEqualityMessage(node.getKeyReference(), node);
1✔
305
      logger.log(Level.ERROR, message, new IllegalStateException());
1✔
306
    }
307
  }
1✔
308

309
  /** Returns the formatted broken equality error message. */
310
  String brokenEqualityMessage(Object key, Node<?, ?> node) {
311
    return String.format(US, "An invalid state was detected, occurring when the key's equals or "
1✔
312
        + "hashCode was modified while residing in the cache. This violation of the Map "
313
        + "contract can lead to non-deterministic behavior (key: %s, key type: %s, "
314
        + "node type: %s, cache type: %s).", key, key.getClass().getName(),
1✔
315
        node.getClass().getSimpleName(), getClass().getSimpleName());
1✔
316
  }
317

318
  /* --------------- Shared --------------- */
319

320
  @Override
321
  public boolean isAsync() {
322
    return isAsync;
1✔
323
  }
324

325
  /** Returns if the node's value is currently being computed asynchronously. */
326
  final boolean isComputingAsync(@Nullable V value) {
327
    return isAsync && !Async.isReady((CompletableFuture<?>) value);
1✔
328
  }
329

330
  @GuardedBy("evictionLock")
331
  protected AccessOrderDeque<Node<K, V>> accessOrderWindowDeque() {
332
    throw new UnsupportedOperationException();
1✔
333
  }
334

335
  @GuardedBy("evictionLock")
336
  protected AccessOrderDeque<Node<K, V>> accessOrderProbationDeque() {
337
    throw new UnsupportedOperationException();
1✔
338
  }
339

340
  @GuardedBy("evictionLock")
341
  protected AccessOrderDeque<Node<K, V>> accessOrderProtectedDeque() {
342
    throw new UnsupportedOperationException();
1✔
343
  }
344

345
  @GuardedBy("evictionLock")
346
  protected WriteOrderDeque<Node<K, V>> writeOrderDeque() {
347
    throw new UnsupportedOperationException();
1✔
348
  }
349

350
  @Override
351
  public final Executor executor() {
352
    return executor;
1✔
353
  }
354

355
  @Override
356
  @SuppressWarnings("NullAway")
357
  public ConcurrentMap<Object, CompletableFuture<?>> refreshes() {
358
    @Var var pending = refreshes;
1✔
359
    if (pending == null) {
1✔
360
      pending = new ConcurrentHashMap<>();
1✔
361
      if (!REFRESHES.compareAndSet(this, null, pending)) {
1✔
362
        pending = refreshes;
×
363
      }
364
    }
365
    return pending;
1✔
366
  }
367

368
  /** Invalidate the in-flight refresh. */
369
  void discardRefresh(Object keyReference) {
370
    var pending = refreshes;
1✔
371
    if ((pending != null) && pending.containsKey(keyReference)) {
1✔
372
      pending.remove(keyReference);
1✔
373
    }
374
  }
1✔
375

376
  @Override
377
  public Object referenceKey(K key) {
378
    return nodeFactory.newLookupKey(key);
1✔
379
  }
380

381
  @Override
382
  public boolean isPendingEviction(K key) {
383
    Node<K, V> node = data.get(nodeFactory.newLookupKey(key));
1✔
384
    return (node != null)
1✔
385
        && ((node.getValue() == null) || hasExpired(node, expirationTicker().read()));
1✔
386
  }
387

388
  /* --------------- Stats Support --------------- */
389

390
  @Override
391
  public boolean isRecordingStats() {
392
    return false;
1✔
393
  }
394

395
  @Override
396
  public StatsCounter statsCounter() {
397
    return StatsCounter.disabledStatsCounter();
1✔
398
  }
399

400
  @Override
401
  public Ticker statsTicker() {
402
    return Ticker.disabledTicker();
1✔
403
  }
404

405
  /* --------------- Removal Listener Support --------------- */
406

407
  @SuppressWarnings("NullAway")
408
  protected RemovalListener<K, V> removalListener() {
409
    return null;
1✔
410
  }
411

412
  protected boolean hasRemovalListener() {
413
    return false;
1✔
414
  }
415

416
  @Override
417
  public void notifyRemoval(@Nullable K key, @Nullable V value, RemovalCause cause) {
418
    if (!hasRemovalListener()) {
1✔
419
      return;
1✔
420
    }
421
    Runnable task = () -> {
1✔
422
      try {
423
        removalListener().onRemoval(key, value, cause);
1✔
424
      } catch (Throwable t) {
1✔
425
        logger.log(Level.WARNING, "Exception thrown by removal listener", t);
1✔
426
      }
1✔
427
    };
1✔
428
    try {
429
      executor.execute(task);
1✔
430
    } catch (Throwable t) {
1✔
431
      logger.log(Level.ERROR, "Exception thrown when submitting removal listener", t);
1✔
432
      task.run();
1✔
433
    }
1✔
434
  }
1✔
435

436
  /* --------------- Eviction Listener Support --------------- */
437

438
  void notifyEviction(@Nullable K key, @Nullable V value, RemovalCause cause) {
439
    if (evictionListener == null) {
1✔
440
      return;
1✔
441
    }
442
    try {
443
      evictionListener.onRemoval(key, value, cause);
1✔
444
    } catch (Throwable t) {
1✔
445
      logger.log(Level.WARNING, "Exception thrown by eviction listener", t);
1✔
446
    }
1✔
447
  }
1✔
448

449
  /* --------------- Reference Support --------------- */
450

451
  /** Returns if the keys are weak reference garbage collected. */
452
  protected boolean collectKeys() {
453
    return false;
1✔
454
  }
455

456
  /** Returns if the values are weak or soft reference garbage collected. */
457
  protected boolean collectValues() {
458
    return false;
1✔
459
  }
460

461
  @SuppressWarnings("NullAway")
462
  protected ReferenceQueue<K> keyReferenceQueue() {
463
    return null;
1✔
464
  }
465

466
  @SuppressWarnings("NullAway")
467
  protected ReferenceQueue<V> valueReferenceQueue() {
468
    return null;
1✔
469
  }
470

471
  /* --------------- Expiration Support --------------- */
472

473
  /** Returns the {@link Pacer} used to schedule the maintenance task. */
474
  protected @Nullable Pacer pacer() {
475
    return null;
1✔
476
  }
477

478
  /** Returns if the cache expires entries after a variable time threshold. */
479
  protected boolean expiresVariable() {
480
    return false;
1✔
481
  }
482

483
  /** Returns if the cache expires entries after an access time threshold. */
484
  protected boolean expiresAfterAccess() {
485
    return false;
1✔
486
  }
487

488
  /** Returns how long after the last access to an entry the map will retain that entry. */
489
  protected long expiresAfterAccessNanos() {
490
    throw new UnsupportedOperationException();
1✔
491
  }
492

493
  protected void setExpiresAfterAccessNanos(long expireAfterAccessNanos) {
494
    throw new UnsupportedOperationException();
1✔
495
  }
496

497
  /** Returns if the cache expires entries after a write time threshold. */
498
  protected boolean expiresAfterWrite() {
499
    return false;
1✔
500
  }
501

502
  /** Returns how long after the last write to an entry the map will retain that entry. */
503
  protected long expiresAfterWriteNanos() {
504
    throw new UnsupportedOperationException();
1✔
505
  }
506

507
  protected void setExpiresAfterWriteNanos(long expireAfterWriteNanos) {
508
    throw new UnsupportedOperationException();
1✔
509
  }
510

511
  /** Returns if the cache refreshes entries after a write time threshold. */
512
  protected boolean refreshAfterWrite() {
513
    return false;
1✔
514
  }
515

516
  /** Returns how long after the last write an entry becomes a candidate for refresh. */
517
  protected long refreshAfterWriteNanos() {
518
    throw new UnsupportedOperationException();
1✔
519
  }
520

521
  protected void setRefreshAfterWriteNanos(long refreshAfterWriteNanos) {
522
    throw new UnsupportedOperationException();
1✔
523
  }
524

525
  @Override
526
  @SuppressWarnings("NullAway")
527
  public Expiry<K, V> expiry() {
528
    return null;
1✔
529
  }
530

531
  /** Returns the {@link Ticker} used by this cache for expiration. */
532
  public Ticker expirationTicker() {
533
    return Ticker.disabledTicker();
1✔
534
  }
535

536
  protected TimerWheel<K, V> timerWheel() {
537
    throw new UnsupportedOperationException();
1✔
538
  }
539

540
  /* --------------- Eviction Support --------------- */
541

542
  /** Returns if the cache evicts entries due to a maximum size or weight threshold. */
543
  protected boolean evicts() {
544
    return false;
1✔
545
  }
546

547
  /** Returns if entries may be assigned different weights. */
548
  protected boolean isWeighted() {
549
    return (weigher != Weigher.singletonWeigher());
1✔
550
  }
551

552
  protected FrequencySketch<K> frequencySketch() {
553
    throw new UnsupportedOperationException();
1✔
554
  }
555

556
  /** Returns if an access to an entry can skip notifying the eviction policy. */
557
  protected boolean fastpath() {
558
    return false;
1✔
559
  }
560

561
  /** Returns the maximum weighted size. */
562
  protected long maximum() {
563
    throw new UnsupportedOperationException();
1✔
564
  }
565

566
  /** Returns the maximum weighted size of the window space. */
567
  protected long windowMaximum() {
568
    throw new UnsupportedOperationException();
1✔
569
  }
570

571
  /** Returns the maximum weighted size of the main's protected space. */
572
  protected long mainProtectedMaximum() {
573
    throw new UnsupportedOperationException();
1✔
574
  }
575

576
  @GuardedBy("evictionLock")
577
  protected void setMaximum(long maximum) {
578
    throw new UnsupportedOperationException();
1✔
579
  }
580

581
  @GuardedBy("evictionLock")
582
  protected void setWindowMaximum(long maximum) {
583
    throw new UnsupportedOperationException();
1✔
584
  }
585

586
  @GuardedBy("evictionLock")
587
  protected void setMainProtectedMaximum(long maximum) {
588
    throw new UnsupportedOperationException();
1✔
589
  }
590

591
  /** Returns the combined weight of the values in the cache (may be negative). */
592
  protected long weightedSize() {
593
    throw new UnsupportedOperationException();
1✔
594
  }
595

596
  /** Returns the uncorrected combined weight of the values in the window space. */
597
  protected long windowWeightedSize() {
598
    throw new UnsupportedOperationException();
1✔
599
  }
600

601
  /** Returns the uncorrected combined weight of the values in the main's protected space. */
602
  protected long mainProtectedWeightedSize() {
603
    throw new UnsupportedOperationException();
1✔
604
  }
605

606
  @GuardedBy("evictionLock")
607
  protected void setWeightedSize(long weightedSize) {
608
    throw new UnsupportedOperationException();
1✔
609
  }
610

611
  @GuardedBy("evictionLock")
612
  protected void setWindowWeightedSize(long weightedSize) {
613
    throw new UnsupportedOperationException();
1✔
614
  }
615

616
  @GuardedBy("evictionLock")
617
  protected void setMainProtectedWeightedSize(long weightedSize) {
618
    throw new UnsupportedOperationException();
1✔
619
  }
620

621
  protected int hitsInSample() {
622
    throw new UnsupportedOperationException();
1✔
623
  }
624

625
  protected int missesInSample() {
626
    throw new UnsupportedOperationException();
1✔
627
  }
628

629
  protected int sampleCount() {
630
    throw new UnsupportedOperationException();
1✔
631
  }
632

633
  protected double stepSize() {
634
    throw new UnsupportedOperationException();
1✔
635
  }
636

637
  protected double previousSampleHitRate() {
638
    throw new UnsupportedOperationException();
1✔
639
  }
640

641
  protected long adjustment() {
642
    throw new UnsupportedOperationException();
1✔
643
  }
644

645
  @GuardedBy("evictionLock")
646
  protected void setHitsInSample(int hitCount) {
647
    throw new UnsupportedOperationException();
1✔
648
  }
649

650
  @GuardedBy("evictionLock")
651
  protected void setMissesInSample(int missCount) {
652
    throw new UnsupportedOperationException();
1✔
653
  }
654

655
  @GuardedBy("evictionLock")
656
  protected void setSampleCount(int sampleCount) {
657
    throw new UnsupportedOperationException();
1✔
658
  }
659

660
  @GuardedBy("evictionLock")
661
  protected void setStepSize(double stepSize) {
662
    throw new UnsupportedOperationException();
1✔
663
  }
664

665
  @GuardedBy("evictionLock")
666
  protected void setPreviousSampleHitRate(double hitRate) {
667
    throw new UnsupportedOperationException();
1✔
668
  }
669

670
  @GuardedBy("evictionLock")
671
  protected void setAdjustment(long amount) {
672
    throw new UnsupportedOperationException();
1✔
673
  }
674

675
  /**
676
   * Sets the maximum weighted size of the cache. The caller may need to perform a maintenance cycle
677
   * to eagerly evicts entries until the cache shrinks to the appropriate size.
678
   */
679
  @GuardedBy("evictionLock")
680
  @SuppressWarnings("Varifier")
681
  void setMaximumSize(long maximum) {
682
    requireArgument(maximum >= 0, "maximum must not be negative");
1✔
683
    if (maximum == maximum()) {
1✔
684
      return;
1✔
685
    }
686

687
    long max = Math.min(maximum, MAXIMUM_CAPACITY);
1✔
688
    long window = max - (long) (PERCENT_MAIN * max);
1✔
689
    long mainProtected = (long) (PERCENT_MAIN_PROTECTED * (max - window));
1✔
690

691
    setMaximum(max);
1✔
692
    setWindowMaximum(window);
1✔
693
    setMainProtectedMaximum(mainProtected);
1✔
694

695
    setHitsInSample(0);
1✔
696
    setMissesInSample(0);
1✔
697
    setStepSize(-HILL_CLIMBER_STEP_PERCENT * max);
1✔
698

699
    if ((frequencySketch() != null) && !isWeighted() && (weightedSize() >= (max >>> 1))) {
1✔
700
      // Lazily initialize when close to the maximum size
701
      frequencySketch().ensureCapacity(max);
1✔
702
    }
703
  }
1✔
704

705
  /** Evicts entries if the cache exceeds the maximum. */
706
  @GuardedBy("evictionLock")
707
  void evictEntries() {
708
    if (!evicts()) {
1✔
709
      return;
1✔
710
    }
711
    var candidate = evictFromWindow();
1✔
712
    evictFromMain(candidate);
1✔
713
  }
1✔
714

715
  /**
   * Evicts entries from the window space into the main space while the window size exceeds a
   * maximum.
   *
   * @return the first candidate promoted into the probation space
   */
  @GuardedBy("evictionLock")
  @Nullable Node<K, V> evictFromWindow() {
    @Var Node<K, V> first = null;
    @Var Node<K, V> node = accessOrderWindowDeque().peekFirst();
    while (windowWeightedSize() > windowMaximum()) {
      // The pending operations will adjust the size to reflect the correct weight
      if (node == null) {
        break;
      }

      // Capture the successor before relinking the node into the probation deque
      Node<K, V> next = node.getNextInAccessOrder();
      if (node.getPolicyWeight() != 0) {
        // Promote at the probation deque's MRU position; zero-weight entries are
        // skipped since they do not contribute to the window's weighted size
        node.makeMainProbation();
        accessOrderWindowDeque().remove(node);
        accessOrderProbationDeque().offerLast(node);
        if (first == null) {
          // Remember the first promoted node so evictFromMain can start there
          first = node;
        }

        setWindowWeightedSize(windowWeightedSize() - node.getPolicyWeight());
      }
      node = next;
    }

    return first;
  }
747

748
  /**
   * Evicts entries from the main space if the cache exceeds the maximum capacity. The main space
   * determines whether admitting an entry (coming from the window space) is preferable to retaining
   * the eviction policy's victim. This decision is made using a frequency filter so that the
   * least frequently used entry is removed.
   * <p>
   * The window space's candidates were previously promoted to the probation space at its MRU
   * position and the eviction policy's victim starts at the LRU position. The candidates are
   * evaluated in promotion order while an eviction is required, and if exhausted then additional
   * entries are retrieved from the window space. Likewise, if the victim selection exhausts the
   * probation space then additional entries are retrieved the protected space. The queues are
   * consumed in LRU order and the evicted entry is the one with a lower relative frequency, where
   * the preference is to retain the main space's victims versus the window space's candidates on a
   * tie.
   *
   * @param candidate the first candidate promoted into the probation space
   */
  @GuardedBy("evictionLock")
  void evictFromMain(@Var @Nullable Node<K, V> candidate) {
    @Var int victimQueue = PROBATION;
    @Var int candidateQueue = PROBATION;
    @Var Node<K, V> victim = accessOrderProbationDeque().peekFirst();
    while (weightedSize() > maximum()) {
      // Search the admission window for additional candidates once the probation
      // space's promoted entries have been exhausted
      if ((candidate == null) && (candidateQueue == PROBATION)) {
        candidate = accessOrderWindowDeque().peekFirst();
        candidateQueue = WINDOW;
      }

      // Try evicting from the protected and window queues; the victim cursor walks
      // probation -> protected -> window before giving up
      if ((candidate == null) && (victim == null)) {
        if (victimQueue == PROBATION) {
          victim = accessOrderProtectedDeque().peekFirst();
          victimQueue = PROTECTED;
          continue;
        } else if (victimQueue == PROTECTED) {
          victim = accessOrderWindowDeque().peekFirst();
          victimQueue = WINDOW;
          continue;
        }

        // The pending operations will adjust the size to reflect the correct weight
        break;
      }

      // Skip over entries with zero weight; they never contribute to the overflow
      if ((victim != null) && (victim.getPolicyWeight() == 0)) {
        victim = victim.getNextInAccessOrder();
        continue;
      } else if ((candidate != null) && (candidate.getPolicyWeight() == 0)) {
        candidate = candidate.getNextInAccessOrder();
        continue;
      }

      // Evict immediately if only one of the entries is present
      if (victim == null) {
        @SuppressWarnings("NullAway")
        Node<K, V> previous = candidate.getNextInAccessOrder();
        Node<K, V> evict = candidate;
        candidate = previous;
        evictEntry(evict, RemovalCause.SIZE, 0L);
        continue;
      } else if (candidate == null) {
        Node<K, V> evict = victim;
        victim = victim.getNextInAccessOrder();
        evictEntry(evict, RemovalCause.SIZE, 0L);
        continue;
      }

      // Evict immediately if both selected the same entry
      if (candidate == victim) {
        victim = victim.getNextInAccessOrder();
        evictEntry(candidate, RemovalCause.SIZE, 0L);
        candidate = null;
        continue;
      }

      // Evict immediately if an entry was collected (key garbage collected)
      K victimKey = victim.getKey();
      K candidateKey = candidate.getKey();
      if (victimKey == null) {
        Node<K, V> evict = victim;
        victim = victim.getNextInAccessOrder();
        evictEntry(evict, RemovalCause.COLLECTED, 0L);
        continue;
      } else if (candidateKey == null) {
        Node<K, V> evict = candidate;
        candidate = candidate.getNextInAccessOrder();
        evictEntry(evict, RemovalCause.COLLECTED, 0L);
        continue;
      }

      // Evict immediately if an entry was removed (retired by a concurrent write)
      if (!victim.isAlive()) {
        Node<K, V> evict = victim;
        victim = victim.getNextInAccessOrder();
        evictEntry(evict, RemovalCause.SIZE, 0L);
        continue;
      } else if (!candidate.isAlive()) {
        Node<K, V> evict = candidate;
        candidate = candidate.getNextInAccessOrder();
        evictEntry(evict, RemovalCause.SIZE, 0L);
        continue;
      }

      // Evict immediately if the candidate's weight exceeds the maximum; it could
      // never be admitted regardless of its frequency
      if (candidate.getPolicyWeight() > maximum()) {
        Node<K, V> evict = candidate;
        candidate = candidate.getNextInAccessOrder();
        evictEntry(evict, RemovalCause.SIZE, 0L);
        continue;
      }

      // Evict the entry with the lowest frequency, preferring to retain the victim on a tie
      if (admit(candidateKey, victimKey)) {
        Node<K, V> evict = victim;
        victim = victim.getNextInAccessOrder();
        evictEntry(evict, RemovalCause.SIZE, 0L);
        candidate = candidate.getNextInAccessOrder();
      } else {
        Node<K, V> evict = candidate;
        candidate = candidate.getNextInAccessOrder();
        evictEntry(evict, RemovalCause.SIZE, 0L);
      }
    }
  }
1✔
874

875
  /**
876
   * Determines if the candidate should be accepted into the main space, as determined by its
877
   * frequency relative to the victim. A small amount of randomness is used to protect against hash
878
   * collision attacks, where the victim's frequency is artificially raised so that no new entries
879
   * are admitted.
880
   *
881
   * @param candidateKey the key for the entry being proposed for long term retention
882
   * @param victimKey the key for the entry chosen by the eviction policy for replacement
883
   * @return if the candidate should be admitted and the victim ejected
884
   */
885
  @GuardedBy("evictionLock")
886
  boolean admit(K candidateKey, K victimKey) {
887
    int victimFreq = frequencySketch().frequency(victimKey);
1✔
888
    int candidateFreq = frequencySketch().frequency(candidateKey);
1✔
889
    if (candidateFreq > victimFreq) {
1✔
890
      return true;
1✔
891
    } else if (candidateFreq >= ADMIT_HASHDOS_THRESHOLD) {
1✔
892
      // The maximum frequency is 15 and halved to 7 after a reset to age the history. An attack
893
      // exploits that a hot candidate is rejected in favor of a hot victim. The threshold of a warm
894
      // candidate reduces the number of random acceptances to minimize the impact on the hit rate.
895
      int random = ThreadLocalRandom.current().nextInt();
1✔
896
      return ((random & 127) == 0);
1✔
897
    }
898
    return false;
1✔
899
  }
900

901
  /** Expires entries that have expired by access, write, or variable. */
902
  @GuardedBy("evictionLock")
903
  void expireEntries() {
904
    long now = expirationTicker().read();
1✔
905
    expireAfterAccessEntries(now);
1✔
906
    expireAfterWriteEntries(now);
1✔
907
    expireVariableEntries(now);
1✔
908

909
    Pacer pacer = pacer();
1✔
910
    if (pacer != null) {
1✔
911
      long delay = getExpirationDelay(now);
1✔
912
      if (delay == Long.MAX_VALUE) {
1✔
913
        pacer.cancel();
1✔
914
      } else {
915
        pacer.schedule(executor, drainBuffersTask, now, delay);
1✔
916
      }
917
    }
918
  }
1✔
919

920
  /** Expires entries in the access-order queue. */
921
  @GuardedBy("evictionLock")
922
  void expireAfterAccessEntries(long now) {
923
    if (!expiresAfterAccess()) {
1✔
924
      return;
1✔
925
    }
926

927
    expireAfterAccessEntries(now, accessOrderWindowDeque());
1✔
928
    if (evicts()) {
1✔
929
      expireAfterAccessEntries(now, accessOrderProbationDeque());
1✔
930
      expireAfterAccessEntries(now, accessOrderProtectedDeque());
1✔
931
    }
932
  }
1✔
933

934
  /** Expires entries in an access-order queue. */
935
  @GuardedBy("evictionLock")
936
  void expireAfterAccessEntries(long now, AccessOrderDeque<Node<K, V>> accessOrderDeque) {
937
    long duration = expiresAfterAccessNanos();
1✔
938
    for (;;) {
939
      Node<K, V> node = accessOrderDeque.peekFirst();
1✔
940
      if ((node == null) || ((now - node.getAccessTime()) < duration)
1✔
941
          || !evictEntry(node, RemovalCause.EXPIRED, now)) {
1✔
942
        return;
1✔
943
      }
944
    }
1✔
945
  }
946

947
  /** Expires entries on the write-order queue. */
948
  @GuardedBy("evictionLock")
949
  void expireAfterWriteEntries(long now) {
950
    if (!expiresAfterWrite()) {
1✔
951
      return;
1✔
952
    }
953
    long duration = expiresAfterWriteNanos();
1✔
954
    for (;;) {
955
      Node<K, V> node = writeOrderDeque().peekFirst();
1✔
956
      if ((node == null) || ((now - node.getWriteTime()) < duration)
1✔
957
          || !evictEntry(node, RemovalCause.EXPIRED, now)) {
1✔
958
        break;
1✔
959
      }
960
    }
1✔
961
  }
1✔
962

963
  /** Expires entries in the timer wheel. */
964
  @GuardedBy("evictionLock")
965
  void expireVariableEntries(long now) {
966
    if (expiresVariable()) {
1✔
967
      timerWheel().advance(this, now);
1✔
968
    }
969
  }
1✔
970

971
  /** Returns the duration until the next item expires, or {@link Long#MAX_VALUE} if none. */
972
  @GuardedBy("evictionLock")
973
  long getExpirationDelay(long now) {
974
    @Var long delay = Long.MAX_VALUE;
1✔
975
    if (expiresAfterAccess()) {
1✔
976
      @Var Node<K, V> node = accessOrderWindowDeque().peekFirst();
1✔
977
      if (node != null) {
1✔
978
        delay = Math.min(delay, expiresAfterAccessNanos() - (now - node.getAccessTime()));
1✔
979
      }
980
      if (evicts()) {
1✔
981
        node = accessOrderProbationDeque().peekFirst();
1✔
982
        if (node != null) {
1✔
983
          delay = Math.min(delay, expiresAfterAccessNanos() - (now - node.getAccessTime()));
1✔
984
        }
985
        node = accessOrderProtectedDeque().peekFirst();
1✔
986
        if (node != null) {
1✔
987
          delay = Math.min(delay, expiresAfterAccessNanos() - (now - node.getAccessTime()));
1✔
988
        }
989
      }
990
    }
991
    if (expiresAfterWrite()) {
1✔
992
      Node<K, V> node = writeOrderDeque().peekFirst();
1✔
993
      if (node != null) {
1✔
994
        delay = Math.min(delay, expiresAfterWriteNanos() - (now - node.getWriteTime()));
1✔
995
      }
996
    }
997
    if (expiresVariable()) {
1✔
998
      delay = Math.min(delay, timerWheel().getExpirationDelay());
1✔
999
    }
1000
    return delay;
1✔
1001
  }
1002

1003
  /** Returns if the entry has expired. */
1004
  @SuppressWarnings("ShortCircuitBoolean")
1005
  boolean hasExpired(Node<K, V> node, long now) {
1006
    if (isComputingAsync(node.getValue())) {
1✔
1007
      return false;
1✔
1008
    }
1009
    return (expiresAfterAccess() && (now - node.getAccessTime() >= expiresAfterAccessNanos()))
1✔
1010
        | (expiresAfterWrite() && (now - node.getWriteTime() >= expiresAfterWriteNanos()))
1✔
1011
        | (expiresVariable() && (now - node.getVariableTime() >= 0));
1✔
1012
  }
1013

1014
  /**
   * Attempts to evict the entry based on the given removal cause. A removal may be ignored if the
   * entry was updated and is no longer eligible for eviction.
   *
   * @param node the entry to evict
   * @param cause the reason to evict
   * @param now the current time, used only if expiring
   * @return if the entry was evicted
   */
  @GuardedBy("evictionLock")
  @SuppressWarnings({"GuardedByChecker", "NullAway", "PMD.CollapsibleIfStatements"})
  boolean evictEntry(Node<K, V> node, RemovalCause cause, long now) {
    K key = node.getKey();
    // Single-element arrays capture results out of the remapping lambda
    @SuppressWarnings("unchecked")
    var value = (V[]) new Object[1];
    var removed = new boolean[1];
    var resurrect = new boolean[1];
    var actualCause = new RemovalCause[1];
    var keyReference = node.getKeyReference();

    // Remove atomically so that a concurrent write either observes the mapping or its absence
    data.computeIfPresent(keyReference, (k, n) -> {
      if (n != node) {
        // The mapping was replaced by a different node; leave it untouched
        return n;
      }
      synchronized (n) {
        value[0] = n.getValue();

        if ((key == null) || (value[0] == null)) {
          // The key or value was garbage collected, so the entry is reported as collected
          actualCause[0] = RemovalCause.COLLECTED;
        } else if (cause == RemovalCause.COLLECTED) {
          // A collected eviction no longer applies since the references are now strong
          resurrect[0] = true;
          return n;
        } else {
          actualCause[0] = cause;
        }

        if (actualCause[0] == RemovalCause.EXPIRED) {
          // Re-validate expiration under the lock; a concurrent read/write may have
          // refreshed the timestamps since this eviction was scheduled
          @Var boolean expired = false;
          if (expiresAfterAccess()) {
            expired |= ((now - n.getAccessTime()) >= expiresAfterAccessNanos());
          }
          if (expiresAfterWrite()) {
            expired |= ((now - n.getWriteTime()) >= expiresAfterWriteNanos());
          }
          if (expiresVariable()) {
            expired |= ((now - node.getVariableTime()) >= 0);
          }
          if (!expired) {
            resurrect[0] = true;
            return n;
          }
        } else if (actualCause[0] == RemovalCause.SIZE) {
          // A zero-weight entry cannot contribute to a size overflow
          int weight = node.getWeight();
          if (weight == 0) {
            resurrect[0] = true;
            return n;
          }
        }

        notifyEviction(key, value[0], actualCause[0]);
        discardRefresh(keyReference);
        removed[0] = true;
        node.retire();
      }
      return null;
    });

    // The entry is no longer eligible for eviction
    if (resurrect[0]) {
      return false;
    }

    // If the eviction fails due to a concurrent removal of the victim, that removal may cancel out
    // the addition that triggered this eviction. The victim is eagerly unlinked and the size
    // decremented before the removal task so that if an eviction is still required then a new
    // victim will be chosen for removal.
    if (node.inWindow() && (evicts() || expiresAfterAccess())) {
      accessOrderWindowDeque().remove(node);
    } else if (evicts()) {
      if (node.inMainProbation()) {
        accessOrderProbationDeque().remove(node);
      } else {
        accessOrderProtectedDeque().remove(node);
      }
    }
    if (expiresAfterWrite()) {
      writeOrderDeque().remove(node);
    } else if (expiresVariable()) {
      timerWheel().deschedule(node);
    }

    synchronized (node) {
      logIfAlive(node);
      makeDead(node);
    }

    // Only notify when this call performed the removal (not a concurrent writer)
    if (removed[0]) {
      statsCounter().recordEviction(node.getWeight(), actualCause[0]);
      notifyRemoval(key, value[0], actualCause[0]);
    }

    return true;
  }
1117

1118
  /** Adapts the eviction policy to towards the optimal recency / frequency configuration. */
1119
  @GuardedBy("evictionLock")
1120
  void climb() {
1121
    if (!evicts()) {
1✔
1122
      return;
1✔
1123
    }
1124

1125
    determineAdjustment();
1✔
1126
    demoteFromMainProtected();
1✔
1127
    long amount = adjustment();
1✔
1128
    if (amount == 0) {
1✔
1129
      return;
1✔
1130
    } else if (amount > 0) {
1✔
1131
      increaseWindow();
1✔
1132
    } else {
1133
      decreaseWindow();
1✔
1134
    }
1135
  }
1✔
1136

1137
  /** Calculates the amount to adapt the window by and sets {@link #adjustment()} accordingly. */
  @GuardedBy("evictionLock")
  void determineAdjustment() {
    if (frequencySketch().isNotInitialized()) {
      // No samples can be meaningful until the sketch exists; reset the climber state
      setPreviousSampleHitRate(0.0);
      setMissesInSample(0);
      setHitsInSample(0);
      return;
    }

    // Only adapt once a full sample period has been observed
    int requestCount = hitsInSample() + missesInSample();
    if (requestCount < frequencySketch().sampleSize) {
      return;
    }

    // Hill climbing: keep moving in the same direction while the hit rate improves,
    // reverse otherwise; restart with a large step if the change was drastic, else decay
    double hitRate = (double) hitsInSample() / requestCount;
    double hitRateChange = hitRate - previousSampleHitRate();
    double amount = (hitRateChange >= 0) ? stepSize() : -stepSize();
    double nextStepSize = (Math.abs(hitRateChange) >= HILL_CLIMBER_RESTART_THRESHOLD)
        ? HILL_CLIMBER_STEP_PERCENT * maximum() * (amount >= 0 ? 1 : -1)
        : HILL_CLIMBER_STEP_DECAY_RATE * amount;
    setPreviousSampleHitRate(hitRate);
    setAdjustment((long) amount);
    setStepSize(nextStepSize);
    setMissesInSample(0);
    setHitsInSample(0);
  }
1✔
1164

1165
  /**
   * Increases the size of the admission window by shrinking the portion allocated to the main
   * space. As the main space is partitioned into probation and protected regions (80% / 20%), for
   * simplicity only the protected is reduced. If the regions exceed their maximums, this may cause
   * protected items to be demoted to the probation region and probation items to be demoted to the
   * admission window.
   */
  @GuardedBy("evictionLock")
  void increaseWindow() {
    if (mainProtectedMaximum() == 0) {
      return;
    }

    // Tentatively move the quota from the protected region to the window
    @Var long quota = Math.min(adjustment(), mainProtectedMaximum());
    setMainProtectedMaximum(mainProtectedMaximum() - quota);
    setWindowMaximum(windowMaximum() + quota);
    demoteFromMainProtected();

    // Transfer up to the quota's worth of entries into the window, bounded per cycle
    for (int i = 0; i < QUEUE_TRANSFER_THRESHOLD; i++) {
      @Var Node<K, V> candidate = accessOrderProbationDeque().peekFirst();
      @Var boolean probation = true;
      if ((candidate == null) || (quota < candidate.getPolicyWeight())) {
        // Fall back to the protected region when probation is empty or too heavy
        candidate = accessOrderProtectedDeque().peekFirst();
        probation = false;
      }
      if (candidate == null) {
        break;
      }

      int weight = candidate.getPolicyWeight();
      if (quota < weight) {
        break;
      }

      quota -= weight;
      if (probation) {
        accessOrderProbationDeque().remove(candidate);
      } else {
        setMainProtectedWeightedSize(mainProtectedWeightedSize() - weight);
        accessOrderProtectedDeque().remove(candidate);
      }
      setWindowWeightedSize(windowWeightedSize() + weight);
      accessOrderWindowDeque().offerLast(candidate);
      candidate.makeWindow();
    }

    // Return the unused quota and record the amount actually adapted
    setMainProtectedMaximum(mainProtectedMaximum() + quota);
    setWindowMaximum(windowMaximum() - quota);
    setAdjustment(quota);
  }
1✔
1215

1216
  /** Decreases the size of the admission window and increases the main's protected region. */
  @GuardedBy("evictionLock")
  void decreaseWindow() {
    if (windowMaximum() <= 1) {
      return;
    }

    // Tentatively move the quota from the window to the protected region,
    // always leaving at least a one-unit window
    @Var long quota = Math.min(-adjustment(), Math.max(0, windowMaximum() - 1));
    setMainProtectedMaximum(mainProtectedMaximum() + quota);
    setWindowMaximum(windowMaximum() - quota);

    // Transfer up to the quota's worth of window entries into probation, bounded per cycle
    for (int i = 0; i < QUEUE_TRANSFER_THRESHOLD; i++) {
      Node<K, V> candidate = accessOrderWindowDeque().peekFirst();
      if (candidate == null) {
        break;
      }

      int weight = candidate.getPolicyWeight();
      if (quota < weight) {
        break;
      }

      quota -= weight;
      setWindowWeightedSize(windowWeightedSize() - weight);
      accessOrderWindowDeque().remove(candidate);
      accessOrderProbationDeque().offerLast(candidate);
      candidate.makeMainProbation();
    }

    // Return the unused quota and record the amount actually adapted (negative direction)
    setMainProtectedMaximum(mainProtectedMaximum() - quota);
    setWindowMaximum(windowMaximum() + quota);
    setAdjustment(-quota);
  }
1✔
1249

1250
  /** Transfers the nodes from the protected to the probation region if it exceeds the maximum. */
1251
  @GuardedBy("evictionLock")
1252
  void demoteFromMainProtected() {
1253
    long mainProtectedMaximum = mainProtectedMaximum();
1✔
1254
    @Var long mainProtectedWeightedSize = mainProtectedWeightedSize();
1✔
1255
    if (mainProtectedWeightedSize <= mainProtectedMaximum) {
1✔
1256
      return;
1✔
1257
    }
1258

1259
    for (int i = 0; i < QUEUE_TRANSFER_THRESHOLD; i++) {
1✔
1260
      if (mainProtectedWeightedSize <= mainProtectedMaximum) {
1✔
1261
        break;
1✔
1262
      }
1263

1264
      Node<K, V> demoted = accessOrderProtectedDeque().poll();
1✔
1265
      if (demoted == null) {
1✔
1266
        break;
1✔
1267
      }
1268
      demoted.makeMainProbation();
1✔
1269
      accessOrderProbationDeque().offerLast(demoted);
1✔
1270
      mainProtectedWeightedSize -= demoted.getPolicyWeight();
1✔
1271
    }
1272
    setMainProtectedWeightedSize(mainProtectedWeightedSize);
1✔
1273
  }
1✔
1274

1275
  /**
1276
   * Performs the post-processing work required after a read.
1277
   *
1278
   * @param node the entry in the page replacement policy
1279
   * @param now the current time, in nanoseconds
1280
   * @param recordHit if the hit count should be incremented
1281
   * @return the refreshed value if immediately loaded, else null
1282
   */
1283
  @Nullable V afterRead(Node<K, V> node, long now, boolean recordHit) {
1284
    if (recordHit) {
1✔
1285
      statsCounter().recordHits(1);
1✔
1286
    }
1287

1288
    boolean delayable = skipReadBuffer() || (readBuffer.offer(node) != Buffer.FULL);
1✔
1289
    if (shouldDrainBuffers(delayable)) {
1✔
1290
      scheduleDrainBuffers();
1✔
1291
    }
1292
    return refreshIfNeeded(node, now);
1✔
1293
  }
1294

1295
  /** Returns if the cache should bypass the read buffer. */
1296
  boolean skipReadBuffer() {
1297
    return fastpath() && frequencySketch().isNotInitialized();
1✔
1298
  }
1299

1300
  /**
   * Asynchronously refreshes the entry if eligible.
   *
   * @param node the entry in the cache to refresh
   * @param now the current time, in nanoseconds
   * @return the refreshed value if immediately loaded, else null
   */
  @SuppressWarnings("FutureReturnValueIgnored")
  @Nullable V refreshIfNeeded(Node<K, V> node, long now) {
    if (!refreshAfterWrite()) {
      return null;
    }

    K key;
    V oldValue;
    // The write time's low bit is used as a lock to claim the refresh for this thread
    long writeTime = node.getWriteTime();
    long refreshWriteTime = writeTime | 1L;
    Object keyReference = node.getKeyReference();
    ConcurrentMap<Object, CompletableFuture<?>> refreshes;
    // Eligible when stale, fully reachable, not already refreshing, and the lock bit is won
    if (((now - writeTime) > refreshAfterWriteNanos()) && (keyReference != null)
        && ((key = node.getKey()) != null) && ((oldValue = node.getValue()) != null)
        && !isComputingAsync(oldValue) && ((writeTime & 1L) == 0L)
        && !(refreshes = refreshes()).containsKey(keyReference)
        && node.isAlive() && node.casWriteTime(writeTime, refreshWriteTime)) {
      long[] startTime = new long[1];
      @SuppressWarnings({"rawtypes", "unchecked"})
      CompletableFuture<? extends V>[] refreshFuture = new CompletableFuture[1];
      try {
        refreshes.computeIfAbsent(keyReference, k -> {
          try {
            startTime[0] = statsTicker().read();
            if (isAsync) {
              @SuppressWarnings("unchecked")
              var future = (CompletableFuture<V>) oldValue;
              if (Async.isReady(future)) {
                @SuppressWarnings("NullAway")
                var refresh = cacheLoader.asyncReload(key, future.join(), executor);
                refreshFuture[0] = requireNonNull(refresh, "Null future");
              } else {
                // no-op if the future's completion state was modified (e.g. obtrude methods)
                return null;
              }
            } else {
              @SuppressWarnings("NullAway")
              var refresh = cacheLoader.asyncReload(key, oldValue, executor);
              refreshFuture[0] = requireNonNull(refresh, "Null future");
            }
            return refreshFuture[0];
          } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            logger.log(Level.WARNING, "Exception thrown when submitting refresh task", e);
            return null;
          } catch (Throwable e) {
            logger.log(Level.WARNING, "Exception thrown when submitting refresh task", e);
            return null;
          }
        });
      } finally {
        // Release the write-time lock bit regardless of whether a refresh was started
        node.casWriteTime(refreshWriteTime, writeTime);
      }

      if (refreshFuture[0] == null) {
        return null;
      }

      var refreshed = refreshFuture[0].handle((newValue, error) -> {
        long loadTime = statsTicker().read() - startTime[0];
        if (error != null) {
          if (!(error instanceof CancellationException) && !(error instanceof TimeoutException)) {
            logger.log(Level.WARNING, "Exception thrown during refresh", error);
          }
          refreshes.remove(keyReference, refreshFuture[0]);
          statsCounter().recordLoadFailure(loadTime);
          return null;
        }

        @SuppressWarnings("unchecked")
        V value = (isAsync && (newValue != null)) ? (V) refreshFuture[0] : newValue;

        RemovalCause[] cause = new RemovalCause[1];
        V result = compute(key, (k, currentValue) -> {
          if (currentValue == null) {
            // If the entry is absent then discard the refresh, maybe notifying the listener
            if (value != null) {
              cause[0] = RemovalCause.EXPLICIT;
            }
            return null;
          } else if (currentValue == value) {
            // If the reloaded value is the same instance then no-op
            return currentValue;
          } else if (isAsync &&
              (newValue == Async.getIfReady((CompletableFuture<?>) currentValue))) {
            // If the completed futures hold the same value instance then no-op
            return currentValue;
          } else if ((currentValue == oldValue) && (node.getWriteTime() == writeTime)) {
            // If the entry was not modified while in-flight (no ABA) then replace
            return value;
          }
          // Otherwise, a write invalidated the refresh so discard it and notify the listener
          cause[0] = RemovalCause.REPLACED;
          return currentValue;
        }, expiry(), /* recordLoad= */ false, /* recordLoadFailure= */ true);

        if (cause[0] != null) {
          notifyRemoval(key, value, cause[0]);
        }
        if (newValue == null) {
          statsCounter().recordLoadFailure(loadTime);
        } else {
          statsCounter().recordLoadSuccess(loadTime);
        }

        refreshes.remove(keyReference, refreshFuture[0]);
        return result;
      });
      // Return the new value only if the reload completed synchronously
      return Async.getIfReady(refreshed);
    }

    return null;
  }
1420

1421
  /**
1422
   * Returns the expiration time for the entry after being created.
1423
   *
1424
   * @param key the key of the entry that was created
1425
   * @param value the value of the entry that was created
1426
   * @param expiry the calculator for the expiration time
1427
   * @param now the current time, in nanoseconds
1428
   * @return the expiration time
1429
   */
1430
  long expireAfterCreate(@Nullable K key, @Nullable V value,
1431
      Expiry<? super K, ? super V> expiry, long now) {
1432
    if (expiresVariable() && (key != null) && (value != null)) {
1✔
1433
      long duration = Math.max(0L, expiry.expireAfterCreate(key, value, now));
1✔
1434
      return isAsync ? (now + duration) : (now + Math.min(duration, MAXIMUM_EXPIRY));
1✔
1435
    }
1436
    return 0L;
1✔
1437
  }
1438

1439
  /**
1440
   * Returns the expiration time for the entry after being updated.
1441
   *
1442
   * @param node the entry in the page replacement policy
1443
   * @param key the key of the entry that was updated
1444
   * @param value the value of the entry that was updated
1445
   * @param expiry the calculator for the expiration time
1446
   * @param now the current time, in nanoseconds
1447
   * @return the expiration time
1448
   */
1449
  long expireAfterUpdate(Node<K, V> node, @Nullable K key,
1450
      @Nullable V value, Expiry<? super K, ? super V> expiry, long now) {
1451
    if (expiresVariable() && (key != null) && (value != null)) {
1✔
1452
      long currentDuration = Math.max(1, node.getVariableTime() - now);
1✔
1453
      long duration = Math.max(0L, expiry.expireAfterUpdate(key, value, now, currentDuration));
1✔
1454
      return isAsync ? (now + duration) : (now + Math.min(duration, MAXIMUM_EXPIRY));
1✔
1455
    }
1456
    return 0L;
1✔
1457
  }
1458

1459
  /**
1460
   * Returns the access time for the entry after a read.
1461
   *
1462
   * @param node the entry in the page replacement policy
1463
   * @param key the key of the entry that was read
1464
   * @param value the value of the entry that was read
1465
   * @param expiry the calculator for the expiration time
1466
   * @param now the current time, in nanoseconds
1467
   * @return the expiration time
1468
   */
1469
  long expireAfterRead(Node<K, V> node, @Nullable K key,
1470
      @Nullable V value, Expiry<K, V> expiry, long now) {
1471
    if (expiresVariable() && (key != null) && (value != null)) {
1✔
1472
      long currentDuration = Math.max(0L, node.getVariableTime() - now);
1✔
1473
      long duration = Math.max(0L, expiry.expireAfterRead(key, value, now, currentDuration));
1✔
1474
      return isAsync ? (now + duration) : (now + Math.min(duration, MAXIMUM_EXPIRY));
1✔
1475
    }
1476
    return 0L;
1✔
1477
  }
1478

1479
  /**
   * Attempts to update the access time for the entry after a read.
   *
   * @param node the entry in the page replacement policy
   * @param key the key of the entry that was read
   * @param value the value of the entry that was read
   * @param expiry the calculator for the expiration time
   * @param now the current time, in nanoseconds
   */
  void tryExpireAfterRead(Node<K, V> node, @Nullable K key,
      @Nullable V value, Expiry<K, V> expiry, long now) {
    if (!expiresVariable() || (key == null) || (value == null)) {
      return;
    }

    long variableTime = node.getVariableTime();
    // a minimum duration of 1ns avoids reporting a zero remaining lifetime to the user's callback
    long currentDuration = Math.max(1, variableTime - now);
    if (isAsync && (currentDuration > MAXIMUM_EXPIRY)) {
      // expireAfterCreate has not yet set the duration after completion
      return;
    }

    long duration = Math.max(0L, expiry.expireAfterRead(key, value, now, currentDuration));
    if (duration != currentDuration) {
      long expirationTime = isAsync ? (now + duration) : (now + Math.min(duration, MAXIMUM_EXPIRY));
      // a best-effort CAS: if a concurrent writer changed the variable time, that update wins
      node.casVariableTime(variableTime, expirationTime);
    }
  }
1✔
1507

1508
  void setVariableTime(Node<K, V> node, long expirationTime) {
1509
    if (expiresVariable()) {
1✔
1510
      node.setVariableTime(expirationTime);
1✔
1511
    }
1512
  }
1✔
1513

1514
  /** Records the node's write time; a no-op unless write expiration or refresh is in use. */
  void setWriteTime(Node<K, V> node, long now) {
    if (expiresAfterWrite() || refreshAfterWrite()) {
      // the lowest bit is masked off; it appears to be reserved as a state flag by the
      // write-time accessors — NOTE(review): confirm against Node.getWriteTime's usage
      node.setWriteTime(now & ~1L);
    }
  }
1✔
1519

1520
  void setAccessTime(Node<K, V> node, long now) {
1521
    if (expiresAfterAccess()) {
1✔
1522
      node.setAccessTime(now);
1✔
1523
    }
1524
  }
1✔
1525

1526
  /** Returns if the entry's write time would exceed the minimum expiration reorder threshold. */
  boolean exceedsWriteTimeTolerance(Node<K, V> node, long varTime, long now) {
    long variableTime = node.getVariableTime();
    long tolerance = EXPIRE_WRITE_TOLERANCE;
    long writeTime = node.getWriteTime();
    // A reorder is needed when the policy's duration is finer than the tolerance, or the entry's
    // recorded time has drifted from the current time by more than the tolerance
    return
        (expiresAfterWrite()
            && ((expiresAfterWriteNanos() <= tolerance) || (Math.abs(now - writeTime) > tolerance)))
        || (refreshAfterWrite()
            && ((refreshAfterWriteNanos() <= tolerance) || (Math.abs(now - writeTime) > tolerance)))
        || (expiresVariable() && (Math.abs(varTime - variableTime) > tolerance));
  }
1538

1539
  /**
   * Performs the post-processing work required after a write.
   *
   * @param task the pending operation to be applied
   */
  void afterWrite(Runnable task) {
    // Try to enqueue the task, prodding the maintenance work along after each failed offer
    for (int i = 0; i < WRITE_BUFFER_RETRIES; i++) {
      if (writeBuffer.offer(task)) {
        scheduleAfterWrite();
        return;
      }
      scheduleDrainBuffers();
      Thread.onSpinWait();
    }

    // In scenarios where the writing threads cannot make progress then they attempt to provide
    // assistance by performing the eviction work directly. This can resolve cases where the
    // maintenance task is scheduled but not running. That might occur due to all of the executor's
    // threads being busy (perhaps writing into this cache), the write rate greatly exceeds the
    // consuming rate, priority inversion, or if the executor silently discarded the maintenance
    // task. Unfortunately this cannot resolve when the eviction is blocked waiting on a long-
    // running computation due to an eviction listener, the victim is being computed on by a writer,
    // or the victim residing in the same hash bin as a computing entry. In those cases a warning is
    // logged to encourage the application to decouple these computations from the map operations.
    lock();
    try {
      maintenance(task);
    } catch (RuntimeException e) {
      logger.log(Level.ERROR, "Exception thrown when performing the maintenance task", e);
    } finally {
      evictionLock.unlock();
    }
    rescheduleCleanUpIfIncomplete();
  }
1✔
1573

1574
  /** Acquires the eviction lock, logging a warning if it cannot be obtained promptly. */
  void lock() {
    @Var long remainingNanos = WARN_AFTER_LOCK_WAIT_NANOS;
    long end = System.nanoTime() + remainingNanos;
    @Var boolean interrupted = false;
    try {
      for (;;) {
        try {
          if (evictionLock.tryLock(remainingNanos, TimeUnit.NANOSECONDS)) {
            return;
          }
          logger.log(Level.WARNING, "The cache is experiencing excessive wait times for acquiring "
              + "the eviction lock. This may indicate that a long-running computation has halted "
              + "eviction when trying to remove the victim entry. Consider using AsyncCache to "
              + "decouple the computation from the map operation.", new TimeoutException());
          // after warning once, block uninterruptibly until the lock is available
          evictionLock.lock();
          return;
        } catch (InterruptedException e) {
          // retry with the remaining budget; the interrupt status is restored on exit
          remainingNanos = end - System.nanoTime();
          interrupted = true;
        }
      }
    } finally {
      if (interrupted) {
        Thread.currentThread().interrupt();
      }
    }
  }
1602

1603
  /**
   * Conditionally schedules the asynchronous maintenance task after a write operation. If the
   * task status was IDLE or REQUIRED then the maintenance task is scheduled immediately. If it
   * is already processing then it is set to transition to REQUIRED upon completion so that a new
   * execution is triggered by the next operation.
   */
  void scheduleAfterWrite() {
    @Var int drainStatus = drainStatusOpaque();
    for (;;) {
      switch (drainStatus) {
        case IDLE:
          // losing this CAS is benign; the drain is scheduled regardless
          casDrainStatus(IDLE, REQUIRED);
          scheduleDrainBuffers();
          return;
        case REQUIRED:
          scheduleDrainBuffers();
          return;
        case PROCESSING_TO_IDLE:
          if (casDrainStatus(PROCESSING_TO_IDLE, PROCESSING_TO_REQUIRED)) {
            return;
          }
          // the status changed concurrently, so re-read and re-evaluate
          drainStatus = drainStatusAcquire();
          continue;
        case PROCESSING_TO_REQUIRED:
          // a follow-up drain is already pending
          return;
        default:
          throw new IllegalStateException("Invalid drain status: " + drainStatus);
      }
    }
  }
1633

1634
  /**
   * Attempts to schedule an asynchronous task to apply the pending operations to the page
   * replacement policy. If the executor rejects the task then it is run directly.
   */
  void scheduleDrainBuffers() {
    if (drainStatusOpaque() >= PROCESSING_TO_IDLE) {
      // a drain is already in flight
      return;
    }
    // a failed tryLock means another thread holds the lock and will handle the drain
    if (evictionLock.tryLock()) {
      try {
        int drainStatus = drainStatusOpaque();
        if (drainStatus >= PROCESSING_TO_IDLE) {
          // re-check under the lock: another thread may have started processing
          return;
        }
        setDrainStatusRelease(PROCESSING_TO_IDLE);
        executor.execute(drainBuffersTask);
      } catch (Throwable t) {
        // the executor rejected or failed, so perform the maintenance inline as a fallback
        logger.log(Level.WARNING, "Exception thrown when submitting maintenance task", t);
        maintenance(/* ignored */ null);
      } finally {
        evictionLock.unlock();
      }
    }
  }
1✔
1658

1659
  @Override
  public void cleanUp() {
    try {
      performCleanUp(/* ignored */ null);
    } catch (RuntimeException e) {
      // maintenance failures are logged rather than propagated to the caller
      logger.log(Level.ERROR, "Exception thrown when performing the maintenance task", e);
    }
  }
1✔
1667

1668
  /**
   * Performs the maintenance work, blocking until the lock is acquired.
   *
   * @param task an additional pending task to run, or {@code null} if not present
   */
  void performCleanUp(@Nullable Runnable task) {
    evictionLock.lock();
    try {
      maintenance(task);
    } finally {
      evictionLock.unlock();
    }
    // any work left over from the amortized drain is rescheduled outside the lock
    rescheduleCleanUpIfIncomplete();
  }
1✔
1682

1683
  /**
   * If there remains pending operations that were not handled by the prior clean up then try to
   * schedule an asynchronous maintenance task. This may occur due to a concurrent write after the
   * maintenance work had started or if the amortized threshold of work per clean up was reached.
   */
  @SuppressWarnings("resource")
  void rescheduleCleanUpIfIncomplete() {
    if (drainStatusOpaque() != REQUIRED) {
      return;
    }

    // An immediate scheduling cannot be performed on a custom executor because it may use a
    // caller-runs policy. This could cause the caller's penalty to exceed the amortized threshold,
    // e.g. repeated concurrent writes could result in a retry loop.
    if (executor == ForkJoinPool.commonPool()) {
      scheduleDrainBuffers();
      return;
    }

    // If a scheduler was configured then the maintenance can be deferred onto the custom executor
    // and run in the near future. Otherwise, it will be handled due to other cache activity.
    var pacer = pacer();
    if ((pacer != null) && !pacer.isScheduled() && evictionLock.tryLock()) {
      try {
        // re-check under the lock so that only one delayed maintenance task is outstanding
        if ((drainStatusOpaque() == REQUIRED) && !pacer.isScheduled()) {
          pacer.schedule(executor, drainBuffersTask, expirationTicker().read(), Pacer.TOLERANCE);
        }
      } finally {
        evictionLock.unlock();
      }
    }
  }
1✔
1715

1716
  /**
   * Performs the pending maintenance work and sets the state flags during processing to avoid
   * excess scheduling attempts. The read buffer, write buffer, and reference queues are drained,
   * followed by expiration, and size-based eviction.
   *
   * @param task an additional pending task to run, or {@code null} if not present
   */
  @GuardedBy("evictionLock")
  void maintenance(@Nullable Runnable task) {
    setDrainStatusRelease(PROCESSING_TO_IDLE);

    try {
      drainReadBuffer();

      drainWriteBuffer();
      if (task != null) {
        task.run();
      }

      drainKeyReferences();
      drainValueReferences();

      expireEntries();
      evictEntries();

      climb();
    } finally {
      // transition back to idle, or flag that more work arrived while processing
      if ((drainStatusOpaque() != PROCESSING_TO_IDLE)
          || !casDrainStatus(PROCESSING_TO_IDLE, IDLE)) {
        setDrainStatusOpaque(REQUIRED);
      }
    }
  }
1✔
1749

1750
  /** Drains the weak key references queue. */
1751
  @GuardedBy("evictionLock")
1752
  void drainKeyReferences() {
1753
    if (!collectKeys()) {
1✔
1754
      return;
1✔
1755
    }
1756
    @Var Reference<? extends K> keyRef;
1757
    while ((keyRef = keyReferenceQueue().poll()) != null) {
1✔
1758
      Node<K, V> node = data.get(keyRef);
1✔
1759
      if (node != null) {
1✔
1760
        evictEntry(node, RemovalCause.COLLECTED, 0L);
1✔
1761
      }
1762
    }
1✔
1763
  }
1✔
1764

1765
  /** Drains the weak / soft value references queue. */
1766
  @GuardedBy("evictionLock")
1767
  void drainValueReferences() {
1768
    if (!collectValues()) {
1✔
1769
      return;
1✔
1770
    }
1771
    @Var Reference<? extends V> valueRef;
1772
    while ((valueRef = valueReferenceQueue().poll()) != null) {
1✔
1773
      @SuppressWarnings({"RedundantCast", "unchecked"})
1774
      var ref = (InternalReference<V>) (Object) valueRef;
1✔
1775
      Node<K, V> node = data.get(ref.getKeyReference());
1✔
1776
      if ((node != null) && (valueRef == node.getValueReference())) {
1✔
1777
        evictEntry(node, RemovalCause.COLLECTED, 0L);
1✔
1778
      }
1779
    }
1✔
1780
  }
1✔
1781

1782
  /** Drains the read buffer into the access policy, unless reads do not affect the policy. */
  @GuardedBy("evictionLock")
  void drainReadBuffer() {
    if (!skipReadBuffer()) {
      readBuffer.drainTo(accessPolicy);
    }
  }
1✔
1789

1790
  /** Updates the node's location in the page replacement policy. */
  @GuardedBy("evictionLock")
  void onAccess(Node<K, V> node) {
    if (evicts()) {
      K key = node.getKey();
      if (key == null) {
        // the key was garbage collected, so the entry is pending removal
        return;
      }
      frequencySketch().increment(key);
      // route to the deque for the queue the node currently resides in
      if (node.inWindow()) {
        reorder(accessOrderWindowDeque(), node);
      } else if (node.inMainProbation()) {
        reorderProbation(node);
      } else {
        reorder(accessOrderProtectedDeque(), node);
      }
      setHitsInSample(hitsInSample() + 1);
    } else if (expiresAfterAccess()) {
      reorder(accessOrderWindowDeque(), node);
    }
    if (expiresVariable()) {
      timerWheel().reschedule(node);
    }
  }
1✔
1814

1815
  /** Promote the node from probation to protected on an access. */
  @GuardedBy("evictionLock")
  void reorderProbation(Node<K, V> node) {
    if (!accessOrderProbationDeque().contains(node)) {
      // Ignore stale accesses for an entry that is no longer present
      return;
    } else if (node.getPolicyWeight() > mainProtectedMaximum()) {
      // too heavy to ever fit in the protected space, so only refresh its probation position
      reorder(accessOrderProbationDeque(), node);
      return;
    }

    // If the protected space exceeds its maximum, the LRU items are demoted to the probation space.
    // This is deferred to the adaption phase at the end of the maintenance cycle.
    setMainProtectedWeightedSize(mainProtectedWeightedSize() + node.getPolicyWeight());
    accessOrderProbationDeque().remove(node);
    accessOrderProtectedDeque().offerLast(node);
    node.makeMainProtected();
  }
1✔
1833

1834
  /** Updates the node's location in the policy's deque. */
1835
  static <K, V> void reorder(LinkedDeque<Node<K, V>> deque, Node<K, V> node) {
1836
    // An entry may be scheduled for reordering despite having been removed. This can occur when the
1837
    // entry was concurrently read while a writer was removing it. If the entry is no longer linked
1838
    // then it does not need to be processed.
1839
    if (deque.contains(node)) {
1✔
1840
      deque.moveToBack(node);
1✔
1841
    }
1842
  }
1✔
1843

1844
  /** Drains the write buffer, applying at most an amortized threshold of pending tasks. */
  @GuardedBy("evictionLock")
  void drainWriteBuffer() {
    for (int i = 0; i <= WRITE_BUFFER_MAX; i++) {
      Runnable task = writeBuffer.poll();
      if (task == null) {
        return;
      }
      task.run();
    }
    // the threshold was reached with tasks possibly remaining, so flag that more work is required
    setDrainStatusOpaque(PROCESSING_TO_REQUIRED);
  }
1✔
1856

1857
  /**
   * Atomically transitions the node to the <code>dead</code> state and decrements the
   * <code>weightedSize</code>.
   *
   * @param node the entry in the page replacement policy
   */
  @GuardedBy("evictionLock")
  void makeDead(Node<K, V> node) {
    synchronized (node) {
      if (node.isDead()) {
        // already transitioned by a concurrent removal
        return;
      }
      if (evicts()) {
        // The node's policy weight may be out of sync due to a pending update waiting to be
        // processed. At this point the node's weight is finalized, so the weight can be safely
        // taken from the node's perspective and the sizes will be adjusted correctly.
        if (node.inWindow()) {
          setWindowWeightedSize(windowWeightedSize() - node.getWeight());
        } else if (node.inMainProtected()) {
          setMainProtectedWeightedSize(mainProtectedWeightedSize() - node.getWeight());
        }
        setWeightedSize(weightedSize() - node.getWeight());
      }
      node.die();
    }
  }
1✔
1883

1884
  /** Adds the node to the page replacement policy. */
  final class AddTask implements Runnable {
    final Node<K, V> node;
    final int weight;

    AddTask(Node<K, V> node, int weight) {
      this.weight = weight;
      this.node = node;
    }

    @Override
    @GuardedBy("evictionLock")
    public void run() {
      if (evicts()) {
        setWeightedSize(weightedSize() + weight);
        setWindowWeightedSize(windowWeightedSize() + weight);
        node.setPolicyWeight(node.getPolicyWeight() + weight);

        long maximum = maximum();
        if (weightedSize() >= (maximum >>> 1)) {
          if (weightedSize() > MAXIMUM_CAPACITY) {
            evictEntries();
          } else {
            // Lazily initialize when close to the maximum
            long capacity = isWeighted() ? data.mappingCount() : maximum;
            frequencySketch().ensureCapacity(capacity);
          }
        }

        K key = node.getKey();
        if (key != null) {
          frequencySketch().increment(key);
        }

        setMissesInSample(missesInSample() + 1);
      }

      // ignore out-of-order write operations
      boolean isAlive;
      synchronized (node) {
        isAlive = node.isAlive();
      }
      if (isAlive) {
        if (expiresAfterWrite()) {
          writeOrderDeque().offerLast(node);
        }
        if (expiresVariable()) {
          timerWheel().schedule(node);
        }
        if (evicts()) {
          if (weight > maximum()) {
            // the entry can never fit within the maximum, so evict it immediately
            evictEntry(node, RemovalCause.SIZE, expirationTicker().read());
          } else if (weight > windowMaximum()) {
            // oversized for the window, so placed at the head rather than the tail
            accessOrderWindowDeque().offerFirst(node);
          } else {
            accessOrderWindowDeque().offerLast(node);
          }
        } else if (expiresAfterAccess()) {
          accessOrderWindowDeque().offerLast(node);
        }
      }
    }
  }
1947

1948
  /** Removes a node from the page replacement policy. */
  final class RemovalTask implements Runnable {
    final Node<K, V> node;

    RemovalTask(Node<K, V> node) {
      this.node = node;
    }

    @Override
    @GuardedBy("evictionLock")
    public void run() {
      // add may not have been processed yet
      if (node.inWindow() && (evicts() || expiresAfterAccess())) {
        accessOrderWindowDeque().remove(node);
      } else if (evicts()) {
        if (node.inMainProbation()) {
          accessOrderProbationDeque().remove(node);
        } else {
          accessOrderProtectedDeque().remove(node);
        }
      }
      if (expiresAfterWrite()) {
        writeOrderDeque().remove(node);
      } else if (expiresVariable()) {
        timerWheel().deschedule(node);
      }
      // finalize the state transition and adjust the weighted size accounting
      makeDead(node);
    }
  }
1977

1978
  /** Updates the weighted size. */
  final class UpdateTask implements Runnable {
    final int weightDifference;
    final Node<K, V> node;

    public UpdateTask(Node<K, V> node, int weightDifference) {
      this.weightDifference = weightDifference;
      this.node = node;
    }

    @Override
    @GuardedBy("evictionLock")
    public void run() {
      if (expiresAfterWrite()) {
        reorder(writeOrderDeque(), node);
      } else if (expiresVariable()) {
        timerWheel().reschedule(node);
      }
      if (evicts()) {
        int oldWeightedSize = node.getPolicyWeight();
        node.setPolicyWeight(oldWeightedSize + weightDifference);
        // adjust the queue the node currently resides in, evicting if it no longer fits
        if (node.inWindow()) {
          setWindowWeightedSize(windowWeightedSize() + weightDifference);
          if (node.getPolicyWeight() > maximum()) {
            evictEntry(node, RemovalCause.SIZE, expirationTicker().read());
          } else if (node.getPolicyWeight() <= windowMaximum()) {
            onAccess(node);
          } else if (accessOrderWindowDeque().contains(node)) {
            accessOrderWindowDeque().moveToFront(node);
          }
        } else if (node.inMainProbation()) {
          if (node.getPolicyWeight() <= maximum()) {
            onAccess(node);
          } else {
            evictEntry(node, RemovalCause.SIZE, expirationTicker().read());
          }
        } else if (node.inMainProtected()) {
          setMainProtectedWeightedSize(mainProtectedWeightedSize() + weightDifference);
          if (node.getPolicyWeight() <= maximum()) {
            onAccess(node);
          } else {
            evictEntry(node, RemovalCause.SIZE, expirationTicker().read());
          }
        }

        setWeightedSize(weightedSize() + weightDifference);
        if (weightedSize() > MAXIMUM_CAPACITY) {
          evictEntries();
        }
      } else if (expiresAfterAccess()) {
        onAccess(node);
      }
    }
  }
2032

2033
  /* --------------- Concurrent Map Support --------------- */
2034

2035
  @Override
  public boolean isEmpty() {
    // may transiently disagree with the policy's view while maintenance is pending
    return data.isEmpty();
  }
2039

2040
  @Override
  public int size() {
    // may include entries pending removal (expired or collected but not yet drained)
    return data.size();
  }
2044

2045
  @Override
  public long estimatedSize() {
    // a long-valued estimate that, unlike size(), does not saturate at Integer.MAX_VALUE
    return data.mappingCount();
  }
2049

2050
  @Override
  public void clear() {
    Deque<Node<K, V>> entries;
    evictionLock.lock();
    try {
      // Discard all pending reads
      readBuffer.drainTo(e -> {});

      // Apply all pending writes
      @Var Runnable task;
      while ((task = writeBuffer.poll()) != null) {
        task.run();
      }

      // Cancel the scheduled cleanup
      Pacer pacer = pacer();
      if (pacer != null) {
        pacer.cancel();
      }

      // Discard all entries, falling back to one-by-one to avoid excessive lock hold times
      long now = expirationTicker().read();
      int threshold = (WRITE_BUFFER_MAX / 2);
      entries = new ArrayDeque<>(data.values());
      while (!entries.isEmpty() && (writeBuffer.size() < threshold)) {
        removeNode(entries.poll(), now);
      }
    } finally {
      evictionLock.unlock();
    }

    // Remove any stragglers if released early to more aggressively flush incoming writes
    @Var boolean cleanUp = false;
    for (var node : entries) {
      var key = node.getKey();
      if (key == null) {
        // the key was garbage collected, so defer to a maintenance pass
        cleanUp = true;
      } else {
        remove(key);
      }
    }
    if (collectKeys() && cleanUp) {
      cleanUp();
    }
  }
1✔
2095

2096
  /**
   * Removes the node from the data store and the page replacement policy, notifying the listeners
   * with the applicable removal cause (collected, expired, or explicit).
   */
  @GuardedBy("evictionLock")
  @SuppressWarnings("GuardedByChecker")
  void removeNode(Node<K, V> node, long now) {
    K key = node.getKey();
    var cause = new RemovalCause[1];
    var keyReference = node.getKeyReference();
    @SuppressWarnings({"unchecked", "Varifier"})
    @Nullable V[] value = (V[]) new Object[1];

    data.computeIfPresent(keyReference, (k, n) -> {
      if (n != node) {
        // a different mapping now occupies the key, so leave it untouched
        return n;
      }
      synchronized (n) {
        value[0] = n.getValue();

        if ((key == null) || (value[0] == null)) {
          cause[0] = RemovalCause.COLLECTED;
        } else if (hasExpired(n, now)) {
          cause[0] = RemovalCause.EXPIRED;
        } else {
          cause[0] = RemovalCause.EXPLICIT;
        }

        if (cause[0].wasEvicted()) {
          notifyEviction(key, value[0], cause[0]);
        }

        discardRefresh(node.getKeyReference());
        node.retire();
        return null;
      }
    });

    // unlink from whichever policy queues the node resides in
    if (node.inWindow() && (evicts() || expiresAfterAccess())) {
      accessOrderWindowDeque().remove(node);
    } else if (evicts()) {
      if (node.inMainProbation()) {
        accessOrderProbationDeque().remove(node);
      } else {
        accessOrderProtectedDeque().remove(node);
      }
    }
    if (expiresAfterWrite()) {
      writeOrderDeque().remove(node);
    } else if (expiresVariable()) {
      timerWheel().deschedule(node);
    }

    synchronized (node) {
      logIfAlive(node);
      makeDead(node);
    }

    if (cause[0] != null) {
      notifyRemoval(key, value[0], cause[0]);
    }
  }
1✔
2154

2155
  @Override
2156
  public boolean containsKey(Object key) {
2157
    Node<K, V> node = data.get(nodeFactory.newLookupKey(key));
1✔
2158
    return (node != null) && (node.getValue() != null)
1✔
2159
        && !hasExpired(node, expirationTicker().read());
1✔
2160
  }
2161

2162
  @Override
2163
  @SuppressWarnings("SuspiciousMethodCalls")
2164
  public boolean containsValue(Object value) {
2165
    requireNonNull(value);
1✔
2166

2167
    long now = expirationTicker().read();
1✔
2168
    for (Node<K, V> node : data.values()) {
1✔
2169
      if (node.containsValue(value) && !hasExpired(node, now) && (node.getKey() != null)) {
1✔
2170
        return true;
1✔
2171
      }
2172
    }
1✔
2173
    return false;
1✔
2174
  }
2175

2176
  @Override
  public @Nullable V get(Object key) {
    // Map.get does not record cache statistics
    return getIfPresent(key, /* recordStats= */ false);
  }
2180

2181
  @Override
  public @Nullable V getIfPresent(Object key, boolean recordStats) {
    Node<K, V> node = data.get(nodeFactory.newLookupKey(key));
    if (node == null) {
      if (recordStats) {
        statsCounter().recordMisses(1);
      }
      // a miss is a cheap opportunity to trigger any pending maintenance
      if (drainStatusOpaque() == REQUIRED) {
        scheduleDrainBuffers();
      }
      return null;
    }

    V value = node.getValue();
    long now = expirationTicker().read();
    if (hasExpired(node, now) || (collectValues() && (value == null))) {
      // treat an expired or collected entry as absent and let maintenance remove it
      if (recordStats) {
        statsCounter().recordMisses(1);
      }
      scheduleDrainBuffers();
      return null;
    }

    if (!isComputingAsync(value)) {
      @SuppressWarnings("unchecked")
      var castedKey = (K) key;
      setAccessTime(node, now);
      tryExpireAfterRead(node, castedKey, value, expiry(), now);
    }
    V refreshed = afterRead(node, now, recordStats);
    return (refreshed == null) ? value : refreshed;
  }
2213

2214
  @Override
2215
  public @Nullable V getIfPresentQuietly(Object key) {
2216
    V value;
2217
    Node<K, V> node = data.get(nodeFactory.newLookupKey(key));
1✔
2218
    if ((node == null) || ((value = node.getValue()) == null)
1✔
2219
        || hasExpired(node, expirationTicker().read())) {
1✔
2220
      return null;
1✔
2221
    }
2222
    return value;
1✔
2223
  }
2224

2225
  /**
   * Returns the key associated with the mapping in this cache, or {@code null} if there is none.
   *
   * @param key the key whose canonical instance is to be returned
   * @return the key used by the mapping, or {@code null} if this cache does not contain a mapping
   *         for the key
   * @throws NullPointerException if the specified key is null
   */
  public @Nullable K getKey(K key) {
    Node<K, V> node = data.get(nodeFactory.newLookupKey(key));
    if (node == null) {
      if (drainStatusOpaque() == REQUIRED) {
        scheduleDrainBuffers();
      }
      return null;
    }
    // a zero timestamp performs the policy bookkeeping without touching expiration times
    afterRead(node, /* now= */ 0L, /* recordHit= */ false);
    return node.getKey();
  }
2244

2245
  @Override
  public Map<K, V> getAllPresent(Iterable<? extends K> keys) {
    // seed with null placeholders to deduplicate keys while preserving the iteration order
    var result = new LinkedHashMap<K, V>(calculateHashMapCapacity(keys));
    for (K key : keys) {
      result.put(key, null);
    }

    int uniqueKeys = result.size();
    long now = expirationTicker().read();
    for (var iter = result.entrySet().iterator(); iter.hasNext();) {
      V value;
      var entry = iter.next();
      Node<K, V> node = data.get(nodeFactory.newLookupKey(entry.getKey()));
      if ((node == null) || ((value = node.getValue()) == null) || hasExpired(node, now)) {
        iter.remove();
      } else {
        if (!isComputingAsync(value)) {
          tryExpireAfterRead(node, entry.getKey(), value, expiry(), now);
          setAccessTime(node, now);
        }
        V refreshed = afterRead(node, now, /* recordHit= */ false);
        entry.setValue((refreshed == null) ? value : refreshed);
      }
    }
    // hits are entries that remained; misses are the unique keys that were removed
    statsCounter().recordHits(result.size());
    statsCounter().recordMisses(uniqueKeys - result.size());

    return Collections.unmodifiableMap(result);
  }
2274

2275
  @Override
2276
  public void putAll(Map<? extends K, ? extends V> map) {
2277
    map.forEach(this::put);
1✔
2278
  }
1✔
2279

2280
  @Override
  public @Nullable V put(K key, V value) {
    // an unconditional write using the cache's configured expiry
    return put(key, value, expiry(), /* onlyIfAbsent= */ false);
  }
2284

2285
  @Override
  public @Nullable V putIfAbsent(K key, V value) {
    // a conditional write that only succeeds when no live mapping exists
    return put(key, value, expiry(), /* onlyIfAbsent= */ true);
  }
2289

2290
  /**
   * Adds a node to the policy and the data store. If an existing node is found, then its value is
   * updated if allowed.
   *
   * @param key key with which the specified value is to be associated
   * @param value value to be associated with the specified key
   * @param expiry the calculator for the write expiration time
   * @param onlyIfAbsent a write is performed only if the key is not already associated with a value
   * @return the prior value in or null if no mapping was found
   */
  @Nullable V put(K key, V value, Expiry<K, V> expiry, boolean onlyIfAbsent) {
    requireNonNull(key);
    requireNonNull(value);

    // The new node is created lazily on the first absent-mapping attempt and reused on retries.
    @Var Node<K, V> node = null;
    long now = expirationTicker().read();
    int newWeight = weigher.weigh(key, value);
    Object lookupKey = nodeFactory.newLookupKey(key);
    // Retry loop: races with concurrent removal/insertion are resolved by rereading the map.
    for (int attempts = 1; ; attempts++) {
      @Var Node<K, V> prior = data.get(lookupKey);
      if (prior == null) {
        if (node == null) {
          node = nodeFactory.newNode(key, keyReferenceQueue(),
              value, valueReferenceQueue(), newWeight, now);
          // Async placeholder values get a far-future timestamp until the future completes.
          long expirationTime = isComputingAsync(value) ? (now + ASYNC_EXPIRY) : now;
          setVariableTime(node, expireAfterCreate(key, value, expiry, now));
          setAccessTime(node, expirationTime);
          setWriteTime(node, expirationTime);
        }
        prior = data.putIfAbsent(node.getKeyReference(), node);
        if (prior == null) {
          // Won the race to insert; record the addition for the eviction policy.
          afterWrite(new AddTask(node, newWeight));
          return null;
        } else if (onlyIfAbsent) {
          // An optimistic fast path to avoid unnecessary locking
          V currentValue = prior.getValue();
          if ((currentValue != null) && !hasExpired(prior, now)) {
            if (!isComputingAsync(currentValue)) {
              tryExpireAfterRead(prior, key, currentValue, expiry(), now);
              setAccessTime(prior, now);
            }
            afterRead(prior, now, /* recordHit= */ false);
            return currentValue;
          }
        }
      } else if (onlyIfAbsent) {
        // An optimistic fast path to avoid unnecessary locking
        V currentValue = prior.getValue();
        if ((currentValue != null) && !hasExpired(prior, now)) {
          if (!isComputingAsync(currentValue)) {
            tryExpireAfterRead(prior, key, currentValue, expiry(), now);
            setAccessTime(prior, now);
          }
          afterRead(prior, now, /* recordHit= */ false);
          return currentValue;
        }
      }

      // A read may race with the entry's removal, so that after the entry is acquired it may no
      // longer be usable. A retry will reread from the map and either find an absent mapping, a
      // new entry, or a stale entry.
      if (!prior.isAlive()) {
        // A reread of the stale entry may occur if the state transition occurred but the map
        // removal was delayed by a context switch, so that this thread spin waits until resolved.
        // NOTE(review): assumes MAX_PUT_SPIN_WAIT_ATTEMPTS is a power-of-two-minus-one mask so
        // the bitwise test bounds the spin phase — confirm against the constant's declaration.
        if ((attempts & MAX_PUT_SPIN_WAIT_ATTEMPTS) != 0) {
          Thread.onSpinWait();
          continue;
        }

        // If the spin wait attempts are exhausted then fallback to a map computation in order to
        // deschedule this thread until the entry's removal completes. If the key was modified
        // while in the map so that its equals or hashCode changed then the contents may be
        // corrupted, where the cache holds an evicted (dead) entry that could not be removed.
        // That is a violation of the Map contract, so we check that the mapping is in the "alive"
        // state while in the computation.
        data.computeIfPresent(lookupKey, (k, n) -> {
          requireIsAlive(key, n);
          return n;
        });
        continue;
      }

      V oldValue;
      long varTime;
      int oldWeight;
      @Var boolean expired = false;
      @Var boolean mayUpdate = true;
      @Var boolean exceedsTolerance = false;
      // Per-entry lock: all state transitions on a live node happen under its monitor.
      synchronized (prior) {
        if (!prior.isAlive()) {
          // Lost a race with removal after the liveness check; retry from the top.
          continue;
        }
        oldValue = prior.getValue();
        oldWeight = prior.getWeight();
        if (oldValue == null) {
          // Value was reference-collected; treat as a re-creation of the mapping.
          varTime = expireAfterCreate(key, value, expiry, now);
          notifyEviction(key, null, RemovalCause.COLLECTED);
        } else if (hasExpired(prior, now)) {
          expired = true;
          varTime = expireAfterCreate(key, value, expiry, now);
          notifyEviction(key, oldValue, RemovalCause.EXPIRED);
        } else if (onlyIfAbsent) {
          // Live mapping and putIfAbsent semantics: do not overwrite, only refresh read expiry.
          mayUpdate = false;
          varTime = expireAfterRead(prior, key, value, expiry, now);
        } else {
          varTime = expireAfterUpdate(prior, key, value, expiry, now);
        }

        long expirationTime = isComputingAsync(value) ? (now + ASYNC_EXPIRY) : now;
        if (mayUpdate) {
          exceedsTolerance = exceedsWriteTimeTolerance(prior, varTime, now);
          if (expired || exceedsTolerance) {
            setWriteTime(prior, isComputingAsync(value) ? (now + ASYNC_EXPIRY) : now);
          }

          prior.setValue(value, valueReferenceQueue());
          prior.setWeight(newWeight);

          // A replaced value invalidates any in-flight refresh for this key.
          discardRefresh(prior.getKeyReference());
        }

        setVariableTime(prior, varTime);
        setAccessTime(prior, expirationTime);
      }

      // Removal-listener notifications are delivered outside the entry's lock.
      if (expired) {
        notifyRemoval(key, oldValue, RemovalCause.EXPIRED);
      } else if (oldValue == null) {
        notifyRemoval(key, /* value= */ null, RemovalCause.COLLECTED);
      } else if (mayUpdate) {
        notifyOnReplace(key, oldValue, value);
      }

      // A write task is only required when the policy's bookkeeping changed (weight, revival,
      // or expiration beyond tolerance); otherwise a cheaper read event suffices.
      int weightedDifference = mayUpdate ? (newWeight - oldWeight) : 0;
      if ((oldValue == null) || (weightedDifference != 0) || expired) {
        afterWrite(new UpdateTask(prior, weightedDifference));
      } else if (!onlyIfAbsent && exceedsTolerance) {
        afterWrite(new UpdateTask(prior, weightedDifference));
      } else {
        afterRead(prior, now, /* recordHit= */ false);
      }

      return expired ? null : oldValue;
    }
  }
2435

2436
  @Override
  public @Nullable V remove(Object key) {
    @SuppressWarnings("unchecked")
    var castKey = (K) key;
    // Single-element arrays act as mutable out-parameters for the compute lambda.
    @SuppressWarnings({"rawtypes", "unchecked"})
    Node<K, V>[] node = new Node[1];
    @SuppressWarnings({"unchecked", "Varifier"})
    @Nullable V[] oldValue = (V[]) new Object[1];
    RemovalCause[] cause = new RemovalCause[1];
    Object lookupKey = nodeFactory.newLookupKey(key);

    // Returning null from the compute removes the mapping atomically; the entry's monitor is
    // held so the node's state transition to retired is not observed partially.
    data.computeIfPresent(lookupKey, (k, n) -> {
      synchronized (n) {
        requireIsAlive(key, n);
        oldValue[0] = n.getValue();
        // Classify the removal: a collected or expired entry is an eviction, not an
        // explicit removal, even though the caller triggered it.
        if (oldValue[0] == null) {
          cause[0] = RemovalCause.COLLECTED;
        } else if (hasExpired(n, expirationTicker().read())) {
          cause[0] = RemovalCause.EXPIRED;
        } else {
          cause[0] = RemovalCause.EXPLICIT;
        }
        if (cause[0].wasEvicted()) {
          notifyEviction(castKey, oldValue[0], cause[0]);
        }
        discardRefresh(lookupKey);
        node[0] = n;
        n.retire();
      }
      return null;
    });

    // Policy bookkeeping and listener notification happen outside the map computation.
    if (cause[0] != null) {
      afterWrite(new RemovalTask(node[0]));
      notifyRemoval(castKey, oldValue[0], cause[0]);
    }
    // Only an explicit removal exposes the old value to the caller (Map contract).
    return (cause[0] == RemovalCause.EXPLICIT) ? oldValue[0] : null;
  }
2474

2475
  @Override
  public boolean remove(Object key, Object value) {
    requireNonNull(key);
    if (value == null) {
      // Per the Map contract, a null value never matches, so nothing can be removed.
      return false;
    }

    // Single-element arrays act as mutable out-parameters for the compute lambda.
    @SuppressWarnings({"rawtypes", "unchecked"})
    Node<K, V>[] removed = new Node[1];
    @SuppressWarnings({"unchecked", "Varifier"})
    @Nullable K[] oldKey = (K[]) new Object[1];
    @SuppressWarnings({"unchecked", "Varifier"})
    @Nullable V[] oldValue = (V[]) new Object[1];
    RemovalCause[] cause = new RemovalCause[1];
    Object lookupKey = nodeFactory.newLookupKey(key);

    data.computeIfPresent(lookupKey, (kR, node) -> {
      synchronized (node) {
        requireIsAlive(key, node);
        oldKey[0] = node.getKey();
        oldValue[0] = node.getValue();
        // A collected or expired entry is removed as an eviction; a live entry is only
        // removed when it holds the expected value, otherwise the mapping is kept.
        if ((oldKey[0] == null) || (oldValue[0] == null)) {
          cause[0] = RemovalCause.COLLECTED;
        } else if (hasExpired(node, expirationTicker().read())) {
          cause[0] = RemovalCause.EXPIRED;
        } else if (node.containsValue(value)) {
          cause[0] = RemovalCause.EXPLICIT;
        } else {
          return node;
        }
        if (cause[0].wasEvicted()) {
          notifyEviction(oldKey[0], oldValue[0], cause[0]);
        }
        discardRefresh(lookupKey);
        removed[0] = node;
        node.retire();
        return null;
      }
    });

    if (removed[0] == null) {
      return false;
    }
    // Policy bookkeeping and listener notification happen outside the map computation.
    afterWrite(new RemovalTask(removed[0]));
    notifyRemoval(oldKey[0], oldValue[0], cause[0]);

    // True only for an explicit match-and-remove; an eviction does not satisfy the caller.
    return (cause[0] == RemovalCause.EXPLICIT);
  }
2523

2524
  @Override
  public @Nullable V replace(K key, V value) {
    requireNonNull(key);
    requireNonNull(value);

    // Single-element arrays act as mutable out-parameters for the compute lambda.
    var now = new long[1];
    var oldWeight = new int[1];
    var exceedsTolerance = new boolean[1];
    @SuppressWarnings({"unchecked", "Varifier"})
    @Nullable K[] nodeKey = (K[]) new Object[1];
    @SuppressWarnings({"unchecked", "Varifier"})
    @Nullable V[] oldValue = (V[]) new Object[1];
    int weight = weigher.weigh(key, value);
    Node<K, V> node = data.computeIfPresent(nodeFactory.newLookupKey(key), (k, n) -> {
      synchronized (n) {
        requireIsAlive(key, n);
        nodeKey[0] = n.getKey();
        oldValue[0] = n.getValue();
        oldWeight[0] = n.getWeight();
        // A collected or expired entry does not count as "present"; leave it for the
        // maintenance cycle to evict and report no replacement to the caller.
        if ((nodeKey[0] == null) || (oldValue[0] == null)
            || hasExpired(n, now[0] = expirationTicker().read())) {
          oldValue[0] = null;
          return n;
        }

        long varTime = expireAfterUpdate(n, key, value, expiry(), now[0]);
        n.setValue(value, valueReferenceQueue());
        n.setWeight(weight);

        // Async placeholder values get a far-future timestamp until the future completes.
        long expirationTime = isComputingAsync(value) ? (now[0] + ASYNC_EXPIRY) : now[0];
        exceedsTolerance[0] = exceedsWriteTimeTolerance(n, varTime, expirationTime);
        if (exceedsTolerance[0]) {
          setWriteTime(n, expirationTime);
        }
        setAccessTime(n, expirationTime);
        setVariableTime(n, varTime);

        discardRefresh(k);
        return n;
      }
    });

    if ((nodeKey[0] == null) || (oldValue[0] == null)) {
      return null;
    }

    // A write task is needed only when weight or the write timestamp changed; otherwise a
    // cheaper read event keeps the policy up to date.
    int weightedDifference = (weight - oldWeight[0]);
    if (exceedsTolerance[0] || (weightedDifference != 0)) {
      afterWrite(new UpdateTask(node, weightedDifference));
    } else {
      afterRead(node, now[0], /* recordHit= */ false);
    }

    notifyOnReplace(nodeKey[0], oldValue[0], value);
    return oldValue[0];
  }
2580

2581
  @Override
2582
  public boolean replace(K key, V oldValue, V newValue) {
2583
    return replace(key, oldValue, newValue, /* shouldDiscardRefresh= */ true);
1✔
2584
  }
2585

2586
  @Override
  public boolean replace(K key, V oldValue, V newValue, boolean shouldDiscardRefresh) {
    requireNonNull(key);
    requireNonNull(oldValue);
    requireNonNull(newValue);

    // Single-element arrays act as mutable out-parameters for the compute lambda.
    var now = new long[1];
    var oldWeight = new int[1];
    var exceedsTolerance = new boolean[1];
    @SuppressWarnings({"unchecked", "Varifier"})
    @Nullable K[] nodeKey = (K[]) new Object[1];
    @SuppressWarnings({"unchecked", "Varifier"})
    @Nullable V[] prevValue = (V[]) new Object[1];

    int weight = weigher.weigh(key, newValue);
    Node<K, V> node = data.computeIfPresent(nodeFactory.newLookupKey(key), (k, n) -> {
      synchronized (n) {
        requireIsAlive(key, n);
        nodeKey[0] = n.getKey();
        prevValue[0] = n.getValue();
        oldWeight[0] = n.getWeight();
        // Fail the replacement when the entry is collected or expired, or when the current
        // value does not match the expected one; prevValue is nulled to signal the failure.
        if ((nodeKey[0] == null) || (prevValue[0] == null) || !n.containsValue(oldValue)
            || hasExpired(n, now[0] = expirationTicker().read())) {
          prevValue[0] = null;
          return n;
        }

        long varTime = expireAfterUpdate(n, key, newValue, expiry(), now[0]);
        n.setValue(newValue, valueReferenceQueue());
        n.setWeight(weight);

        // Async placeholder values get a far-future timestamp until the future completes.
        long expirationTime = isComputingAsync(newValue) ? (now[0] + ASYNC_EXPIRY) : now[0];
        exceedsTolerance[0] = exceedsWriteTimeTolerance(n, varTime, expirationTime);
        if (exceedsTolerance[0]) {
          setWriteTime(n, expirationTime);
        }
        setAccessTime(n, expirationTime);
        setVariableTime(n, varTime);

        // The refresh is kept alive when the caller is the refresh itself completing.
        if (shouldDiscardRefresh) {
          discardRefresh(k);
        }
      }
      return n;
    });

    if ((nodeKey[0] == null) || (prevValue[0] == null)) {
      return false;
    }

    // A write task is needed only when weight or the write timestamp changed; otherwise a
    // cheaper read event keeps the policy up to date.
    int weightedDifference = (weight - oldWeight[0]);
    if (exceedsTolerance[0] || (weightedDifference != 0)) {
      afterWrite(new UpdateTask(node, weightedDifference));
    } else {
      afterRead(node, now[0], /* recordHit= */ false);
    }

    notifyOnReplace(nodeKey[0], prevValue[0], newValue);
    return true;
  }
2646

2647
  @Override
2648
  public void replaceAll(BiFunction<? super K, ? super V, ? extends V> function) {
2649
    requireNonNull(function);
1✔
2650

2651
    BiFunction<K, V, V> remappingFunction = (key, oldValue) ->
1✔
2652
        requireNonNull(function.apply(key, oldValue));
1✔
2653
    for (K key : keySet()) {
1✔
2654
      long[] now = { expirationTicker().read() };
1✔
2655
      Object lookupKey = nodeFactory.newLookupKey(key);
1✔
2656
      remap(key, lookupKey, remappingFunction, expiry(), now, /* computeIfAbsent= */ false);
1✔
2657
    }
1✔
2658
  }
1✔
2659

2660
  @Override
  public @Nullable V computeIfAbsent(K key, @Var Function<? super K, ? extends V> mappingFunction,
      boolean recordStats, boolean recordLoad) {
    requireNonNull(key);
    requireNonNull(mappingFunction);
    long now = expirationTicker().read();

    // An optimistic fast path to avoid unnecessary locking
    Node<K, V> node = data.get(nodeFactory.newLookupKey(key));
    if (node != null) {
      V value = node.getValue();
      if ((value != null) && !hasExpired(node, now)) {
        // Async placeholders keep their far-future timestamps until the future completes.
        if (!isComputingAsync(value)) {
          tryExpireAfterRead(node, key, value, expiry(), now);
          setAccessTime(node, now);
        }
        // afterRead may return a refreshed value that supersedes the stored one.
        var refreshed = afterRead(node, now, /* recordHit= */ recordStats);
        return (refreshed == null) ? value : refreshed;
      }
    }
    if (recordStats) {
      mappingFunction = statsAware(mappingFunction, recordLoad);
    }
    // Fall back to the locked slow path with a strong key reference for insertion.
    Object keyRef = nodeFactory.newReferenceKey(key, keyReferenceQueue());
    return doComputeIfAbsent(key, keyRef, mappingFunction, new long[] { now }, recordStats);
  }
2686

2687
  /** Returns the current value from a computeIfAbsent invocation. */
  @Nullable V doComputeIfAbsent(K key, Object keyRef,
      Function<? super K, ? extends @Nullable V> mappingFunction, long[/* 1 */] now,
      boolean recordStats) {
    // Single-element arrays act as mutable out-parameters for the compute lambda.
    @SuppressWarnings({"unchecked", "Varifier"})
    @Nullable V[] oldValue = (V[]) new Object[1];
    @SuppressWarnings({"unchecked", "Varifier"})
    @Nullable V[] newValue = (V[]) new Object[1];
    @SuppressWarnings({"unchecked", "Varifier"})
    @Nullable K[] nodeKey = (K[]) new Object[1];
    @SuppressWarnings({"rawtypes", "unchecked"})
    Node<K, V>[] removed = new Node[1];

    int[] weight = new int[2]; // old, new
    RemovalCause[] cause = new RemovalCause[1];
    Node<K, V> node = data.compute(keyRef, (k, n) -> {
      if (n == null) {
        // Absent mapping: run the loader and insert a new node when it yields a value.
        newValue[0] = mappingFunction.apply(key);
        if (newValue[0] == null) {
          return null;
        }
        now[0] = expirationTicker().read();
        weight[1] = weigher.weigh(key, newValue[0]);
        var created = nodeFactory.newNode(key, keyReferenceQueue(),
            newValue[0], valueReferenceQueue(), weight[1], now[0]);
        // Async placeholder values get a far-future timestamp until the future completes.
        long expirationTime = isComputingAsync(newValue[0]) ? (now[0] + ASYNC_EXPIRY) : now[0];
        setVariableTime(created, expireAfterCreate(key, newValue[0], expiry(), now[0]));
        setAccessTime(created, expirationTime);
        setWriteTime(created, expirationTime);
        return created;
      }

      synchronized (n) {
        requireIsAlive(key, n);
        nodeKey[0] = n.getKey();
        weight[0] = n.getWeight();
        oldValue[0] = n.getValue();
        // A live, unexpired entry short-circuits: the mapping function is not invoked.
        if ((nodeKey[0] == null) || (oldValue[0] == null)) {
          cause[0] = RemovalCause.COLLECTED;
        } else if (hasExpired(n, now[0])) {
          cause[0] = RemovalCause.EXPIRED;
        } else {
          return n;
        }

        // The entry is dead; evict it and recompute as if the mapping had been absent.
        if (cause[0].wasEvicted()) {
          notifyEviction(nodeKey[0], oldValue[0], cause[0]);
        }
        newValue[0] = mappingFunction.apply(key);
        if (newValue[0] == null) {
          removed[0] = n;
          n.retire();
          return null;
        }
        now[0] = expirationTicker().read();
        weight[1] = weigher.weigh(key, newValue[0]);
        long varTime = expireAfterCreate(key, newValue[0], expiry(), now[0]);

        // Reuse the existing node for the replacement value.
        n.setValue(newValue[0], valueReferenceQueue());
        n.setWeight(weight[1]);

        long expirationTime = isComputingAsync(newValue[0]) ? (now[0] + ASYNC_EXPIRY) : now[0];
        setAccessTime(n, expirationTime);
        setWriteTime(n, expirationTime);
        setVariableTime(n, varTime);

        discardRefresh(k);
        return n;
      }
    });

    // Listener notifications and policy bookkeeping occur outside the map computation.
    if (cause[0] != null) {
      if (cause[0].wasEvicted()) {
        statsCounter().recordEviction(weight[0], cause[0]);
      }
      notifyRemoval(nodeKey[0], oldValue[0], cause[0]);
    }
    if (node == null) {
      if (removed[0] != null) {
        afterWrite(new RemovalTask(removed[0]));
      }
      return null;
    }
    if (newValue[0] == null) {
      // Hit on an existing live entry: treat as a read.
      if (!isComputingAsync(oldValue[0])) {
        tryExpireAfterRead(node, key, oldValue[0], expiry(), now[0]);
        setAccessTime(node, now[0]);
      }

      afterRead(node, now[0], /* recordHit= */ recordStats);
      return oldValue[0];
    }
    // Record either a fresh insertion or an in-place replacement of a dead entry.
    if ((oldValue[0] == null) && (cause[0] == null)) {
      afterWrite(new AddTask(node, weight[1]));
    } else {
      int weightedDifference = (weight[1] - weight[0]);
      afterWrite(new UpdateTask(node, weightedDifference));
    }

    return newValue[0];
  }
2788

2789
  @Override
  public @Nullable V computeIfPresent(K key,
      BiFunction<? super K, ? super V, ? extends V> remappingFunction) {
    requireNonNull(key);
    requireNonNull(remappingFunction);

    // An optimistic fast path to avoid unnecessary locking
    Object lookupKey = nodeFactory.newLookupKey(key);
    @Nullable Node<K, V> node = data.get(lookupKey);
    long now;
    if (node == null) {
      return null;
    } else if ((node.getValue() == null) || hasExpired(node, (now = expirationTicker().read()))) {
      // A dead entry behaves as absent; prompt maintenance to remove it.
      scheduleDrainBuffers();
      return null;
    }

    BiFunction<? super K, ? super V, ? extends V> statsAwareRemappingFunction =
        statsAware(remappingFunction, /* recordLoad= */ true, /* recordLoadFailure= */ true);
    // Delegate to the locked remap path; computeIfAbsent=false keeps absent keys absent.
    return remap(key, lookupKey, statsAwareRemappingFunction,
        expiry(), new long[] { now }, /* computeIfAbsent= */ false);
  }
2811

2812
  @Override
2813
  @SuppressWarnings("NullAway")
2814
  public @Nullable V compute(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction,
2815
      @Nullable Expiry<? super K, ? super V> expiry, boolean recordLoad,
2816
      boolean recordLoadFailure) {
2817
    requireNonNull(key);
1✔
2818
    requireNonNull(remappingFunction);
1✔
2819

2820
    long[] now = { expirationTicker().read() };
1✔
2821
    Object keyRef = nodeFactory.newReferenceKey(key, keyReferenceQueue());
1✔
2822
    BiFunction<? super K, ? super V, ? extends V> statsAwareRemappingFunction =
1✔
2823
        statsAware(remappingFunction, recordLoad, recordLoadFailure);
1✔
2824
    return remap(key, keyRef, statsAwareRemappingFunction,
1✔
2825
        expiry, now, /* computeIfAbsent= */ true);
2826
  }
2827

2828
  @Override
2829
  public @Nullable V merge(K key, V value,
2830
      BiFunction<? super V, ? super V, ? extends V> remappingFunction) {
2831
    requireNonNull(key);
1✔
2832
    requireNonNull(value);
1✔
2833
    requireNonNull(remappingFunction);
1✔
2834

2835
    long[] now = { expirationTicker().read() };
1✔
2836
    Object keyRef = nodeFactory.newReferenceKey(key, keyReferenceQueue());
1✔
2837
    BiFunction<? super K, ? super V, ? extends V> mergeFunction = (k, oldValue) ->
1✔
2838
        (oldValue == null) ? value : statsAware(remappingFunction).apply(oldValue, value);
1✔
2839
    return remap(key, keyRef, mergeFunction, expiry(), now, /* computeIfAbsent= */ true);
1✔
2840
  }
2841

2842
  /**
   * Attempts to compute a mapping for the specified key and its current mapped value (or
   * {@code null} if there is no current mapping).
   * <p>
   * An entry that has expired or been reference collected is evicted and the computation continues
   * as if the entry had not been present. This method does not pre-screen and does not wrap the
   * remappingFunction to be statistics aware.
   *
   * @param key key with which the specified value is to be associated
   * @param keyRef the key to associate with or a lookup only key if not {@code computeIfAbsent}
   * @param remappingFunction the function to compute a value
   * @param expiry the calculator for the expiration time
   * @param now the current time, according to the ticker
   * @param computeIfAbsent if an absent entry can be computed
   * @return the new value associated with the specified key, or null if none
   */
  @SuppressWarnings("PMD.EmptyControlStatement")
  @Nullable V remap(K key, Object keyRef,
      BiFunction<? super K, ? super V, ? extends @Nullable V> remappingFunction,
      Expiry<? super K, ? super V> expiry, long[/* 1 */] now, boolean computeIfAbsent) {
    // Single-element arrays act as mutable out-parameters for the compute lambda.
    @SuppressWarnings({"unchecked", "Varifier"})
    @Nullable K[] nodeKey = (K[]) new Object[1];
    @SuppressWarnings({"unchecked", "Varifier"})
    @Nullable V[] oldValue = (V[]) new Object[1];
    @SuppressWarnings({"unchecked", "Varifier"})
    @Nullable V[] newValue = (V[]) new Object[1];
    @SuppressWarnings({"rawtypes", "unchecked"})
    Node<K, V>[] removed = new Node[1];

    var weight = new int[2]; // old, new
    var cause = new RemovalCause[1];
    var exceedsTolerance = new boolean[1];

    Node<K, V> node = data.compute(keyRef, (kr, n) -> {
      if (n == null) {
        // Absent mapping: only compute when the caller allows insertion.
        if (!computeIfAbsent) {
          return null;
        }
        newValue[0] = remappingFunction.apply(key, null);
        if (newValue[0] == null) {
          return null;
        }
        now[0] = expirationTicker().read();
        weight[1] = weigher.weigh(key, newValue[0]);
        long varTime = expireAfterCreate(key, newValue[0], expiry, now[0]);
        var created = nodeFactory.newNode(keyRef, newValue[0],
            valueReferenceQueue(), weight[1], now[0]);

        // Async placeholder values get a far-future timestamp until the future completes.
        long expirationTime = isComputingAsync(newValue[0]) ? (now[0] + ASYNC_EXPIRY) : now[0];
        setAccessTime(created, expirationTime);
        setWriteTime(created, expirationTime);
        setVariableTime(created, varTime);

        discardRefresh(key);
        return created;
      }

      synchronized (n) {
        requireIsAlive(key, n);
        nodeKey[0] = n.getKey();
        oldValue[0] = n.getValue();
        // Classify a dead entry so the computation behaves as if the mapping were absent.
        if ((nodeKey[0] == null) || (oldValue[0] == null)) {
          cause[0] = RemovalCause.COLLECTED;
        } else if (hasExpired(n, expirationTicker().read())) {
          cause[0] = RemovalCause.EXPIRED;
        }
        if (cause[0] != null) {
          notifyEviction(nodeKey[0], oldValue[0], cause[0]);
          if (!computeIfAbsent) {
            // The dead entry is evicted and no replacement may be computed.
            removed[0] = n;
            n.retire();
            return null;
          }
        }

        // A dead entry presents null to the function, matching absent-mapping semantics.
        newValue[0] = remappingFunction.apply(nodeKey[0],
            (cause[0] == null) ? oldValue[0] : null);
        if (newValue[0] == null) {
          if (cause[0] == null) {
            cause[0] = RemovalCause.EXPLICIT;
            discardRefresh(kr);
          }
          removed[0] = n;
          n.retire();
          return null;
        }

        long varTime;
        weight[0] = n.getWeight();
        weight[1] = weigher.weigh(key, newValue[0]);
        now[0] = expirationTicker().read();
        if (cause[0] == null) {
          // Identity comparison: an unchanged value is not reported as a replacement.
          if (newValue[0] != oldValue[0]) {
            cause[0] = RemovalCause.REPLACED;
          }
          varTime = expireAfterUpdate(n, key, newValue[0], expiry, now[0]);
        } else {
          varTime = expireAfterCreate(key, newValue[0], expiry, now[0]);
        }

        n.setValue(newValue[0], valueReferenceQueue());
        n.setWeight(weight[1]);

        // Async placeholder values get a far-future timestamp until the future completes.
        long expirationTime = isComputingAsync(newValue[0]) ? (now[0] + ASYNC_EXPIRY) : now[0];
        exceedsTolerance[0] = exceedsWriteTimeTolerance(n, varTime, expirationTime);
        if (((cause[0] != null) && cause[0].wasEvicted()) || exceedsTolerance[0]) {
          setWriteTime(n, expirationTime);
        }
        setAccessTime(n, expirationTime);
        setVariableTime(n, varTime);

        discardRefresh(kr);
        return n;
      }
    });

    // Listener notifications are delivered outside the map computation.
    if (cause[0] != null) {
      if (cause[0] == RemovalCause.REPLACED) {
        requireNonNull(newValue[0]);
        notifyOnReplace(key, oldValue[0], newValue[0]);
      } else {
        if (cause[0].wasEvicted()) {
          statsCounter().recordEviction(weight[0], cause[0]);
        }
        notifyRemoval(nodeKey[0], oldValue[0], cause[0]);
      }
    }

    // Policy bookkeeping: removal, addition, update, or plain read depending on the outcome.
    if (removed[0] != null) {
      afterWrite(new RemovalTask(removed[0]));
    } else if (node == null) {
      // absent and not computable
    } else if ((oldValue[0] == null) && (cause[0] == null)) {
      afterWrite(new AddTask(node, weight[1]));
    } else {
      int weightedDifference = weight[1] - weight[0];
      if (exceedsTolerance[0] || (weightedDifference != 0)) {
        afterWrite(new UpdateTask(node, weightedDifference));
      } else {
        afterRead(node, now[0], /* recordHit= */ false);
        if ((cause[0] != null) && cause[0].wasEvicted()) {
          scheduleDrainBuffers();
        }
      }
    }

    return newValue[0];
  }
2990

2991
  @Override
2992
  public void forEach(BiConsumer<? super K, ? super V> action) {
2993
    requireNonNull(action);
1✔
2994

2995
    for (var iterator = new EntryIterator<>(this); iterator.hasNext();) {
1✔
2996
      action.accept(iterator.key, iterator.value);
1✔
2997
      iterator.advance();
1✔
2998
    }
2999
  }
1✔
3000

3001
  @Override
3002
  public Set<K> keySet() {
3003
    Set<K> ks = keySet;
1✔
3004
    return (ks == null) ? (keySet = new KeySetView<>(this)) : ks;
1✔
3005
  }
3006

3007
  @Override
3008
  public Collection<V> values() {
3009
    Collection<V> vs = values;
1✔
3010
    return (vs == null) ? (values = new ValuesView<>(this)) : vs;
1✔
3011
  }
3012

3013
  @Override
3014
  public Set<Entry<K, V>> entrySet() {
3015
    Set<Entry<K, V>> es = entrySet;
1✔
3016
    return (es == null) ? (entrySet = new EntrySetView<>(this)) : es;
1✔
3017
  }
3018

3019
  /**
3020
   * Object equality requires reflexive, symmetric, transitive, and consistency properties. Of
3021
   * these, symmetry and consistency require further clarification for how they are upheld.
3022
   * <p>
3023
   * The <i>consistency</i> property between invocations requires that the results are the same if
3024
   * there are no modifications to the information used. Therefore, usages should expect that this
3025
   * operation may return misleading results if either the maps or the data held by them is modified
3026
   * during the execution of this method. This characteristic allows for comparing the map sizes and
3027
   * assuming stable mappings, as done by {@link java.util.AbstractMap}-based maps.
3028
   * <p>
3029
   * The <i>symmetric</i> property requires that the result is the same for all implementations of
3030
   * {@link Map#equals(Object)}. That contract is defined in terms of the stable mappings provided
3031
   * by {@link #entrySet()}, meaning that the {@link #size()} optimization forces that the count is
3032
   * consistent with the mappings when used for an equality check.
3033
   * <p>
3034
   * The cache's {@link #size()} method may include entries that have expired or have been reference
3035
   * collected, but have not yet been removed from the backing map. An iteration over the map may
3036
   * trigger the removal of these dead entries when skipped over during traversal. To ensure
3037
   * consistency and symmetry, usages should call {@link #cleanUp()} before this method while no
3038
   * other concurrent operations are being performed on this cache. This is not done implicitly by
3039
   * {@link #size()} as many usages assume it to be instantaneous and lock-free.
3040
   */
3041
  @Override
3042
  public boolean equals(@Nullable Object o) {
3043
    if (o == this) {
1✔
3044
      return true;
1✔
3045
    } else if (!(o instanceof Map)) {
1✔
3046
      return false;
1✔
3047
    }
3048

3049
    var map = (Map<?, ?>) o;
1✔
3050
    if (size() != map.size()) {
1✔
3051
      return false;
1✔
3052
    }
3053

3054
    long now = expirationTicker().read();
1✔
3055
    for (var node : data.values()) {
1✔
3056
      K key = node.getKey();
1✔
3057
      V value = node.getValue();
1✔
3058
      if ((key == null) || (value == null)
1✔
3059
          || !node.isAlive() || hasExpired(node, now)) {
1✔
3060
        scheduleDrainBuffers();
1✔
3061
        return false;
1✔
3062
      } else {
3063
        var val = map.get(key);
1✔
3064
        if ((val == null) || ((val != value) && !val.equals(value))) {
1✔
3065
          return false;
1✔
3066
        }
3067
      }
3068
    }
1✔
3069
    return true;
1✔
3070
  }
3071

3072
  @Override
3073
  public int hashCode() {
3074
    @Var int hash = 0;
1✔
3075
    long now = expirationTicker().read();
1✔
3076
    for (var node : data.values()) {
1✔
3077
      K key = node.getKey();
1✔
3078
      V value = node.getValue();
1✔
3079
      if ((key == null) || (value == null)
1✔
3080
          || !node.isAlive() || hasExpired(node, now)) {
1✔
3081
        scheduleDrainBuffers();
1✔
3082
      } else {
3083
        hash += key.hashCode() ^ value.hashCode();
1✔
3084
      }
3085
    }
1✔
3086
    return hash;
1✔
3087
  }
3088

3089
  @Override
3090
  public String toString() {
3091
    var result = new StringBuilder().append('{');
1✔
3092
    long now = expirationTicker().read();
1✔
3093
    for (var node : data.values()) {
1✔
3094
      K key = node.getKey();
1✔
3095
      V value = node.getValue();
1✔
3096
      if ((key == null) || (value == null)
1✔
3097
          || !node.isAlive() || hasExpired(node, now)) {
1✔
3098
        scheduleDrainBuffers();
1✔
3099
      } else {
3100
        if (result.length() != 1) {
1✔
3101
          result.append(',').append(' ');
1✔
3102
        }
3103
        result.append((key == this) ? "(this Map)" : key);
1✔
3104
        result.append('=');
1✔
3105
        result.append((value == this) ? "(this Map)" : value);
1✔
3106
      }
3107
    }
1✔
3108
    return result.append('}').toString();
1✔
3109
  }
3110

3111
  /**
   * Returns the computed result from the ordered traversal of the cache entries.
   *
   * @param hottest the coldest or hottest iteration order
   * @param transformer a function that unwraps the value
   * @param mappingFunction the mapping function to compute a value
   * @return the computed value
   */
  @SuppressWarnings("GuardedByChecker")
  <T> T evictionOrder(boolean hottest, Function<@Nullable V, @Nullable V> transformer,
      Function<Stream<CacheEntry<K, V>>, T> mappingFunction) {
    // Rank entries by their sketch frequency estimate; a collected key contributes zero
    Comparator<Node<K, V>> comparator = Comparator.comparingInt(node -> {
      K key = node.getKey();
      return (key == null) ? 0 : frequencySketch().frequency(key);
    });
    Iterable<Node<K, V>> iterable;
    if (hottest) {
      // Hottest first: all protected entries in reverse access order, then a frequency-ordered
      // merge of the probation and window entries (also traversed in reverse)
      iterable = () -> {
        var secondary = PeekingIterator.comparing(
            accessOrderProbationDeque().descendingIterator(),
            accessOrderWindowDeque().descendingIterator(), comparator);
        return PeekingIterator.concat(
            accessOrderProtectedDeque().descendingIterator(), secondary);
      };
    } else {
      // Coldest first: a merge of the window and probation entries using the reversed frequency
      // comparator, followed by all of the protected entries
      iterable = () -> {
        var primary = PeekingIterator.comparing(
            accessOrderWindowDeque().iterator(), accessOrderProbationDeque().iterator(),
            comparator.reversed());
        return PeekingIterator.concat(primary, accessOrderProtectedDeque().iterator());
      };
    }
    return snapshot(iterable, transformer, mappingFunction);
  }
3145

3146
  /**
   * Returns the computed result from the ordered traversal of the cache entries.
   *
   * @param oldest the youngest or oldest iteration order
   * @param transformer a function that unwraps the value
   * @param mappingFunction the mapping function to compute a value
   * @return the computed value
   */
  @SuppressWarnings("GuardedByChecker")
  <T> T expireAfterAccessOrder(boolean oldest, Function<@Nullable V, @Nullable V> transformer,
      Function<Stream<CacheEntry<K, V>>, T> mappingFunction) {
    Iterable<Node<K, V>> iterable;
    if (evicts()) {
      // With size eviction enabled, entries are spread across the window, probation, and
      // protected deques, so a three-way merge by access time yields the traversal order
      iterable = () -> {
        @Var Comparator<Node<K, V>> comparator = Comparator.comparingLong(Node::getAccessTime);
        PeekingIterator<Node<K, V>> first;
        PeekingIterator<Node<K, V>> second;
        PeekingIterator<Node<K, V>> third;
        if (oldest) {
          first = accessOrderWindowDeque().iterator();
          second = accessOrderProbationDeque().iterator();
          third = accessOrderProtectedDeque().iterator();
        } else {
          // Traverse each deque in the opposite direction and flip the comparator to match
          comparator = comparator.reversed();
          first = accessOrderWindowDeque().descendingIterator();
          second = accessOrderProbationDeque().descendingIterator();
          third = accessOrderProtectedDeque().descendingIterator();
        }
        return PeekingIterator.comparing(
            PeekingIterator.comparing(first, second, comparator), third, comparator);
      };
    } else {
      // Without size eviction, every entry resides in the window deque
      iterable = oldest
          ? accessOrderWindowDeque()
          : accessOrderWindowDeque()::descendingIterator;
    }
    return snapshot(iterable, transformer, mappingFunction);
  }
3184

3185
  /**
   * Returns the computed result from the ordered traversal of the cache entries.
   *
   * @param iterable the supplier of the entries in the cache
   * @param transformer a function that unwraps the value
   * @param mappingFunction the mapping function to compute a value
   * @return the computed value
   */
  @SuppressWarnings("NullAway")
  <T> T snapshot(Iterable<Node<K, V>> iterable, Function<@Nullable V, @Nullable V> transformer,
      Function<Stream<CacheEntry<K, V>>, T> mappingFunction) {
    requireNonNull(mappingFunction);
    requireNonNull(transformer);
    requireNonNull(iterable);

    // Traverse under the eviction lock so that the policy's ordering is not perturbed by
    // concurrent maintenance while the snapshot is being computed
    evictionLock.lock();
    try {
      // Run any pending maintenance first so the deques reflect the latest activity
      maintenance(/* ignored */ null);

      // Obtain the iterator as late as possible for modification count checking
      try (var stream = StreamSupport.stream(Spliterators.spliteratorUnknownSize(
           iterable.iterator(), DISTINCT | ORDERED | NONNULL | IMMUTABLE), /* parallel= */ false)) {
        // Dead entries map to null and are filtered out of the exposed stream
        return mappingFunction.apply(stream
            .map(node -> nodeToCacheEntry(node, transformer))
            .filter(Objects::nonNull));
      }
    } finally {
      evictionLock.unlock();
      // Retry the cleanup later if the maintenance work was left incomplete
      rescheduleCleanUpIfIncomplete();
    }
  }
3216

3217
  /** Returns an entry for the given node if it can be used externally, else null. */
  @Nullable CacheEntry<K, V> nodeToCacheEntry(
      Node<K, V> node, Function<@Nullable V, @Nullable V> transformer) {
    V value = transformer.apply(node.getValue());
    K key = node.getKey();
    long now;
    // Exclude entries whose key or value was collected, that are retired/dead, or that expired
    if ((key == null) || (value == null) || !node.isAlive()
        || hasExpired(node, (now = expirationTicker().read()))) {
      return null;
    }

    // Combine the enabled fixed expiration policies by taking the minimum; a variable
    // expiration, when configured, supersedes them entirely
    @Var long expiresAfter = Long.MAX_VALUE;
    if (expiresAfterAccess()) {
      // NOTE(review): this accumulates (now - accessTime) + duration, while the variable branch
      // below computes the remaining time (variableTime - now) — confirm the sign convention
      expiresAfter = Math.min(expiresAfter, now - node.getAccessTime() + expiresAfterAccessNanos());
    }
    if (expiresAfterWrite()) {
      // The low bit of both timestamps is masked off before subtracting; presumably it is
      // reserved as a flag bit on the write time — confirm against the write-time producers
      expiresAfter = Math.min(expiresAfter,
          (now & ~1L) - (node.getWriteTime() & ~1L) + expiresAfterWriteNanos());
    }
    if (expiresVariable()) {
      expiresAfter = node.getVariableTime() - now;
    }

    // When refreshing is disabled the refresh point is pushed far beyond the current time
    // (the addition may wrap; callers are expected to treat it as "not refreshable")
    long refreshableAt = refreshAfterWrite()
        ? node.getWriteTime() + refreshAfterWriteNanos()
        : now + Long.MAX_VALUE;
    int weight = node.getPolicyWeight();
    return SnapshotEntry.forEntry(key, value, now, weight, now + expiresAfter, refreshableAt);
  }
3246

3247
  /** A function that produces an unmodifiable map up to the limit in stream order. */
3248
  static final class SizeLimiter<K, V> implements Function<Stream<CacheEntry<K, V>>, Map<K, V>> {
3249
    private final int expectedSize;
3250
    private final long limit;
3251

3252
    SizeLimiter(int expectedSize, long limit) {
1✔
3253
      requireArgument(limit >= 0);
1✔
3254
      this.expectedSize = expectedSize;
1✔
3255
      this.limit = limit;
1✔
3256
    }
1✔
3257

3258
    @Override
3259
    public Map<K, V> apply(Stream<CacheEntry<K, V>> stream) {
3260
      var map = new LinkedHashMap<K, V>(calculateHashMapCapacity(expectedSize));
1✔
3261
      stream.limit(limit).forEach(entry -> map.put(entry.getKey(), entry.getValue()));
1✔
3262
      return Collections.unmodifiableMap(map);
1✔
3263
    }
3264
  }
3265

3266
  /** A function that produces an unmodifiable map up to the weighted limit in stream order. */
3267
  static final class WeightLimiter<K, V> implements Function<Stream<CacheEntry<K, V>>, Map<K, V>> {
3268
    private final long weightLimit;
3269

3270
    private long weightedSize;
3271

3272
    WeightLimiter(long weightLimit) {
1✔
3273
      requireArgument(weightLimit >= 0);
1✔
3274
      this.weightLimit = weightLimit;
1✔
3275
    }
1✔
3276

3277
    @Override
3278
    public Map<K, V> apply(Stream<CacheEntry<K, V>> stream) {
3279
      var map = new LinkedHashMap<K, V>();
1✔
3280
      stream.takeWhile(entry -> {
1✔
3281
        weightedSize = Math.addExact(weightedSize, entry.weight());
1✔
3282
        return (weightedSize <= weightLimit);
1✔
3283
      }).forEach(entry -> map.put(entry.getKey(), entry.getValue()));
1✔
3284
      return Collections.unmodifiableMap(map);
1✔
3285
    }
3286
  }
3287

3288
  /** An adapter to safely externalize the keys. */
3289
  static final class KeySetView<K, V> extends AbstractSet<K> {
3290
    final BoundedLocalCache<K, V> cache;
3291

3292
    KeySetView(BoundedLocalCache<K, V> cache) {
1✔
3293
      this.cache = requireNonNull(cache);
1✔
3294
    }
1✔
3295

3296
    @Override
3297
    public int size() {
3298
      return cache.size();
1✔
3299
    }
3300

3301
    @Override
3302
    public void clear() {
3303
      cache.clear();
1✔
3304
    }
1✔
3305

3306
    @Override
3307
    @SuppressWarnings("SuspiciousMethodCalls")
3308
    public boolean contains(Object o) {
3309
      return cache.containsKey(o);
1✔
3310
    }
3311

3312
    @Override
3313
    public boolean removeAll(Collection<?> collection) {
3314
      requireNonNull(collection);
1✔
3315
      @Var boolean modified = false;
1✔
3316
      if ((collection instanceof Set<?>) && (collection.size() > size())) {
1✔
3317
        for (K key : this) {
1✔
3318
          if (collection.contains(key)) {
1✔
3319
            modified |= remove(key);
1✔
3320
          }
3321
        }
1✔
3322
      } else {
3323
        for (var item : collection) {
1✔
3324
          modified |= (item != null) && remove(item);
1✔
3325
        }
1✔
3326
      }
3327
      return modified;
1✔
3328
    }
3329

3330
    @Override
3331
    public boolean remove(Object o) {
3332
      return (cache.remove(o) != null);
1✔
3333
    }
3334

3335
    @Override
3336
    public boolean removeIf(Predicate<? super K> filter) {
3337
      requireNonNull(filter);
1✔
3338
      @Var boolean modified = false;
1✔
3339
      for (K key : this) {
1✔
3340
        if (filter.test(key) && remove(key)) {
1✔
3341
          modified = true;
1✔
3342
        }
3343
      }
1✔
3344
      return modified;
1✔
3345
    }
3346

3347
    @Override
3348
    public boolean retainAll(Collection<?> collection) {
3349
      requireNonNull(collection);
1✔
3350
      @Var boolean modified = false;
1✔
3351
      for (K key : this) {
1✔
3352
        if (!collection.contains(key) && remove(key)) {
1✔
3353
          modified = true;
1✔
3354
        }
3355
      }
1✔
3356
      return modified;
1✔
3357
    }
3358

3359
    @Override
3360
    public Iterator<K> iterator() {
3361
      return new KeyIterator<>(cache);
1✔
3362
    }
3363

3364
    @Override
3365
    public Spliterator<K> spliterator() {
3366
      return new KeySpliterator<>(cache);
1✔
3367
    }
3368
  }
3369

3370
  /** An adapter to safely externalize the key iterator. */
  static final class KeyIterator<K, V> implements Iterator<K> {
    // Delegates to the entry iterator, which performs the liveness filtering
    final EntryIterator<K, V> iterator;

    KeyIterator(BoundedLocalCache<K, V> cache) {
      this.iterator = new EntryIterator<>(cache);
    }

    @Override
    public boolean hasNext() {
      return iterator.hasNext();
    }

    @Override
    public K next() {
      return iterator.nextKey();
    }

    @Override
    public void remove() {
      iterator.remove();
    }
  }
3393

3394
  /** An adapter to safely externalize the key spliterator. */
3395
  static final class KeySpliterator<K, V> implements Spliterator<K> {
3396
    final Spliterator<Node<K, V>> spliterator;
3397
    final BoundedLocalCache<K, V> cache;
3398

3399
    KeySpliterator(BoundedLocalCache<K, V> cache) {
3400
      this(cache, cache.data.values().spliterator());
1✔
3401
    }
1✔
3402

3403
    KeySpliterator(BoundedLocalCache<K, V> cache, Spliterator<Node<K, V>> spliterator) {
1✔
3404
      this.spliterator = requireNonNull(spliterator);
1✔
3405
      this.cache = requireNonNull(cache);
1✔
3406
    }
1✔
3407

3408
    @Override
3409
    public void forEachRemaining(Consumer<? super K> action) {
3410
      requireNonNull(action);
1✔
3411
      Consumer<Node<K, V>> consumer = node -> {
1✔
3412
        K key = node.getKey();
1✔
3413
        V value = node.getValue();
1✔
3414
        long now = cache.expirationTicker().read();
1✔
3415
        if ((key != null) && (value != null) && node.isAlive() && !cache.hasExpired(node, now)) {
1✔
3416
          action.accept(key);
1✔
3417
        }
3418
      };
1✔
3419
      spliterator.forEachRemaining(consumer);
1✔
3420
    }
1✔
3421

3422
    @Override
3423
    public boolean tryAdvance(Consumer<? super K> action) {
3424
      requireNonNull(action);
1✔
3425
      boolean[] advanced = { false };
1✔
3426
      Consumer<Node<K, V>> consumer = node -> {
1✔
3427
        K key = node.getKey();
1✔
3428
        V value = node.getValue();
1✔
3429
        long now = cache.expirationTicker().read();
1✔
3430
        if ((key != null) && (value != null) && node.isAlive() && !cache.hasExpired(node, now)) {
1✔
3431
          action.accept(key);
1✔
3432
          advanced[0] = true;
1✔
3433
        }
3434
      };
1✔
3435
      while (spliterator.tryAdvance(consumer)) {
1✔
3436
        if (advanced[0]) {
1✔
3437
          return true;
1✔
3438
        }
3439
      }
3440
      return false;
1✔
3441
    }
3442

3443
    @Override
3444
    public @Nullable Spliterator<K> trySplit() {
3445
      Spliterator<Node<K, V>> split = spliterator.trySplit();
1✔
3446
      return (split == null) ? null : new KeySpliterator<>(cache, split);
1✔
3447
    }
3448

3449
    @Override
3450
    public long estimateSize() {
3451
      return spliterator.estimateSize();
1✔
3452
    }
3453

3454
    @Override
3455
    public int characteristics() {
3456
      return DISTINCT | CONCURRENT | NONNULL;
1✔
3457
    }
3458
  }
3459

3460
  /** An adapter to safely externalize the values. */
3461
  static final class ValuesView<K, V> extends AbstractCollection<V> {
3462
    final BoundedLocalCache<K, V> cache;
3463

3464
    ValuesView(BoundedLocalCache<K, V> cache) {
1✔
3465
      this.cache = requireNonNull(cache);
1✔
3466
    }
1✔
3467

3468
    @Override
3469
    public int size() {
3470
      return cache.size();
1✔
3471
    }
3472

3473
    @Override
3474
    public void clear() {
3475
      cache.clear();
1✔
3476
    }
1✔
3477

3478
    @Override
3479
    @SuppressWarnings("SuspiciousMethodCalls")
3480
    public boolean contains(Object o) {
3481
      return cache.containsValue(o);
1✔
3482
    }
3483

3484
    @Override
3485
    @SuppressWarnings("NullAway")
3486
    public boolean removeAll(Collection<?> collection) {
3487
      requireNonNull(collection);
1✔
3488
      @Var boolean modified = false;
1✔
3489
      for (var iterator = new EntryIterator<>(cache); iterator.hasNext();) {
1✔
3490
        if (collection.contains(iterator.value) && cache.remove(iterator.key, iterator.value)) {
1✔
3491
          modified = true;
1✔
3492
        }
3493
        iterator.advance();
1✔
3494
      }
3495
      return modified;
1✔
3496
    }
3497

3498
    @Override
3499
    @SuppressWarnings("NullAway")
3500
    public boolean remove(Object o) {
3501
      if (o == null) {
1✔
3502
        return false;
1✔
3503
      }
3504
      for (var iterator = new EntryIterator<>(cache); iterator.hasNext();) {
1✔
3505
        if (o.equals(iterator.value) && cache.remove(iterator.key, iterator.value)) {
1✔
3506
          return true;
1✔
3507
        }
3508
        iterator.advance();
1✔
3509
      }
3510
      return false;
1✔
3511
    }
3512

3513
    @Override
3514
    @SuppressWarnings("NullAway")
3515
    public boolean removeIf(Predicate<? super V> filter) {
3516
      requireNonNull(filter);
1✔
3517
      @Var boolean modified = false;
1✔
3518
      for (var iterator = new EntryIterator<>(cache); iterator.hasNext();) {
1✔
3519
        if (filter.test(iterator.value)) {
1✔
3520
          modified |= cache.remove(iterator.key, iterator.value);
1✔
3521
        }
3522
        iterator.advance();
1✔
3523
      }
3524
      return modified;
1✔
3525
    }
3526

3527
    @Override
3528
    @SuppressWarnings("NullAway")
3529
    public boolean retainAll(Collection<?> collection) {
3530
      requireNonNull(collection);
1✔
3531
      @Var boolean modified = false;
1✔
3532
      for (var iterator = new EntryIterator<>(cache); iterator.hasNext();) {
1✔
3533
        if (!collection.contains(iterator.value) && cache.remove(iterator.key, iterator.value)) {
1✔
3534
          modified = true;
1✔
3535
        }
3536
        iterator.advance();
1✔
3537
      }
3538
      return modified;
1✔
3539
    }
3540

3541
    @Override
3542
    public Iterator<V> iterator() {
3543
      return new ValueIterator<>(cache);
1✔
3544
    }
3545

3546
    @Override
3547
    public Spliterator<V> spliterator() {
3548
      return new ValueSpliterator<>(cache);
1✔
3549
    }
3550
  }
3551

3552
  /** An adapter to safely externalize the value iterator. */
  static final class ValueIterator<K, V> implements Iterator<V> {
    // Delegates to the entry iterator, which performs the liveness filtering
    final EntryIterator<K, V> iterator;

    ValueIterator(BoundedLocalCache<K, V> cache) {
      this.iterator = new EntryIterator<>(cache);
    }

    @Override
    public boolean hasNext() {
      return iterator.hasNext();
    }

    @Override
    public V next() {
      return iterator.nextValue();
    }

    @Override
    public void remove() {
      iterator.remove();
    }
  }
3575

3576
  /** An adapter to safely externalize the value spliterator. */
3577
  static final class ValueSpliterator<K, V> implements Spliterator<V> {
3578
    final Spliterator<Node<K, V>> spliterator;
3579
    final BoundedLocalCache<K, V> cache;
3580

3581
    ValueSpliterator(BoundedLocalCache<K, V> cache) {
3582
      this(cache, cache.data.values().spliterator());
1✔
3583
    }
1✔
3584

3585
    ValueSpliterator(BoundedLocalCache<K, V> cache, Spliterator<Node<K, V>> spliterator) {
1✔
3586
      this.spliterator = requireNonNull(spliterator);
1✔
3587
      this.cache = requireNonNull(cache);
1✔
3588
    }
1✔
3589

3590
    @Override
3591
    public void forEachRemaining(Consumer<? super V> action) {
3592
      requireNonNull(action);
1✔
3593
      Consumer<Node<K, V>> consumer = node -> {
1✔
3594
        K key = node.getKey();
1✔
3595
        V value = node.getValue();
1✔
3596
        long now = cache.expirationTicker().read();
1✔
3597
        if ((key != null) && (value != null) && node.isAlive() && !cache.hasExpired(node, now)) {
1✔
3598
          action.accept(value);
1✔
3599
        }
3600
      };
1✔
3601
      spliterator.forEachRemaining(consumer);
1✔
3602
    }
1✔
3603

3604
    @Override
3605
    public boolean tryAdvance(Consumer<? super V> action) {
3606
      requireNonNull(action);
1✔
3607
      boolean[] advanced = { false };
1✔
3608
      long now = cache.expirationTicker().read();
1✔
3609
      Consumer<Node<K, V>> consumer = node -> {
1✔
3610
        K key = node.getKey();
1✔
3611
        V value = node.getValue();
1✔
3612
        if ((key != null) && (value != null) && !cache.hasExpired(node, now) && node.isAlive()) {
1✔
3613
          action.accept(value);
1✔
3614
          advanced[0] = true;
1✔
3615
        }
3616
      };
1✔
3617
      while (spliterator.tryAdvance(consumer)) {
1✔
3618
        if (advanced[0]) {
1✔
3619
          return true;
1✔
3620
        }
3621
      }
3622
      return false;
1✔
3623
    }
3624

3625
    @Override
3626
    public @Nullable Spliterator<V> trySplit() {
3627
      Spliterator<Node<K, V>> split = spliterator.trySplit();
1✔
3628
      return (split == null) ? null : new ValueSpliterator<>(cache, split);
1✔
3629
    }
3630

3631
    @Override
3632
    public long estimateSize() {
3633
      return spliterator.estimateSize();
1✔
3634
    }
3635

3636
    @Override
3637
    public int characteristics() {
3638
      return CONCURRENT | NONNULL;
1✔
3639
    }
3640
  }
3641

3642
  /** An adapter to safely externalize the entries. */
3643
  static final class EntrySetView<K, V> extends AbstractSet<Entry<K, V>> {
3644
    final BoundedLocalCache<K, V> cache;
3645

3646
    EntrySetView(BoundedLocalCache<K, V> cache) {
1✔
3647
      this.cache = requireNonNull(cache);
1✔
3648
    }
1✔
3649

3650
    @Override
3651
    public int size() {
3652
      return cache.size();
1✔
3653
    }
3654

3655
    @Override
3656
    public void clear() {
3657
      cache.clear();
1✔
3658
    }
1✔
3659

3660
    @Override
3661
    public boolean contains(Object o) {
3662
      if (!(o instanceof Entry<?, ?>)) {
1✔
3663
        return false;
1✔
3664
      }
3665
      var entry = (Entry<?, ?>) o;
1✔
3666
      var key = entry.getKey();
1✔
3667
      var value = entry.getValue();
1✔
3668
      if ((key == null) || (value == null)) {
1✔
3669
        return false;
1✔
3670
      }
3671
      Node<K, V> node = cache.data.get(cache.nodeFactory.newLookupKey(key));
1✔
3672
      return (node != null) && node.containsValue(value);
1✔
3673
    }
3674

3675
    @Override
3676
    public boolean removeAll(Collection<?> collection) {
3677
      requireNonNull(collection);
1✔
3678
      @Var boolean modified = false;
1✔
3679
      if ((collection instanceof Set<?>) && (collection.size() > size())) {
1✔
3680
        for (var entry : this) {
1✔
3681
          if (collection.contains(entry)) {
1✔
3682
            modified |= remove(entry);
1✔
3683
          }
3684
        }
1✔
3685
      } else {
3686
        for (var item : collection) {
1✔
3687
          modified |= (item != null) && remove(item);
1✔
3688
        }
1✔
3689
      }
3690
      return modified;
1✔
3691
    }
3692

3693
    @Override
3694
    @SuppressWarnings("SuspiciousMethodCalls")
3695
    public boolean remove(Object o) {
3696
      if (!(o instanceof Entry<?, ?>)) {
1✔
3697
        return false;
1✔
3698
      }
3699
      var entry = (Entry<?, ?>) o;
1✔
3700
      var key = entry.getKey();
1✔
3701
      return (key != null) && cache.remove(key, entry.getValue());
1✔
3702
    }
3703

3704
    @Override
3705
    public boolean removeIf(Predicate<? super Entry<K, V>> filter) {
3706
      requireNonNull(filter);
1✔
3707
      @Var boolean modified = false;
1✔
3708
      for (Entry<K, V> entry : this) {
1✔
3709
        if (filter.test(entry)) {
1✔
3710
          modified |= cache.remove(entry.getKey(), entry.getValue());
1✔
3711
        }
3712
      }
1✔
3713
      return modified;
1✔
3714
    }
3715

3716
    @Override
3717
    public boolean retainAll(Collection<?> collection) {
3718
      requireNonNull(collection);
1✔
3719
      @Var boolean modified = false;
1✔
3720
      for (var entry : this) {
1✔
3721
        if (!collection.contains(entry) && remove(entry)) {
1✔
3722
          modified = true;
1✔
3723
        }
3724
      }
1✔
3725
      return modified;
1✔
3726
    }
3727

3728
    @Override
3729
    public Iterator<Entry<K, V>> iterator() {
3730
      return new EntryIterator<>(cache);
1✔
3731
    }
3732

3733
    @Override
3734
    public Spliterator<Entry<K, V>> spliterator() {
3735
      return new EntrySpliterator<>(cache);
1✔
3736
    }
3737
  }
3738

3739
  /** An adapter to safely externalize the entry iterator. */
  static final class EntryIterator<K, V> implements Iterator<Entry<K, V>> {
    final BoundedLocalCache<K, V> cache;
    final Iterator<Node<K, V>> iterator;

    // The key/value of the next live entry, captured by hasNext() and consumed by the next*
    // methods (also read directly by ValuesView's bulk operations)
    @Nullable K key;
    @Nullable V value;
    // The key of the most recently returned entry, consumed by remove()
    @Nullable K removalKey;
    // The next node to expose, or null when a new candidate must be located
    @Nullable Node<K, V> next;

    EntryIterator(BoundedLocalCache<K, V> cache) {
      this.iterator = cache.data.values().iterator();
      this.cache = cache;
    }

    @Override
    public boolean hasNext() {
      // A previous hasNext() call already located the next live entry
      if (next != null) {
        return true;
      }

      long now = cache.expirationTicker().read();
      while (iterator.hasNext()) {
        next = iterator.next();
        value = next.getValue();
        key = next.getKey();

        // Skip entries that were collected, expired, or retired; when the entry is eligible for
        // eviction, request maintenance so that it is removed from the backing map
        boolean evictable = (key == null) || (value == null) || cache.hasExpired(next, now);
        if (evictable || !next.isAlive()) {
          if (evictable) {
            cache.scheduleDrainBuffers();
          }
          advance();
          continue;
        }
        return true;
      }
      return false;
    }

    /** Invalidates the current position so that the iterator may compute the next position. */
    void advance() {
      value = null;
      next = null;
      key = null;
    }

    /** Returns the next live key, recording it for a subsequent remove(). */
    @SuppressWarnings("NullAway")
    K nextKey() {
      if (!hasNext()) {
        throw new NoSuchElementException();
      }
      removalKey = key;
      advance();
      return removalKey;
    }

    /** Returns the next live value, recording its key for a subsequent remove(). */
    @SuppressWarnings("NullAway")
    V nextValue() {
      if (!hasNext()) {
        throw new NoSuchElementException();
      }
      removalKey = key;
      V val = value;
      advance();
      return val;
    }

    @Override
    public Entry<K, V> next() {
      if (!hasNext()) {
        throw new NoSuchElementException();
      }
      @SuppressWarnings("NullAway")
      var entry = new WriteThroughEntry<>(cache, key, value);
      removalKey = key;
      advance();
      return entry;
    }

    @Override
    public void remove() {
      // Per the Iterator contract, remove() is only valid after a next* call
      if (removalKey == null) {
        throw new IllegalStateException();
      }
      cache.remove(removalKey);
      removalKey = null;
    }
  }
3828

3829
  /** An adapter to safely externalize the entry spliterator. */
3830
  static final class EntrySpliterator<K, V> implements Spliterator<Entry<K, V>> {
3831
    final Spliterator<Node<K, V>> spliterator;
3832
    final BoundedLocalCache<K, V> cache;
3833

3834
    EntrySpliterator(BoundedLocalCache<K, V> cache) {
3835
      this(cache, cache.data.values().spliterator());
1✔
3836
    }
1✔
3837

3838
    EntrySpliterator(BoundedLocalCache<K, V> cache, Spliterator<Node<K, V>> spliterator) {
1✔
3839
      this.spliterator = requireNonNull(spliterator);
1✔
3840
      this.cache = requireNonNull(cache);
1✔
3841
    }
1✔
3842

3843
    @Override
3844
    public void forEachRemaining(Consumer<? super Entry<K, V>> action) {
3845
      requireNonNull(action);
1✔
3846
      Consumer<Node<K, V>> consumer = node -> {
1✔
3847
        K key = node.getKey();
1✔
3848
        V value = node.getValue();
1✔
3849
        long now = cache.expirationTicker().read();
1✔
3850
        if ((key != null) && (value != null) && node.isAlive() && !cache.hasExpired(node, now)) {
1✔
3851
          action.accept(new WriteThroughEntry<>(cache, key, value));
1✔
3852
        }
3853
      };
1✔
3854
      spliterator.forEachRemaining(consumer);
1✔
3855
    }
1✔
3856

3857
    @Override
3858
    public boolean tryAdvance(Consumer<? super Entry<K, V>> action) {
3859
      requireNonNull(action);
1✔
3860
      boolean[] advanced = { false };
1✔
3861
      Consumer<Node<K, V>> consumer = node -> {
1✔
3862
        K key = node.getKey();
1✔
3863
        V value = node.getValue();
1✔
3864
        long now = cache.expirationTicker().read();
1✔
3865
        if ((key != null) && (value != null) && node.isAlive() && !cache.hasExpired(node, now)) {
1✔
3866
          action.accept(new WriteThroughEntry<>(cache, key, value));
1✔
3867
          advanced[0] = true;
1✔
3868
        }
3869
      };
1✔
3870
      while (spliterator.tryAdvance(consumer)) {
1✔
3871
        if (advanced[0]) {
1✔
3872
          return true;
1✔
3873
        }
3874
      }
3875
      return false;
1✔
3876
    }
3877

3878
    @Override
3879
    public @Nullable Spliterator<Entry<K, V>> trySplit() {
3880
      Spliterator<Node<K, V>> split = spliterator.trySplit();
1✔
3881
      return (split == null) ? null : new EntrySpliterator<>(cache, split);
1✔
3882
    }
3883

3884
    @Override
3885
    public long estimateSize() {
3886
      return spliterator.estimateSize();
1✔
3887
    }
3888

3889
    @Override
3890
    public int characteristics() {
3891
      return DISTINCT | CONCURRENT | NONNULL;
1✔
3892
    }
3893
  }
3894

3895
  /** A reusable task that performs the maintenance work; used to avoid wrapping by ForkJoinPool. */
  static final class PerformCleanupTask extends ForkJoinTask<@Nullable Void> implements Runnable {
    private static final long serialVersionUID = 1L;

    // Held weakly so that an otherwise unreachable cache can be garbage collected even while a
    // cleanup task for it remains queued on an executor
    final WeakReference<BoundedLocalCache<?, ?>> reference;

    PerformCleanupTask(BoundedLocalCache<?, ?> cache) {
      reference = new WeakReference<>(cache);
    }

    @Override
    public boolean exec() {
      try {
        run();
      } catch (Throwable t) {
        // Maintenance is best-effort; log rather than propagate so the task remains reusable
        logger.log(Level.ERROR, "Exception thrown when performing the maintenance task", t);
      }

      // Indicates that the task has not completed to allow subsequent submissions to execute
      return false;
    }

    @Override
    public void run() {
      // The cache may have been collected, in which case there is nothing left to clean up
      BoundedLocalCache<?, ?> cache = reference.get();
      if (cache != null) {
        cache.performCleanUp(/* ignored */ null);
      }
    }

    /**
     * This method cannot be ignored due to being final, so a hostile user supplied Executor could
     * forcibly complete the task and halt future executions. There are easier ways to intentionally
     * harm a system, so this is assumed to not happen in practice.
     */
    // public final void quietlyComplete() {}

    // The completion methods are overridden as no-ops so that an external caller cannot mark this
    // reusable task as done and thereby suppress future maintenance runs
    @Override public void complete(@Nullable Void value) {}
    @Override public void setRawResult(@Nullable Void value) {}
    @Override public @Nullable Void getRawResult() { return null; }
    @Override public void completeExceptionally(@Nullable Throwable t) {}
    @Override public boolean cancel(boolean mayInterruptIfRunning) { return false; }
  }
3938

3939
  /**
   * Creates a serialization proxy based on the common configuration shared by all cache types.
   * Only the cache's configuration is captured; the entries themselves are not serialized (no
   * entry data is copied into the proxy below).
   *
   * @param cache the cache whose settings are being captured
   * @return a proxy holding the builder-style settings needed to reconstruct the cache
   */
  static <K, V> SerializationProxy<K, V> makeSerializationProxy(BoundedLocalCache<?, ?> cache) {
    var proxy = new SerializationProxy<K, V>();
    proxy.weakKeys = cache.collectKeys();
    proxy.weakValues = cache.nodeFactory.weakValues();
    proxy.softValues = cache.nodeFactory.softValues();
    proxy.isRecordingStats = cache.isRecordingStats();
    proxy.evictionListener = cache.evictionListener;
    proxy.removalListener = cache.removalListener();
    proxy.ticker = cache.expirationTicker();
    // Each timing policy is copied only when enabled, leaving the proxy's defaults otherwise
    if (cache.expiresAfterAccess()) {
      proxy.expiresAfterAccessNanos = cache.expiresAfterAccessNanos();
    }
    if (cache.expiresAfterWrite()) {
      proxy.expiresAfterWriteNanos = cache.expiresAfterWriteNanos();
    }
    if (cache.expiresVariable()) {
      proxy.expiry = cache.expiry();
    }
    if (cache.refreshAfterWrite()) {
      proxy.refreshAfterWriteNanos = cache.refreshAfterWriteNanos();
    }
    if (cache.evicts()) {
      // A weighted cache records the weigher and maximum weight; otherwise just the maximum size
      if (cache.isWeighted) {
        proxy.weigher = cache.weigher;
        proxy.maximumWeight = cache.maximum();
      } else {
        proxy.maximumSize = cache.maximum();
      }
    }
    proxy.cacheLoader = cache.cacheLoader;
    proxy.async = cache.isAsync;
    return proxy;
  }
3973

3974
  /* --------------- Manual Cache --------------- */
3975

3976
  static class BoundedLocalManualCache<K, V> implements LocalManualCache<K, V>, Serializable {
3977
    private static final long serialVersionUID = 1;
3978

3979
    final BoundedLocalCache<K, V> cache;
3980

3981
    @Nullable Policy<K, V> policy;
3982

3983
    BoundedLocalManualCache(Caffeine<K, V> builder) {
3984
      this(builder, null);
1✔
3985
    }
1✔
3986

3987
    BoundedLocalManualCache(Caffeine<K, V> builder, @Nullable CacheLoader<? super K, V> loader) {
1✔
3988
      cache = LocalCacheFactory.newBoundedLocalCache(builder, loader, /* isAsync= */ false);
1✔
3989
    }
1✔
3990

3991
    @Override
3992
    public final BoundedLocalCache<K, V> cache() {
3993
      return cache;
1✔
3994
    }
3995

3996
    @Override
3997
    public final Policy<K, V> policy() {
3998
      if (policy == null) {
1✔
3999
        @SuppressWarnings("NullAway")
4000
        Function<@Nullable V, @Nullable V> identity = identity();
1✔
4001
        policy = new BoundedPolicy<>(cache, identity, cache.isWeighted);
1✔
4002
      }
4003
      return policy;
1✔
4004
    }
4005

4006
    private void readObject(ObjectInputStream stream) throws InvalidObjectException {
4007
      throw new InvalidObjectException("Proxy required");
1✔
4008
    }
4009

4010
    private Object writeReplace() {
4011
      return makeSerializationProxy(cache);
1✔
4012
    }
4013
  }
4014

4015
  /**
   * The policy inspection view over a bounded cache. The per-feature views are created lazily and
   * memoized in non-final fields; each view delegates directly to the backing cache. An empty
   * {@link Optional} is returned when the corresponding feature was not enabled at construction.
   */
  @SuppressWarnings({"NullableOptional", "OptionalAssignedToNull"})
  static final class BoundedPolicy<K, V> implements Policy<K, V> {
    // Unwraps the stored value (e.g. a completed future) into the user-visible value
    final Function<@Nullable V, @Nullable V> transformer;
    final BoundedLocalCache<K, V> cache;
    final boolean isWeighted;

    // Memoized feature views; null until first requested
    @Nullable Optional<Eviction<K, V>> eviction;
    @Nullable Optional<FixedRefresh<K, V>> refreshes;
    @Nullable Optional<FixedExpiration<K, V>> afterWrite;
    @Nullable Optional<FixedExpiration<K, V>> afterAccess;
    @Nullable Optional<VarExpiration<K, V>> variable;

    BoundedPolicy(BoundedLocalCache<K, V> cache,
        Function<@Nullable V, @Nullable V> transformer, boolean isWeighted) {
      this.transformer = transformer;
      this.isWeighted = isWeighted;
      this.cache = cache;
    }

    @Override public boolean isRecordingStats() {
      return cache.isRecordingStats();
    }
    @Override public @Nullable V getIfPresentQuietly(K key) {
      return transformer.apply(cache.getIfPresentQuietly(key));
    }
    @SuppressWarnings("NullAway")
    @Override public @Nullable CacheEntry<K, V> getEntryIfPresentQuietly(K key) {
      Node<K, V> node = cache.data.get(cache.nodeFactory.newLookupKey(key));
      return (node == null) ? null : cache.nodeToCacheEntry(node, transformer);
    }
    /** Returns an unmodifiable snapshot of the in-flight refreshes keyed by the user's key. */
    @SuppressWarnings("Java9CollectionFactory")
    @Override public Map<K, CompletableFuture<V>> refreshes() {
      var refreshes = cache.refreshes;
      if ((refreshes == null) || refreshes.isEmpty()) {
        @SuppressWarnings("ImmutableMapOf")
        Map<K, CompletableFuture<V>> emptyMap = Collections.unmodifiableMap(Collections.emptyMap());
        return emptyMap;
      } else if (cache.collectKeys()) {
        // Keys are stored as references, so resolve them and skip any that were collected;
        // an identity map preserves the reference-equality semantics of collected keys
        var inFlight = new IdentityHashMap<K, CompletableFuture<V>>(refreshes.size());
        for (var entry : refreshes.entrySet()) {
          @SuppressWarnings("unchecked")
          var key = ((InternalReference<K>) entry.getKey()).get();
          @SuppressWarnings("unchecked")
          var future = (CompletableFuture<V>) entry.getValue();
          if (key != null) {
            inFlight.put(key, future);
          }
        }
        return Collections.unmodifiableMap(inFlight);
      }
      // Strong keys: copy so callers do not observe concurrent refresh completions
      @SuppressWarnings("unchecked")
      var castedRefreshes = (Map<K, CompletableFuture<V>>) (Object) refreshes;
      return Collections.unmodifiableMap(new HashMap<>(castedRefreshes));
    }
    @Override public Optional<Eviction<K, V>> eviction() {
      return cache.evicts()
          ? (eviction == null) ? (eviction = Optional.of(new BoundedEviction())) : eviction
          : Optional.empty();
    }
    @Override public Optional<FixedExpiration<K, V>> expireAfterAccess() {
      if (!cache.expiresAfterAccess()) {
        return Optional.empty();
      }
      return (afterAccess == null)
          ? (afterAccess = Optional.of(new BoundedExpireAfterAccess()))
          : afterAccess;
    }
    @Override public Optional<FixedExpiration<K, V>> expireAfterWrite() {
      if (!cache.expiresAfterWrite()) {
        return Optional.empty();
      }
      return (afterWrite == null)
          ? (afterWrite = Optional.of(new BoundedExpireAfterWrite()))
          : afterWrite;
    }
    @Override public Optional<VarExpiration<K, V>> expireVariably() {
      if (!cache.expiresVariable()) {
        return Optional.empty();
      }
      return (variable == null)
          ? (variable = Optional.of(new BoundedVarExpiration()))
          : variable;
    }
    @Override public Optional<FixedRefresh<K, V>> refreshAfterWrite() {
      if (!cache.refreshAfterWrite()) {
        return Optional.empty();
      }
      return (refreshes == null)
          ? (refreshes = Optional.of(new BoundedRefreshAfterWrite()))
          : refreshes;
    }

    /** The eviction policy view; maintenance runs under the eviction lock where required. */
    final class BoundedEviction implements Eviction<K, V> {
      @Override public boolean isWeighted() {
        return isWeighted;
      }
      @Override public OptionalInt weightOf(K key) {
        requireNonNull(key);
        if (!isWeighted) {
          return OptionalInt.empty();
        }
        Node<K, V> node = cache.data.get(cache.nodeFactory.newLookupKey(key));
        if ((node == null) || cache.hasExpired(node, cache.expirationTicker().read())) {
          return OptionalInt.empty();
        }
        // Synchronize to read a stable weight while the node may be concurrently updated
        synchronized (node) {
          return OptionalInt.of(node.getWeight());
        }
      }
      @Override public OptionalLong weightedSize() {
        if (cache.evicts() && isWeighted()) {
          cache.evictionLock.lock();
          try {
            // Drain any pending work first so the reported weight is up to date
            if (cache.drainStatusOpaque() == REQUIRED) {
              cache.maintenance(/* ignored */ null);
            }
            return OptionalLong.of(Math.max(0, cache.weightedSize()));
          } finally {
            cache.evictionLock.unlock();
            cache.rescheduleCleanUpIfIncomplete();
          }
        }
        return OptionalLong.empty();
      }
      @Override public long getMaximum() {
        cache.evictionLock.lock();
        try {
          if (cache.drainStatusOpaque() == REQUIRED) {
            cache.maintenance(/* ignored */ null);
          }
          return cache.maximum();
        } finally {
          cache.evictionLock.unlock();
          cache.rescheduleCleanUpIfIncomplete();
        }
      }
      @Override public void setMaximum(long maximum) {
        cache.evictionLock.lock();
        try {
          cache.setMaximumSize(maximum);
          // Immediately evict down to the new bound rather than waiting for the next write
          cache.maintenance(/* ignored */ null);
        } finally {
          cache.evictionLock.unlock();
          cache.rescheduleCleanUpIfIncomplete();
        }
      }
      @Override public Map<K, V> coldest(int limit) {
        int expectedSize = Math.min(limit, cache.size());
        var limiter = new SizeLimiter<K, V>(expectedSize, limit);
        return cache.evictionOrder(/* hottest= */ false, transformer, limiter);
      }
      @Override public Map<K, V> coldestWeighted(long weightLimit) {
        // In an unweighted cache each entry has unit weight, so a size limit is equivalent
        var limiter = isWeighted()
            ? new WeightLimiter<K, V>(weightLimit)
            : new SizeLimiter<K, V>((int) Math.min(weightLimit, cache.size()), weightLimit);
        return cache.evictionOrder(/* hottest= */ false, transformer, limiter);
      }
      @Override
      public <T> T coldest(Function<Stream<CacheEntry<K, V>>, T> mappingFunction) {
        requireNonNull(mappingFunction);
        return cache.evictionOrder(/* hottest= */ false, transformer, mappingFunction);
      }
      @Override public Map<K, V> hottest(int limit) {
        int expectedSize = Math.min(limit, cache.size());
        var limiter = new SizeLimiter<K, V>(expectedSize, limit);
        return cache.evictionOrder(/* hottest= */ true, transformer, limiter);
      }
      @Override public Map<K, V> hottestWeighted(long weightLimit) {
        var limiter = isWeighted()
            ? new WeightLimiter<K, V>(weightLimit)
            : new SizeLimiter<K, V>((int) Math.min(weightLimit, cache.size()), weightLimit);
        return cache.evictionOrder(/* hottest= */ true, transformer, limiter);
      }
      @Override
      public <T> T hottest(Function<Stream<CacheEntry<K, V>>, T> mappingFunction) {
        requireNonNull(mappingFunction);
        return cache.evictionOrder(/* hottest= */ true, transformer, mappingFunction);
      }
    }

    /** The fixed expire-after-access policy view. */
    @SuppressWarnings("PreferJavaTimeOverload")
    final class BoundedExpireAfterAccess implements FixedExpiration<K, V> {
      @Override public OptionalLong ageOf(K key, TimeUnit unit) {
        requireNonNull(key);
        requireNonNull(unit);
        Object lookupKey = cache.nodeFactory.newLookupKey(key);
        Node<K, V> node = cache.data.get(lookupKey);
        if (node == null) {
          return OptionalLong.empty();
        }
        long now = cache.expirationTicker().read();
        // Age is measured from the last access; expired entries report empty
        return cache.hasExpired(node, now)
            ? OptionalLong.empty()
            : OptionalLong.of(unit.convert(now - node.getAccessTime(), TimeUnit.NANOSECONDS));
      }
      @Override public long getExpiresAfter(TimeUnit unit) {
        return unit.convert(cache.expiresAfterAccessNanos(), TimeUnit.NANOSECONDS);
      }
      @Override public void setExpiresAfter(long duration, TimeUnit unit) {
        requireArgument(duration >= 0);
        cache.setExpiresAfterAccessNanos(unit.toNanos(duration));
        // Schedule maintenance so entries affected by the new duration are re-evaluated
        cache.scheduleAfterWrite();
      }
      @Override public Map<K, V> oldest(int limit) {
        return oldest(new SizeLimiter<>(Math.min(limit, cache.size()), limit));
      }
      @Override public <T> T oldest(Function<Stream<CacheEntry<K, V>>, T> mappingFunction) {
        return cache.expireAfterAccessOrder(/* oldest= */ true, transformer, mappingFunction);
      }
      @Override public Map<K, V> youngest(int limit) {
        return youngest(new SizeLimiter<>(Math.min(limit, cache.size()), limit));
      }
      @Override public <T> T youngest(Function<Stream<CacheEntry<K, V>>, T> mappingFunction) {
        return cache.expireAfterAccessOrder(/* oldest= */ false, transformer, mappingFunction);
      }
    }

    /** The fixed expire-after-write policy view. */
    @SuppressWarnings("PreferJavaTimeOverload")
    final class BoundedExpireAfterWrite implements FixedExpiration<K, V> {
      @Override public OptionalLong ageOf(K key, TimeUnit unit) {
        requireNonNull(key);
        requireNonNull(unit);
        Object lookupKey = cache.nodeFactory.newLookupKey(key);
        Node<K, V> node = cache.data.get(lookupKey);
        if (node == null) {
          return OptionalLong.empty();
        }
        long now = cache.expirationTicker().read();
        // Age is measured from the last write; expired entries report empty
        return cache.hasExpired(node, now)
            ? OptionalLong.empty()
            : OptionalLong.of(unit.convert(now - node.getWriteTime(), TimeUnit.NANOSECONDS));
      }
      @Override public long getExpiresAfter(TimeUnit unit) {
        return unit.convert(cache.expiresAfterWriteNanos(), TimeUnit.NANOSECONDS);
      }
      @Override public void setExpiresAfter(long duration, TimeUnit unit) {
        requireArgument(duration >= 0);
        cache.setExpiresAfterWriteNanos(unit.toNanos(duration));
        cache.scheduleAfterWrite();
      }
      @Override public Map<K, V> oldest(int limit) {
        return oldest(new SizeLimiter<>(Math.min(limit, cache.size()), limit));
      }
      @SuppressWarnings("GuardedByChecker")
      @Override public <T> T oldest(Function<Stream<CacheEntry<K, V>>, T> mappingFunction) {
        return cache.snapshot(cache.writeOrderDeque(), transformer, mappingFunction);
      }
      @Override public Map<K, V> youngest(int limit) {
        return youngest(new SizeLimiter<>(Math.min(limit, cache.size()), limit));
      }
      @SuppressWarnings("GuardedByChecker")
      @Override public <T> T youngest(Function<Stream<CacheEntry<K, V>>, T> mappingFunction) {
        return cache.snapshot(cache.writeOrderDeque()::descendingIterator,
            transformer, mappingFunction);
      }
    }

    /** The per-entry (variable) expiration policy view. */
    @SuppressWarnings("PreferJavaTimeOverload")
    final class BoundedVarExpiration implements VarExpiration<K, V> {
      @Override public OptionalLong getExpiresAfter(K key, TimeUnit unit) {
        requireNonNull(key);
        requireNonNull(unit);
        Object lookupKey = cache.nodeFactory.newLookupKey(key);
        Node<K, V> node = cache.data.get(lookupKey);
        if (node == null) {
          return OptionalLong.empty();
        }
        long now = cache.expirationTicker().read();
        // Remaining lifetime is the node's variable deadline minus the current time
        return cache.hasExpired(node, now)
            ? OptionalLong.empty()
            : OptionalLong.of(unit.convert(node.getVariableTime() - now, TimeUnit.NANOSECONDS));
      }
      @Override public void setExpiresAfter(K key, long duration, TimeUnit unit) {
        requireNonNull(key);
        requireNonNull(unit);
        requireArgument(duration >= 0);
        Object lookupKey = cache.nodeFactory.newLookupKey(key);
        Node<K, V> node = cache.data.get(lookupKey);
        if (node != null) {
          long now;
          long durationNanos = TimeUnit.NANOSECONDS.convert(duration, unit);
          // Synchronize so the expired check and deadline update are atomic for this node
          synchronized (node) {
            now = cache.expirationTicker().read();
            if (cache.hasExpired(node, now)) {
              return;
            }
            node.setVariableTime(now + Math.min(durationNanos, MAXIMUM_EXPIRY));
          }
          // Reorder in the timer wheel without counting this as a cache hit
          cache.afterRead(node, now, /* recordHit= */ false);
        }
      }
      @Override public @Nullable V put(K key, V value, long duration, TimeUnit unit) {
        requireNonNull(unit);
        requireNonNull(value);
        requireArgument(duration >= 0);
        return cache.isAsync
            ? putAsync(key, value, duration, unit)
            : putSync(key, value, duration, unit, /* onlyIfAbsent= */ false);
      }
      @Override public @Nullable V putIfAbsent(K key, V value, long duration, TimeUnit unit) {
        requireNonNull(unit);
        requireNonNull(value);
        requireArgument(duration >= 0);
        return cache.isAsync
            ? putIfAbsentAsync(key, value, duration, unit)
            : putSync(key, value, duration, unit, /* onlyIfAbsent= */ true);
      }
      // Synchronous write using a one-shot fixed expiry for this entry
      @Nullable V putSync(K key, V value, long duration, TimeUnit unit, boolean onlyIfAbsent) {
        var expiry = new FixedExpireAfterWrite<K, V>(duration, unit);
        return cache.put(key, value, expiry, onlyIfAbsent);
      }
      @SuppressWarnings("unchecked")
      @Nullable V putIfAbsentAsync(K key, V value, long duration, TimeUnit unit) {
        // Keep in sync with LocalAsyncCache.AsMapView#putIfAbsent(key, value)
        var expiry = (Expiry<K, V>) new AsyncExpiry<>(new FixedExpireAfterWrite<>(duration, unit));
        var asyncValue = (V) CompletableFuture.completedFuture(value);

        for (;;) {
          var priorFuture = (CompletableFuture<V>) cache.getIfPresent(
              key, /* recordStats= */ false);
          if (priorFuture != null) {
            if (!priorFuture.isDone()) {
              // Wait for the in-flight computation, then retry against the settled state
              Async.getWhenSuccessful(priorFuture);
              continue;
            }

            V prior = Async.getWhenSuccessful(priorFuture);
            if (prior != null) {
              return prior;
            }
          }

          // Atomically install the value only if absent or if the prior future failed/was empty
          boolean[] added = { false };
          var computed = (CompletableFuture<V>) cache.compute(key, (k, oldValue) -> {
            var oldValueFuture = (CompletableFuture<V>) oldValue;
            added[0] = (oldValueFuture == null)
                || (oldValueFuture.isDone() && (Async.getIfReady(oldValueFuture) == null));
            return added[0] ? asyncValue : oldValue;
          }, expiry, /* recordLoad= */ false, /* recordLoadFailure= */ false);

          if (added[0]) {
            return null;
          } else {
            V prior = Async.getWhenSuccessful(computed);
            if (prior != null) {
              return prior;
            }
          }
        }
      }
      @SuppressWarnings("unchecked")
      @Nullable V putAsync(K key, V value, long duration, TimeUnit unit) {
        var expiry = (Expiry<K, V>) new AsyncExpiry<>(new FixedExpireAfterWrite<>(duration, unit));
        var asyncValue = (V) CompletableFuture.completedFuture(value);

        var oldValueFuture = (CompletableFuture<V>) cache.put(
            key, asyncValue, expiry, /* onlyIfAbsent= */ false);
        return Async.getWhenSuccessful(oldValueFuture);
      }
      @SuppressWarnings("NullAway")
      @Override public V compute(K key,
          BiFunction<? super K, ? super V, ? extends V> remappingFunction,
          Duration duration) {
        requireNonNull(key);
        requireNonNull(duration);
        requireNonNull(remappingFunction);
        requireArgument(!duration.isNegative(), "duration cannot be negative: %s", duration);
        var expiry = new FixedExpireAfterWrite<K, V>(
            toNanosSaturated(duration), TimeUnit.NANOSECONDS);

        return cache.isAsync
            ? computeAsync(key, remappingFunction, expiry)
            : cache.compute(key, remappingFunction, expiry,
                /* recordLoad= */ true, /* recordLoadFailure= */ true);
      }
      @Nullable V computeAsync(K key,
          BiFunction<? super K, ? super V, ? extends V> remappingFunction,
          Expiry<? super K, ? super V> expiry) {
        // Keep in sync with LocalAsyncCache.AsMapView#compute(key, remappingFunction)
        @SuppressWarnings("unchecked")
        var delegate = (LocalCache<K, CompletableFuture<V>>) cache;

        // Holds the value produced by the remapping function across the lambda boundary
        @SuppressWarnings({"rawtypes", "unchecked"})
        var newValue = (V[]) new Object[1];
        for (;;) {
          // Block outside of the map computation until any in-flight future settles
          Async.getWhenSuccessful(delegate.getIfPresentQuietly(key));

          CompletableFuture<V> valueFuture = delegate.compute(key, (k, oldValueFuture) -> {
            if ((oldValueFuture != null) && !oldValueFuture.isDone()) {
              // Raced with a new in-flight computation; keep it and retry via the outer loop
              return oldValueFuture;
            }

            V oldValue = Async.getIfReady(oldValueFuture);
            BiFunction<? super K, ? super V, ? extends V> function = delegate.statsAware(
                remappingFunction, /* recordLoad= */ true, /* recordLoadFailure= */ true);
            newValue[0] = function.apply(key, oldValue);
            return (newValue[0] == null) ? null : CompletableFuture.completedFuture(newValue[0]);
          }, new AsyncExpiry<>(expiry), /* recordLoad= */ false, /* recordLoadFailure= */ false);

          if (newValue[0] != null) {
            return newValue[0];
          } else if (valueFuture == null) {
            return null;
          }
        }
      }
      @Override public Map<K, V> oldest(int limit) {
        return oldest(new SizeLimiter<>(Math.min(limit, cache.size()), limit));
      }
      @Override public <T> T oldest(Function<Stream<CacheEntry<K, V>>, T> mappingFunction) {
        return cache.snapshot(cache.timerWheel(), transformer, mappingFunction);
      }
      @Override public Map<K, V> youngest(int limit) {
        return youngest(new SizeLimiter<>(Math.min(limit, cache.size()), limit));
      }
      @Override public <T> T youngest(Function<Stream<CacheEntry<K, V>>, T> mappingFunction) {
        return cache.snapshot(cache.timerWheel()::descendingIterator, transformer, mappingFunction);
      }
    }

    /**
     * An {@link Expiry} that applies a fixed duration on create and update, and leaves the
     * remaining duration unchanged on read.
     */
    static final class FixedExpireAfterWrite<K, V> implements Expiry<K, V> {
      final long duration;
      final TimeUnit unit;

      FixedExpireAfterWrite(long duration, TimeUnit unit) {
        this.duration = duration;
        this.unit = unit;
      }
      @Override public long expireAfterCreate(K key, V value, long currentTime) {
        return unit.toNanos(duration);
      }
      @Override public long expireAfterUpdate(
          K key, V value, long currentTime, long currentDuration) {
        return unit.toNanos(duration);
      }
      @CanIgnoreReturnValue
      @Override public long expireAfterRead(
          K key, V value, long currentTime, long currentDuration) {
        return currentDuration;
      }
    }

    /** The fixed refresh-after-write policy view. */
    @SuppressWarnings("PreferJavaTimeOverload")
    final class BoundedRefreshAfterWrite implements FixedRefresh<K, V> {
      @Override public OptionalLong ageOf(K key, TimeUnit unit) {
        requireNonNull(key);
        requireNonNull(unit);
        Object lookupKey = cache.nodeFactory.newLookupKey(key);
        Node<K, V> node = cache.data.get(lookupKey);
        if (node == null) {
          return OptionalLong.empty();
        }
        long now = cache.expirationTicker().read();
        return cache.hasExpired(node, now)
            ? OptionalLong.empty()
            : OptionalLong.of(unit.convert(now - node.getWriteTime(), TimeUnit.NANOSECONDS));
      }
      @Override public long getRefreshesAfter(TimeUnit unit) {
        return unit.convert(cache.refreshAfterWriteNanos(), TimeUnit.NANOSECONDS);
      }
      @Override public void setRefreshesAfter(long duration, TimeUnit unit) {
        requireArgument(duration >= 0);
        cache.setRefreshAfterWriteNanos(unit.toNanos(duration));
        cache.scheduleAfterWrite();
      }
    }
  }
4482

4483
  /* --------------- Loading Cache --------------- */
4484

4485
  /**
   * A loading cache backed by a bounded local cache. The loader is adapted into single-key and
   * (when supported) bulk mapping functions at construction time.
   */
  static final class BoundedLocalLoadingCache<K, V>
      extends BoundedLocalManualCache<K, V> implements LocalLoadingCache<K, V> {
    private static final long serialVersionUID = 1;

    final Function<K, @Nullable V> mappingFunction;
    // Null when the loader does not provide a bulk load implementation
    final @Nullable Function<Set<? extends K>, Map<K, V>> bulkMappingFunction;

    BoundedLocalLoadingCache(Caffeine<K, V> builder, CacheLoader<? super K, V> loader) {
      super(builder, loader);
      requireNonNull(loader);
      mappingFunction = newMappingFunction(loader);
      bulkMappingFunction = newBulkMappingFunction(loader);
    }

    @Override
    @SuppressWarnings("NullAway")
    public AsyncCacheLoader<? super K, V> cacheLoader() {
      return cache.cacheLoader;
    }

    @Override
    public Function<K, @Nullable V> mappingFunction() {
      return mappingFunction;
    }

    @Override
    public @Nullable Function<Set<? extends K>, Map<K, V>> bulkMappingFunction() {
      return bulkMappingFunction;
    }

    // Deserialization must go through the SerializationProxy created by writeReplace()
    private void readObject(ObjectInputStream stream) throws InvalidObjectException {
      throw new InvalidObjectException("Proxy required");
    }

    private Object writeReplace() {
      return makeSerializationProxy(cache);
    }
  }
4523

4524
  /* --------------- Async Cache --------------- */
4525

4526
  static final class BoundedLocalAsyncCache<K, V> implements LocalAsyncCache<K, V>, Serializable {
4527
    private static final long serialVersionUID = 1;
4528

4529
    final BoundedLocalCache<K, CompletableFuture<V>> cache;
4530
    final boolean isWeighted;
4531

4532
    @Nullable ConcurrentMap<K, CompletableFuture<V>> mapView;
4533
    @Nullable CacheView<K, V> cacheView;
4534
    @Nullable Policy<K, V> policy;
4535

4536
    @SuppressWarnings("unchecked")
4537
    BoundedLocalAsyncCache(Caffeine<K, V> builder) {
1✔
4538
      cache = (BoundedLocalCache<K, CompletableFuture<V>>) LocalCacheFactory
1✔
4539
          .newBoundedLocalCache(builder, /* cacheLoader= */ null, /* isAsync= */ true);
1✔
4540
      isWeighted = builder.isWeighted();
1✔
4541
    }
1✔
4542

4543
    @Override
4544
    public BoundedLocalCache<K, CompletableFuture<V>> cache() {
4545
      return cache;
1✔
4546
    }
4547

4548
    @Override
4549
    public ConcurrentMap<K, CompletableFuture<V>> asMap() {
4550
      return (mapView == null) ? (mapView = new AsyncAsMapView<>(this)) : mapView;
1✔
4551
    }
4552

4553
    @Override
4554
    public Cache<K, V> synchronous() {
4555
      return (cacheView == null) ? (cacheView = new CacheView<>(this)) : cacheView;
1✔
4556
    }
4557

4558
    @Override
4559
    public Policy<K, V> policy() {
4560
      if (policy == null) {
1✔
4561
        @SuppressWarnings("unchecked")
4562
        var castCache = (BoundedLocalCache<K, V>) cache;
1✔
4563
        Function<CompletableFuture<V>, @Nullable V> transformer = Async::getIfReady;
1✔
4564
        @SuppressWarnings({"NullAway", "unchecked", "Varifier"})
4565
        Function<@Nullable V, @Nullable V> castTransformer = (Function<V, V>) transformer;
1✔
4566
        policy = new BoundedPolicy<>(castCache, castTransformer, isWeighted);
1✔
4567
      }
4568
      return policy;
1✔
4569
    }
4570

4571
    private void readObject(ObjectInputStream stream) throws InvalidObjectException {
4572
      throw new InvalidObjectException("Proxy required");
1✔
4573
    }
4574

4575
    private Object writeReplace() {
4576
      return makeSerializationProxy(cache);
1✔
4577
    }
4578
  }
4579

4580
  /* --------------- Async Loading Cache --------------- */
4581

4582
  static final class BoundedLocalAsyncLoadingCache<K, V>
4583
      extends LocalAsyncLoadingCache<K, V> implements Serializable {
4584
    private static final long serialVersionUID = 1;
4585

4586
    final BoundedLocalCache<K, CompletableFuture<V>> cache;
4587
    final boolean isWeighted;
4588

4589
    @Nullable ConcurrentMap<K, CompletableFuture<V>> mapView;
4590
    @Nullable Policy<K, V> policy;
4591

4592
    @SuppressWarnings("unchecked")
4593
    BoundedLocalAsyncLoadingCache(Caffeine<K, V> builder, AsyncCacheLoader<? super K, V> loader) {
4594
      super(loader);
1✔
4595
      isWeighted = builder.isWeighted();
1✔
4596
      cache = (BoundedLocalCache<K, CompletableFuture<V>>) LocalCacheFactory
1✔
4597
          .newBoundedLocalCache(builder, loader, /* isAsync= */ true);
1✔
4598
    }
1✔
4599

4600
    @Override
4601
    public BoundedLocalCache<K, CompletableFuture<V>> cache() {
4602
      return cache;
1✔
4603
    }
4604

4605
    @Override
4606
    public ConcurrentMap<K, CompletableFuture<V>> asMap() {
4607
      return (mapView == null) ? (mapView = new AsyncAsMapView<>(this)) : mapView;
1✔
4608
    }
4609

4610
    @Override
4611
    public Policy<K, V> policy() {
4612
      if (policy == null) {
1✔
4613
        @SuppressWarnings("unchecked")
4614
        var castCache = (BoundedLocalCache<K, V>) cache;
1✔
4615
        Function<CompletableFuture<V>, @Nullable V> transformer = Async::getIfReady;
1✔
4616
        @SuppressWarnings({"NullAway", "unchecked", "Varifier"})
4617
        Function<@Nullable V, @Nullable V> castTransformer = (Function<V, V>) transformer;
1✔
4618
        policy = new BoundedPolicy<>(castCache, castTransformer, isWeighted);
1✔
4619
      }
4620
      return policy;
1✔
4621
    }
4622

4623
    private void readObject(ObjectInputStream stream) throws InvalidObjectException {
4624
      throw new InvalidObjectException("Proxy required");
1✔
4625
    }
4626

4627
    private Object writeReplace() {
4628
      return makeSerializationProxy(cache);
1✔
4629
    }
4630
  }
4631
}
4632

4633
/** The namespace for field padding through inheritance. */
@SuppressWarnings({"MemberName", "MultiVariableDeclaration"})
final class BLCHeader {

  private BLCHeader() {}

  /** 120 bytes of padding (15 rows of 8) placed before {@code drainStatus} by inheritance. */
  static class PadDrainStatus {
    byte p000, p001, p002, p003, p004, p005, p006, p007;
    byte p008, p009, p010, p011, p012, p013, p014, p015;
    byte p016, p017, p018, p019, p020, p021, p022, p023;
    byte p024, p025, p026, p027, p028, p029, p030, p031;
    byte p032, p033, p034, p035, p036, p037, p038, p039;
    byte p040, p041, p042, p043, p044, p045, p046, p047;
    byte p048, p049, p050, p051, p052, p053, p054, p055;
    byte p056, p057, p058, p059, p060, p061, p062, p063;
    byte p064, p065, p066, p067, p068, p069, p070, p071;
    byte p072, p073, p074, p075, p076, p077, p078, p079;
    byte p080, p081, p082, p083, p084, p085, p086, p087;
    byte p088, p089, p090, p091, p092, p093, p094, p095;
    byte p096, p097, p098, p099, p100, p101, p102, p103;
    byte p104, p105, p106, p107, p108, p109, p110, p111;
    byte p112, p113, p114, p115, p116, p117, p118, p119;
  }

  /** Enforces a memory layout to avoid false sharing by padding the drain status. */
  abstract static class DrainStatusRef extends PadDrainStatus {
    // VarHandle over drainStatus, used for opaque/acquire/release/CAS access below.
    static final VarHandle DRAIN_STATUS;

    /** A drain is not taking place. */
    static final int IDLE = 0;
    /** A drain is required due to a pending write modification. */
    static final int REQUIRED = 1;
    /** A drain is in progress and will transition to idle. */
    static final int PROCESSING_TO_IDLE = 2;
    /** A drain is in progress and will transition to required. */
    static final int PROCESSING_TO_REQUIRED = 3;

    /** The draining status of the buffers. */
    volatile int drainStatus = IDLE;

    /**
     * Returns whether maintenance work is needed.
     *
     * @param delayable if draining the read buffer can be delayed
     * @return true if a drain should be scheduled now; false if it can wait or one is running
     */
    @SuppressWarnings("StatementSwitchToExpressionSwitch")
    boolean shouldDrainBuffers(boolean delayable) {
      // An opaque read is sufficient here: staleness only delays (or hastens) maintenance.
      switch (drainStatusOpaque()) {
        case IDLE:
          // Nothing pending; drain only if the caller cannot tolerate delay.
          return !delayable;
        case REQUIRED:
          return true;
        case PROCESSING_TO_IDLE:
        case PROCESSING_TO_REQUIRED:
          // A drain is already running; do not start another.
          return false;
        default:
          throw new IllegalStateException("Invalid drain status: " + drainStatus);
      }
    }

    /** Reads the status with opaque (no ordering) semantics. */
    int drainStatusOpaque() {
      return (int) DRAIN_STATUS.getOpaque(this);
    }

    /** Reads the status with acquire semantics. */
    int drainStatusAcquire() {
      return (int) DRAIN_STATUS.getAcquire(this);
    }

    /** Writes the status with opaque (no ordering) semantics. */
    void setDrainStatusOpaque(int drainStatus) {
      DRAIN_STATUS.setOpaque(this, drainStatus);
    }

    /** Writes the status with release semantics. */
    void setDrainStatusRelease(int drainStatus) {
      DRAIN_STATUS.setRelease(this, drainStatus);
    }

    /** Atomically transitions the status from {@code expect} to {@code update}. */
    boolean casDrainStatus(int expect, int update) {
      return DRAIN_STATUS.compareAndSet(this, expect, update);
    }

    static {
      try {
        DRAIN_STATUS = MethodHandles.lookup()
            .findVarHandle(DrainStatusRef.class, "drainStatus", int.class);
      } catch (ReflectiveOperationException e) {
        throw new ExceptionInInitializerError(e);
      }
    }
  }
}
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc