• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

ben-manes / caffeine / #4716

14 Jan 2025 03:15AM UTC coverage: 99.043% (-0.03%) from 99.068%
#4716

push

github

ben-manes
Avoid early expiration of a pending future due to delayed pinning (fixes #1623)

7656 of 7730 relevant lines covered (99.04%)

0.99 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

99.35
/caffeine/src/main/java/com/github/benmanes/caffeine/cache/BoundedLocalCache.java
1
/*
2
 * Copyright 2014 Ben Manes. All Rights Reserved.
3
 *
4
 * Licensed under the Apache License, Version 2.0 (the "License");
5
 * you may not use this file except in compliance with the License.
6
 * You may obtain a copy of the License at
7
 *
8
 *     http://www.apache.org/licenses/LICENSE-2.0
9
 *
10
 * Unless required by applicable law or agreed to in writing, software
11
 * distributed under the License is distributed on an "AS IS" BASIS,
12
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
 * See the License for the specific language governing permissions and
14
 * limitations under the License.
15
 */
16
package com.github.benmanes.caffeine.cache;
17

18
import static com.github.benmanes.caffeine.cache.Async.ASYNC_EXPIRY;
19
import static com.github.benmanes.caffeine.cache.Caffeine.calculateHashMapCapacity;
20
import static com.github.benmanes.caffeine.cache.Caffeine.ceilingPowerOfTwo;
21
import static com.github.benmanes.caffeine.cache.Caffeine.requireArgument;
22
import static com.github.benmanes.caffeine.cache.Caffeine.toNanosSaturated;
23
import static com.github.benmanes.caffeine.cache.LocalLoadingCache.newBulkMappingFunction;
24
import static com.github.benmanes.caffeine.cache.LocalLoadingCache.newMappingFunction;
25
import static com.github.benmanes.caffeine.cache.Node.PROBATION;
26
import static com.github.benmanes.caffeine.cache.Node.PROTECTED;
27
import static com.github.benmanes.caffeine.cache.Node.WINDOW;
28
import static java.util.Locale.US;
29
import static java.util.Objects.requireNonNull;
30
import static java.util.Spliterator.DISTINCT;
31
import static java.util.Spliterator.IMMUTABLE;
32
import static java.util.Spliterator.NONNULL;
33
import static java.util.Spliterator.ORDERED;
34
import static java.util.function.Function.identity;
35

36
import java.io.InvalidObjectException;
37
import java.io.ObjectInputStream;
38
import java.io.Serializable;
39
import java.lang.System.Logger;
40
import java.lang.System.Logger.Level;
41
import java.lang.invoke.MethodHandles;
42
import java.lang.invoke.VarHandle;
43
import java.lang.ref.Reference;
44
import java.lang.ref.ReferenceQueue;
45
import java.lang.ref.WeakReference;
46
import java.time.Duration;
47
import java.util.AbstractCollection;
48
import java.util.AbstractSet;
49
import java.util.ArrayDeque;
50
import java.util.Collection;
51
import java.util.Collections;
52
import java.util.Comparator;
53
import java.util.Deque;
54
import java.util.HashMap;
55
import java.util.IdentityHashMap;
56
import java.util.Iterator;
57
import java.util.LinkedHashMap;
58
import java.util.Map;
59
import java.util.NoSuchElementException;
60
import java.util.Objects;
61
import java.util.Optional;
62
import java.util.OptionalInt;
63
import java.util.OptionalLong;
64
import java.util.Set;
65
import java.util.Spliterator;
66
import java.util.Spliterators;
67
import java.util.concurrent.CancellationException;
68
import java.util.concurrent.CompletableFuture;
69
import java.util.concurrent.ConcurrentHashMap;
70
import java.util.concurrent.ConcurrentMap;
71
import java.util.concurrent.Executor;
72
import java.util.concurrent.ForkJoinPool;
73
import java.util.concurrent.ForkJoinTask;
74
import java.util.concurrent.ThreadLocalRandom;
75
import java.util.concurrent.TimeUnit;
76
import java.util.concurrent.TimeoutException;
77
import java.util.concurrent.locks.ReentrantLock;
78
import java.util.function.BiConsumer;
79
import java.util.function.BiFunction;
80
import java.util.function.Consumer;
81
import java.util.function.Function;
82
import java.util.function.Predicate;
83
import java.util.stream.Stream;
84
import java.util.stream.StreamSupport;
85

86
import org.jspecify.annotations.Nullable;
87

88
import com.github.benmanes.caffeine.cache.Async.AsyncExpiry;
89
import com.github.benmanes.caffeine.cache.LinkedDeque.PeekingIterator;
90
import com.github.benmanes.caffeine.cache.Policy.CacheEntry;
91
import com.github.benmanes.caffeine.cache.References.InternalReference;
92
import com.github.benmanes.caffeine.cache.stats.StatsCounter;
93
import com.google.errorprone.annotations.CanIgnoreReturnValue;
94
import com.google.errorprone.annotations.Var;
95
import com.google.errorprone.annotations.concurrent.GuardedBy;
96

97
/**
98
 * An in-memory cache implementation that supports full concurrency of retrievals, a high expected
99
 * concurrency for updates, and multiple ways to bound the cache.
100
 * <p>
101
 * This class is abstract and code generated subclasses provide the complete implementation for a
102
 * particular configuration. This is to ensure that only the fields and execution paths necessary
103
 * for a given configuration are used.
104
 *
105
 * @author ben.manes@gmail.com (Ben Manes)
106
 * @param <K> the type of keys maintained by this cache
107
 * @param <V> the type of mapped values
108
 */
109
@SuppressWarnings("serial")
110
abstract class BoundedLocalCache<K, V> extends BLCHeader.DrainStatusRef
111
    implements LocalCache<K, V> {
112

113
  /*
114
   * This class performs a best-effort bounding of a ConcurrentHashMap using a page-replacement
115
   * algorithm to determine which entries to evict when the capacity is exceeded.
116
   *
117
   * Concurrency:
118
   * ------------
119
   * The page replacement algorithms are kept eventually consistent with the map. An update to the
120
   * map and recording of reads may not be immediately reflected in the policy's data structures.
121
   * These structures are guarded by a lock, and operations are applied in batches to avoid lock
122
   * contention. The penalty of applying the batches is spread across threads, so that the amortized
123
   * cost is slightly higher than performing just the ConcurrentHashMap operation [1].
124
   *
125
   * A memento of the reads and writes that were performed on the map is recorded in buffers. These
126
   * buffers are drained at the first opportunity after a write or when a read buffer is full. The
127
   * reads are offered to a buffer that will reject additions if contended on or if it is full. Due
128
   * to the concurrent nature of the read and write operations, a strict policy ordering is not
129
   * possible, but it may be observably strict when single-threaded. The buffers are drained
130
   * asynchronously to minimize the request latency and uses a state machine to determine when to
131
   * schedule this work on an executor.
132
   *
133
   * Due to a lack of a strict ordering guarantee, a task can be executed out-of-order, such as a
134
   * removal followed by its addition. The state of the entry is encoded using the key field to
135
   * avoid additional memory usage. An entry is "alive" if it is in both the hash table and the page
136
   * replacement policy. It is "retired" if it is not in the hash table and is pending removal from
137
   * the page replacement policy. Finally, an entry transitions to the "dead" state when it is
138
   * neither in the hash table nor the page replacement policy. Both the retired and dead states are
139
   * represented by a sentinel key that should not be used for map operations.
140
   *
141
   * Eviction:
142
   * ---------
143
   * Maximum size is implemented using the Window TinyLfu policy [2] due to its high hit rate, O(1)
144
   * time complexity, and small footprint. A new entry starts in the admission window and remains
145
   * there as long as it has high temporal locality (recency). Eventually an entry will slip from
146
   * the window into the main space. If the main space is already full, then a historic frequency
147
   * filter determines whether to evict the newly admitted entry or the victim entry chosen by the
148
   * eviction policy. This process ensures that the entries in the window were very recently used,
149
   * while entries in the main space are accessed very frequently and remain moderately recent. The
150
   * windowing allows the policy to have a high hit rate when entries exhibit a bursty access
151
   * pattern, while the filter ensures that popular items are retained. The admission window uses
152
   * LRU and the main space uses Segmented LRU.
153
   *
154
   * The optimal size of the window vs. main spaces is workload dependent [3]. A large admission
155
   * window is favored by recency-biased workloads, while a small one favors frequency-biased
156
   * workloads. When the window is too small, then recent arrivals are prematurely evicted, but when
157
   * it is too large, then they pollute the cache and force the eviction of more popular entries.
158
   * The optimal configuration is dynamically determined by using hill climbing to walk the hit rate
159
   * curve. This is achieved by sampling the hit rate and adjusting the window size in the direction
160
   * that is improving (making positive or negative steps). At each interval, the step size is
161
   * decreased until the hit rate climber converges at the optimal setting. The process is restarted
162
   * when the hit rate changes over a threshold, indicating that the workload altered, and a new
163
   * setting may be required.
164
   *
165
   * The historic usage is retained in a compact popularity sketch, which uses hashing to
166
   * probabilistically estimate an item's frequency. This exposes a flaw where an adversary could
167
   * use hash flooding [4] to artificially raise the frequency of the main space's victim and cause
168
   * all candidates to be rejected. In the worst case, by exploiting hash collisions, an attacker
169
   * could cause the cache to never hit and hold only worthless items, resulting in a
170
   * denial-of-service attack against the underlying resource. This is mitigated by introducing
171
   * jitter, allowing candidates that are at least moderately popular to have a small, random chance
172
   * of being admitted. This causes the victim to be evicted, but in a way that marginally impacts
173
   * the hit rate.
174
   *
175
   * Expiration:
176
   * -----------
177
   * Expiration is implemented in O(1) time complexity. The time-to-idle policy uses an access-order
178
   * queue, the time-to-live policy uses a write-order queue, and variable expiration uses a
179
   * hierarchical timer wheel [5]. The queuing policies allow for peeking at the oldest entry to
180
   * determine if it has expired. If it has not, then the younger entries must not have expired
181
   * either. If a maximum size is set, then expiration will share the queues, minimizing the
182
   * per-entry footprint. The timer wheel based policy uses hashing and cascading in a manner that
183
   * amortizes the penalty of sorting to achieve a similar algorithmic cost.
184
   *
185
   * The expiration updates are applied in a best effort fashion. The reordering of variable or
186
   * access-order expiration may be discarded by the read buffer if it is full or contended.
187
   * Similarly, the reordering of write expiration may be ignored for an entry if the last update
188
   * was within a short time window. This is done to avoid overwhelming the write buffer.
189
   *
190
   * [1] BP-Wrapper: A Framework Making Any Replacement Algorithms (Almost) Lock Contention Free
191
   * http://web.cse.ohio-state.edu/hpcs/WWW/HTML/publications/papers/TR-09-1.pdf
192
   * [2] TinyLFU: A Highly Efficient Cache Admission Policy
193
   * https://dl.acm.org/citation.cfm?id=3149371
194
   * [3] Adaptive Software Cache Management
195
   * https://dl.acm.org/citation.cfm?id=3274816
196
   * [4] Denial of Service via Algorithmic Complexity Attack
197
   * https://www.usenix.org/legacy/events/sec03/tech/full_papers/crosby/crosby.pdf
198
   * [5] Hashed and Hierarchical Timing Wheels
199
   * http://www.cs.columbia.edu/~nahum/w6998/papers/ton97-timing-wheels.pdf
200
   */
201

202
  /** The logger for reporting listener failures and detected cache misuse. */
  static final Logger logger = System.getLogger(BoundedLocalCache.class.getName());

  /** The number of CPUs */
  static final int NCPU = Runtime.getRuntime().availableProcessors();
  /** The initial capacity of the write buffer. */
  static final int WRITE_BUFFER_MIN = 4;
  /** The maximum capacity of the write buffer. */
  static final int WRITE_BUFFER_MAX = 128 * ceilingPowerOfTwo(NCPU);
  /** The number of attempts to insert into the write buffer before yielding. */
  static final int WRITE_BUFFER_RETRIES = 100;
  /** The maximum weighted capacity of the map. */
  static final long MAXIMUM_CAPACITY = Long.MAX_VALUE - Integer.MAX_VALUE;
  /** The initial percent of the maximum weighted capacity dedicated to the main space. */
  static final double PERCENT_MAIN = 0.99d;
  /** The percent of the maximum weighted capacity dedicated to the main's protected space. */
  static final double PERCENT_MAIN_PROTECTED = 0.80d;
  /** The difference in hit rates that restarts the climber. */
  static final double HILL_CLIMBER_RESTART_THRESHOLD = 0.05d;
  /** The percent of the total size to adapt the window by. */
  static final double HILL_CLIMBER_STEP_PERCENT = 0.0625d;
  /** The rate to decrease the step size to adapt by. */
  static final double HILL_CLIMBER_STEP_DECAY_RATE = 0.98d;
  /** The minimum popularity for allowing randomized admission. */
  static final int ADMIT_HASHDOS_THRESHOLD = 6;
  /** The maximum number of entries that can be transferred between queues. */
  static final int QUEUE_TRANSFER_THRESHOLD = 1_000;
  /** The maximum time window between entry updates before the expiration must be reordered. */
  static final long EXPIRE_WRITE_TOLERANCE = TimeUnit.SECONDS.toNanos(1);
  /** The maximum duration before an entry expires. */
  static final long MAXIMUM_EXPIRY = (Long.MAX_VALUE >> 1); // 150 years
  /** The duration to wait on the eviction lock before warning of a possible misuse. */
  static final long WARN_AFTER_LOCK_WAIT_NANOS = TimeUnit.SECONDS.toNanos(30);
  /** The number of retries before computing to validate the entry's integrity; pow2 modulus. */
  static final int MAX_PUT_SPIN_WAIT_ATTEMPTS = 1024 - 1;
  /** The handle for the in-flight refresh operations. */
  static final VarHandle REFRESHES;

  /** The listener invoked by {@code notifyEviction}, or null when none is configured. */
  final @Nullable RemovalListener<K, V> evictionListener;
  /** The loader for computing absent or refreshed values, or null when none is configured. */
  final @Nullable AsyncCacheLoader<K, V> cacheLoader;

  /** The buffer of pending policy work produced by map writes. */
  final MpscGrowableArrayQueue<Runnable> writeBuffer;
  /** The backing map holding the cache's entries, keyed by the (possibly wrapped) key. */
  final ConcurrentHashMap<Object, Node<K, V>> data;
  /** The task used to drain the read and write buffers (maintenance work). */
  final PerformCleanupTask drainBuffersTask;
  /** The action applied to a node on read; a no-op when no policy consumes accesses. */
  final Consumer<Node<K, V>> accessPolicy;
  /** The buffer recording recent reads, drained to update the access policy. */
  final Buffer<Node<K, V>> readBuffer;
  /** The factory for creating nodes and lookup keys for this configuration. */
  final NodeFactory<K, V> nodeFactory;
  /** The lock guarding the eviction policy's data structures. */
  final ReentrantLock evictionLock;
  /** The weigher used to compute an entry's weight. */
  final Weigher<K, V> weigher;
  /** The executor supplied by the builder, used for listener and maintenance work. */
  final Executor executor;

  /** Whether the builder configured weighted (rather than unit-weight) entries. */
  final boolean isWeighted;
  /** Whether values are CompletableFutures (async cache mode). */
  final boolean isAsync;

  // Lazily created collection views and the lazily created in-flight refresh map
  @Nullable Set<K> keySet;
  @Nullable Collection<V> values;
  @Nullable Set<Entry<K, V>> entrySet;
  volatile @Nullable ConcurrentMap<Object, CompletableFuture<?>> refreshes;
259

260
  /** Creates an instance based on the builder's configuration. */
  @SuppressWarnings("GuardedBy")
  protected BoundedLocalCache(Caffeine<K, V> builder,
      @Nullable AsyncCacheLoader<K, V> cacheLoader, boolean isAsync) {
    this.isAsync = isAsync;
    this.cacheLoader = cacheLoader;
    executor = builder.getExecutor();
    isWeighted = builder.isWeighted();
    evictionLock = new ReentrantLock();
    weigher = builder.getWeigher(isAsync);
    drainBuffersTask = new PerformCleanupTask(this);
    nodeFactory = NodeFactory.newFactory(builder, isAsync);
    evictionListener = builder.getEvictionListener(isAsync);
    data = new ConcurrentHashMap<>(builder.getInitialCapacity());
    // A real read buffer is only needed when reads feed a policy (size eviction,
    // reference collection, or time-to-idle expiration); otherwise it is disabled
    readBuffer = evicts() || collectKeys() || collectValues() || expiresAfterAccess()
        ? new BoundedBuffer<>()
        : Buffer.disabled();
    // Reads only notify the policy when eviction or time-to-idle requires it
    accessPolicy = (evicts() || expiresAfterAccess()) ? this::onAccess : e -> {};
    writeBuffer = new MpscGrowableArrayQueue<>(WRITE_BUFFER_MIN, WRITE_BUFFER_MAX);

    if (evicts()) {
      setMaximumSize(builder.getMaximum());
    }
  }
284

285
  static {
    try {
      // Binds the VarHandle used for the racy lazy initialization of the refresh map
      REFRESHES = MethodHandles.lookup()
          .findVarHandle(BoundedLocalCache.class, "refreshes", ConcurrentMap.class);
    } catch (ReflectiveOperationException e) {
      // A lookup failure is a programming error; fail class initialization with the cause
      throw new ExceptionInInitializerError(e);
    }
  }
293

294
  /** Ensures that the node is alive during the map operation. */
295
  void requireIsAlive(Object key, Node<?, ?> node) {
296
    if (!node.isAlive()) {
1✔
297
      throw new IllegalStateException(brokenEqualityMessage(key, node));
1✔
298
    }
299
  }
1✔
300

301
  /** Logs if the node cannot be found in the map but is still alive. */
302
  void logIfAlive(Node<?, ?> node) {
303
    if (node.isAlive()) {
1✔
304
      String message = brokenEqualityMessage(node.getKeyReference(), node);
1✔
305
      logger.log(Level.ERROR, message, new IllegalStateException());
1✔
306
    }
307
  }
1✔
308

309
  /** Returns the formatted broken equality error message. */
310
  String brokenEqualityMessage(Object key, Node<?, ?> node) {
311
    return String.format(US, "An invalid state was detected, occurring when the key's equals or "
1✔
312
        + "hashCode was modified while residing in the cache. This violation of the Map "
313
        + "contract can lead to non-deterministic behavior (key: %s, key type: %s, "
314
        + "node type: %s, cache type: %s).", key, key.getClass().getName(),
1✔
315
        node.getClass().getSimpleName(), getClass().getSimpleName());
1✔
316
  }
317

318
  /* --------------- Shared --------------- */

  @Override
  public boolean isAsync() {
    return isAsync;
  }

  /** Returns if the node's value is currently being computed asynchronously. */
  final boolean isComputingAsync(@Nullable V value) {
    // In async mode the value is a CompletableFuture, so an incomplete future means
    // the computation is still in flight
    return isAsync && !Async.isReady((CompletableFuture<?>) value);
  }
329

330
  /** Returns the access-order deque for the admission window space. */
  @GuardedBy("evictionLock")
  protected AccessOrderDeque<Node<K, V>> accessOrderWindowDeque() {
    throw new UnsupportedOperationException();
  }

  /** Returns the access-order deque for the main space's probation region. */
  @GuardedBy("evictionLock")
  protected AccessOrderDeque<Node<K, V>> accessOrderProbationDeque() {
    throw new UnsupportedOperationException();
  }

  /** Returns the access-order deque for the main space's protected region. */
  @GuardedBy("evictionLock")
  protected AccessOrderDeque<Node<K, V>> accessOrderProtectedDeque() {
    throw new UnsupportedOperationException();
  }

  /** Returns the write-order deque used by the time-to-live policy. */
  @GuardedBy("evictionLock")
  protected WriteOrderDeque<Node<K, V>> writeOrderDeque() {
    throw new UnsupportedOperationException();
  }
349

350
  /** Returns the executor supplied by the builder. */
  @Override
  public final Executor executor() {
    return executor;
  }
354

355
  @Override
  @SuppressWarnings("NullAway")
  public ConcurrentMap<Object, CompletableFuture<?>> refreshes() {
    // Lazily initializes the in-flight refresh map with a benign race: if the CAS
    // loses, adopt the map published by the winning thread
    @Var var pending = refreshes;
    if (pending == null) {
      pending = new ConcurrentHashMap<>();
      if (!REFRESHES.compareAndSet(this, null, pending)) {
        pending = refreshes;
      }
    }
    return pending;
  }
367

368
  /** Invalidate the in-flight refresh. */
  void discardRefresh(Object keyReference) {
    var pending = refreshes;
    // NOTE(review): the containsKey pre-check appears to be an optimization so the
    // common no-refresh case avoids ConcurrentHashMap.remove's bin locking — confirm;
    // remove is still safe if another thread removes the entry in between
    if ((pending != null) && pending.containsKey(keyReference)) {
      pending.remove(keyReference);
    }
  }
375

376
  @Override
  public Object referenceKey(K key) {
    // The lookup key may be a wrapper depending on the configured key strength
    return nodeFactory.newLookupKey(key);
  }

  @Override
  public boolean isPendingEviction(K key) {
    // A mapped entry is pending eviction when its value was collected (null) or it
    // has expired but the maintenance cycle has not yet removed it
    Node<K, V> node = data.get(nodeFactory.newLookupKey(key));
    return (node != null)
        && ((node.getValue() == null) || hasExpired(node, expirationTicker().read()));
  }
387

388
  /* --------------- Stats Support --------------- */

  /** Returns if statistics are recorded; overridden by generated subclasses when enabled. */
  @Override
  public boolean isRecordingStats() {
    return false;
  }

  /** Returns the counter that aggregates statistics; disabled by default. */
  @Override
  public StatsCounter statsCounter() {
    return StatsCounter.disabledStatsCounter();
  }

  /** Returns the ticker used for statistics timing; disabled by default. */
  @Override
  public Ticker statsTicker() {
    return Ticker.disabledTicker();
  }
404

405
  /* --------------- Removal Listener Support --------------- */

  /** Returns the removal listener; null by default, overridden when one is configured. */
  @SuppressWarnings("NullAway")
  protected RemovalListener<K, V> removalListener() {
    return null;
  }

  /** Returns if a removal listener is configured. */
  protected boolean hasRemovalListener() {
    return false;
  }
415

416
  @Override
417
  public void notifyRemoval(@Nullable K key, @Nullable V value, RemovalCause cause) {
418
    if (!hasRemovalListener()) {
1✔
419
      return;
1✔
420
    }
421
    Runnable task = () -> {
1✔
422
      try {
423
        removalListener().onRemoval(key, value, cause);
1✔
424
      } catch (Throwable t) {
1✔
425
        logger.log(Level.WARNING, "Exception thrown by removal listener", t);
1✔
426
      }
1✔
427
    };
1✔
428
    try {
429
      executor.execute(task);
1✔
430
    } catch (Throwable t) {
1✔
431
      logger.log(Level.ERROR, "Exception thrown when submitting removal listener", t);
1✔
432
      task.run();
1✔
433
    }
1✔
434
  }
1✔
435

436
  /* --------------- Eviction Listener Support --------------- */
437

438
  void notifyEviction(@Nullable K key, @Nullable V value, RemovalCause cause) {
439
    if (evictionListener == null) {
1✔
440
      return;
1✔
441
    }
442
    try {
443
      evictionListener.onRemoval(key, value, cause);
1✔
444
    } catch (Throwable t) {
1✔
445
      logger.log(Level.WARNING, "Exception thrown by eviction listener", t);
1✔
446
    }
1✔
447
  }
1✔
448

449
  /* --------------- Reference Support --------------- */

  /** Returns if the keys are weak reference garbage collected. */
  protected boolean collectKeys() {
    return false;
  }

  /** Returns if the values are weak or soft reference garbage collected. */
  protected boolean collectValues() {
    return false;
  }

  /** Returns the queue of collected key references; null when keys are strongly held. */
  @SuppressWarnings("NullAway")
  protected ReferenceQueue<K> keyReferenceQueue() {
    return null;
  }

  /** Returns the queue of collected value references; null when values are strongly held. */
  @SuppressWarnings("NullAway")
  protected ReferenceQueue<V> valueReferenceQueue() {
    return null;
  }
470

471
  /* --------------- Expiration Support --------------- */

  /** Returns the {@link Pacer} used to schedule the maintenance task. */
  protected @Nullable Pacer pacer() {
    return null;
  }

  /** Returns if the cache expires entries after a variable time threshold. */
  protected boolean expiresVariable() {
    return false;
  }

  /** Returns if the cache expires entries after an access time threshold. */
  protected boolean expiresAfterAccess() {
    return false;
  }

  /** Returns how long after the last access to an entry the map will retain that entry. */
  protected long expiresAfterAccessNanos() {
    throw new UnsupportedOperationException();
  }

  /** Sets the time-to-idle duration; supported only when the policy is enabled. */
  protected void setExpiresAfterAccessNanos(long expireAfterAccessNanos) {
    throw new UnsupportedOperationException();
  }

  /** Returns if the cache expires entries after a write time threshold. */
  protected boolean expiresAfterWrite() {
    return false;
  }

  /** Returns how long after the last write to an entry the map will retain that entry. */
  protected long expiresAfterWriteNanos() {
    throw new UnsupportedOperationException();
  }

  /** Sets the time-to-live duration; supported only when the policy is enabled. */
  protected void setExpiresAfterWriteNanos(long expireAfterWriteNanos) {
    throw new UnsupportedOperationException();
  }

  /** Returns if the cache refreshes entries after a write time threshold. */
  protected boolean refreshAfterWrite() {
    return false;
  }

  /** Returns how long after the last write an entry becomes a candidate for refresh. */
  protected long refreshAfterWriteNanos() {
    throw new UnsupportedOperationException();
  }

  /** Sets the refresh-after-write duration; supported only when the policy is enabled. */
  protected void setRefreshAfterWriteNanos(long refreshAfterWriteNanos) {
    throw new UnsupportedOperationException();
  }

  /** Returns the expiry for variable expiration; null by default. */
  @Override
  @SuppressWarnings("NullAway")
  public Expiry<K, V> expiry() {
    return null;
  }

  /** Returns the {@link Ticker} used by this cache for expiration. */
  public Ticker expirationTicker() {
    return Ticker.disabledTicker();
  }

  /** Returns the timer wheel used by variable expiration; supported only when enabled. */
  protected TimerWheel<K, V> timerWheel() {
    throw new UnsupportedOperationException();
  }
539

540
  /* --------------- Eviction Support --------------- */

  /** Returns if the cache evicts entries due to a maximum size or weight threshold. */
  protected boolean evicts() {
    return false;
  }

  /** Returns if entries may be assigned different weights. */
  protected boolean isWeighted() {
    return (weigher != Weigher.singletonWeigher());
  }

  /** Returns the popularity sketch used by the admission filter; supported when evicting. */
  protected FrequencySketch<K> frequencySketch() {
    throw new UnsupportedOperationException();
  }

  /** Returns if an access to an entry can skip notifying the eviction policy. */
  protected boolean fastpath() {
    return false;
  }

  /** Returns the maximum weighted size. */
  protected long maximum() {
    throw new UnsupportedOperationException();
  }

  /** Returns the maximum weighted size of the window space. */
  protected long windowMaximum() {
    throw new UnsupportedOperationException();
  }

  /** Returns the maximum weighted size of the main's protected space. */
  protected long mainProtectedMaximum() {
    throw new UnsupportedOperationException();
  }

  /** Sets the maximum weighted size. */
  @GuardedBy("evictionLock")
  protected void setMaximum(long maximum) {
    throw new UnsupportedOperationException();
  }

  /** Sets the maximum weighted size of the window space. */
  @GuardedBy("evictionLock")
  protected void setWindowMaximum(long maximum) {
    throw new UnsupportedOperationException();
  }

  /** Sets the maximum weighted size of the main's protected space. */
  @GuardedBy("evictionLock")
  protected void setMainProtectedMaximum(long maximum) {
    throw new UnsupportedOperationException();
  }

  /** Returns the combined weight of the values in the cache (may be negative). */
  protected long weightedSize() {
    throw new UnsupportedOperationException();
  }

  /** Returns the uncorrected combined weight of the values in the window space. */
  protected long windowWeightedSize() {
    throw new UnsupportedOperationException();
  }

  /** Returns the uncorrected combined weight of the values in the main's protected space. */
  protected long mainProtectedWeightedSize() {
    throw new UnsupportedOperationException();
  }

  /** Sets the combined weight of the values in the cache. */
  @GuardedBy("evictionLock")
  protected void setWeightedSize(long weightedSize) {
    throw new UnsupportedOperationException();
  }

  /** Sets the combined weight of the values in the window space. */
  @GuardedBy("evictionLock")
  protected void setWindowWeightedSize(long weightedSize) {
    throw new UnsupportedOperationException();
  }

  /** Sets the combined weight of the values in the main's protected space. */
  @GuardedBy("evictionLock")
  protected void setMainProtectedWeightedSize(long weightedSize) {
    throw new UnsupportedOperationException();
  }

  /** Returns the hits recorded in the current hill-climber sample period. */
  protected int hitsInSample() {
    throw new UnsupportedOperationException();
  }

  /** Returns the misses recorded in the current hill-climber sample period. */
  protected int missesInSample() {
    throw new UnsupportedOperationException();
  }

  /** Returns the size of a hill-climber sample period. */
  protected int sampleCount() {
    throw new UnsupportedOperationException();
  }

  /** Returns the hill climber's current step size. */
  protected double stepSize() {
    throw new UnsupportedOperationException();
  }

  /** Returns the hit rate observed during the previous sample period. */
  protected double previousSampleHitRate() {
    throw new UnsupportedOperationException();
  }

  /** Returns the pending adjustment to the window size. */
  protected long adjustment() {
    throw new UnsupportedOperationException();
  }

  /** Sets the hits recorded in the current sample period. */
  @GuardedBy("evictionLock")
  protected void setHitsInSample(int hitCount) {
    throw new UnsupportedOperationException();
  }

  /** Sets the misses recorded in the current sample period. */
  @GuardedBy("evictionLock")
  protected void setMissesInSample(int missCount) {
    throw new UnsupportedOperationException();
  }

  /** Sets the size of a hill-climber sample period. */
  @GuardedBy("evictionLock")
  protected void setSampleCount(int sampleCount) {
    throw new UnsupportedOperationException();
  }

  /** Sets the hill climber's step size. */
  @GuardedBy("evictionLock")
  protected void setStepSize(double stepSize) {
    throw new UnsupportedOperationException();
  }

  /** Sets the hit rate observed during the previous sample period. */
  @GuardedBy("evictionLock")
  protected void setPreviousSampleHitRate(double hitRate) {
    throw new UnsupportedOperationException();
  }

  /** Sets the pending adjustment to the window size. */
  @GuardedBy("evictionLock")
  protected void setAdjustment(long amount) {
    throw new UnsupportedOperationException();
  }
674

675
  /**
676
   * Sets the maximum weighted size of the cache. The caller may need to perform a maintenance cycle
677
   * to eagerly evicts entries until the cache shrinks to the appropriate size.
678
   */
679
  @GuardedBy("evictionLock")
680
  @SuppressWarnings("Varifier")
681
  void setMaximumSize(long maximum) {
682
    requireArgument(maximum >= 0, "maximum must not be negative");
1✔
683
    if (maximum == maximum()) {
1✔
684
      return;
1✔
685
    }
686

687
    long max = Math.min(maximum, MAXIMUM_CAPACITY);
1✔
688
    long window = max - (long) (PERCENT_MAIN * max);
1✔
689
    long mainProtected = (long) (PERCENT_MAIN_PROTECTED * (max - window));
1✔
690

691
    setMaximum(max);
1✔
692
    setWindowMaximum(window);
1✔
693
    setMainProtectedMaximum(mainProtected);
1✔
694

695
    setHitsInSample(0);
1✔
696
    setMissesInSample(0);
1✔
697
    setStepSize(-HILL_CLIMBER_STEP_PERCENT * max);
1✔
698

699
    if ((frequencySketch() != null) && !isWeighted() && (weightedSize() >= (max >>> 1))) {
1✔
700
      // Lazily initialize when close to the maximum size
701
      frequencySketch().ensureCapacity(max);
1✔
702
    }
703
  }
1✔
704

705
  /** Evicts entries if the cache exceeds the maximum. */
706
  @GuardedBy("evictionLock")
707
  void evictEntries() {
708
    if (!evicts()) {
1✔
709
      return;
1✔
710
    }
711
    var candidate = evictFromWindow();
1✔
712
    evictFromMain(candidate);
1✔
713
  }
1✔
714

715
  /**
   * Evicts entries from the window space into the main space while the window size exceeds a
   * maximum.
   *
   * @return the first candidate promoted into the probation space
   */
  @GuardedBy("evictionLock")
  @Nullable Node<K, V> evictFromWindow() {
    @Var Node<K, V> first = null;
    @Var Node<K, V> node = accessOrderWindowDeque().peekFirst();
    while (windowWeightedSize() > windowMaximum()) {
      // The pending operations will adjust the size to reflect the correct weight
      if (node == null) {
        break;
      }

      // Capture the successor before unlinking the node from the window deque
      Node<K, V> next = node.getNextInAccessOrder();
      if (node.getPolicyWeight() != 0) {
        // Demote from the window to the MRU position of the probation space
        node.makeMainProbation();
        accessOrderWindowDeque().remove(node);
        accessOrderProbationDeque().offerLast(node);
        if (first == null) {
          first = node;
        }

        setWindowWeightedSize(windowWeightedSize() - node.getPolicyWeight());
      }
      node = next;
    }

    return first;
  }
747

748
  /**
   * Evicts entries from the main space if the cache exceeds the maximum capacity. The main space
   * determines whether admitting an entry (coming from the window space) is preferable to retaining
   * the eviction policy's victim. This decision is made using a frequency filter so that the
   * least frequently used entry is removed.
   * <p>
   * The window space's candidates were previously promoted to the probation space at its MRU
   * position and the eviction policy's victim starts at the LRU position. The candidates are
   * evaluated in promotion order while an eviction is required, and if exhausted then additional
   * entries are retrieved from the window space. Likewise, if the victim selection exhausts the
   * probation space then additional entries are retrieved from the protected space. The queues are
   * consumed in LRU order and the evicted entry is the one with a lower relative frequency, where
   * the preference is to retain the main space's victims versus the window space's candidates on a
   * tie.
   *
   * @param candidate the first candidate promoted into the probation space
   */
  @GuardedBy("evictionLock")
  void evictFromMain(@Var @Nullable Node<K, V> candidate) {
    @Var int victimQueue = PROBATION;
    @Var int candidateQueue = PROBATION;
    // The victim pointer starts at the LRU end of the probation queue; the candidate walks in
    // promotion order. The third argument to evictEntry (now) is only used for expiration, so
    // 0L is passed for SIZE / COLLECTED evictions.
    @Var Node<K, V> victim = accessOrderProbationDeque().peekFirst();
    while (weightedSize() > maximum()) {
      // Search the admission window for additional candidates
      if ((candidate == null) && (candidateQueue == PROBATION)) {
        candidate = accessOrderWindowDeque().peekFirst();
        candidateQueue = WINDOW;
      }

      // Try evicting from the protected and window queues
      if ((candidate == null) && (victim == null)) {
        if (victimQueue == PROBATION) {
          victim = accessOrderProtectedDeque().peekFirst();
          victimQueue = PROTECTED;
          continue;
        } else if (victimQueue == PROTECTED) {
          victim = accessOrderWindowDeque().peekFirst();
          victimQueue = WINDOW;
          continue;
        }

        // The pending operations will adjust the size to reflect the correct weight
        break;
      }

      // Skip over entries with zero weight
      if ((victim != null) && (victim.getPolicyWeight() == 0)) {
        victim = victim.getNextInAccessOrder();
        continue;
      } else if ((candidate != null) && (candidate.getPolicyWeight() == 0)) {
        candidate = candidate.getNextInAccessOrder();
        continue;
      }

      // Evict immediately if only one of the entries is present
      if (victim == null) {
        @SuppressWarnings("NullAway")
        Node<K, V> previous = candidate.getNextInAccessOrder();
        Node<K, V> evict = candidate;
        candidate = previous;
        evictEntry(evict, RemovalCause.SIZE, 0L);
        continue;
      } else if (candidate == null) {
        Node<K, V> evict = victim;
        victim = victim.getNextInAccessOrder();
        evictEntry(evict, RemovalCause.SIZE, 0L);
        continue;
      }

      // Evict immediately if both selected the same entry
      if (candidate == victim) {
        victim = victim.getNextInAccessOrder();
        evictEntry(candidate, RemovalCause.SIZE, 0L);
        candidate = null;
        continue;
      }

      // Evict immediately if an entry was collected (its weak/soft key was reclaimed)
      K victimKey = victim.getKey();
      K candidateKey = candidate.getKey();
      if (victimKey == null) {
        Node<K, V> evict = victim;
        victim = victim.getNextInAccessOrder();
        evictEntry(evict, RemovalCause.COLLECTED, 0L);
        continue;
      } else if (candidateKey == null) {
        Node<K, V> evict = candidate;
        candidate = candidate.getNextInAccessOrder();
        evictEntry(evict, RemovalCause.COLLECTED, 0L);
        continue;
      }

      // Evict immediately if an entry was removed
      if (!victim.isAlive()) {
        Node<K, V> evict = victim;
        victim = victim.getNextInAccessOrder();
        evictEntry(evict, RemovalCause.SIZE, 0L);
        continue;
      } else if (!candidate.isAlive()) {
        Node<K, V> evict = candidate;
        candidate = candidate.getNextInAccessOrder();
        evictEntry(evict, RemovalCause.SIZE, 0L);
        continue;
      }

      // Evict immediately if the candidate's weight exceeds the maximum
      if (candidate.getPolicyWeight() > maximum()) {
        Node<K, V> evict = candidate;
        candidate = candidate.getNextInAccessOrder();
        evictEntry(evict, RemovalCause.SIZE, 0L);
        continue;
      }

      // Evict the entry with the lowest frequency; ties retain the victim (see admit)
      if (admit(candidateKey, victimKey)) {
        Node<K, V> evict = victim;
        victim = victim.getNextInAccessOrder();
        evictEntry(evict, RemovalCause.SIZE, 0L);
        candidate = candidate.getNextInAccessOrder();
      } else {
        Node<K, V> evict = candidate;
        candidate = candidate.getNextInAccessOrder();
        evictEntry(evict, RemovalCause.SIZE, 0L);
      }
    }
  }
  /**
876
   * Determines if the candidate should be accepted into the main space, as determined by its
877
   * frequency relative to the victim. A small amount of randomness is used to protect against hash
878
   * collision attacks, where the victim's frequency is artificially raised so that no new entries
879
   * are admitted.
880
   *
881
   * @param candidateKey the key for the entry being proposed for long term retention
882
   * @param victimKey the key for the entry chosen by the eviction policy for replacement
883
   * @return if the candidate should be admitted and the victim ejected
884
   */
885
  @GuardedBy("evictionLock")
886
  boolean admit(K candidateKey, K victimKey) {
887
    int victimFreq = frequencySketch().frequency(victimKey);
1✔
888
    int candidateFreq = frequencySketch().frequency(candidateKey);
1✔
889
    if (candidateFreq > victimFreq) {
1✔
890
      return true;
1✔
891
    } else if (candidateFreq >= ADMIT_HASHDOS_THRESHOLD) {
1✔
892
      // The maximum frequency is 15 and halved to 7 after a reset to age the history. An attack
893
      // exploits that a hot candidate is rejected in favor of a hot victim. The threshold of a warm
894
      // candidate reduces the number of random acceptances to minimize the impact on the hit rate.
895
      int random = ThreadLocalRandom.current().nextInt();
1✔
896
      return ((random & 127) == 0);
1✔
897
    }
898
    return false;
1✔
899
  }
900

901
  /** Expires entries that have expired by access, write, or variable. */
902
  @GuardedBy("evictionLock")
903
  void expireEntries() {
904
    long now = expirationTicker().read();
1✔
905
    expireAfterAccessEntries(now);
1✔
906
    expireAfterWriteEntries(now);
1✔
907
    expireVariableEntries(now);
1✔
908

909
    Pacer pacer = pacer();
1✔
910
    if (pacer != null) {
1✔
911
      long delay = getExpirationDelay(now);
1✔
912
      if (delay == Long.MAX_VALUE) {
1✔
913
        pacer.cancel();
1✔
914
      } else {
915
        pacer.schedule(executor, drainBuffersTask, now, delay);
1✔
916
      }
917
    }
918
  }
1✔
919

920
  /** Expires entries in the access-order queue. */
921
  @GuardedBy("evictionLock")
922
  void expireAfterAccessEntries(long now) {
923
    if (!expiresAfterAccess()) {
1✔
924
      return;
1✔
925
    }
926

927
    expireAfterAccessEntries(now, accessOrderWindowDeque());
1✔
928
    if (evicts()) {
1✔
929
      expireAfterAccessEntries(now, accessOrderProbationDeque());
1✔
930
      expireAfterAccessEntries(now, accessOrderProtectedDeque());
1✔
931
    }
932
  }
1✔
933

934
  /** Expires entries in an access-order queue. */
935
  @GuardedBy("evictionLock")
936
  void expireAfterAccessEntries(long now, AccessOrderDeque<Node<K, V>> accessOrderDeque) {
937
    long duration = expiresAfterAccessNanos();
1✔
938
    for (;;) {
939
      Node<K, V> node = accessOrderDeque.peekFirst();
1✔
940
      if ((node == null) || ((now - node.getAccessTime()) < duration)
1✔
941
          || !evictEntry(node, RemovalCause.EXPIRED, now)) {
1✔
942
        return;
1✔
943
      }
944
    }
1✔
945
  }
946

947
  /** Expires entries on the write-order queue. */
948
  @GuardedBy("evictionLock")
949
  void expireAfterWriteEntries(long now) {
950
    if (!expiresAfterWrite()) {
1✔
951
      return;
1✔
952
    }
953
    long duration = expiresAfterWriteNanos();
1✔
954
    for (;;) {
955
      Node<K, V> node = writeOrderDeque().peekFirst();
1✔
956
      if ((node == null) || ((now - node.getWriteTime()) < duration)
1✔
957
          || !evictEntry(node, RemovalCause.EXPIRED, now)) {
1✔
958
        break;
1✔
959
      }
960
    }
1✔
961
  }
1✔
962

963
  /** Expires entries in the timer wheel. */
964
  @GuardedBy("evictionLock")
965
  void expireVariableEntries(long now) {
966
    if (expiresVariable()) {
1✔
967
      timerWheel().advance(this, now);
1✔
968
    }
969
  }
1✔
970

971
  /** Returns the duration until the next item expires, or {@link Long#MAX_VALUE} if none. */
972
  @GuardedBy("evictionLock")
973
  long getExpirationDelay(long now) {
974
    @Var long delay = Long.MAX_VALUE;
1✔
975
    if (expiresAfterAccess()) {
1✔
976
      @Var Node<K, V> node = accessOrderWindowDeque().peekFirst();
1✔
977
      if (node != null) {
1✔
978
        delay = Math.min(delay, expiresAfterAccessNanos() - (now - node.getAccessTime()));
1✔
979
      }
980
      if (evicts()) {
1✔
981
        node = accessOrderProbationDeque().peekFirst();
1✔
982
        if (node != null) {
1✔
983
          delay = Math.min(delay, expiresAfterAccessNanos() - (now - node.getAccessTime()));
1✔
984
        }
985
        node = accessOrderProtectedDeque().peekFirst();
1✔
986
        if (node != null) {
1✔
987
          delay = Math.min(delay, expiresAfterAccessNanos() - (now - node.getAccessTime()));
1✔
988
        }
989
      }
990
    }
991
    if (expiresAfterWrite()) {
1✔
992
      Node<K, V> node = writeOrderDeque().peekFirst();
1✔
993
      if (node != null) {
1✔
994
        delay = Math.min(delay, expiresAfterWriteNanos() - (now - node.getWriteTime()));
1✔
995
      }
996
    }
997
    if (expiresVariable()) {
1✔
998
      delay = Math.min(delay, timerWheel().getExpirationDelay());
1✔
999
    }
1000
    return delay;
1✔
1001
  }
1002

1003
  /** Returns if the entry has expired by any of the enabled expiration policies. */
  @SuppressWarnings("ShortCircuitBoolean")
  boolean hasExpired(Node<K, V> node, long now) {
    // An in-flight async computation is never treated as expired
    if (isComputingAsync(node.getValue())) {
      return false;
    }
    // Deliberately uses the non-short-circuiting | (see the ShortCircuitBoolean suppression);
    // presumably to evaluate branch-free — confirm before "simplifying" to ||
    return (expiresAfterAccess() && (now - node.getAccessTime() >= expiresAfterAccessNanos()))
        | (expiresAfterWrite() && (now - node.getWriteTime() >= expiresAfterWriteNanos()))
        | (expiresVariable() && (now - node.getVariableTime() >= 0));
  }
  /**
   * Attempts to evict the entry based on the given removal cause. A removal may be ignored if the
   * entry was updated and is no longer eligible for eviction.
   *
   * @param node the entry to evict
   * @param cause the reason to evict
   * @param now the current time, used only if expiring
   * @return if the entry was evicted
   */
  @GuardedBy("evictionLock")
  @SuppressWarnings({"GuardedByChecker", "NullAway", "PMD.CollapsibleIfStatements"})
  boolean evictEntry(Node<K, V> node, RemovalCause cause, long now) {
    K key = node.getKey();
    // Single-element arrays capture results out of the computeIfPresent lambda
    @SuppressWarnings("unchecked")
    var value = (V[]) new Object[1];
    var removed = new boolean[1];
    var resurrect = new boolean[1];
    var actualCause = new RemovalCause[1];
    var keyReference = node.getKeyReference();

    // Atomically decide whether to remove the mapping; the node lock guards its state transition
    data.computeIfPresent(keyReference, (k, n) -> {
      if (n != node) {
        // A different node is mapped; this one was already replaced
        return n;
      }
      synchronized (n) {
        value[0] = n.getValue();

        if ((key == null) || (value[0] == null)) {
          // The weak/soft reference was reclaimed, so report the collection instead
          actualCause[0] = RemovalCause.COLLECTED;
        } else if (cause == RemovalCause.COLLECTED) {
          // The entry has a live key and value, so the collection no longer applies
          resurrect[0] = true;
          return n;
        } else {
          actualCause[0] = cause;
        }

        if (actualCause[0] == RemovalCause.EXPIRED) {
          // Re-validate expiration under the lock; a concurrent read/write may have renewed it
          @Var boolean expired = false;
          if (expiresAfterAccess()) {
            expired |= ((now - n.getAccessTime()) >= expiresAfterAccessNanos());
          }
          if (expiresAfterWrite()) {
            expired |= ((now - n.getWriteTime()) >= expiresAfterWriteNanos());
          }
          if (expiresVariable()) {
            expired |= ((now - node.getVariableTime()) >= 0);
          }
          if (!expired) {
            resurrect[0] = true;
            return n;
          }
        } else if (actualCause[0] == RemovalCause.SIZE) {
          // A zero-weight entry does not contribute to the size, so it is not evictable
          int weight = node.getWeight();
          if (weight == 0) {
            resurrect[0] = true;
            return n;
          }
        }

        notifyEviction(key, value[0], actualCause[0]);
        discardRefresh(keyReference);
        removed[0] = true;
        node.retire();
      }
      return null;
    });

    // The entry is no longer eligible for eviction
    if (resurrect[0]) {
      return false;
    }

    // If the eviction fails due to a concurrent removal of the victim, that removal may cancel out
    // the addition that triggered this eviction. The victim is eagerly unlinked and the size
    // decremented before the removal task so that if an eviction is still required then a new
    // victim will be chosen for removal.
    if (node.inWindow() && (evicts() || expiresAfterAccess())) {
      accessOrderWindowDeque().remove(node);
    } else if (evicts()) {
      if (node.inMainProbation()) {
        accessOrderProbationDeque().remove(node);
      } else {
        accessOrderProtectedDeque().remove(node);
      }
    }
    if (expiresAfterWrite()) {
      writeOrderDeque().remove(node);
    } else if (expiresVariable()) {
      timerWheel().deschedule(node);
    }

    synchronized (node) {
      logIfAlive(node);
      makeDead(node);
    }

    if (removed[0]) {
      statsCounter().recordEviction(node.getWeight(), actualCause[0]);
      notifyRemoval(key, value[0], actualCause[0]);
    }

    return true;
  }
  /** Adapts the eviction policy to towards the optimal recency / frequency configuration. */
1119
  @GuardedBy("evictionLock")
1120
  void climb() {
1121
    if (!evicts()) {
1✔
1122
      return;
1✔
1123
    }
1124

1125
    determineAdjustment();
1✔
1126
    demoteFromMainProtected();
1✔
1127
    long amount = adjustment();
1✔
1128
    if (amount == 0) {
1✔
1129
      return;
1✔
1130
    } else if (amount > 0) {
1✔
1131
      increaseWindow();
1✔
1132
    } else {
1133
      decreaseWindow();
1✔
1134
    }
1135
  }
1✔
1136

1137
  /** Calculates the amount to adapt the window by and sets {@link #adjustment()} accordingly. */
1138
  @GuardedBy("evictionLock")
1139
  void determineAdjustment() {
1140
    if (frequencySketch().isNotInitialized()) {
1✔
1141
      setPreviousSampleHitRate(0.0);
1✔
1142
      setMissesInSample(0);
1✔
1143
      setHitsInSample(0);
1✔
1144
      return;
1✔
1145
    }
1146

1147
    int requestCount = hitsInSample() + missesInSample();
1✔
1148
    if (requestCount < frequencySketch().sampleSize) {
1✔
1149
      return;
1✔
1150
    }
1151

1152
    double hitRate = (double) hitsInSample() / requestCount;
1✔
1153
    double hitRateChange = hitRate - previousSampleHitRate();
1✔
1154
    double amount = (hitRateChange >= 0) ? stepSize() : -stepSize();
1✔
1155
    double nextStepSize = (Math.abs(hitRateChange) >= HILL_CLIMBER_RESTART_THRESHOLD)
1✔
1156
        ? HILL_CLIMBER_STEP_PERCENT * maximum() * (amount >= 0 ? 1 : -1)
1✔
1157
        : HILL_CLIMBER_STEP_DECAY_RATE * amount;
1✔
1158
    setPreviousSampleHitRate(hitRate);
1✔
1159
    setAdjustment((long) amount);
1✔
1160
    setStepSize(nextStepSize);
1✔
1161
    setMissesInSample(0);
1✔
1162
    setHitsInSample(0);
1✔
1163
  }
1✔
1164

1165
  /**
   * Increases the size of the admission window by shrinking the portion allocated to the main
   * space. As the main space is partitioned into probation and protected regions (80% / 20%), for
   * simplicity only the protected is reduced. If the regions exceed their maximums, this may cause
   * protected items to be demoted to the probation region and probation items to be demoted to the
   * admission window.
   */
  @GuardedBy("evictionLock")
  void increaseWindow() {
    if (mainProtectedMaximum() == 0) {
      return;
    }

    // Tentatively transfer the quota from the protected region to the window
    @Var long quota = Math.min(adjustment(), mainProtectedMaximum());
    setMainProtectedMaximum(mainProtectedMaximum() - quota);
    setWindowMaximum(windowMaximum() + quota);
    demoteFromMainProtected();

    for (int i = 0; i < QUEUE_TRANSFER_THRESHOLD; i++) {
      // Prefer probation entries; fall back to protected when the probation head is too heavy
      @Var Node<K, V> candidate = accessOrderProbationDeque().peekFirst();
      @Var boolean probation = true;
      if ((candidate == null) || (quota < candidate.getPolicyWeight())) {
        candidate = accessOrderProtectedDeque().peekFirst();
        probation = false;
      }
      if (candidate == null) {
        break;
      }

      int weight = candidate.getPolicyWeight();
      if (quota < weight) {
        break;
      }

      quota -= weight;
      if (probation) {
        accessOrderProbationDeque().remove(candidate);
      } else {
        setMainProtectedWeightedSize(mainProtectedWeightedSize() - weight);
        accessOrderProtectedDeque().remove(candidate);
      }
      setWindowWeightedSize(windowWeightedSize() + weight);
      accessOrderWindowDeque().offerLast(candidate);
      candidate.makeWindow();
    }

    // Return the unused quota so the maximums reflect the weight actually transferred
    setMainProtectedMaximum(mainProtectedMaximum() + quota);
    setWindowMaximum(windowMaximum() - quota);
    setAdjustment(quota);
  }
  /** Decreases the size of the admission window and increases the main's protected region. */
  @GuardedBy("evictionLock")
  void decreaseWindow() {
    if (windowMaximum() <= 1) {
      return;
    }

    // Tentatively transfer the quota from the window to the protected region
    @Var long quota = Math.min(-adjustment(), Math.max(0, windowMaximum() - 1));
    setMainProtectedMaximum(mainProtectedMaximum() + quota);
    setWindowMaximum(windowMaximum() - quota);

    for (int i = 0; i < QUEUE_TRANSFER_THRESHOLD; i++) {
      Node<K, V> candidate = accessOrderWindowDeque().peekFirst();
      if (candidate == null) {
        break;
      }

      int weight = candidate.getPolicyWeight();
      if (quota < weight) {
        break;
      }

      quota -= weight;
      setWindowWeightedSize(windowWeightedSize() - weight);
      accessOrderWindowDeque().remove(candidate);
      accessOrderProbationDeque().offerLast(candidate);
      candidate.makeMainProbation();
    }

    // Return the unused quota so the maximums reflect the weight actually transferred
    setMainProtectedMaximum(mainProtectedMaximum() - quota);
    setWindowMaximum(windowMaximum() + quota);
    setAdjustment(-quota);
  }
  /** Transfers the nodes from the protected to the probation region if it exceeds the maximum. */
1251
  @GuardedBy("evictionLock")
1252
  void demoteFromMainProtected() {
1253
    long mainProtectedMaximum = mainProtectedMaximum();
1✔
1254
    @Var long mainProtectedWeightedSize = mainProtectedWeightedSize();
1✔
1255
    if (mainProtectedWeightedSize <= mainProtectedMaximum) {
1✔
1256
      return;
1✔
1257
    }
1258

1259
    for (int i = 0; i < QUEUE_TRANSFER_THRESHOLD; i++) {
1✔
1260
      if (mainProtectedWeightedSize <= mainProtectedMaximum) {
1✔
1261
        break;
1✔
1262
      }
1263

1264
      Node<K, V> demoted = accessOrderProtectedDeque().poll();
1✔
1265
      if (demoted == null) {
1✔
1266
        break;
1✔
1267
      }
1268
      demoted.makeMainProbation();
1✔
1269
      accessOrderProbationDeque().offerLast(demoted);
1✔
1270
      mainProtectedWeightedSize -= demoted.getPolicyWeight();
1✔
1271
    }
1272
    setMainProtectedWeightedSize(mainProtectedWeightedSize);
1✔
1273
  }
1✔
1274

1275
  /**
1276
   * Performs the post-processing work required after a read.
1277
   *
1278
   * @param node the entry in the page replacement policy
1279
   * @param now the current time, in nanoseconds
1280
   * @param recordHit if the hit count should be incremented
1281
   * @return the refreshed value if immediately loaded, else null
1282
   */
1283
  @Nullable V afterRead(Node<K, V> node, long now, boolean recordHit) {
1284
    if (recordHit) {
1✔
1285
      statsCounter().recordHits(1);
1✔
1286
    }
1287

1288
    boolean delayable = skipReadBuffer() || (readBuffer.offer(node) != Buffer.FULL);
1✔
1289
    if (shouldDrainBuffers(delayable)) {
1✔
1290
      scheduleDrainBuffers();
1✔
1291
    }
1292
    return refreshIfNeeded(node, now);
1✔
1293
  }
1294

1295
  /** Returns if the cache should bypass the read buffer. */
1296
  boolean skipReadBuffer() {
1297
    return fastpath() && frequencySketch().isNotInitialized();
1✔
1298
  }
1299

1300
  /**
   * Asynchronously refreshes the entry if eligible.
   *
   * @param node the entry in the cache to refresh
   * @param now the current time, in nanoseconds
   * @return the refreshed value if immediately loaded, else null
   */
  @SuppressWarnings("FutureReturnValueIgnored")
  @Nullable V refreshIfNeeded(Node<K, V> node, long now) {
    if (!refreshAfterWrite()) {
      return null;
    }

    K key;
    V oldValue;
    // The low bit of the write time acts as a reservation flag for an in-flight refresh:
    // casWriteTime(writeTime, writeTime | 1L) claims it and the finally block releases it
    long writeTime = node.getWriteTime();
    long refreshWriteTime = writeTime | 1L;
    Object keyReference = node.getKeyReference();
    ConcurrentMap<Object, CompletableFuture<?>> refreshes;
    // Eligibility: stale by refreshAfterWrite, live key/value, not an incomplete async compute,
    // not already reserved ((writeTime & 1L) == 0L), and no refresh already registered
    if (((now - writeTime) > refreshAfterWriteNanos()) && (keyReference != null)
        && ((key = node.getKey()) != null) && ((oldValue = node.getValue()) != null)
        && !isComputingAsync(oldValue) && ((writeTime & 1L) == 0L)
        && !(refreshes = refreshes()).containsKey(keyReference)
        && node.isAlive() && node.casWriteTime(writeTime, refreshWriteTime)) {
      long[] startTime = new long[1];
      @SuppressWarnings({"rawtypes", "unchecked"})
      CompletableFuture<? extends V>[] refreshFuture = new CompletableFuture[1];
      try {
        refreshes.computeIfAbsent(keyReference, k -> {
          try {
            startTime[0] = statsTicker().read();
            if (isAsync) {
              @SuppressWarnings("unchecked")
              var future = (CompletableFuture<V>) oldValue;
              if (Async.isReady(future)) {
                @SuppressWarnings("NullAway")
                var refresh = cacheLoader.asyncReload(key, future.join(), executor);
                refreshFuture[0] = requireNonNull(refresh, "Null future");
              } else {
                // no-op if the future's completion state was modified (e.g. obtrude methods)
                return null;
              }
            } else {
              @SuppressWarnings("NullAway")
              var refresh = cacheLoader.asyncReload(key, oldValue, executor);
              refreshFuture[0] = requireNonNull(refresh, "Null future");
            }
            return refreshFuture[0];
          } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            logger.log(Level.WARNING, "Exception thrown when submitting refresh task", e);
            return null;
          } catch (Throwable e) {
            logger.log(Level.WARNING, "Exception thrown when submitting refresh task", e);
            return null;
          }
        });
      } finally {
        // Release the reservation bit regardless of whether the submission succeeded
        node.casWriteTime(refreshWriteTime, writeTime);
      }

      if (refreshFuture[0] == null) {
        return null;
      }

      var refreshed = refreshFuture[0].handle((newValue, error) -> {
        long loadTime = statsTicker().read() - startTime[0];
        if (error != null) {
          if (!(error instanceof CancellationException) && !(error instanceof TimeoutException)) {
            logger.log(Level.WARNING, "Exception thrown during refresh", error);
          }
          refreshes.remove(keyReference, refreshFuture[0]);
          statsCounter().recordLoadFailure(loadTime);
          return null;
        }

        @SuppressWarnings("unchecked")
        V value = (isAsync && (newValue != null)) ? (V) refreshFuture[0] : newValue;

        RemovalCause[] cause = new RemovalCause[1];
        V result = compute(key, (k, currentValue) -> {
          if (currentValue == null) {
            // If the entry is absent then discard the refresh and maybe notify the listener
            if (value != null) {
              cause[0] = RemovalCause.EXPLICIT;
            }
            return null;
          } else if (currentValue == value) {
            // If the reloaded value is the same instance then no-op
            return currentValue;
          } else if (isAsync &&
              (newValue == Async.getIfReady((CompletableFuture<?>) currentValue))) {
            // If the completed futures hold the same value instance then no-op
            return currentValue;
          } else if ((currentValue == oldValue) && (node.getWriteTime() == writeTime)) {
            // If the entry was not modified while in-flight (no ABA) then replace
            return value;
          }
          // Otherwise, a write invalidated the refresh so discard it and notify the listener
          cause[0] = RemovalCause.REPLACED;
          return currentValue;
        }, expiry(), /* recordLoad= */ false, /* recordLoadFailure= */ true);

        if (cause[0] != null) {
          notifyRemoval(key, value, cause[0]);
        }
        if (newValue == null) {
          statsCounter().recordLoadFailure(loadTime);
        } else {
          statsCounter().recordLoadSuccess(loadTime);
        }

        refreshes.remove(keyReference, refreshFuture[0]);
        return result;
      });
      return Async.getIfReady(refreshed);
    }

    return null;
  }
  /**
1422
   * Returns the expiration time for the entry after being created.
1423
   *
1424
   * @param key the key of the entry that was created
1425
   * @param value the value of the entry that was created
1426
   * @param expiry the calculator for the expiration time
1427
   * @param now the current time, in nanoseconds
1428
   * @return the expiration time
1429
   */
1430
  long expireAfterCreate(@Nullable K key, @Nullable V value,
1431
      Expiry<? super K, ? super V> expiry, long now) {
1432
    if (expiresVariable() && (key != null) && (value != null)) {
1✔
1433
      long duration = expiry.expireAfterCreate(key, value, now);
1✔
1434
      return isAsync ? (now + duration) : (now + Math.min(duration, MAXIMUM_EXPIRY));
1✔
1435
    }
1436
    return 0L;
1✔
1437
  }
1438

1439
  /**
1440
   * Returns the expiration time for the entry after being updated.
1441
   *
1442
   * @param node the entry in the page replacement policy
1443
   * @param key the key of the entry that was updated
1444
   * @param value the value of the entry that was updated
1445
   * @param expiry the calculator for the expiration time
1446
   * @param now the current time, in nanoseconds
1447
   * @return the expiration time
1448
   */
1449
  long expireAfterUpdate(Node<K, V> node, @Nullable K key,
1450
      @Nullable V value, Expiry<? super K, ? super V> expiry, long now) {
1451
    if (expiresVariable() && (key != null) && (value != null)) {
1✔
1452
      long currentDuration = Math.max(1, node.getVariableTime() - now);
1✔
1453
      long duration = expiry.expireAfterUpdate(key, value, now, currentDuration);
1✔
1454
      return isAsync ? (now + duration) : (now + Math.min(duration, MAXIMUM_EXPIRY));
1✔
1455
    }
1456
    return 0L;
1✔
1457
  }
1458

1459
  /**
1460
   * Returns the access time for the entry after a read.
1461
   *
1462
   * @param node the entry in the page replacement policy
1463
   * @param key the key of the entry that was read
1464
   * @param value the value of the entry that was read
1465
   * @param expiry the calculator for the expiration time
1466
   * @param now the current time, in nanoseconds
1467
   * @return the expiration time
1468
   */
1469
  long expireAfterRead(Node<K, V> node, @Nullable K key,
1470
      @Nullable V value, Expiry<K, V> expiry, long now) {
1471
    if (expiresVariable() && (key != null) && (value != null)) {
1✔
1472
      long currentDuration = Math.max(1, node.getVariableTime() - now);
1✔
1473
      long duration = expiry.expireAfterRead(key, value, now, currentDuration);
1✔
1474
      return isAsync ? (now + duration) : (now + Math.min(duration, MAXIMUM_EXPIRY));
1✔
1475
    }
1476
    return 0L;
1✔
1477
  }
1478

1479
  /**
   * Attempts to update the access time for the entry after a read.
   *
   * @param node the entry in the page replacement policy
   * @param key the key of the entry that was read
   * @param value the value of the entry that was read
   * @param expiry the calculator for the expiration time
   * @param now the current time, in nanoseconds
   */
  void tryExpireAfterRead(Node<K, V> node, @Nullable K key,
      @Nullable V value, Expiry<K, V> expiry, long now) {
    if (!expiresVariable() || (key == null) || (value == null)) {
      return;
    }

    long variableTime = node.getVariableTime();
    long currentDuration = Math.max(1, variableTime - now);
    if (isAsync && (currentDuration > MAXIMUM_EXPIRY)) {
      // expireAfterCreate has not yet set the duration after completion
      return;
    }

    long duration = expiry.expireAfterRead(key, value, now, currentDuration);
    if (duration != currentDuration) {
      long expirationTime = isAsync ? (now + duration) : (now + Math.min(duration, MAXIMUM_EXPIRY));
      // A CAS guards against a concurrent update; on failure the other writer's time wins
      node.casVariableTime(variableTime, expirationTime);
    }
  }
1✔
1507

1508
  void setVariableTime(Node<K, V> node, long expirationTime) {
1509
    if (expiresVariable()) {
1✔
1510
      node.setVariableTime(expirationTime);
1✔
1511
    }
1512
  }
1✔
1513

1514
  void setWriteTime(Node<K, V> node, long now) {
1515
    if (expiresAfterWrite() || refreshAfterWrite()) {
1✔
1516
      node.setWriteTime(now & ~1L);
1✔
1517
    }
1518
  }
1✔
1519

1520
  void setAccessTime(Node<K, V> node, long now) {
1521
    if (expiresAfterAccess()) {
1✔
1522
      node.setAccessTime(now);
1✔
1523
    }
1524
  }
1✔
1525

1526
  /**
   * Performs the post-processing work required after a write.
   *
   * @param task the pending operation to be applied
   */
  void afterWrite(Runnable task) {
    // Fast path: enqueue the operation and let the maintenance task apply it asynchronously
    for (int i = 0; i < WRITE_BUFFER_RETRIES; i++) {
      if (writeBuffer.offer(task)) {
        scheduleAfterWrite();
        return;
      }
      // Buffer is full; nudge the drain and spin before retrying
      scheduleDrainBuffers();
      Thread.onSpinWait();
    }

    // In scenarios where the writing threads cannot make progress then they attempt to provide
    // assistance by performing the eviction work directly. This can resolve cases where the
    // maintenance task is scheduled but not running. That might occur due to all of the executor's
    // threads being busy (perhaps writing into this cache), the write rate greatly exceeds the
    // consuming rate, priority inversion, or if the executor silently discarded the maintenance
    // task. Unfortunately this cannot resolve when the eviction is blocked waiting on a long-
    // running computation due to an eviction listener, the victim is being computed on by a writer,
    // or the victim residing in the same hash bin as a computing entry. In those cases a warning is
    // logged to encourage the application to decouple these computations from the map operations.
    lock();
    try {
      maintenance(task);
    } catch (RuntimeException e) {
      logger.log(Level.ERROR, "Exception thrown when performing the maintenance task", e);
    } finally {
      evictionLock.unlock();
    }
    rescheduleCleanUpIfIncomplete();
  }
1✔
1560

1561
  /**
   * Acquires the eviction lock, logging a warning if the wait is excessive. An interrupt does not
   * abort the acquisition; the interrupted status is restored once the lock is held.
   */
  void lock() {
    @Var long remainingNanos = WARN_AFTER_LOCK_WAIT_NANOS;
    long end = System.nanoTime() + remainingNanos;
    @Var boolean interrupted = false;
    try {
      for (;;) {
        try {
          if (evictionLock.tryLock(remainingNanos, TimeUnit.NANOSECONDS)) {
            return;
          }
          // The timed wait elapsed; warn and fall back to an uninterruptible blocking acquire
          logger.log(Level.WARNING, "The cache is experiencing excessive wait times for acquiring "
              + "the eviction lock. This may indicate that a long-running computation has halted "
              + "eviction when trying to remove the victim entry. Consider using AsyncCache to "
              + "decouple the computation from the map operation.", new TimeoutException());
          evictionLock.lock();
          return;
        } catch (InterruptedException e) {
          // Retry with the remaining wait budget; the interrupt is re-asserted on exit
          remainingNanos = end - System.nanoTime();
          interrupted = true;
        }
      }
    } finally {
      if (interrupted) {
        Thread.currentThread().interrupt();
      }
    }
  }
1589

1590
  /**
   * Conditionally schedules the asynchronous maintenance task after a write operation. If the
   * task status was IDLE or REQUIRED then the maintenance task is scheduled immediately. If it
   * is already processing then it is set to transition to REQUIRED upon completion so that a new
   * execution is triggered by the next operation.
   */
  void scheduleAfterWrite() {
    @Var int drainStatus = drainStatusOpaque();
    for (;;) {
      switch (drainStatus) {
        case IDLE:
          // A failed CAS means another thread raced the transition; scheduling is still safe
          casDrainStatus(IDLE, REQUIRED);
          scheduleDrainBuffers();
          return;
        case REQUIRED:
          scheduleDrainBuffers();
          return;
        case PROCESSING_TO_IDLE:
          // Ask the in-flight maintenance run to loop back to REQUIRED when it finishes
          if (casDrainStatus(PROCESSING_TO_IDLE, PROCESSING_TO_REQUIRED)) {
            return;
          }
          drainStatus = drainStatusAcquire();
          continue;
        case PROCESSING_TO_REQUIRED:
          // A follow-up run is already guaranteed
          return;
        default:
          throw new IllegalStateException("Invalid drain status: " + drainStatus);
      }
    }
  }
1620

1621
  /**
   * Attempts to schedule an asynchronous task to apply the pending operations to the page
   * replacement policy. If the executor rejects the task then it is run directly.
   */
  void scheduleDrainBuffers() {
    if (drainStatusOpaque() >= PROCESSING_TO_IDLE) {
      // A maintenance run is already in flight
      return;
    }
    if (evictionLock.tryLock()) {
      try {
        // Recheck under the lock to avoid double-submitting the drain task
        int drainStatus = drainStatusOpaque();
        if (drainStatus >= PROCESSING_TO_IDLE) {
          return;
        }
        setDrainStatusRelease(PROCESSING_TO_IDLE);
        executor.execute(drainBuffersTask);
      } catch (Throwable t) {
        // If submission fails (e.g. executor rejection), run the maintenance inline
        logger.log(Level.WARNING, "Exception thrown when submitting maintenance task", t);
        maintenance(/* ignored */ null);
      } finally {
        evictionLock.unlock();
      }
    }
  }
1✔
1645

1646
  @Override
1647
  public void cleanUp() {
1648
    try {
1649
      performCleanUp(/* ignored */ null);
1✔
1650
    } catch (RuntimeException e) {
1✔
1651
      logger.log(Level.ERROR, "Exception thrown when performing the maintenance task", e);
1✔
1652
    }
1✔
1653
  }
1✔
1654

1655
  /**
   * Performs the maintenance work, blocking until the lock is acquired.
   *
   * @param task an additional pending task to run, or {@code null} if not present
   */
  void performCleanUp(@Nullable Runnable task) {
    evictionLock.lock();
    try {
      maintenance(task);
    } finally {
      evictionLock.unlock();
    }
    // Deliberately performed outside of the lock so the reschedule cannot extend the hold time
    rescheduleCleanUpIfIncomplete();
  }
1✔
1669

1670
  /**
   * If there remains pending operations that were not handled by the prior clean up then try to
   * schedule an asynchronous maintenance task. This may occur due to a concurrent write after the
   * maintenance work had started or if the amortized threshold of work per clean up was reached.
   */
  @SuppressWarnings("resource")
  void rescheduleCleanUpIfIncomplete() {
    if (drainStatusOpaque() != REQUIRED) {
      return;
    }

    // An immediate scheduling cannot be performed on a custom executor because it may use a
    // caller-runs policy. This could cause the caller's penalty to exceed the amortized threshold,
    // e.g. repeated concurrent writes could result in a retry loop.
    if (executor == ForkJoinPool.commonPool()) {
      scheduleDrainBuffers();
      return;
    }

    // If a scheduler was configured then the maintenance can be deferred onto the custom executor
    // and run in the near future. Otherwise, it will be handled due to other cache activity.
    var pacer = pacer();
    if ((pacer != null) && !pacer.isScheduled() && evictionLock.tryLock()) {
      try {
        // Recheck under the lock so only one thread schedules the deferred run
        if ((drainStatusOpaque() == REQUIRED) && !pacer.isScheduled()) {
          pacer.schedule(executor, drainBuffersTask, expirationTicker().read(), Pacer.TOLERANCE);
        }
      } finally {
        evictionLock.unlock();
      }
    }
  }
1✔
1702

1703
  /**
   * Performs the pending maintenance work and sets the state flags during processing to avoid
   * excess scheduling attempts. The read buffer, write buffer, and reference queues are drained,
   * followed by expiration, and size-based eviction.
   *
   * @param task an additional pending task to run, or {@code null} if not present
   */
  @GuardedBy("evictionLock")
  void maintenance(@Nullable Runnable task) {
    setDrainStatusRelease(PROCESSING_TO_IDLE);

    try {
      drainReadBuffer();

      drainWriteBuffer();
      if (task != null) {
        task.run();
      }

      drainKeyReferences();
      drainValueReferences();

      expireEntries();
      evictEntries();

      // Adaptively resize the window/protected regions based on the hit rate sample
      climb();
    } finally {
      // If new work arrived while processing (or the CAS loses a race with a concurrent
      // scheduleAfterWrite), fall back to REQUIRED so a follow-up run is triggered
      if ((drainStatusOpaque() != PROCESSING_TO_IDLE)
          || !casDrainStatus(PROCESSING_TO_IDLE, IDLE)) {
        setDrainStatusOpaque(REQUIRED);
      }
    }
  }
1✔
1736

1737
  /** Drains the weak key references queue. */
1738
  @GuardedBy("evictionLock")
1739
  void drainKeyReferences() {
1740
    if (!collectKeys()) {
1✔
1741
      return;
1✔
1742
    }
1743
    @Var Reference<? extends K> keyRef;
1744
    while ((keyRef = keyReferenceQueue().poll()) != null) {
1✔
1745
      Node<K, V> node = data.get(keyRef);
1✔
1746
      if (node != null) {
1✔
1747
        evictEntry(node, RemovalCause.COLLECTED, 0L);
1✔
1748
      }
1749
    }
1✔
1750
  }
1✔
1751

1752
  /** Drains the weak / soft value references queue. */
1753
  @GuardedBy("evictionLock")
1754
  void drainValueReferences() {
1755
    if (!collectValues()) {
1✔
1756
      return;
1✔
1757
    }
1758
    @Var Reference<? extends V> valueRef;
1759
    while ((valueRef = valueReferenceQueue().poll()) != null) {
1✔
1760
      @SuppressWarnings({"RedundantCast", "unchecked"})
1761
      var ref = (InternalReference<V>) (Object) valueRef;
1✔
1762
      Node<K, V> node = data.get(ref.getKeyReference());
1✔
1763
      if ((node != null) && (valueRef == node.getValueReference())) {
1✔
1764
        evictEntry(node, RemovalCause.COLLECTED, 0L);
1✔
1765
      }
1766
    }
1✔
1767
  }
1✔
1768

1769
  /** Drains the read buffer. */
1770
  @GuardedBy("evictionLock")
1771
  void drainReadBuffer() {
1772
    if (!skipReadBuffer()) {
1✔
1773
      readBuffer.drainTo(accessPolicy);
1✔
1774
    }
1775
  }
1✔
1776

1777
  /** Updates the node's location in the page replacement policy. */
  @GuardedBy("evictionLock")
  void onAccess(Node<K, V> node) {
    if (evicts()) {
      K key = node.getKey();
      if (key == null) {
        // The key was garbage collected; the entry will be evicted by the reference drain
        return;
      }
      frequencySketch().increment(key);
      if (node.inWindow()) {
        reorder(accessOrderWindowDeque(), node);
      } else if (node.inMainProbation()) {
        // A probation hit may promote the entry into the protected region
        reorderProbation(node);
      } else {
        reorder(accessOrderProtectedDeque(), node);
      }
      // Feeds the hit-rate sample used by climb() for adaptive sizing
      setHitsInSample(hitsInSample() + 1);
    } else if (expiresAfterAccess()) {
      reorder(accessOrderWindowDeque(), node);
    }
    if (expiresVariable()) {
      timerWheel().reschedule(node);
    }
  }
1✔
1801

1802
  /** Promote the node from probation to protected on an access. */
  @GuardedBy("evictionLock")
  void reorderProbation(Node<K, V> node) {
    if (!accessOrderProbationDeque().contains(node)) {
      // Ignore stale accesses for an entry that is no longer present
      return;
    } else if (node.getPolicyWeight() > mainProtectedMaximum()) {
      // Too heavy to ever fit in protected; refresh its recency within probation instead
      reorder(accessOrderProbationDeque(), node);
      return;
    }

    // If the protected space exceeds its maximum, the LRU items are demoted to the probation space.
    // This is deferred to the adaption phase at the end of the maintenance cycle.
    setMainProtectedWeightedSize(mainProtectedWeightedSize() + node.getPolicyWeight());
    accessOrderProbationDeque().remove(node);
    accessOrderProtectedDeque().offerLast(node);
    node.makeMainProtected();
  }
1✔
1820

1821
  /** Updates the node's location in the policy's deque. */
1822
  static <K, V> void reorder(LinkedDeque<Node<K, V>> deque, Node<K, V> node) {
1823
    // An entry may be scheduled for reordering despite having been removed. This can occur when the
1824
    // entry was concurrently read while a writer was removing it. If the entry is no longer linked
1825
    // then it does not need to be processed.
1826
    if (deque.contains(node)) {
1✔
1827
      deque.moveToBack(node);
1✔
1828
    }
1829
  }
1✔
1830

1831
  /** Drains the write buffer, applying at most an amortized threshold of pending tasks. */
  @GuardedBy("evictionLock")
  void drainWriteBuffer() {
    for (int i = 0; i <= WRITE_BUFFER_MAX; i++) {
      Runnable task = writeBuffer.poll();
      if (task == null) {
        return;
      }
      task.run();
    }
    // The threshold was exhausted with work remaining; flag a follow-up maintenance run
    setDrainStatusOpaque(PROCESSING_TO_REQUIRED);
  }
1✔
1843

1844
  /**
   * Atomically transitions the node to the <code>dead</code> state and decrements the
   * <code>weightedSize</code>.
   *
   * @param node the entry in the page replacement policy
   */
  @GuardedBy("evictionLock")
  void makeDead(Node<K, V> node) {
    synchronized (node) {
      if (node.isDead()) {
        // Already transitioned by a prior caller; the weights were adjusted then
        return;
      }
      if (evicts()) {
        // The node's policy weight may be out of sync due to a pending update waiting to be
        // processed. At this point the node's weight is finalized, so the weight can be safely
        // taken from the node's perspective and the sizes will be adjusted correctly.
        if (node.inWindow()) {
          setWindowWeightedSize(windowWeightedSize() - node.getWeight());
        } else if (node.inMainProtected()) {
          setMainProtectedWeightedSize(mainProtectedWeightedSize() - node.getWeight());
        }
        setWeightedSize(weightedSize() - node.getWeight());
      }
      node.die();
    }
  }
1✔
1870

1871
  /** Adds the node to the page replacement policy. */
  final class AddTask implements Runnable {
    final Node<K, V> node;
    // The weight at insertion time; the node's own weight may have since been updated
    final int weight;

    AddTask(Node<K, V> node, int weight) {
      this.weight = weight;
      this.node = node;
    }

    @Override
    @GuardedBy("evictionLock")
    public void run() {
      if (evicts()) {
        setWeightedSize(weightedSize() + weight);
        setWindowWeightedSize(windowWeightedSize() + weight);
        node.setPolicyWeight(node.getPolicyWeight() + weight);

        long maximum = maximum();
        if (weightedSize() >= (maximum >>> 1)) {
          if (weightedSize() > MAXIMUM_CAPACITY) {
            evictEntries();
          } else {
            // Lazily initialize when close to the maximum
            long capacity = isWeighted() ? data.mappingCount() : maximum;
            frequencySketch().ensureCapacity(capacity);
          }
        }

        K key = node.getKey();
        if (key != null) {
          frequencySketch().increment(key);
        }

        setMissesInSample(missesInSample() + 1);
      }

      // ignore out-of-order write operations
      boolean isAlive;
      synchronized (node) {
        isAlive = node.isAlive();
      }
      if (isAlive) {
        if (expiresAfterWrite()) {
          writeOrderDeque().offerLast(node);
        }
        if (expiresVariable()) {
          timerWheel().schedule(node);
        }
        if (evicts()) {
          if (weight > maximum()) {
            // Too large to ever fit; evict immediately
            evictEntry(node, RemovalCause.SIZE, expirationTicker().read());
          } else if (weight > windowMaximum()) {
            // Oversized for the window; place at the LRU position so it is the next candidate
            accessOrderWindowDeque().offerFirst(node);
          } else {
            accessOrderWindowDeque().offerLast(node);
          }
        } else if (expiresAfterAccess()) {
          accessOrderWindowDeque().offerLast(node);
        }
      }
    }
  }
1934

1935
  /** Removes a node from the page replacement policy. */
  final class RemovalTask implements Runnable {
    final Node<K, V> node;

    RemovalTask(Node<K, V> node) {
      this.node = node;
    }

    @Override
    @GuardedBy("evictionLock")
    public void run() {
      // add may not have been processed yet
      if (node.inWindow() && (evicts() || expiresAfterAccess())) {
        accessOrderWindowDeque().remove(node);
      } else if (evicts()) {
        if (node.inMainProbation()) {
          accessOrderProbationDeque().remove(node);
        } else {
          accessOrderProtectedDeque().remove(node);
        }
      }
      if (expiresAfterWrite()) {
        writeOrderDeque().remove(node);
      } else if (expiresVariable()) {
        timerWheel().deschedule(node);
      }
      // Finalize the state transition and adjust the weighted sizes
      makeDead(node);
    }
  }
1964

1965
  /** Updates the weighted size and policy metadata after an entry's value was replaced. */
  final class UpdateTask implements Runnable {
    final int weightDifference;
    final Node<K, V> node;

    public UpdateTask(Node<K, V> node, int weightDifference) {
      this.weightDifference = weightDifference;
      this.node = node;
    }

    @Override
    @GuardedBy("evictionLock")
    public void run() {
      if (expiresAfterWrite()) {
        reorder(writeOrderDeque(), node);
      } else if (expiresVariable()) {
        timerWheel().reschedule(node);
      }
      if (evicts()) {
        int oldWeightedSize = node.getPolicyWeight();
        node.setPolicyWeight(oldWeightedSize + weightDifference);
        if (node.inWindow()) {
          setWindowWeightedSize(windowWeightedSize() + weightDifference);
          if (node.getPolicyWeight() > maximum()) {
            // Now larger than the entire cache; must be discarded
            evictEntry(node, RemovalCause.SIZE, expirationTicker().read());
          } else if (node.getPolicyWeight() <= windowMaximum()) {
            onAccess(node);
          } else if (accessOrderWindowDeque().contains(node)) {
            // Oversized for the window; move to the LRU position as the next eviction candidate
            accessOrderWindowDeque().moveToFront(node);
          }
        } else if (node.inMainProbation()) {
            if (node.getPolicyWeight() <= maximum()) {
              onAccess(node);
            } else {
              evictEntry(node, RemovalCause.SIZE, expirationTicker().read());
            }
        } else if (node.inMainProtected()) {
          setMainProtectedWeightedSize(mainProtectedWeightedSize() + weightDifference);
          if (node.getPolicyWeight() <= maximum()) {
            onAccess(node);
          } else {
            evictEntry(node, RemovalCause.SIZE, expirationTicker().read());
          }
        }

        setWeightedSize(weightedSize() + weightDifference);
        if (weightedSize() > MAXIMUM_CAPACITY) {
          evictEntries();
        }
      } else if (expiresAfterAccess()) {
        onAccess(node);
      }
    }
  }
2019

2020
  /* --------------- Concurrent Map Support --------------- */
2021

2022
  @Override
  public boolean isEmpty() {
    // Delegates to the backing map; retired entries pending removal may still be counted
    return data.isEmpty();
  }
2026

2027
  @Override
  public int size() {
    // Delegates to the backing map; may transiently include expired or retired entries
    return data.size();
  }
2031

2032
  @Override
  public long estimatedSize() {
    // A long-valued estimate; preferred over size() which saturates at Integer.MAX_VALUE
    return data.mappingCount();
  }
2036

2037
  @Override
  public void clear() {
    Deque<Node<K, V>> entries;
    evictionLock.lock();
    try {
      // Discard all pending reads
      readBuffer.drainTo(e -> {});

      // Apply all pending writes
      @Var Runnable task;
      while ((task = writeBuffer.poll()) != null) {
        task.run();
      }

      // Cancel the scheduled cleanup
      Pacer pacer = pacer();
      if (pacer != null) {
        pacer.cancel();
      }

      // Discard all entries, falling back to one-by-one to avoid excessive lock hold times
      long now = expirationTicker().read();
      int threshold = (WRITE_BUFFER_MAX / 2);
      entries = new ArrayDeque<>(data.values());
      while (!entries.isEmpty() && (writeBuffer.size() < threshold)) {
        removeNode(entries.poll(), now);
      }
    } finally {
      evictionLock.unlock();
    }

    // Remove any stragglers if released early to more aggressively flush incoming writes
    @Var boolean cleanUp = false;
    for (var node : entries) {
      var key = node.getKey();
      if (key == null) {
        // Key was garbage collected; only a maintenance pass can remove this entry
        cleanUp = true;
      } else {
        remove(key);
      }
    }
    if (collectKeys() && cleanUp) {
      cleanUp();
    }
  }
1✔
2082

2083
  // Atomically unmaps the node, notifies listeners, and unlinks it from the policy structures.
  // Called while holding the eviction lock (e.g. from clear()).
  @GuardedBy("evictionLock")
  @SuppressWarnings("GuardedByChecker")
  void removeNode(Node<K, V> node, long now) {
    K key = node.getKey();
    var cause = new RemovalCause[1];
    var keyReference = node.getKeyReference();
    @SuppressWarnings({"unchecked", "Varifier"})
    @Nullable V[] value = (V[]) new Object[1];

    data.computeIfPresent(keyReference, (k, n) -> {
      if (n != node) {
        // A different entry now owns this key; leave it untouched
        return n;
      }
      synchronized (n) {
        value[0] = n.getValue();

        // Classify the removal: collected (key/value GC'd), expired, or an explicit clear
        if ((key == null) || (value[0] == null)) {
          cause[0] = RemovalCause.COLLECTED;
        } else if (hasExpired(n, now)) {
          cause[0] = RemovalCause.EXPIRED;
        } else {
          cause[0] = RemovalCause.EXPLICIT;
        }

        if (cause[0].wasEvicted()) {
          notifyEviction(key, value[0], cause[0]);
        }

        discardRefresh(node.getKeyReference());
        node.retire();
        return null;
      }
    });

    if (node.inWindow() && (evicts() || expiresAfterAccess())) {
      accessOrderWindowDeque().remove(node);
    } else if (evicts()) {
      if (node.inMainProbation()) {
        accessOrderProbationDeque().remove(node);
      } else {
        accessOrderProtectedDeque().remove(node);
      }
    }
    if (expiresAfterWrite()) {
      writeOrderDeque().remove(node);
    } else if (expiresVariable()) {
      timerWheel().deschedule(node);
    }

    synchronized (node) {
      logIfAlive(node);
      makeDead(node);
    }

    // The removal listener runs after the mapping is gone; cause is null if another
    // thread already replaced the entry in the map
    if (cause[0] != null) {
      notifyRemoval(key, value[0], cause[0]);
    }
  }
1✔
2141

2142
  @Override
2143
  public boolean containsKey(Object key) {
2144
    Node<K, V> node = data.get(nodeFactory.newLookupKey(key));
1✔
2145
    return (node != null) && (node.getValue() != null)
1✔
2146
        && !hasExpired(node, expirationTicker().read());
1✔
2147
  }
2148

2149
  @Override
2150
  @SuppressWarnings("SuspiciousMethodCalls")
2151
  public boolean containsValue(Object value) {
2152
    requireNonNull(value);
1✔
2153

2154
    long now = expirationTicker().read();
1✔
2155
    for (Node<K, V> node : data.values()) {
1✔
2156
      if (node.containsValue(value) && !hasExpired(node, now) && (node.getKey() != null)) {
1✔
2157
        return true;
1✔
2158
      }
2159
    }
1✔
2160
    return false;
1✔
2161
  }
2162

2163
  @Override
  public @Nullable V get(Object key) {
    // Reads through the Map view do not record hit/miss statistics
    return getIfPresent(key, /* recordStats= */ false);
  }
2167

2168
  @Override
  public @Nullable V getIfPresent(Object key, boolean recordStats) {
    Node<K, V> node = data.get(nodeFactory.newLookupKey(key));
    if (node == null) {
      if (recordStats) {
        statsCounter().recordMisses(1);
      }
      // Piggyback pending maintenance onto the miss
      if (drainStatusOpaque() == REQUIRED) {
        scheduleDrainBuffers();
      }
      return null;
    }

    V value = node.getValue();
    long now = expirationTicker().read();
    if (hasExpired(node, now) || (collectValues() && (value == null))) {
      // Treat an expired or collected entry as absent and trigger its removal
      if (recordStats) {
        statsCounter().recordMisses(1);
      }
      scheduleDrainBuffers();
      return null;
    }

    // An in-flight async computation must not have its expiration reset by this read
    if (!isComputingAsync(value)) {
      @SuppressWarnings("unchecked")
      var castedKey = (K) key;
      setAccessTime(node, now);
      tryExpireAfterRead(node, castedKey, value, expiry(), now);
    }
    V refreshed = afterRead(node, now, recordStats);
    return (refreshed == null) ? value : refreshed;
  }
2200

2201
  @Override
2202
  public @Nullable V getIfPresentQuietly(Object key) {
2203
    V value;
2204
    Node<K, V> node = data.get(nodeFactory.newLookupKey(key));
1✔
2205
    if ((node == null) || ((value = node.getValue()) == null)
1✔
2206
        || hasExpired(node, expirationTicker().read())) {
1✔
2207
      return null;
1✔
2208
    }
2209
    return value;
1✔
2210
  }
2211

2212
  /**
2213
   * Returns the key associated with the mapping in this cache, or {@code null} if there is none.
2214
   *
2215
   * @param key the key whose canonical instance is to be returned
2216
   * @return the key used by the mapping, or {@code null} if this cache does not contain a mapping
2217
   *         for the key
2218
   * @throws NullPointerException if the specified key is null
2219
   */
2220
  public @Nullable K getKey(K key) {
2221
    Node<K, V> node = data.get(nodeFactory.newLookupKey(key));
1✔
2222
    if (node == null) {
1✔
2223
      if (drainStatusOpaque() == REQUIRED) {
1✔
2224
        scheduleDrainBuffers();
1✔
2225
      }
2226
      return null;
1✔
2227
    }
2228
    afterRead(node, /* now= */ 0L, /* recordHit= */ false);
1✔
2229
    return node.getKey();
1✔
2230
  }
2231

2232
  @Override
  public Map<K, V> getAllPresent(Iterable<? extends K> keys) {
    // Seed the result with null placeholders to preserve iteration order and deduplicate keys
    var result = new LinkedHashMap<K, V>(calculateHashMapCapacity(keys));
    for (K key : keys) {
      result.put(key, null);
    }

    int uniqueKeys = result.size();
    long now = expirationTicker().read();
    for (var iter = result.entrySet().iterator(); iter.hasNext();) {
      V value;
      var entry = iter.next();
      Node<K, V> node = data.get(nodeFactory.newLookupKey(entry.getKey()));
      if ((node == null) || ((value = node.getValue()) == null) || hasExpired(node, now)) {
        // Absent, collected, or expired: drop the placeholder and count it as a miss
        iter.remove();
      } else {
        // An in-flight async computation must not have its expiration reset by this read
        if (!isComputingAsync(value)) {
          tryExpireAfterRead(node, entry.getKey(), value, expiry(), now);
          setAccessTime(node, now);
        }
        V refreshed = afterRead(node, now, /* recordHit= */ false);
        entry.setValue((refreshed == null) ? value : refreshed);
      }
    }
    statsCounter().recordHits(result.size());
    statsCounter().recordMisses(uniqueKeys - result.size());

    return Collections.unmodifiableMap(result);
  }
2261

2262
  @Override
  public void putAll(Map<? extends K, ? extends V> map) {
    // Entries are inserted individually; there is no bulk-write fast path
    map.forEach(this::put);
  }
2266

2267
  @Override
  public @Nullable V put(K key, V value) {
    // Unconditional write using the cache's configured expiry policy
    return put(key, value, expiry(), /* onlyIfAbsent= */ false);
  }
2271

2272
  @Override
  public @Nullable V putIfAbsent(K key, V value) {
    // Conditional write: retains the existing live mapping if one is present
    return put(key, value, expiry(), /* onlyIfAbsent= */ true);
  }
2276

2277
  /**
2278
   * Adds a node to the policy and the data store. If an existing node is found, then its value is
2279
   * updated if allowed.
2280
   *
2281
   * @param key key with which the specified value is to be associated
2282
   * @param value value to be associated with the specified key
2283
   * @param expiry the calculator for the write expiration time
2284
   * @param onlyIfAbsent a write is performed only if the key is not already associated with a value
2285
   * @return the prior value in or null if no mapping was found
2286
   */
2287
  @Nullable V put(K key, V value, Expiry<K, V> expiry, boolean onlyIfAbsent) {
2288
    requireNonNull(key);
1✔
2289
    requireNonNull(value);
1✔
2290

2291
    @Var Node<K, V> node = null;
1✔
2292
    long now = expirationTicker().read();
1✔
2293
    int newWeight = weigher.weigh(key, value);
1✔
2294
    Object lookupKey = nodeFactory.newLookupKey(key);
1✔
2295
    for (int attempts = 1; ; attempts++) {
1✔
2296
      @Var Node<K, V> prior = data.get(lookupKey);
1✔
2297
      if (prior == null) {
1✔
2298
        if (node == null) {
1✔
2299
          node = nodeFactory.newNode(key, keyReferenceQueue(),
1✔
2300
              value, valueReferenceQueue(), newWeight, now);
1✔
2301
          setVariableTime(node, expireAfterCreate(key, value, expiry, now));
1✔
2302
          long expirationTime = isComputingAsync(value) ? (now + ASYNC_EXPIRY) : now;
1✔
2303
          setAccessTime(node, expirationTime);
1✔
2304
          setWriteTime(node, expirationTime);
1✔
2305
        }
2306
        prior = data.putIfAbsent(node.getKeyReference(), node);
1✔
2307
        if (prior == null) {
1✔
2308
          afterWrite(new AddTask(node, newWeight));
1✔
2309
          return null;
1✔
2310
        } else if (onlyIfAbsent) {
1✔
2311
          // An optimistic fast path to avoid unnecessary locking
2312
          V currentValue = prior.getValue();
1✔
2313
          if ((currentValue != null) && !hasExpired(prior, now)) {
1✔
2314
            if (!isComputingAsync(currentValue)) {
1✔
2315
              tryExpireAfterRead(prior, key, currentValue, expiry(), now);
1✔
2316
              setAccessTime(prior, now);
1✔
2317
            }
2318
            afterRead(prior, now, /* recordHit= */ false);
1✔
2319
            return currentValue;
1✔
2320
          }
2321
        }
1✔
2322
      } else if (onlyIfAbsent) {
1✔
2323
        // An optimistic fast path to avoid unnecessary locking
2324
        V currentValue = prior.getValue();
1✔
2325
        if ((currentValue != null) && !hasExpired(prior, now)) {
1✔
2326
          if (!isComputingAsync(currentValue)) {
1✔
2327
            tryExpireAfterRead(prior, key, currentValue, expiry(), now);
1✔
2328
            setAccessTime(prior, now);
1✔
2329
          }
2330
          afterRead(prior, now, /* recordHit= */ false);
1✔
2331
          return currentValue;
1✔
2332
        }
2333
      }
2334

2335
      // A read may race with the entry's removal, so that after the entry is acquired it may no
2336
      // longer be usable. A retry will reread from the map and either find an absent mapping, a
2337
      // new entry, or a stale entry.
2338
      if (!prior.isAlive()) {
1✔
2339
        // A reread of the stale entry may occur if the state transition occurred but the map
2340
        // removal was delayed by a context switch, so that this thread spin waits until resolved.
2341
        if ((attempts & MAX_PUT_SPIN_WAIT_ATTEMPTS) != 0) {
1✔
2342
          Thread.onSpinWait();
1✔
2343
          continue;
1✔
2344
        }
2345

2346
        // If the spin wait attempts are exhausted then fallback to a map computation in order to
2347
        // deschedule this thread until the entry's removal completes. If the key was modified
2348
        // while in the map so that its equals or hashCode changed then the contents may be
2349
        // corrupted, where the cache holds an evicted (dead) entry that could not be removed.
2350
        // That is a violation of the Map contract, so we check that the mapping is in the "alive"
2351
        // state while in the computation.
2352
        data.computeIfPresent(lookupKey, (k, n) -> {
1✔
2353
          requireIsAlive(key, n);
1✔
2354
          return n;
1✔
2355
        });
2356
        continue;
1✔
2357
      }
2358

2359
      V oldValue;
2360
      long varTime;
2361
      int oldWeight;
2362
      @Var boolean expired = false;
1✔
2363
      @Var boolean mayUpdate = true;
1✔
2364
      @Var boolean exceedsTolerance = false;
1✔
2365
      synchronized (prior) {
1✔
2366
        if (!prior.isAlive()) {
1✔
2367
          continue;
1✔
2368
        }
2369
        oldValue = prior.getValue();
1✔
2370
        oldWeight = prior.getWeight();
1✔
2371
        if (oldValue == null) {
1✔
2372
          varTime = expireAfterCreate(key, value, expiry, now);
1✔
2373
          notifyEviction(key, null, RemovalCause.COLLECTED);
1✔
2374
        } else if (hasExpired(prior, now)) {
1✔
2375
          expired = true;
1✔
2376
          varTime = expireAfterCreate(key, value, expiry, now);
1✔
2377
          notifyEviction(key, oldValue, RemovalCause.EXPIRED);
1✔
2378
        } else if (onlyIfAbsent) {
1✔
2379
          mayUpdate = false;
1✔
2380
          varTime = expireAfterRead(prior, key, value, expiry, now);
1✔
2381
        } else {
2382
          varTime = expireAfterUpdate(prior, key, value, expiry, now);
1✔
2383
        }
2384

2385
        long expirationTime = isComputingAsync(value) ? (now + ASYNC_EXPIRY) : now;
1✔
2386
        if (mayUpdate) {
1✔
2387
          exceedsTolerance =
1✔
2388
              (expiresAfterWrite() && (now - prior.getWriteTime()) > EXPIRE_WRITE_TOLERANCE)
1✔
2389
              || (expiresVariable()
1✔
2390
                  && Math.abs(varTime - prior.getVariableTime()) > EXPIRE_WRITE_TOLERANCE);
1✔
2391
          setWriteTime(prior, expirationTime);
1✔
2392

2393
          prior.setValue(value, valueReferenceQueue());
1✔
2394
          prior.setWeight(newWeight);
1✔
2395

2396
          discardRefresh(prior.getKeyReference());
1✔
2397
        }
2398

2399
        setVariableTime(prior, varTime);
1✔
2400
        setAccessTime(prior, expirationTime);
1✔
2401
      }
1✔
2402

2403
      if (expired) {
1✔
2404
        notifyRemoval(key, oldValue, RemovalCause.EXPIRED);
1✔
2405
      } else if (oldValue == null) {
1✔
2406
        notifyRemoval(key, /* value= */ null, RemovalCause.COLLECTED);
1✔
2407
      } else if (mayUpdate) {
1✔
2408
        notifyOnReplace(key, oldValue, value);
1✔
2409
      }
2410

2411
      int weightedDifference = mayUpdate ? (newWeight - oldWeight) : 0;
1✔
2412
      if ((oldValue == null) || (weightedDifference != 0) || expired) {
1✔
2413
        afterWrite(new UpdateTask(prior, weightedDifference));
1✔
2414
      } else if (!onlyIfAbsent && exceedsTolerance) {
1✔
2415
        afterWrite(new UpdateTask(prior, weightedDifference));
1✔
2416
      } else {
2417
        afterRead(prior, now, /* recordHit= */ false);
1✔
2418
      }
2419

2420
      return expired ? null : oldValue;
1✔
2421
    }
2422
  }
2423

2424
  @Override
  public @Nullable V remove(Object key) {
    // Safe: the cast key is only handed back out to the eviction/removal listeners
    @SuppressWarnings("unchecked")
    var castKey = (K) key;
    // Single-element arrays act as mutable out-parameters for the computeIfPresent lambda below
    @SuppressWarnings({"rawtypes", "unchecked"})
    Node<K, V>[] node = new Node[1];
    @SuppressWarnings({"unchecked", "Varifier"})
    @Nullable V[] oldValue = (V[]) new Object[1];
    RemovalCause[] cause = new RemovalCause[1];
    Object lookupKey = nodeFactory.newLookupKey(key);

    // Unmap atomically, classifying why the entry is going away: a reference-collected value,
    // an expired entry, or an explicit user-requested removal
    data.computeIfPresent(lookupKey, (k, n) -> {
      // The entry's state transition is guarded by its own monitor
      synchronized (n) {
        requireIsAlive(key, n);
        oldValue[0] = n.getValue();
        if (oldValue[0] == null) {
          cause[0] = RemovalCause.COLLECTED;
        } else if (hasExpired(n, expirationTicker().read())) {
          cause[0] = RemovalCause.EXPIRED;
        } else {
          cause[0] = RemovalCause.EXPLICIT;
        }
        if (cause[0].wasEvicted()) {
          notifyEviction(castKey, oldValue[0], cause[0]);
        }
        discardRefresh(lookupKey);
        node[0] = n;
        n.retire();
      }
      // Returning null removes the mapping from the hash table
      return null;
    });

    // Replay the removal through the policy and notify listeners outside the map computation
    if (cause[0] != null) {
      afterWrite(new RemovalTask(node[0]));
      notifyRemoval(castKey, oldValue[0], cause[0]);
    }
    // Per the Map contract, only an explicit removal reports the previous value
    return (cause[0] == RemovalCause.EXPLICIT) ? oldValue[0] : null;
  }
2462

2463
  @Override
  public boolean remove(Object key, Object value) {
    requireNonNull(key);
    // Per the Map contract a null value can never match, so the mapping is left untouched
    if (value == null) {
      return false;
    }

    // Single-element arrays act as mutable out-parameters for the computeIfPresent lambda below
    @SuppressWarnings({"rawtypes", "unchecked"})
    Node<K, V>[] removed = new Node[1];
    @SuppressWarnings({"unchecked", "Varifier"})
    @Nullable K[] oldKey = (K[]) new Object[1];
    @SuppressWarnings({"unchecked", "Varifier"})
    @Nullable V[] oldValue = (V[]) new Object[1];
    RemovalCause[] cause = new RemovalCause[1];
    Object lookupKey = nodeFactory.newLookupKey(key);

    data.computeIfPresent(lookupKey, (kR, node) -> {
      // The entry's state transition is guarded by its own monitor
      synchronized (node) {
        requireIsAlive(key, node);
        oldKey[0] = node.getKey();
        oldValue[0] = node.getValue();
        if ((oldKey[0] == null) || (oldValue[0] == null)) {
          cause[0] = RemovalCause.COLLECTED;
        } else if (hasExpired(node, expirationTicker().read())) {
          cause[0] = RemovalCause.EXPIRED;
        } else if (node.containsValue(value)) {
          cause[0] = RemovalCause.EXPLICIT;
        } else {
          // A live entry holding a different value: keep the mapping as-is
          return node;
        }
        if (cause[0].wasEvicted()) {
          notifyEviction(oldKey[0], oldValue[0], cause[0]);
        }
        discardRefresh(lookupKey);
        removed[0] = node;
        node.retire();
        return null;
      }
    });

    if (removed[0] == null) {
      return false;
    }
    // Replay the removal through the policy and notify listeners outside the map computation;
    // a dead (collected/expired) entry is unmapped too, but does not count as a match
    afterWrite(new RemovalTask(removed[0]));
    notifyRemoval(oldKey[0], oldValue[0], cause[0]);

    return (cause[0] == RemovalCause.EXPLICIT);
  }
2511

2512
  @Override
  public @Nullable V replace(K key, V value) {
    requireNonNull(key);
    requireNonNull(value);

    // Single-element arrays act as mutable out-parameters for the computeIfPresent lambda below
    long[] now = new long[1];
    var oldWeight = new int[1];
    @SuppressWarnings({"unchecked", "Varifier"})
    @Nullable K[] nodeKey = (K[]) new Object[1];
    @SuppressWarnings({"unchecked", "Varifier"})
    @Nullable V[] oldValue = (V[]) new Object[1];
    // Weigh outside of the computation to minimize work performed under the map's lock
    int weight = weigher.weigh(key, value);
    Node<K, V> node = data.computeIfPresent(nodeFactory.newLookupKey(key), (k, n) -> {
      synchronized (n) {
        requireIsAlive(key, n);
        nodeKey[0] = n.getKey();
        oldValue[0] = n.getValue();
        oldWeight[0] = n.getWeight();
        // A collected or expired entry is treated as absent (oldValue reset to null)
        if ((nodeKey[0] == null) || (oldValue[0] == null)
            || hasExpired(n, now[0] = expirationTicker().read())) {
          oldValue[0] = null;
          return n;
        }

        long varTime = expireAfterUpdate(n, key, value, expiry(), now[0]);
        n.setValue(value, valueReferenceQueue());
        n.setWeight(weight);

        // A value that is still computing asynchronously is kept from expiring by pinning its
        // timestamps far into the future (now + ASYNC_EXPIRY) until the future completes
        long expirationTime = isComputingAsync(value) ? (now[0] + ASYNC_EXPIRY) : now[0];
        setAccessTime(n, expirationTime);
        setWriteTime(n, expirationTime);
        setVariableTime(n, varTime);

        discardRefresh(k);
        return n;
      }
    });

    if ((nodeKey[0] == null) || (oldValue[0] == null)) {
      return null;
    }

    // Replay the size change through the eviction policy, or record a read if unchanged
    int weightedDifference = (weight - oldWeight[0]);
    if (expiresAfterWrite() || (weightedDifference != 0)) {
      afterWrite(new UpdateTask(node, weightedDifference));
    } else {
      afterRead(node, now[0], /* recordHit= */ false);
    }

    notifyOnReplace(nodeKey[0], oldValue[0], value);
    return oldValue[0];
  }
2564

2565
  @Override
2566
  public boolean replace(K key, V oldValue, V newValue) {
2567
    return replace(key, oldValue, newValue, /* shouldDiscardRefresh= */ true);
1✔
2568
  }
2569

2570
  @Override
  public boolean replace(K key, V oldValue, V newValue, boolean shouldDiscardRefresh) {
    requireNonNull(key);
    requireNonNull(oldValue);
    requireNonNull(newValue);

    // Weigh outside of the computation to minimize work performed under the map's lock
    int weight = weigher.weigh(key, newValue);
    // Single-element arrays act as mutable out-parameters for the computeIfPresent lambda below
    @SuppressWarnings({"unchecked", "Varifier"})
    @Nullable K[] nodeKey = (K[]) new Object[1];
    @SuppressWarnings({"unchecked", "Varifier"})
    @Nullable V[] prevValue = (V[]) new Object[1];
    int[] oldWeight = new int[1];
    long[] now = new long[1];
    Node<K, V> node = data.computeIfPresent(nodeFactory.newLookupKey(key), (k, n) -> {
      synchronized (n) {
        requireIsAlive(key, n);
        nodeKey[0] = n.getKey();
        prevValue[0] = n.getValue();
        oldWeight[0] = n.getWeight();
        // A collected, mismatched, or expired entry does not satisfy the conditional replace
        // (prevValue reset to null signals failure to the code after the computation)
        if ((nodeKey[0] == null) || (prevValue[0] == null) || !n.containsValue(oldValue)
            || hasExpired(n, now[0] = expirationTicker().read())) {
          prevValue[0] = null;
          return n;
        }

        long varTime = expireAfterUpdate(n, key, newValue, expiry(), now[0]);
        n.setValue(newValue, valueReferenceQueue());
        n.setWeight(weight);

        // A value that is still computing asynchronously is kept from expiring by pinning its
        // timestamps far into the future (now + ASYNC_EXPIRY) until the future completes
        long expirationTime = isComputingAsync(newValue) ? (now[0] + ASYNC_EXPIRY) : now[0];
        setAccessTime(n, expirationTime);
        setWriteTime(n, expirationTime);
        setVariableTime(n, varTime);

        // Callers in the refresh machinery pass false to retain the in-flight refresh
        if (shouldDiscardRefresh) {
          discardRefresh(k);
        }
      }
      return n;
    });

    if ((nodeKey[0] == null) || (prevValue[0] == null)) {
      return false;
    }

    // Replay the size change through the eviction policy, or record a read if unchanged
    int weightedDifference = (weight - oldWeight[0]);
    if (expiresAfterWrite() || (weightedDifference != 0)) {
      afterWrite(new UpdateTask(node, weightedDifference));
    } else {
      afterRead(node, now[0], /* recordHit= */ false);
    }

    notifyOnReplace(nodeKey[0], prevValue[0], newValue);
    return true;
  }

2626
  @Override
2627
  public void replaceAll(BiFunction<? super K, ? super V, ? extends V> function) {
2628
    requireNonNull(function);
1✔
2629

2630
    BiFunction<K, V, V> remappingFunction = (key, oldValue) ->
1✔
2631
        requireNonNull(function.apply(key, oldValue));
1✔
2632
    for (K key : keySet()) {
1✔
2633
      long[] now = { expirationTicker().read() };
1✔
2634
      Object lookupKey = nodeFactory.newLookupKey(key);
1✔
2635
      remap(key, lookupKey, remappingFunction, expiry(), now, /* computeIfAbsent= */ false);
1✔
2636
    }
1✔
2637
  }
1✔
2638

2639
  @Override
2640
  public @Nullable V computeIfAbsent(K key, @Var Function<? super K, ? extends V> mappingFunction,
2641
      boolean recordStats, boolean recordLoad) {
2642
    requireNonNull(key);
1✔
2643
    requireNonNull(mappingFunction);
1✔
2644
    long now = expirationTicker().read();
1✔
2645

2646
    // An optimistic fast path to avoid unnecessary locking
2647
    Node<K, V> node = data.get(nodeFactory.newLookupKey(key));
1✔
2648
    if (node != null) {
1✔
2649
      V value = node.getValue();
1✔
2650
      if ((value != null) && !hasExpired(node, now)) {
1✔
2651
        if (!isComputingAsync(value)) {
1✔
2652
          tryExpireAfterRead(node, key, value, expiry(), now);
1✔
2653
          setAccessTime(node, now);
1✔
2654
        }
2655
        var refreshed = afterRead(node, now, /* recordHit= */ recordStats);
1✔
2656
        return (refreshed == null) ? value : refreshed;
1✔
2657
      }
2658
    }
2659
    if (recordStats) {
1✔
2660
      mappingFunction = statsAware(mappingFunction, recordLoad);
1✔
2661
    }
2662
    Object keyRef = nodeFactory.newReferenceKey(key, keyReferenceQueue());
1✔
2663
    return doComputeIfAbsent(key, keyRef, mappingFunction, new long[] { now }, recordStats);
1✔
2664
  }
2665

2666
  /** Returns the current value from a computeIfAbsent invocation. */
  @Nullable V doComputeIfAbsent(K key, Object keyRef,
      Function<? super K, ? extends @Nullable V> mappingFunction, long[/* 1 */] now,
      boolean recordStats) {
    // Single-element arrays act as mutable out-parameters for the compute lambda below
    @SuppressWarnings({"unchecked", "Varifier"})
    @Nullable V[] oldValue = (V[]) new Object[1];
    @SuppressWarnings({"unchecked", "Varifier"})
    @Nullable V[] newValue = (V[]) new Object[1];
    @SuppressWarnings({"unchecked", "Varifier"})
    @Nullable K[] nodeKey = (K[]) new Object[1];
    @SuppressWarnings({"rawtypes", "unchecked"})
    Node<K, V>[] removed = new Node[1];

    int[] weight = new int[2]; // old, new
    RemovalCause[] cause = new RemovalCause[1];
    Node<K, V> node = data.compute(keyRef, (k, n) -> {
      if (n == null) {
        // Absent mapping: run the loader and, if it produced a value, create a new entry
        newValue[0] = mappingFunction.apply(key);
        if (newValue[0] == null) {
          return null;
        }
        // Re-read the clock so the timestamps reflect the (possibly long) load
        now[0] = expirationTicker().read();
        weight[1] = weigher.weigh(key, newValue[0]);
        var created = nodeFactory.newNode(key, keyReferenceQueue(),
            newValue[0], valueReferenceQueue(), weight[1], now[0]);
        setVariableTime(created, expireAfterCreate(key, newValue[0], expiry(), now[0]));
        // An in-flight async compute is pinned far into the future so it cannot expire early
        long expirationTime = isComputingAsync(newValue[0]) ? (now[0] + ASYNC_EXPIRY) : now[0];
        setAccessTime(created, expirationTime);
        setWriteTime(created, expirationTime);
        return created;
      }

      synchronized (n) {
        requireIsAlive(key, n);
        nodeKey[0] = n.getKey();
        weight[0] = n.getWeight();
        oldValue[0] = n.getValue();
        if ((nodeKey[0] == null) || (oldValue[0] == null)) {
          cause[0] = RemovalCause.COLLECTED;
        } else if (hasExpired(n, now[0])) {
          cause[0] = RemovalCause.EXPIRED;
        } else {
          // A live entry already holds a value, so the mapping function is not invoked
          return n;
        }

        if (cause[0].wasEvicted()) {
          notifyEviction(nodeKey[0], oldValue[0], cause[0]);
        }
        // The prior entry was dead; recompute as if the mapping were absent, reusing the node
        newValue[0] = mappingFunction.apply(key);
        if (newValue[0] == null) {
          removed[0] = n;
          n.retire();
          return null;
        }
        now[0] = expirationTicker().read();
        weight[1] = weigher.weigh(key, newValue[0]);
        long varTime = expireAfterCreate(key, newValue[0], expiry(), now[0]);

        n.setValue(newValue[0], valueReferenceQueue());
        n.setWeight(weight[1]);

        setVariableTime(n, varTime);
        if (isComputingAsync(newValue[0])) {
          // Pin the timestamps until the async computation completes
          long expirationTime = now[0] + ASYNC_EXPIRY;
          setAccessTime(n, expirationTime);
          setWriteTime(n, expirationTime);
        } else {
          setAccessTime(n, now[0]);
          setWriteTime(n, now[0]);
        }
        discardRefresh(k);
        return n;
      }
    });

    // Publish stats and listener notifications for an entry that was discarded in-place
    if (cause[0] != null) {
      if (cause[0].wasEvicted()) {
        statsCounter().recordEviction(weight[0], cause[0]);
      }
      notifyRemoval(nodeKey[0], oldValue[0], cause[0]);
    }
    if (node == null) {
      if (removed[0] != null) {
        afterWrite(new RemovalTask(removed[0]));
      }
      return null;
    }
    if (newValue[0] == null) {
      // The existing live value was returned; record it as a read
      if (!isComputingAsync(oldValue[0])) {
        tryExpireAfterRead(node, key, oldValue[0], expiry(), now[0]);
        setAccessTime(node, now[0]);
      }

      afterRead(node, now[0], /* recordHit= */ recordStats);
      return oldValue[0];
    }
    if ((oldValue[0] == null) && (cause[0] == null)) {
      // A brand new entry was inserted
      afterWrite(new AddTask(node, weight[1]));
    } else {
      // A dead entry was resurrected with a new value; replay the weight delta
      int weightedDifference = (weight[1] - weight[0]);
      afterWrite(new UpdateTask(node, weightedDifference));
    }

    return newValue[0];
  }
2771

2772
  @Override
  public @Nullable V computeIfPresent(K key,
      BiFunction<? super K, ? super V, ? extends V> remappingFunction) {
    requireNonNull(key);
    requireNonNull(remappingFunction);

    // An optimistic fast path to avoid unnecessary locking
    Object lookupKey = nodeFactory.newLookupKey(key);
    @Nullable Node<K, V> node = data.get(lookupKey);
    long now;
    if (node == null) {
      return null;
    } else if ((node.getValue() == null) || hasExpired(node, (now = expirationTicker().read()))) {
      // A dead entry was observed; kick off maintenance so it is swept out, then report absent
      scheduleDrainBuffers();
      return null;
    }

    // Delegate to the shared remapping routine with statistics recording wrapped around the
    // user's function; computeIfAbsent=false so an absent mapping is never created
    BiFunction<? super K, ? super V, ? extends V> statsAwareRemappingFunction =
        statsAware(remappingFunction, /* recordLoad= */ true, /* recordLoadFailure= */ true);
    return remap(key, lookupKey, statsAwareRemappingFunction,
        expiry(), new long[] { now }, /* computeIfAbsent= */ false);
  }
2794

2795
  @Override
2796
  @SuppressWarnings("NullAway")
2797
  public @Nullable V compute(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction,
2798
      @Nullable Expiry<? super K, ? super V> expiry, boolean recordLoad,
2799
      boolean recordLoadFailure) {
2800
    requireNonNull(key);
1✔
2801
    requireNonNull(remappingFunction);
1✔
2802

2803
    long[] now = { expirationTicker().read() };
1✔
2804
    Object keyRef = nodeFactory.newReferenceKey(key, keyReferenceQueue());
1✔
2805
    BiFunction<? super K, ? super V, ? extends V> statsAwareRemappingFunction =
1✔
2806
        statsAware(remappingFunction, recordLoad, recordLoadFailure);
1✔
2807
    return remap(key, keyRef, statsAwareRemappingFunction,
1✔
2808
        expiry, now, /* computeIfAbsent= */ true);
2809
  }
2810

2811
  @Override
2812
  public @Nullable V merge(K key, V value,
2813
      BiFunction<? super V, ? super V, ? extends V> remappingFunction) {
2814
    requireNonNull(key);
1✔
2815
    requireNonNull(value);
1✔
2816
    requireNonNull(remappingFunction);
1✔
2817

2818
    long[] now = { expirationTicker().read() };
1✔
2819
    Object keyRef = nodeFactory.newReferenceKey(key, keyReferenceQueue());
1✔
2820
    BiFunction<? super K, ? super V, ? extends V> mergeFunction = (k, oldValue) ->
1✔
2821
        (oldValue == null) ? value : statsAware(remappingFunction).apply(oldValue, value);
1✔
2822
    return remap(key, keyRef, mergeFunction, expiry(), now, /* computeIfAbsent= */ true);
1✔
2823
  }
2824

2825
  /**
2826
   * Attempts to compute a mapping for the specified key and its current mapped value (or
2827
   * {@code null} if there is no current mapping).
2828
   * <p>
2829
   * An entry that has expired or been reference collected is evicted and the computation continues
2830
   * as if the entry had not been present. This method does not pre-screen and does not wrap the
2831
   * remappingFunction to be statistics aware.
2832
   *
2833
   * @param key key with which the specified value is to be associated
2834
   * @param keyRef the key to associate with or a lookup only key if not {@code computeIfAbsent}
2835
   * @param remappingFunction the function to compute a value
2836
   * @param expiry the calculator for the expiration time
2837
   * @param now the current time, according to the ticker
2838
   * @param computeIfAbsent if an absent entry can be computed
2839
   * @return the new value associated with the specified key, or null if none
2840
   */
2841
  @SuppressWarnings("PMD.EmptyControlStatement")
  @Nullable V remap(K key, Object keyRef,
      BiFunction<? super K, ? super V, ? extends @Nullable V> remappingFunction,
      Expiry<? super K, ? super V> expiry, long[/* 1 */] now, boolean computeIfAbsent) {
    // Single-element arrays act as mutable out-parameters for the compute lambda below
    @SuppressWarnings({"unchecked", "Varifier"})
    @Nullable K[] nodeKey = (K[]) new Object[1];
    @SuppressWarnings({"unchecked", "Varifier"})
    @Nullable V[] oldValue = (V[]) new Object[1];
    @SuppressWarnings({"unchecked", "Varifier"})
    @Nullable V[] newValue = (V[]) new Object[1];
    @SuppressWarnings({"rawtypes", "unchecked"})
    Node<K, V>[] removed = new Node[1];

    var weight = new int[2]; // old, new
    var cause = new RemovalCause[1];

    Node<K, V> node = data.compute(keyRef, (kr, n) -> {
      if (n == null) {
        // Absent mapping: only compute when the caller permits insertion
        if (!computeIfAbsent) {
          return null;
        }
        newValue[0] = remappingFunction.apply(key, null);
        if (newValue[0] == null) {
          return null;
        }
        // Re-read the clock so the timestamps reflect the (possibly long) computation
        now[0] = expirationTicker().read();
        weight[1] = weigher.weigh(key, newValue[0]);
        long varTime = expireAfterCreate(key, newValue[0], expiry, now[0]);
        var created = nodeFactory.newNode(keyRef, newValue[0],
            valueReferenceQueue(), weight[1], now[0]);

        // An in-flight async compute is pinned far into the future so it cannot expire early
        long expirationTime = isComputingAsync(newValue[0]) ? (now[0] + ASYNC_EXPIRY) : now[0];
        setAccessTime(created, expirationTime);
        setWriteTime(created, expirationTime);
        setVariableTime(created, varTime);

        discardRefresh(key);
        return created;
      }

      synchronized (n) {
        requireIsAlive(key, n);
        nodeKey[0] = n.getKey();
        oldValue[0] = n.getValue();
        // Classify a dead entry so the computation behaves as if the mapping were absent
        if ((nodeKey[0] == null) || (oldValue[0] == null)) {
          cause[0] = RemovalCause.COLLECTED;
        } else if (hasExpired(n, expirationTicker().read())) {
          cause[0] = RemovalCause.EXPIRED;
        }
        if (cause[0] != null) {
          notifyEviction(nodeKey[0], oldValue[0], cause[0]);
          if (!computeIfAbsent) {
            // The dead entry is discarded and no new mapping may be created
            removed[0] = n;
            n.retire();
            return null;
          }
        }

        // A dead entry presents null to the function, mimicking an absent mapping
        newValue[0] = remappingFunction.apply(nodeKey[0],
            (cause[0] == null) ? oldValue[0] : null);
        if (newValue[0] == null) {
          if (cause[0] == null) {
            cause[0] = RemovalCause.EXPLICIT;
            discardRefresh(kr);
          }
          removed[0] = n;
          n.retire();
          return null;
        }

        long varTime;
        weight[0] = n.getWeight();
        weight[1] = weigher.weigh(key, newValue[0]);
        now[0] = expirationTicker().read();
        if (cause[0] == null) {
          // An identity-equal result is treated as a read rather than a replacement
          if (newValue[0] != oldValue[0]) {
            cause[0] = RemovalCause.REPLACED;
          }
          varTime = expireAfterUpdate(n, key, newValue[0], expiry, now[0]);
        } else {
          varTime = expireAfterCreate(key, newValue[0], expiry, now[0]);
        }

        n.setValue(newValue[0], valueReferenceQueue());
        n.setWeight(weight[1]);

        // An in-flight async compute is pinned far into the future so it cannot expire early
        long expirationTime = isComputingAsync(newValue[0]) ? (now[0] + ASYNC_EXPIRY) : now[0];
        setAccessTime(n, expirationTime);
        setWriteTime(n, expirationTime);
        setVariableTime(n, varTime);

        discardRefresh(kr);
        return n;
      }
    });

    // Notify listeners and record stats outside of the map's computation
    if (cause[0] != null) {
      if (cause[0] == RemovalCause.REPLACED) {
        requireNonNull(newValue[0]);
        notifyOnReplace(key, oldValue[0], newValue[0]);
      } else {
        if (cause[0].wasEvicted()) {
          statsCounter().recordEviction(weight[0], cause[0]);
        }
        notifyRemoval(nodeKey[0], oldValue[0], cause[0]);
      }
    }

    // Replay the structural change through the eviction policy
    if (removed[0] != null) {
      afterWrite(new RemovalTask(removed[0]));
    } else if (node == null) {
      // absent and not computable
    } else if ((oldValue[0] == null) && (cause[0] == null)) {
      afterWrite(new AddTask(node, weight[1]));
    } else {
      int weightedDifference = weight[1] - weight[0];
      if (expiresAfterWrite() || (weightedDifference != 0)) {
        afterWrite(new UpdateTask(node, weightedDifference));
      } else {
        afterRead(node, now[0], /* recordHit= */ false);
        if ((cause[0] != null) && cause[0].wasEvicted()) {
          scheduleDrainBuffers();
        }
      }
    }

    return newValue[0];
  }
2969

2970
  @Override
2971
  public void forEach(BiConsumer<? super K, ? super V> action) {
2972
    requireNonNull(action);
1✔
2973

2974
    for (var iterator = new EntryIterator<>(this); iterator.hasNext();) {
1✔
2975
      action.accept(iterator.key, iterator.value);
1✔
2976
      iterator.advance();
1✔
2977
    }
2978
  }
1✔
2979

2980
  @Override
2981
  public Set<K> keySet() {
2982
    Set<K> ks = keySet;
1✔
2983
    return (ks == null) ? (keySet = new KeySetView<>(this)) : ks;
1✔
2984
  }
2985

2986
  @Override
2987
  public Collection<V> values() {
2988
    Collection<V> vs = values;
1✔
2989
    return (vs == null) ? (values = new ValuesView<>(this)) : vs;
1✔
2990
  }
2991

2992
  @Override
2993
  public Set<Entry<K, V>> entrySet() {
2994
    Set<Entry<K, V>> es = entrySet;
1✔
2995
    return (es == null) ? (entrySet = new EntrySetView<>(this)) : es;
1✔
2996
  }
2997

2998
  /**
2999
   * Object equality requires reflexive, symmetric, transitive, and consistency properties. Of
3000
   * these, symmetry and consistency require further clarification for how they are upheld.
3001
   * <p>
3002
   * The <i>consistency</i> property between invocations requires that the results are the same if
3003
   * there are no modifications to the information used. Therefore, usages should expect that this
3004
   * operation may return misleading results if either the maps or the data held by them is modified
3005
   * during the execution of this method. This characteristic allows for comparing the map sizes and
3006
   * assuming stable mappings, as done by {@link java.util.AbstractMap}-based maps.
3007
   * <p>
3008
   * The <i>symmetric</i> property requires that the result is the same for all implementations of
3009
   * {@link Map#equals(Object)}. That contract is defined in terms of the stable mappings provided
3010
   * by {@link #entrySet()}, meaning that the {@link #size()} optimization forces that the count is
3011
   * consistent with the mappings when used for an equality check.
3012
   * <p>
3013
   * The cache's {@link #size()} method may include entries that have expired or have been reference
3014
   * collected, but have not yet been removed from the backing map. An iteration over the map may
3015
   * trigger the removal of these dead entries when skipped over during traversal. To ensure
3016
   * consistency and symmetry, usages should call {@link #cleanUp()} before this method while no
3017
   * other concurrent operations are being performed on this cache. This is not done implicitly by
3018
   * {@link #size()} as many usages assume it to be instantaneous and lock-free.
3019
   */
3020
  @Override
  public boolean equals(@Nullable Object o) {
    if (o == this) {
      return true;
    } else if (!(o instanceof Map)) {
      return false;
    }

    var map = (Map<?, ?>) o;
    // The size comparison assumes stable mappings during this call; see the method Javadoc
    if (size() != map.size()) {
      return false;
    }

    long now = expirationTicker().read();
    for (var node : data.values()) {
      K key = node.getKey();
      V value = node.getValue();
      if ((key == null) || (value == null)
          || !node.isAlive() || hasExpired(node, now)) {
        // A dead entry was observed, so the size check above was misleading; trigger
        // maintenance to sweep it out and report inequality
        scheduleDrainBuffers();
        return false;
      } else {
        var val = map.get(key);
        // Reference comparison first to skip a potentially expensive equals
        if ((val == null) || ((val != value) && !val.equals(value))) {
          return false;
        }
      }
    }
    return true;
  }
3050

3051
  @Override
3052
  @SuppressWarnings("NullAway")
3053
  public int hashCode() {
3054
    @Var int hash = 0;
1✔
3055
    long now = expirationTicker().read();
1✔
3056
    for (var node : data.values()) {
1✔
3057
      K key = node.getKey();
1✔
3058
      V value = node.getValue();
1✔
3059
      if ((key == null) || (value == null)
1✔
3060
          || !node.isAlive() || hasExpired(node, now)) {
1✔
3061
        scheduleDrainBuffers();
1✔
3062
      } else {
3063
        hash += key.hashCode() ^ value.hashCode();
1✔
3064
      }
3065
    }
1✔
3066
    return hash;
1✔
3067
  }
3068

3069
  @Override
  public String toString() {
    // Renders the live entries as "{k1=v1, k2=v2}". Dead or expired entries are skipped and
    // a maintenance cycle is requested to evict them. Self-referential keys or values are
    // printed as "(this Map)", mirroring AbstractMap's convention, to avoid recursion.
    var result = new StringBuilder().append('{');
    long now = expirationTicker().read();
    for (var node : data.values()) {
      K key = node.getKey();
      V value = node.getValue();
      if ((key == null) || (value == null)
          || !node.isAlive() || hasExpired(node, now)) {
        scheduleDrainBuffers();
      } else {
        // Separate entries after the first with ", "
        if (result.length() != 1) {
          result.append(',').append(' ');
        }
        result.append((key == this) ? "(this Map)" : key);
        result.append('=');
        result.append((value == this) ? "(this Map)" : value);
      }
    }
    return result.append('}').toString();
  }
3090

3091
  /**
   * Returns the computed result from the ordered traversal of the cache entries.
   *
   * @param hottest the coldest or hottest iteration order
   * @param transformer a function that unwraps the value
   * @param mappingFunction the mapping function to compute a value
   * @return the computed value
   */
  @SuppressWarnings("GuardedByChecker")
  <T> T evictionOrder(boolean hottest, Function<@Nullable V, @Nullable V> transformer,
      Function<Stream<CacheEntry<K, V>>, T> mappingFunction) {
    // Ranks a node by its key's estimated access frequency; a reclaimed key ranks lowest
    Comparator<Node<K, V>> comparator = Comparator.comparingInt(node -> {
      K key = node.getKey();
      return (key == null) ? 0 : frequencySketch().frequency(key);
    });
    Iterable<Node<K, V>> iterable;
    if (hottest) {
      // Hottest first: the protected region is drained first (in descending access order),
      // then the probation and window regions are merged by descending frequency
      iterable = () -> {
        var secondary = PeekingIterator.comparing(
            accessOrderProbationDeque().descendingIterator(),
            accessOrderWindowDeque().descendingIterator(), comparator);
        return PeekingIterator.concat(
            accessOrderProtectedDeque().descendingIterator(), secondary);
      };
    } else {
      // Coldest first: the window and probation regions are merged by ascending frequency
      // (reversed comparator), followed by the protected region
      iterable = () -> {
        var primary = PeekingIterator.comparing(
            accessOrderWindowDeque().iterator(), accessOrderProbationDeque().iterator(),
            comparator.reversed());
        return PeekingIterator.concat(primary, accessOrderProtectedDeque().iterator());
      };
    }
    return snapshot(iterable, transformer, mappingFunction);
  }
3125

3126
  /**
   * Returns the computed result from the ordered traversal of the cache entries.
   *
   * @param oldest the youngest or oldest iteration order
   * @param transformer a function that unwraps the value
   * @param mappingFunction the mapping function to compute a value
   * @return the computed value
   */
  @SuppressWarnings("GuardedByChecker")
  <T> T expireAfterAccessOrder(boolean oldest, Function<@Nullable V, @Nullable V> transformer,
      Function<Stream<CacheEntry<K, V>>, T> mappingFunction) {
    Iterable<Node<K, V>> iterable;
    if (evicts()) {
      // With size eviction enabled the entries are spread across three access-ordered
      // deques, so merge them by access time into a single ordered traversal
      iterable = () -> {
        @Var Comparator<Node<K, V>> comparator = Comparator.comparingLong(Node::getAccessTime);
        PeekingIterator<Node<K, V>> first;
        PeekingIterator<Node<K, V>> second;
        PeekingIterator<Node<K, V>> third;
        if (oldest) {
          first = accessOrderWindowDeque().iterator();
          second = accessOrderProbationDeque().iterator();
          third = accessOrderProtectedDeque().iterator();
        } else {
          // Youngest first: reverse both the per-deque iteration and the merge comparator
          comparator = comparator.reversed();
          first = accessOrderWindowDeque().descendingIterator();
          second = accessOrderProbationDeque().descendingIterator();
          third = accessOrderProtectedDeque().descendingIterator();
        }
        return PeekingIterator.comparing(
            PeekingIterator.comparing(first, second, comparator), third, comparator);
      };
    } else {
      // Without size eviction all entries reside on the window deque
      iterable = oldest
          ? accessOrderWindowDeque()
          : accessOrderWindowDeque()::descendingIterator;
    }
    return snapshot(iterable, transformer, mappingFunction);
  }
3164

3165
  /**
   * Returns the computed result from the ordered traversal of the cache entries.
   *
   * @param iterable the supplier of the entries in the cache
   * @param transformer a function that unwraps the value
   * @param mappingFunction the mapping function to compute a value
   * @return the computed value
   */
  @SuppressWarnings("NullAway")
  <T> T snapshot(Iterable<Node<K, V>> iterable, Function<@Nullable V, @Nullable V> transformer,
      Function<Stream<CacheEntry<K, V>>, T> mappingFunction) {
    requireNonNull(mappingFunction);
    requireNonNull(transformer);
    requireNonNull(iterable);

    // The eviction lock is held for the full traversal so the policy's ordering structures
    // are not mutated while they are being iterated
    evictionLock.lock();
    try {
      // Flush pending work first so the traversal reflects the latest state
      maintenance(/* ignored */ null);

      // Obtain the iterator as late as possible for modification count checking
      try (var stream = StreamSupport.stream(Spliterators.spliteratorUnknownSize(
           iterable.iterator(), DISTINCT | ORDERED | NONNULL | IMMUTABLE), /* parallel= */ false)) {
        // Dead or expired nodes map to null and are filtered from the snapshot
        return mappingFunction.apply(stream
            .map(node -> nodeToCacheEntry(node, transformer))
            .filter(Objects::nonNull));
      }
    } finally {
      evictionLock.unlock();
      rescheduleCleanUpIfIncomplete();
    }
  }
3196

3197
  /** Returns an entry for the given node if it can be used externally, else null. */
  @Nullable CacheEntry<K, V> nodeToCacheEntry(
      Node<K, V> node, Function<@Nullable V, @Nullable V> transformer) {
    V value = transformer.apply(node.getValue());
    K key = node.getKey();
    long now;
    // Reclaimed, retired, or expired entries are not visible externally
    if ((key == null) || (value == null) || !node.isAlive()
        || hasExpired(node, (now = expirationTicker().read()))) {
      return null;
    }

    // The remaining lifetime is the minimum across the enabled fixed expiration policies
    @Var long expiresAfter = Long.MAX_VALUE;
    if (expiresAfterAccess()) {
      expiresAfter = Math.min(expiresAfter, now - node.getAccessTime() + expiresAfterAccessNanos());
    }
    if (expiresAfterWrite()) {
      // NOTE(review): the low bit of both timestamps is masked off before differencing —
      // presumably the write time's low bit is reserved as a flag; confirm against
      // Node.getWriteTime's encoding
      expiresAfter = Math.min(expiresAfter,
          (now & ~1L) - (node.getWriteTime() & ~1L) + expiresAfterWriteNanos());
    }
    if (expiresVariable()) {
      // Variable expiration supersedes the fixed policies' estimate
      expiresAfter = node.getVariableTime() - now;
    }

    // When refreshAfterWrite is disabled, "now + Long.MAX_VALUE" serves as a far-future
    // sentinel (it wraps numerically; presumably consumers treat it relative to "now")
    long refreshableAt = refreshAfterWrite()
        ? node.getWriteTime() + refreshAfterWriteNanos()
        : now + Long.MAX_VALUE;
    int weight = node.getPolicyWeight();
    return SnapshotEntry.forEntry(key, value, now, weight, now + expiresAfter, refreshableAt);
  }
3226

3227
  /** A function that produces an unmodifiable map up to the limit in stream order. */
3228
  static final class SizeLimiter<K, V> implements Function<Stream<CacheEntry<K, V>>, Map<K, V>> {
3229
    private final int expectedSize;
3230
    private final long limit;
3231

3232
    SizeLimiter(int expectedSize, long limit) {
1✔
3233
      requireArgument(limit >= 0);
1✔
3234
      this.expectedSize = expectedSize;
1✔
3235
      this.limit = limit;
1✔
3236
    }
1✔
3237

3238
    @Override
3239
    public Map<K, V> apply(Stream<CacheEntry<K, V>> stream) {
3240
      var map = new LinkedHashMap<K, V>(calculateHashMapCapacity(expectedSize));
1✔
3241
      stream.limit(limit).forEach(entry -> map.put(entry.getKey(), entry.getValue()));
1✔
3242
      return Collections.unmodifiableMap(map);
1✔
3243
    }
3244
  }
3245

3246
  /** A function that produces an unmodifiable map up to the weighted limit in stream order. */
3247
  static final class WeightLimiter<K, V> implements Function<Stream<CacheEntry<K, V>>, Map<K, V>> {
3248
    private final long weightLimit;
3249

3250
    private long weightedSize;
3251

3252
    WeightLimiter(long weightLimit) {
1✔
3253
      requireArgument(weightLimit >= 0);
1✔
3254
      this.weightLimit = weightLimit;
1✔
3255
    }
1✔
3256

3257
    @Override
3258
    public Map<K, V> apply(Stream<CacheEntry<K, V>> stream) {
3259
      var map = new LinkedHashMap<K, V>();
1✔
3260
      stream.takeWhile(entry -> {
1✔
3261
        weightedSize = Math.addExact(weightedSize, entry.weight());
1✔
3262
        return (weightedSize <= weightLimit);
1✔
3263
      }).forEach(entry -> map.put(entry.getKey(), entry.getValue()));
1✔
3264
      return Collections.unmodifiableMap(map);
1✔
3265
    }
3266
  }
3267

3268
  /** An adapter to safely externalize the keys. */
  static final class KeySetView<K, V> extends AbstractSet<K> {
    final BoundedLocalCache<K, V> cache;

    KeySetView(BoundedLocalCache<K, V> cache) {
      this.cache = requireNonNull(cache);
    }

    @Override
    public int size() {
      return cache.size();
    }

    @Override
    public void clear() {
      cache.clear();
    }

    @Override
    @SuppressWarnings("SuspiciousMethodCalls")
    public boolean contains(Object o) {
      return cache.containsKey(o);
    }

    @Override
    public boolean removeAll(Collection<?> collection) {
      requireNonNull(collection);
      @Var boolean modified = false;
      // Iterate the smaller side to minimize containment checks; the size-based choice is
      // only taken for a Set, where contains(..) is expected to be cheap
      if ((collection instanceof Set<?>) && (collection.size() > size())) {
        for (K key : this) {
          if (collection.contains(key)) {
            modified |= remove(key);
          }
        }
      } else {
        // Null elements cannot be present, so they are skipped rather than rejected
        for (var item : collection) {
          modified |= (item != null) && remove(item);
        }
      }
      return modified;
    }

    @Override
    public boolean remove(Object o) {
      return (cache.remove(o) != null);
    }

    @Override
    public boolean removeIf(Predicate<? super K> filter) {
      requireNonNull(filter);
      @Var boolean modified = false;
      for (K key : this) {
        if (filter.test(key) && remove(key)) {
          modified = true;
        }
      }
      return modified;
    }

    @Override
    public boolean retainAll(Collection<?> collection) {
      requireNonNull(collection);
      @Var boolean modified = false;
      for (K key : this) {
        if (!collection.contains(key) && remove(key)) {
          modified = true;
        }
      }
      return modified;
    }

    @Override
    public Iterator<K> iterator() {
      return new KeyIterator<>(cache);
    }

    @Override
    public Spliterator<K> spliterator() {
      return new KeySpliterator<>(cache);
    }
  }
3349

3350
  /** An adapter to safely externalize the key iterator. */
  static final class KeyIterator<K, V> implements Iterator<K> {
    // Delegates to the entry iterator, which performs the liveness and expiration filtering
    final EntryIterator<K, V> iterator;

    KeyIterator(BoundedLocalCache<K, V> cache) {
      this.iterator = new EntryIterator<>(cache);
    }

    @Override
    public boolean hasNext() {
      return iterator.hasNext();
    }

    @Override
    public K next() {
      return iterator.nextKey();
    }

    @Override
    public void remove() {
      iterator.remove();
    }
  }
3373

3374
  /** An adapter to safely externalize the key spliterator. */
  static final class KeySpliterator<K, V> implements Spliterator<K> {
    // The underlying spliterator over the cache's internal nodes
    final Spliterator<Node<K, V>> spliterator;
    final BoundedLocalCache<K, V> cache;

    KeySpliterator(BoundedLocalCache<K, V> cache) {
      this(cache, cache.data.values().spliterator());
    }

    KeySpliterator(BoundedLocalCache<K, V> cache, Spliterator<Node<K, V>> spliterator) {
      this.spliterator = requireNonNull(spliterator);
      this.cache = requireNonNull(cache);
    }

    @Override
    public void forEachRemaining(Consumer<? super K> action) {
      requireNonNull(action);
      // Only emit keys of entries that are unreclaimed, alive, and unexpired; the clock is
      // read per node so long traversals use a fresh timestamp
      Consumer<Node<K, V>> consumer = node -> {
        K key = node.getKey();
        V value = node.getValue();
        long now = cache.expirationTicker().read();
        if ((key != null) && (value != null) && node.isAlive() && !cache.hasExpired(node, now)) {
          action.accept(key);
        }
      };
      spliterator.forEachRemaining(consumer);
    }

    @Override
    public boolean tryAdvance(Consumer<? super K> action) {
      requireNonNull(action);
      // Records whether the filtering consumer emitted an element on the last advance
      boolean[] advanced = { false };
      Consumer<Node<K, V>> consumer = node -> {
        K key = node.getKey();
        V value = node.getValue();
        long now = cache.expirationTicker().read();
        if ((key != null) && (value != null) && node.isAlive() && !cache.hasExpired(node, now)) {
          action.accept(key);
          advanced[0] = true;
        }
      };
      // Skip past dead entries until one is emitted or the source is exhausted
      while (spliterator.tryAdvance(consumer)) {
        if (advanced[0]) {
          return true;
        }
      }
      return false;
    }

    @Override
    public @Nullable Spliterator<K> trySplit() {
      Spliterator<Node<K, V>> split = spliterator.trySplit();
      return (split == null) ? null : new KeySpliterator<>(cache, split);
    }

    @Override
    public long estimateSize() {
      return spliterator.estimateSize();
    }

    @Override
    public int characteristics() {
      return DISTINCT | CONCURRENT | NONNULL;
    }
  }
3439

3440
  /** An adapter to safely externalize the values. */
  static final class ValuesView<K, V> extends AbstractCollection<V> {
    final BoundedLocalCache<K, V> cache;

    ValuesView(BoundedLocalCache<K, V> cache) {
      this.cache = requireNonNull(cache);
    }

    @Override
    public int size() {
      return cache.size();
    }

    @Override
    public void clear() {
      cache.clear();
    }

    @Override
    @SuppressWarnings("SuspiciousMethodCalls")
    public boolean contains(Object o) {
      return cache.containsValue(o);
    }

    @Override
    @SuppressWarnings("NullAway")
    public boolean removeAll(Collection<?> collection) {
      requireNonNull(collection);
      @Var boolean modified = false;
      // remove(key, value) is conditional, so an entry concurrently remapped to a new value
      // is left untouched
      for (var iterator = new EntryIterator<>(cache); iterator.hasNext();) {
        if (collection.contains(iterator.value) && cache.remove(iterator.key, iterator.value)) {
          modified = true;
        }
        iterator.advance();
      }
      return modified;
    }

    @Override
    @SuppressWarnings("NullAway")
    public boolean remove(Object o) {
      // Null values cannot be present
      if (o == null) {
        return false;
      }
      // Remove the first entry currently mapped to an equal value
      for (var iterator = new EntryIterator<>(cache); iterator.hasNext();) {
        if (o.equals(iterator.value) && cache.remove(iterator.key, iterator.value)) {
          return true;
        }
        iterator.advance();
      }
      return false;
    }

    @Override
    @SuppressWarnings("NullAway")
    public boolean removeIf(Predicate<? super V> filter) {
      requireNonNull(filter);
      @Var boolean modified = false;
      for (var iterator = new EntryIterator<>(cache); iterator.hasNext();) {
        if (filter.test(iterator.value)) {
          modified |= cache.remove(iterator.key, iterator.value);
        }
        iterator.advance();
      }
      return modified;
    }

    @Override
    @SuppressWarnings("NullAway")
    public boolean retainAll(Collection<?> collection) {
      requireNonNull(collection);
      @Var boolean modified = false;
      for (var iterator = new EntryIterator<>(cache); iterator.hasNext();) {
        if (!collection.contains(iterator.value) && cache.remove(iterator.key, iterator.value)) {
          modified = true;
        }
        iterator.advance();
      }
      return modified;
    }

    @Override
    public Iterator<V> iterator() {
      return new ValueIterator<>(cache);
    }

    @Override
    public Spliterator<V> spliterator() {
      return new ValueSpliterator<>(cache);
    }
  }
3531

3532
  /** An adapter to safely externalize the value iterator. */
  static final class ValueIterator<K, V> implements Iterator<V> {
    // Delegates to the entry iterator, which performs the liveness and expiration filtering
    final EntryIterator<K, V> iterator;

    ValueIterator(BoundedLocalCache<K, V> cache) {
      this.iterator = new EntryIterator<>(cache);
    }

    @Override
    public boolean hasNext() {
      return iterator.hasNext();
    }

    @Override
    public V next() {
      return iterator.nextValue();
    }

    @Override
    public void remove() {
      iterator.remove();
    }
  }
3555

3556
  /** An adapter to safely externalize the value spliterator. */
3557
  static final class ValueSpliterator<K, V> implements Spliterator<V> {
3558
    final Spliterator<Node<K, V>> spliterator;
3559
    final BoundedLocalCache<K, V> cache;
3560

3561
    ValueSpliterator(BoundedLocalCache<K, V> cache) {
3562
      this(cache, cache.data.values().spliterator());
1✔
3563
    }
1✔
3564

3565
    ValueSpliterator(BoundedLocalCache<K, V> cache, Spliterator<Node<K, V>> spliterator) {
1✔
3566
      this.spliterator = requireNonNull(spliterator);
1✔
3567
      this.cache = requireNonNull(cache);
1✔
3568
    }
1✔
3569

3570
    @Override
3571
    public void forEachRemaining(Consumer<? super V> action) {
3572
      requireNonNull(action);
1✔
3573
      Consumer<Node<K, V>> consumer = node -> {
1✔
3574
        K key = node.getKey();
1✔
3575
        V value = node.getValue();
1✔
3576
        long now = cache.expirationTicker().read();
1✔
3577
        if ((key != null) && (value != null) && node.isAlive() && !cache.hasExpired(node, now)) {
1✔
3578
          action.accept(value);
1✔
3579
        }
3580
      };
1✔
3581
      spliterator.forEachRemaining(consumer);
1✔
3582
    }
1✔
3583

3584
    @Override
3585
    public boolean tryAdvance(Consumer<? super V> action) {
3586
      requireNonNull(action);
1✔
3587
      boolean[] advanced = { false };
1✔
3588
      long now = cache.expirationTicker().read();
1✔
3589
      Consumer<Node<K, V>> consumer = node -> {
1✔
3590
        K key = node.getKey();
1✔
3591
        V value = node.getValue();
1✔
3592
        if ((key != null) && (value != null) && !cache.hasExpired(node, now) && node.isAlive()) {
1✔
3593
          action.accept(value);
1✔
3594
          advanced[0] = true;
1✔
3595
        }
3596
      };
1✔
3597
      while (spliterator.tryAdvance(consumer)) {
1✔
3598
        if (advanced[0]) {
1✔
3599
          return true;
1✔
3600
        }
3601
      }
3602
      return false;
1✔
3603
    }
3604

3605
    @Override
3606
    public @Nullable Spliterator<V> trySplit() {
3607
      Spliterator<Node<K, V>> split = spliterator.trySplit();
1✔
3608
      return (split == null) ? null : new ValueSpliterator<>(cache, split);
1✔
3609
    }
3610

3611
    @Override
3612
    public long estimateSize() {
3613
      return spliterator.estimateSize();
1✔
3614
    }
3615

3616
    @Override
3617
    public int characteristics() {
3618
      return CONCURRENT | NONNULL;
1✔
3619
    }
3620
  }
3621

3622
  /** An adapter to safely externalize the entries. */
  static final class EntrySetView<K, V> extends AbstractSet<Entry<K, V>> {
    final BoundedLocalCache<K, V> cache;

    EntrySetView(BoundedLocalCache<K, V> cache) {
      this.cache = requireNonNull(cache);
    }

    @Override
    public int size() {
      return cache.size();
    }

    @Override
    public void clear() {
      cache.clear();
    }

    @Override
    public boolean contains(Object o) {
      if (!(o instanceof Entry<?, ?>)) {
        return false;
      }
      var entry = (Entry<?, ?>) o;
      var key = entry.getKey();
      var value = entry.getValue();
      // Null keys or values cannot be present
      if ((key == null) || (value == null)) {
        return false;
      }
      // A lookup key avoids allocating a cache-specific key wrapper for the probe
      Node<K, V> node = cache.data.get(cache.nodeFactory.newLookupKey(key));
      return (node != null) && node.containsValue(value);
    }

    @Override
    public boolean removeAll(Collection<?> collection) {
      requireNonNull(collection);
      @Var boolean modified = false;
      // Iterate the smaller side to minimize containment checks; the size-based choice is
      // only taken for a Set, where contains(..) is expected to be cheap
      if ((collection instanceof Set<?>) && (collection.size() > size())) {
        for (var entry : this) {
          if (collection.contains(entry)) {
            modified |= remove(entry);
          }
        }
      } else {
        for (var item : collection) {
          modified |= (item != null) && remove(item);
        }
      }
      return modified;
    }

    @Override
    @SuppressWarnings("SuspiciousMethodCalls")
    public boolean remove(Object o) {
      if (!(o instanceof Entry<?, ?>)) {
        return false;
      }
      var entry = (Entry<?, ?>) o;
      var key = entry.getKey();
      // Conditional removal: the mapping is removed only if still mapped to the given value
      return (key != null) && cache.remove(key, entry.getValue());
    }

    @Override
    public boolean removeIf(Predicate<? super Entry<K, V>> filter) {
      requireNonNull(filter);
      @Var boolean modified = false;
      for (Entry<K, V> entry : this) {
        if (filter.test(entry)) {
          modified |= cache.remove(entry.getKey(), entry.getValue());
        }
      }
      return modified;
    }

    @Override
    public boolean retainAll(Collection<?> collection) {
      requireNonNull(collection);
      @Var boolean modified = false;
      for (var entry : this) {
        if (!collection.contains(entry) && remove(entry)) {
          modified = true;
        }
      }
      return modified;
    }

    @Override
    public Iterator<Entry<K, V>> iterator() {
      return new EntryIterator<>(cache);
    }

    @Override
    public Spliterator<Entry<K, V>> spliterator() {
      return new EntrySpliterator<>(cache);
    }
  }
3718

3719
  /** An adapter to safely externalize the entry iterator. */
  static final class EntryIterator<K, V> implements Iterator<Entry<K, V>> {
    final BoundedLocalCache<K, V> cache;
    final Iterator<Node<K, V>> iterator;

    // Cursor state holding the next live entry found by hasNext()
    @Nullable K key;
    @Nullable V value;
    // Key of the last returned element, consumed by remove()
    @Nullable K removalKey;
    @Nullable Node<K, V> next;

    EntryIterator(BoundedLocalCache<K, V> cache) {
      this.iterator = cache.data.values().iterator();
      this.cache = cache;
    }

    @Override
    public boolean hasNext() {
      // A previously discovered entry has not been consumed yet
      if (next != null) {
        return true;
      }

      // Scan forward for the next entry that is unreclaimed, alive, and unexpired
      long now = cache.expirationTicker().read();
      while (iterator.hasNext()) {
        next = iterator.next();
        value = next.getValue();
        key = next.getKey();

        boolean evictable = (key == null) || (value == null) || cache.hasExpired(next, now);
        if (evictable || !next.isAlive()) {
          if (evictable) {
            // Request a maintenance cycle so the dead entry is evicted; a node that is
            // merely not alive is skipped without scheduling
            cache.scheduleDrainBuffers();
          }
          advance();
          continue;
        }
        return true;
      }
      return false;
    }

    /** Invalidates the current position so that the iterator may compute the next position. */
    void advance() {
      value = null;
      next = null;
      key = null;
    }

    // Returns the next live key and records it for a subsequent remove()
    @SuppressWarnings("NullAway")
    K nextKey() {
      if (!hasNext()) {
        throw new NoSuchElementException();
      }
      removalKey = key;
      advance();
      return removalKey;
    }

    // Returns the next live value and records its key for a subsequent remove()
    @SuppressWarnings("NullAway")
    V nextValue() {
      if (!hasNext()) {
        throw new NoSuchElementException();
      }
      removalKey = key;
      V val = value;
      advance();
      return val;
    }

    @Override
    public Entry<K, V> next() {
      if (!hasNext()) {
        throw new NoSuchElementException();
      }
      // A write-through entry, so that Entry.setValue updates the cache
      @SuppressWarnings("NullAway")
      var entry = new WriteThroughEntry<>(cache, key, value);
      removalKey = key;
      advance();
      return entry;
    }

    @Override
    public void remove() {
      // Per the Iterator contract, remove() is only legal after a next*() call
      if (removalKey == null) {
        throw new IllegalStateException();
      }
      cache.remove(removalKey);
      removalKey = null;
    }
  }
3808

3809
  /** An adapter to safely externalize the entry spliterator. */
  static final class EntrySpliterator<K, V> implements Spliterator<Entry<K, V>> {
    // The underlying spliterator over the cache's internal nodes
    final Spliterator<Node<K, V>> spliterator;
    final BoundedLocalCache<K, V> cache;

    EntrySpliterator(BoundedLocalCache<K, V> cache) {
      this(cache, cache.data.values().spliterator());
    }

    EntrySpliterator(BoundedLocalCache<K, V> cache, Spliterator<Node<K, V>> spliterator) {
      this.spliterator = requireNonNull(spliterator);
      this.cache = requireNonNull(cache);
    }

    @Override
    public void forEachRemaining(Consumer<? super Entry<K, V>> action) {
      requireNonNull(action);
      // Only emit entries that are unreclaimed, alive, and unexpired, wrapped so that
      // Entry.setValue writes through to the cache
      Consumer<Node<K, V>> consumer = node -> {
        K key = node.getKey();
        V value = node.getValue();
        long now = cache.expirationTicker().read();
        if ((key != null) && (value != null) && node.isAlive() && !cache.hasExpired(node, now)) {
          action.accept(new WriteThroughEntry<>(cache, key, value));
        }
      };
      spliterator.forEachRemaining(consumer);
    }

    @Override
    public boolean tryAdvance(Consumer<? super Entry<K, V>> action) {
      requireNonNull(action);
      // Records whether the filtering consumer emitted an element on the last advance
      boolean[] advanced = { false };
      Consumer<Node<K, V>> consumer = node -> {
        K key = node.getKey();
        V value = node.getValue();
        long now = cache.expirationTicker().read();
        if ((key != null) && (value != null) && node.isAlive() && !cache.hasExpired(node, now)) {
          action.accept(new WriteThroughEntry<>(cache, key, value));
          advanced[0] = true;
        }
      };
      // Skip past dead entries until one is emitted or the source is exhausted
      while (spliterator.tryAdvance(consumer)) {
        if (advanced[0]) {
          return true;
        }
      }
      return false;
    }

    @Override
    public @Nullable Spliterator<Entry<K, V>> trySplit() {
      Spliterator<Node<K, V>> split = spliterator.trySplit();
      return (split == null) ? null : new EntrySpliterator<>(cache, split);
    }

    @Override
    public long estimateSize() {
      return spliterator.estimateSize();
    }

    @Override
    public int characteristics() {
      return DISTINCT | CONCURRENT | NONNULL;
    }
  }
3874

3875
  /** A reusable task that performs the maintenance work; used to avoid wrapping by ForkJoinPool. */
  static final class PerformCleanupTask extends ForkJoinTask<@Nullable Void> implements Runnable {
    private static final long serialVersionUID = 1L;

    // Weakly references the cache so that this task, if retained by an executor, does not
    // prevent an otherwise unreachable cache from being garbage collected
    final WeakReference<BoundedLocalCache<?, ?>> reference;

    PerformCleanupTask(BoundedLocalCache<?, ?> cache) {
      reference = new WeakReference<>(cache);
    }

    @Override
    public boolean exec() {
      try {
        run();
      } catch (Throwable t) {
        // Maintenance failures are logged rather than propagated to the pool
        logger.log(Level.ERROR, "Exception thrown when performing the maintenance task", t);
      }

      // Indicates that the task has not completed to allow subsequent submissions to execute
      return false;
    }

    @Override
    public void run() {
      // The cache may have been collected; in that case there is nothing to clean up
      BoundedLocalCache<?, ?> cache = reference.get();
      if (cache != null) {
        cache.performCleanUp(/* ignored */ null);
      }
    }

    /**
     * This method cannot be ignored due to being final, so a hostile user supplied Executor could
     * forcibly complete the task and halt future executions. There are easier ways to intentionally
     * harm a system, so this is assumed to not happen in practice.
     */
    // public final void quietlyComplete() {}

    // No-op completion and cancellation overrides so an executor cannot transition this
    // reusable task into a terminal state
    @Override public void complete(@Nullable Void value) {}
    @Override public void setRawResult(@Nullable Void value) {}
    @Override public @Nullable Void getRawResult() { return null; }
    @Override public void completeExceptionally(@Nullable Throwable t) {}
    @Override public boolean cancel(boolean mayInterruptIfRunning) { return false; }
  }
3918

3919
  /** Creates a serialization proxy based on the common configuration shared by all cache types. */
  static <K, V> SerializationProxy<K, V> makeSerializationProxy(BoundedLocalCache<?, ?> cache) {
    var proxy = new SerializationProxy<K, V>();
    // Reference strength, statistics, and listener configuration
    proxy.weakKeys = cache.collectKeys();
    proxy.weakValues = cache.nodeFactory.weakValues();
    proxy.softValues = cache.nodeFactory.softValues();
    proxy.isRecordingStats = cache.isRecordingStats();
    proxy.evictionListener = cache.evictionListener;
    proxy.removalListener = cache.removalListener();
    proxy.ticker = cache.expirationTicker();
    // Only the expiration and refresh policies that are enabled are captured
    if (cache.expiresAfterAccess()) {
      proxy.expiresAfterAccessNanos = cache.expiresAfterAccessNanos();
    }
    if (cache.expiresAfterWrite()) {
      proxy.expiresAfterWriteNanos = cache.expiresAfterWriteNanos();
    }
    if (cache.expiresVariable()) {
      proxy.expiry = cache.expiry();
    }
    if (cache.refreshAfterWrite()) {
      proxy.refreshAfterWriteNanos = cache.refreshAfterWriteNanos();
    }
    if (cache.evicts()) {
      // The maximum is a total weight when a weigher was supplied, otherwise an entry count
      if (cache.isWeighted) {
        proxy.weigher = cache.weigher;
        proxy.maximumWeight = cache.maximum();
      } else {
        proxy.maximumSize = cache.maximum();
      }
    }
    proxy.cacheLoader = cache.cacheLoader;
    proxy.async = cache.isAsync;
    return proxy;
  }
3953

3954
  /* --------------- Manual Cache --------------- */
3955

3956
  static class BoundedLocalManualCache<K, V> implements LocalManualCache<K, V>, Serializable {
3957
    private static final long serialVersionUID = 1;
3958

3959
    final BoundedLocalCache<K, V> cache;
3960

3961
    @Nullable Policy<K, V> policy;
3962

3963
    BoundedLocalManualCache(Caffeine<K, V> builder) {
3964
      this(builder, null);
1✔
3965
    }
1✔
3966

3967
    BoundedLocalManualCache(Caffeine<K, V> builder, @Nullable CacheLoader<? super K, V> loader) {
1✔
3968
      cache = LocalCacheFactory.newBoundedLocalCache(builder, loader, /* isAsync= */ false);
1✔
3969
    }
1✔
3970

3971
    @Override
3972
    public final BoundedLocalCache<K, V> cache() {
3973
      return cache;
1✔
3974
    }
3975

3976
    @Override
3977
    public final Policy<K, V> policy() {
3978
      if (policy == null) {
1✔
3979
        @SuppressWarnings("NullAway")
3980
        Function<@Nullable V, @Nullable V> identity = identity();
1✔
3981
        policy = new BoundedPolicy<>(cache, identity, cache.isWeighted);
1✔
3982
      }
3983
      return policy;
1✔
3984
    }
3985

3986
    private void readObject(ObjectInputStream stream) throws InvalidObjectException {
3987
      throw new InvalidObjectException("Proxy required");
1✔
3988
    }
3989

3990
    private Object writeReplace() {
3991
      return makeSerializationProxy(cache);
1✔
3992
    }
3993
  }
3994

3995
  /**
   * The {@link Policy} implementation for a bounded cache. Each policy view is created on first
   * request and cached in a field; values returned to callers are passed through
   * {@code transformer} (e.g. to unwrap completed futures for async caches).
   */
  @SuppressWarnings({"NullableOptional", "OptionalAssignedToNull"})
  static final class BoundedPolicy<K, V> implements Policy<K, V> {
    final Function<@Nullable V, @Nullable V> transformer;
    final BoundedLocalCache<K, V> cache;
    final boolean isWeighted;

    // memoized policy views; null until first requested
    @Nullable Optional<Eviction<K, V>> eviction;
    @Nullable Optional<FixedRefresh<K, V>> refreshes;
    @Nullable Optional<FixedExpiration<K, V>> afterWrite;
    @Nullable Optional<FixedExpiration<K, V>> afterAccess;
    @Nullable Optional<VarExpiration<K, V>> variable;

    BoundedPolicy(BoundedLocalCache<K, V> cache,
        Function<@Nullable V, @Nullable V> transformer, boolean isWeighted) {
      this.transformer = transformer;
      this.isWeighted = isWeighted;
      this.cache = cache;
    }

    @Override public boolean isRecordingStats() {
      return cache.isRecordingStats();
    }
    @Override public @Nullable V getIfPresentQuietly(K key) {
      return transformer.apply(cache.getIfPresentQuietly(key));
    }
    @SuppressWarnings("NullAway")
    @Override public @Nullable CacheEntry<K, V> getEntryIfPresentQuietly(K key) {
      Node<K, V> node = cache.data.get(cache.nodeFactory.newLookupKey(key));
      return (node == null) ? null : cache.nodeToCacheEntry(node, transformer);
    }
    /** Returns an immutable snapshot of the in-flight refresh operations. */
    @SuppressWarnings("Java9CollectionFactory")
    @Override public Map<K, CompletableFuture<V>> refreshes() {
      var refreshes = cache.refreshes;
      if ((refreshes == null) || refreshes.isEmpty()) {
        @SuppressWarnings("ImmutableMapOf")
        Map<K, CompletableFuture<V>> emptyMap = Collections.unmodifiableMap(Collections.emptyMap());
        return emptyMap;
      } else if (cache.collectKeys()) {
        // keys are stored as references; unwrap them and use identity semantics for the view,
        // skipping entries whose referent has been reclaimed
        var inFlight = new IdentityHashMap<K, CompletableFuture<V>>(refreshes.size());
        for (var entry : refreshes.entrySet()) {
          @SuppressWarnings("unchecked")
          var key = ((InternalReference<K>) entry.getKey()).get();
          @SuppressWarnings("unchecked")
          var future = (CompletableFuture<V>) entry.getValue();
          if (key != null) {
            inFlight.put(key, future);
          }
        }
        return Collections.unmodifiableMap(inFlight);
      }
      @SuppressWarnings("unchecked")
      var castedRefreshes = (Map<K, CompletableFuture<V>>) (Object) refreshes;
      return Collections.unmodifiableMap(new HashMap<>(castedRefreshes));
    }
    @Override public Optional<Eviction<K, V>> eviction() {
      return cache.evicts()
          ? (eviction == null) ? (eviction = Optional.of(new BoundedEviction())) : eviction
          : Optional.empty();
    }
    @Override public Optional<FixedExpiration<K, V>> expireAfterAccess() {
      if (!cache.expiresAfterAccess()) {
        return Optional.empty();
      }
      return (afterAccess == null)
          ? (afterAccess = Optional.of(new BoundedExpireAfterAccess()))
          : afterAccess;
    }
    @Override public Optional<FixedExpiration<K, V>> expireAfterWrite() {
      if (!cache.expiresAfterWrite()) {
        return Optional.empty();
      }
      return (afterWrite == null)
          ? (afterWrite = Optional.of(new BoundedExpireAfterWrite()))
          : afterWrite;
    }
    @Override public Optional<VarExpiration<K, V>> expireVariably() {
      if (!cache.expiresVariable()) {
        return Optional.empty();
      }
      return (variable == null)
          ? (variable = Optional.of(new BoundedVarExpiration()))
          : variable;
    }
    @Override public Optional<FixedRefresh<K, V>> refreshAfterWrite() {
      if (!cache.refreshAfterWrite()) {
        return Optional.empty();
      }
      return (refreshes == null)
          ? (refreshes = Optional.of(new BoundedRefreshAfterWrite()))
          : refreshes;
    }

    /** The eviction policy view; reads of the bound run pending maintenance for freshness. */
    final class BoundedEviction implements Eviction<K, V> {
      @Override public boolean isWeighted() {
        return isWeighted;
      }
      @Override public OptionalInt weightOf(K key) {
        requireNonNull(key);
        if (!isWeighted) {
          return OptionalInt.empty();
        }
        Node<K, V> node = cache.data.get(cache.nodeFactory.newLookupKey(key));
        if ((node == null) || cache.hasExpired(node, cache.expirationTicker().read())) {
          return OptionalInt.empty();
        }
        // the weight is read under the node's lock for a consistent value
        synchronized (node) {
          return OptionalInt.of(node.getWeight());
        }
      }
      @Override public OptionalLong weightedSize() {
        if (cache.evicts() && isWeighted()) {
          cache.evictionLock.lock();
          try {
            // drain pending work so the reported total reflects recent activity
            if (cache.drainStatusOpaque() == REQUIRED) {
              cache.maintenance(/* ignored */ null);
            }
            return OptionalLong.of(Math.max(0, cache.weightedSize()));
          } finally {
            cache.evictionLock.unlock();
            cache.rescheduleCleanUpIfIncomplete();
          }
        }
        return OptionalLong.empty();
      }
      @Override public long getMaximum() {
        cache.evictionLock.lock();
        try {
          // drain pending work so the reported bound reflects recent adjustments
          if (cache.drainStatusOpaque() == REQUIRED) {
            cache.maintenance(/* ignored */ null);
          }
          return cache.maximum();
        } finally {
          cache.evictionLock.unlock();
          cache.rescheduleCleanUpIfIncomplete();
        }
      }
      @Override public void setMaximum(long maximum) {
        cache.evictionLock.lock();
        try {
          cache.setMaximumSize(maximum);
          // immediately evict down to the new bound
          cache.maintenance(/* ignored */ null);
        } finally {
          cache.evictionLock.unlock();
          cache.rescheduleCleanUpIfIncomplete();
        }
      }
      @Override public Map<K, V> coldest(int limit) {
        int expectedSize = Math.min(limit, cache.size());
        var limiter = new SizeLimiter<K, V>(expectedSize, limit);
        return cache.evictionOrder(/* hottest= */ false, transformer, limiter);
      }
      @Override public Map<K, V> coldestWeighted(long weightLimit) {
        // for an unweighted cache each entry counts as one unit of weight
        var limiter = isWeighted()
            ? new WeightLimiter<K, V>(weightLimit)
            : new SizeLimiter<K, V>((int) Math.min(weightLimit, cache.size()), weightLimit);
        return cache.evictionOrder(/* hottest= */ false, transformer, limiter);
      }
      @Override
      public <T> T coldest(Function<Stream<CacheEntry<K, V>>, T> mappingFunction) {
        requireNonNull(mappingFunction);
        return cache.evictionOrder(/* hottest= */ false, transformer, mappingFunction);
      }
      @Override public Map<K, V> hottest(int limit) {
        int expectedSize = Math.min(limit, cache.size());
        var limiter = new SizeLimiter<K, V>(expectedSize, limit);
        return cache.evictionOrder(/* hottest= */ true, transformer, limiter);
      }
      @Override public Map<K, V> hottestWeighted(long weightLimit) {
        // for an unweighted cache each entry counts as one unit of weight
        var limiter = isWeighted()
            ? new WeightLimiter<K, V>(weightLimit)
            : new SizeLimiter<K, V>((int) Math.min(weightLimit, cache.size()), weightLimit);
        return cache.evictionOrder(/* hottest= */ true, transformer, limiter);
      }
      @Override
      public <T> T hottest(Function<Stream<CacheEntry<K, V>>, T> mappingFunction) {
        requireNonNull(mappingFunction);
        return cache.evictionOrder(/* hottest= */ true, transformer, mappingFunction);
      }
    }

    /** The fixed expire-after-access policy view. */
    @SuppressWarnings("PreferJavaTimeOverload")
    final class BoundedExpireAfterAccess implements FixedExpiration<K, V> {
      @Override public OptionalLong ageOf(K key, TimeUnit unit) {
        requireNonNull(key);
        requireNonNull(unit);
        Object lookupKey = cache.nodeFactory.newLookupKey(key);
        Node<K, V> node = cache.data.get(lookupKey);
        if (node == null) {
          return OptionalLong.empty();
        }
        long now = cache.expirationTicker().read();
        // the age is measured from the last access time
        return cache.hasExpired(node, now)
            ? OptionalLong.empty()
            : OptionalLong.of(unit.convert(now - node.getAccessTime(), TimeUnit.NANOSECONDS));
      }
      @Override public long getExpiresAfter(TimeUnit unit) {
        return unit.convert(cache.expiresAfterAccessNanos(), TimeUnit.NANOSECONDS);
      }
      @Override public void setExpiresAfter(long duration, TimeUnit unit) {
        requireArgument(duration >= 0);
        cache.setExpiresAfterAccessNanos(unit.toNanos(duration));
        // trigger maintenance so entries expired by a shorter duration are removed
        cache.scheduleAfterWrite();
      }
      @Override public Map<K, V> oldest(int limit) {
        return oldest(new SizeLimiter<>(Math.min(limit, cache.size()), limit));
      }
      @Override public <T> T oldest(Function<Stream<CacheEntry<K, V>>, T> mappingFunction) {
        return cache.expireAfterAccessOrder(/* oldest= */ true, transformer, mappingFunction);
      }
      @Override public Map<K, V> youngest(int limit) {
        return youngest(new SizeLimiter<>(Math.min(limit, cache.size()), limit));
      }
      @Override public <T> T youngest(Function<Stream<CacheEntry<K, V>>, T> mappingFunction) {
        return cache.expireAfterAccessOrder(/* oldest= */ false, transformer, mappingFunction);
      }
    }

    /** The fixed expire-after-write policy view. */
    @SuppressWarnings("PreferJavaTimeOverload")
    final class BoundedExpireAfterWrite implements FixedExpiration<K, V> {
      @Override public OptionalLong ageOf(K key, TimeUnit unit) {
        requireNonNull(key);
        requireNonNull(unit);
        Object lookupKey = cache.nodeFactory.newLookupKey(key);
        Node<K, V> node = cache.data.get(lookupKey);
        if (node == null) {
          return OptionalLong.empty();
        }
        long now = cache.expirationTicker().read();
        // the age is measured from the last write time
        return cache.hasExpired(node, now)
            ? OptionalLong.empty()
            : OptionalLong.of(unit.convert(now - node.getWriteTime(), TimeUnit.NANOSECONDS));
      }
      @Override public long getExpiresAfter(TimeUnit unit) {
        return unit.convert(cache.expiresAfterWriteNanos(), TimeUnit.NANOSECONDS);
      }
      @Override public void setExpiresAfter(long duration, TimeUnit unit) {
        requireArgument(duration >= 0);
        cache.setExpiresAfterWriteNanos(unit.toNanos(duration));
        // trigger maintenance so entries expired by a shorter duration are removed
        cache.scheduleAfterWrite();
      }
      @Override public Map<K, V> oldest(int limit) {
        return oldest(new SizeLimiter<>(Math.min(limit, cache.size()), limit));
      }
      @SuppressWarnings("GuardedByChecker")
      @Override public <T> T oldest(Function<Stream<CacheEntry<K, V>>, T> mappingFunction) {
        return cache.snapshot(cache.writeOrderDeque(), transformer, mappingFunction);
      }
      @Override public Map<K, V> youngest(int limit) {
        return youngest(new SizeLimiter<>(Math.min(limit, cache.size()), limit));
      }
      @SuppressWarnings("GuardedByChecker")
      @Override public <T> T youngest(Function<Stream<CacheEntry<K, V>>, T> mappingFunction) {
        return cache.snapshot(cache.writeOrderDeque()::descendingIterator,
            transformer, mappingFunction);
      }
    }

    /** The variable expiration policy view. */
    @SuppressWarnings("PreferJavaTimeOverload")
    final class BoundedVarExpiration implements VarExpiration<K, V> {
      @Override public OptionalLong getExpiresAfter(K key, TimeUnit unit) {
        requireNonNull(key);
        requireNonNull(unit);
        Object lookupKey = cache.nodeFactory.newLookupKey(key);
        Node<K, V> node = cache.data.get(lookupKey);
        if (node == null) {
          return OptionalLong.empty();
        }
        long now = cache.expirationTicker().read();
        // the remaining duration until the entry's variable expiration time
        return cache.hasExpired(node, now)
            ? OptionalLong.empty()
            : OptionalLong.of(unit.convert(node.getVariableTime() - now, TimeUnit.NANOSECONDS));
      }
      @Override public void setExpiresAfter(K key, long duration, TimeUnit unit) {
        requireNonNull(key);
        requireNonNull(unit);
        requireArgument(duration >= 0);
        Object lookupKey = cache.nodeFactory.newLookupKey(key);
        Node<K, V> node = cache.data.get(lookupKey);
        if (node != null) {
          long now;
          long durationNanos = TimeUnit.NANOSECONDS.convert(duration, unit);
          // recheck expiration under the node's lock before extending its lifetime;
          // the new duration is clamped to MAXIMUM_EXPIRY
          synchronized (node) {
            now = cache.expirationTicker().read();
            if (cache.hasExpired(node, now)) {
              return;
            }
            node.setVariableTime(now + Math.min(durationNanos, MAXIMUM_EXPIRY));
          }
          cache.afterRead(node, now, /* recordHit= */ false);
        }
      }
      @Override public @Nullable V put(K key, V value, long duration, TimeUnit unit) {
        requireNonNull(unit);
        requireNonNull(value);
        requireArgument(duration >= 0);
        return cache.isAsync
            ? putAsync(key, value, duration, unit)
            : putSync(key, value, duration, unit, /* onlyIfAbsent= */ false);
      }
      @Override public @Nullable V putIfAbsent(K key, V value, long duration, TimeUnit unit) {
        requireNonNull(unit);
        requireNonNull(value);
        requireArgument(duration >= 0);
        return cache.isAsync
            ? putIfAbsentAsync(key, value, duration, unit)
            : putSync(key, value, duration, unit, /* onlyIfAbsent= */ true);
      }
      @Nullable V putSync(K key, V value, long duration, TimeUnit unit, boolean onlyIfAbsent) {
        var expiry = new FixedExpireAfterWrite<K, V>(duration, unit);
        return cache.put(key, value, expiry, onlyIfAbsent);
      }
      /**
       * The async variant of putIfAbsent: waits for any in-flight future to settle, then either
       * returns the present value or atomically installs the new one, retrying as needed.
       */
      @SuppressWarnings("unchecked")
      @Nullable V putIfAbsentAsync(K key, V value, long duration, TimeUnit unit) {
        // Keep in sync with LocalAsyncCache.AsMapView#putIfAbsent(key, value)
        var expiry = (Expiry<K, V>) new AsyncExpiry<>(new FixedExpireAfterWrite<>(duration, unit));
        var asyncValue = (V) CompletableFuture.completedFuture(value);

        for (;;) {
          var priorFuture = (CompletableFuture<V>) cache.getIfPresent(key, /* recordStats= */ false);
          if (priorFuture != null) {
            if (!priorFuture.isDone()) {
              // block until the pending future settles, then re-evaluate
              Async.getWhenSuccessful(priorFuture);
              continue;
            }

            V prior = Async.getWhenSuccessful(priorFuture);
            if (prior != null) {
              return prior;
            }
          }

          boolean[] added = { false };
          var computed = (CompletableFuture<V>) cache.compute(key, (k, oldValue) -> {
            var oldValueFuture = (CompletableFuture<V>) oldValue;
            added[0] = (oldValueFuture == null)
                || (oldValueFuture.isDone() && (Async.getIfReady(oldValueFuture) == null));
            return added[0] ? asyncValue : oldValue;
          }, expiry, /* recordLoad= */ false, /* recordLoadFailure= */ false);

          if (added[0]) {
            return null;
          } else {
            // raced with another writer; return its value or retry if it settled to null
            V prior = Async.getWhenSuccessful(computed);
            if (prior != null) {
              return prior;
            }
          }
        }
      }
      @SuppressWarnings("unchecked")
      @Nullable V putAsync(K key, V value, long duration, TimeUnit unit) {
        var expiry = (Expiry<K, V>) new AsyncExpiry<>(new FixedExpireAfterWrite<>(duration, unit));
        var asyncValue = (V) CompletableFuture.completedFuture(value);

        var oldValueFuture = (CompletableFuture<V>) cache.put(
            key, asyncValue, expiry, /* onlyIfAbsent= */ false);
        return Async.getWhenSuccessful(oldValueFuture);
      }
      @SuppressWarnings("NullAway")
      @Override public V compute(K key,
          BiFunction<? super K, ? super V, ? extends V> remappingFunction,
          Duration duration) {
        requireNonNull(key);
        requireNonNull(duration);
        requireNonNull(remappingFunction);
        requireArgument(!duration.isNegative(), "duration cannot be negative: %s", duration);
        var expiry = new FixedExpireAfterWrite<K, V>(
            toNanosSaturated(duration), TimeUnit.NANOSECONDS);

        return cache.isAsync
            ? computeAsync(key, remappingFunction, expiry)
            : cache.compute(key, remappingFunction, expiry,
                /* recordLoad= */ true, /* recordLoadFailure= */ true);
      }
      /**
       * The async variant of compute: waits for any in-flight future to settle, then remaps the
       * unwrapped value, retrying if the mapping raced with a concurrent in-flight computation.
       */
      @Nullable V computeAsync(K key,
          BiFunction<? super K, ? super V, ? extends V> remappingFunction,
          Expiry<? super K, ? super V> expiry) {
        // Keep in sync with LocalAsyncCache.AsMapView#compute(key, remappingFunction)
        @SuppressWarnings("unchecked")
        var delegate = (LocalCache<K, CompletableFuture<V>>) cache;

        @SuppressWarnings({"rawtypes", "unchecked"})
        var newValue = (V[]) new Object[1];
        for (;;) {
          Async.getWhenSuccessful(delegate.getIfPresentQuietly(key));

          CompletableFuture<V> valueFuture = delegate.compute(key, (k, oldValueFuture) -> {
            if ((oldValueFuture != null) && !oldValueFuture.isDone()) {
              // an in-flight computation is present; leave it and retry after it settles
              return oldValueFuture;
            }

            V oldValue = Async.getIfReady(oldValueFuture);
            BiFunction<? super K, ? super V, ? extends V> function = delegate.statsAware(
                remappingFunction, /* recordLoad= */ true, /* recordLoadFailure= */ true);
            newValue[0] = function.apply(key, oldValue);
            return (newValue[0] == null) ? null : CompletableFuture.completedFuture(newValue[0]);
          }, new AsyncExpiry<>(expiry), /* recordLoad= */ false, /* recordLoadFailure= */ false);

          if (newValue[0] != null) {
            return newValue[0];
          } else if (valueFuture == null) {
            return null;
          }
        }
      }
      @Override public Map<K, V> oldest(int limit) {
        return oldest(new SizeLimiter<>(Math.min(limit, cache.size()), limit));
      }
      @Override public <T> T oldest(Function<Stream<CacheEntry<K, V>>, T> mappingFunction) {
        return cache.snapshot(cache.timerWheel(), transformer, mappingFunction);
      }
      @Override public Map<K, V> youngest(int limit) {
        return youngest(new SizeLimiter<>(Math.min(limit, cache.size()), limit));
      }
      @Override public <T> T youngest(Function<Stream<CacheEntry<K, V>>, T> mappingFunction) {
        return cache.snapshot(cache.timerWheel()::descendingIterator, transformer, mappingFunction);
      }
    }

    /** An expiry that sets a fixed duration on create and update, and preserves it on read. */
    static final class FixedExpireAfterWrite<K, V> implements Expiry<K, V> {
      final long duration;
      final TimeUnit unit;

      FixedExpireAfterWrite(long duration, TimeUnit unit) {
        this.duration = duration;
        this.unit = unit;
      }
      @Override public long expireAfterCreate(K key, V value, long currentTime) {
        return unit.toNanos(duration);
      }
      @Override public long expireAfterUpdate(
          K key, V value, long currentTime, long currentDuration) {
        return unit.toNanos(duration);
      }
      @CanIgnoreReturnValue
      @Override public long expireAfterRead(
          K key, V value, long currentTime, long currentDuration) {
        // reads do not change the remaining duration
        return currentDuration;
      }
    }

    /** The fixed refresh-after-write policy view. */
    @SuppressWarnings("PreferJavaTimeOverload")
    final class BoundedRefreshAfterWrite implements FixedRefresh<K, V> {
      @Override public OptionalLong ageOf(K key, TimeUnit unit) {
        requireNonNull(key);
        requireNonNull(unit);
        Object lookupKey = cache.nodeFactory.newLookupKey(key);
        Node<K, V> node = cache.data.get(lookupKey);
        if (node == null) {
          return OptionalLong.empty();
        }
        long now = cache.expirationTicker().read();
        // the age is measured from the last write time
        return cache.hasExpired(node, now)
            ? OptionalLong.empty()
            : OptionalLong.of(unit.convert(now - node.getWriteTime(), TimeUnit.NANOSECONDS));
      }
      @Override public long getRefreshesAfter(TimeUnit unit) {
        return unit.convert(cache.refreshAfterWriteNanos(), TimeUnit.NANOSECONDS);
      }
      @Override public void setRefreshesAfter(long duration, TimeUnit unit) {
        requireArgument(duration >= 0);
        cache.setRefreshAfterWriteNanos(unit.toNanos(duration));
        cache.scheduleAfterWrite();
      }
    }
  }
4461

4462
  /* --------------- Loading Cache --------------- */
4463

4464
  static final class BoundedLocalLoadingCache<K, V>
4465
      extends BoundedLocalManualCache<K, V> implements LocalLoadingCache<K, V> {
4466
    private static final long serialVersionUID = 1;
4467

4468
    final Function<K, V> mappingFunction;
4469
    final @Nullable Function<Set<? extends K>, Map<K, V>> bulkMappingFunction;
4470

4471
    BoundedLocalLoadingCache(Caffeine<K, V> builder, CacheLoader<? super K, V> loader) {
4472
      super(builder, loader);
1✔
4473
      requireNonNull(loader);
1✔
4474
      mappingFunction = newMappingFunction(loader);
1✔
4475
      bulkMappingFunction = newBulkMappingFunction(loader);
1✔
4476
    }
1✔
4477

4478
    @Override
4479
    @SuppressWarnings("NullAway")
4480
    public AsyncCacheLoader<? super K, V> cacheLoader() {
4481
      return cache.cacheLoader;
1✔
4482
    }
4483

4484
    @Override
4485
    public Function<K, V> mappingFunction() {
4486
      return mappingFunction;
1✔
4487
    }
4488

4489
    @Override
4490
    public @Nullable Function<Set<? extends K>, Map<K, V>> bulkMappingFunction() {
4491
      return bulkMappingFunction;
1✔
4492
    }
4493

4494
    private void readObject(ObjectInputStream stream) throws InvalidObjectException {
4495
      throw new InvalidObjectException("Proxy required");
1✔
4496
    }
4497

4498
    private Object writeReplace() {
4499
      return makeSerializationProxy(cache);
1✔
4500
    }
4501
  }
4502

4503
  /* --------------- Async Cache --------------- */
4504

4505
  static final class BoundedLocalAsyncCache<K, V> implements LocalAsyncCache<K, V>, Serializable {
4506
    private static final long serialVersionUID = 1;
4507

4508
    final BoundedLocalCache<K, CompletableFuture<V>> cache;
4509
    final boolean isWeighted;
4510

4511
    @Nullable ConcurrentMap<K, CompletableFuture<V>> mapView;
4512
    @Nullable CacheView<K, V> cacheView;
4513
    @Nullable Policy<K, V> policy;
4514

4515
    @SuppressWarnings("unchecked")
4516
    BoundedLocalAsyncCache(Caffeine<K, V> builder) {
1✔
4517
      cache = (BoundedLocalCache<K, CompletableFuture<V>>) LocalCacheFactory
1✔
4518
          .newBoundedLocalCache(builder, /* cacheLoader= */ null, /* isAsync= */ true);
1✔
4519
      isWeighted = builder.isWeighted();
1✔
4520
    }
1✔
4521

4522
    @Override
4523
    public BoundedLocalCache<K, CompletableFuture<V>> cache() {
4524
      return cache;
1✔
4525
    }
4526

4527
    @Override
4528
    public ConcurrentMap<K, CompletableFuture<V>> asMap() {
4529
      return (mapView == null) ? (mapView = new AsyncAsMapView<>(this)) : mapView;
1✔
4530
    }
4531

4532
    @Override
4533
    public Cache<K, V> synchronous() {
4534
      return (cacheView == null) ? (cacheView = new CacheView<>(this)) : cacheView;
1✔
4535
    }
4536

4537
    @Override
4538
    public Policy<K, V> policy() {
4539
      if (policy == null) {
1✔
4540
        @SuppressWarnings("unchecked")
4541
        var castCache = (BoundedLocalCache<K, V>) cache;
1✔
4542
        Function<CompletableFuture<V>, @Nullable V> transformer = Async::getIfReady;
1✔
4543
        @SuppressWarnings({"NullAway", "unchecked", "Varifier"})
4544
        Function<@Nullable V, @Nullable V> castTransformer = (Function<V, V>) transformer;
1✔
4545
        policy = new BoundedPolicy<>(castCache, castTransformer, isWeighted);
1✔
4546
      }
4547
      return policy;
1✔
4548
    }
4549

4550
    private void readObject(ObjectInputStream stream) throws InvalidObjectException {
4551
      throw new InvalidObjectException("Proxy required");
1✔
4552
    }
4553

4554
    private Object writeReplace() {
4555
      return makeSerializationProxy(cache);
1✔
4556
    }
4557
  }
4558

4559
  /* --------------- Async Loading Cache --------------- */
4560

4561
  static final class BoundedLocalAsyncLoadingCache<K, V>
4562
      extends LocalAsyncLoadingCache<K, V> implements Serializable {
4563
    private static final long serialVersionUID = 1;
4564

4565
    final BoundedLocalCache<K, CompletableFuture<V>> cache;
4566
    final boolean isWeighted;
4567

4568
    @Nullable ConcurrentMap<K, CompletableFuture<V>> mapView;
4569
    @Nullable Policy<K, V> policy;
4570

4571
    @SuppressWarnings("unchecked")
4572
    BoundedLocalAsyncLoadingCache(Caffeine<K, V> builder, AsyncCacheLoader<? super K, V> loader) {
4573
      super(loader);
1✔
4574
      isWeighted = builder.isWeighted();
1✔
4575
      cache = (BoundedLocalCache<K, CompletableFuture<V>>) LocalCacheFactory
1✔
4576
          .newBoundedLocalCache(builder, loader, /* isAsync= */ true);
1✔
4577
    }
1✔
4578

4579
    @Override
4580
    public BoundedLocalCache<K, CompletableFuture<V>> cache() {
4581
      return cache;
1✔
4582
    }
4583

4584
    @Override
4585
    public ConcurrentMap<K, CompletableFuture<V>> asMap() {
4586
      return (mapView == null) ? (mapView = new AsyncAsMapView<>(this)) : mapView;
1✔
4587
    }
4588

4589
    @Override
4590
    public Policy<K, V> policy() {
4591
      if (policy == null) {
1✔
4592
        @SuppressWarnings("unchecked")
4593
        var castCache = (BoundedLocalCache<K, V>) cache;
1✔
4594
        Function<CompletableFuture<V>, @Nullable V> transformer = Async::getIfReady;
1✔
4595
        @SuppressWarnings({"NullAway", "unchecked", "Varifier"})
4596
        Function<@Nullable V, @Nullable V> castTransformer = (Function<V, V>) transformer;
1✔
4597
        policy = new BoundedPolicy<>(castCache, castTransformer, isWeighted);
1✔
4598
      }
4599
      return policy;
1✔
4600
    }
4601

4602
    private void readObject(ObjectInputStream stream) throws InvalidObjectException {
4603
      throw new InvalidObjectException("Proxy required");
1✔
4604
    }
4605

4606
    private Object writeReplace() {
4607
      return makeSerializationProxy(cache);
1✔
4608
    }
4609
  }
4610
}
4611

4612
/** The namespace for field padding through inheritance. */
@SuppressWarnings({"MemberName", "MultiVariableDeclaration"})
final class BLCHeader {

  private BLCHeader() {}

  /**
   * Superclass containing only 120 bytes of padding fields, so that a subclass's
   * {@code drainStatus} field is unlikely to fall on the same cache line as unrelated,
   * frequently mutated memory (mitigating false sharing).
   */
  static class PadDrainStatus {
    byte p000, p001, p002, p003, p004, p005, p006, p007;
    byte p008, p009, p010, p011, p012, p013, p014, p015;
    byte p016, p017, p018, p019, p020, p021, p022, p023;
    byte p024, p025, p026, p027, p028, p029, p030, p031;
    byte p032, p033, p034, p035, p036, p037, p038, p039;
    byte p040, p041, p042, p043, p044, p045, p046, p047;
    byte p048, p049, p050, p051, p052, p053, p054, p055;
    byte p056, p057, p058, p059, p060, p061, p062, p063;
    byte p064, p065, p066, p067, p068, p069, p070, p071;
    byte p072, p073, p074, p075, p076, p077, p078, p079;
    byte p080, p081, p082, p083, p084, p085, p086, p087;
    byte p088, p089, p090, p091, p092, p093, p094, p095;
    byte p096, p097, p098, p099, p100, p101, p102, p103;
    byte p104, p105, p106, p107, p108, p109, p110, p111;
    byte p112, p113, p114, p115, p116, p117, p118, p119;
  }

  /** Enforces a memory layout to avoid false sharing by padding the drain status. */
  abstract static class DrainStatusRef extends PadDrainStatus {
    // VarHandle used to read/write drainStatus with explicit access modes
    static final VarHandle DRAIN_STATUS;

    /** A drain is not taking place. */
    static final int IDLE = 0;
    /** A drain is required due to a pending write modification. */
    static final int REQUIRED = 1;
    /** A drain is in progress and will transition to idle. */
    static final int PROCESSING_TO_IDLE = 2;
    /** A drain is in progress and will transition to required. */
    static final int PROCESSING_TO_REQUIRED = 3;

    /** The draining status of the buffers. */
    volatile int drainStatus = IDLE;

    /**
     * Returns whether maintenance work is needed.
     *
     * @param delayable if draining the read buffer can be delayed
     */
    boolean shouldDrainBuffers(boolean delayable) {
      switch (drainStatusOpaque()) {
        case IDLE:
          // nothing pending; only drain if the caller cannot defer the work
          return !delayable;
        case REQUIRED:
          return true;
        case PROCESSING_TO_IDLE:
        case PROCESSING_TO_REQUIRED:
          // a drain is already underway, so this caller does not need to help
          return false;
        default:
          // note: the message rereads the volatile field, which may have changed
          // since the opaque read that selected this branch
          throw new IllegalStateException("Invalid drain status: " + drainStatus);
      }
    }

    /** Returns the drain status using opaque (coherence-only, unordered) access. */
    int drainStatusOpaque() {
      return (int) DRAIN_STATUS.getOpaque(this);
    }

    /** Returns the drain status using acquire semantics. */
    int drainStatusAcquire() {
      return (int) DRAIN_STATUS.getAcquire(this);
    }

    /** Sets the drain status using opaque (coherence-only, unordered) access. */
    void setDrainStatusOpaque(int drainStatus) {
      DRAIN_STATUS.setOpaque(this, drainStatus);
    }

    /** Sets the drain status using release semantics. */
    void setDrainStatusRelease(int drainStatus) {
      DRAIN_STATUS.setRelease(this, drainStatus);
    }

    /** Atomically transitions the drain status from {@code expect} to {@code update}. */
    boolean casDrainStatus(int expect, int update) {
      return DRAIN_STATUS.compareAndSet(this, expect, update);
    }

    static {
      // resolve the VarHandle once; a failure here is a programming error, so
      // surface it as a class-initialization failure
      try {
        DRAIN_STATUS = MethodHandles.lookup()
            .findVarHandle(DrainStatusRef.class, "drainStatus", int.class);
      } catch (ReflectiveOperationException e) {
        throw new ExceptionInInitializerError(e);
      }
    }
  }
}
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc