• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

ben-manes / caffeine / #5173

29 Dec 2025 05:27AM UTC coverage: 0.0% (-100.0%) from 100.0%
#5173

push

github

ben-manes
speed up development ci build

0 of 3838 branches covered (0.0%)

0 of 7869 relevant lines covered (0.0%)

0.0 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

0.0
/caffeine/src/main/java/com/github/benmanes/caffeine/cache/BoundedLocalCache.java
1
/*
2
 * Copyright 2014 Ben Manes. All Rights Reserved.
3
 *
4
 * Licensed under the Apache License, Version 2.0 (the "License");
5
 * you may not use this file except in compliance with the License.
6
 * You may obtain a copy of the License at
7
 *
8
 *     http://www.apache.org/licenses/LICENSE-2.0
9
 *
10
 * Unless required by applicable law or agreed to in writing, software
11
 * distributed under the License is distributed on an "AS IS" BASIS,
12
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
 * See the License for the specific language governing permissions and
14
 * limitations under the License.
15
 */
16
package com.github.benmanes.caffeine.cache;
17

18
import static com.github.benmanes.caffeine.cache.Async.ASYNC_EXPIRY;
19
import static com.github.benmanes.caffeine.cache.Caffeine.calculateHashMapCapacity;
20
import static com.github.benmanes.caffeine.cache.Caffeine.ceilingPowerOfTwo;
21
import static com.github.benmanes.caffeine.cache.Caffeine.requireArgument;
22
import static com.github.benmanes.caffeine.cache.Caffeine.toNanosSaturated;
23
import static com.github.benmanes.caffeine.cache.LocalLoadingCache.newBulkMappingFunction;
24
import static com.github.benmanes.caffeine.cache.LocalLoadingCache.newMappingFunction;
25
import static com.github.benmanes.caffeine.cache.Node.PROBATION;
26
import static com.github.benmanes.caffeine.cache.Node.PROTECTED;
27
import static com.github.benmanes.caffeine.cache.Node.WINDOW;
28
import static java.util.Locale.US;
29
import static java.util.Objects.requireNonNull;
30
import static java.util.Spliterator.DISTINCT;
31
import static java.util.Spliterator.IMMUTABLE;
32
import static java.util.Spliterator.NONNULL;
33
import static java.util.Spliterator.ORDERED;
34

35
import java.io.InvalidObjectException;
36
import java.io.ObjectInputStream;
37
import java.io.Serializable;
38
import java.lang.System.Logger;
39
import java.lang.System.Logger.Level;
40
import java.lang.invoke.MethodHandles;
41
import java.lang.invoke.VarHandle;
42
import java.lang.ref.Reference;
43
import java.lang.ref.ReferenceQueue;
44
import java.lang.ref.WeakReference;
45
import java.time.Duration;
46
import java.util.AbstractCollection;
47
import java.util.AbstractSet;
48
import java.util.ArrayDeque;
49
import java.util.Collection;
50
import java.util.Collections;
51
import java.util.Comparator;
52
import java.util.Deque;
53
import java.util.HashMap;
54
import java.util.IdentityHashMap;
55
import java.util.Iterator;
56
import java.util.LinkedHashMap;
57
import java.util.Map;
58
import java.util.NoSuchElementException;
59
import java.util.Objects;
60
import java.util.Optional;
61
import java.util.OptionalInt;
62
import java.util.OptionalLong;
63
import java.util.Set;
64
import java.util.Spliterator;
65
import java.util.Spliterators;
66
import java.util.concurrent.CancellationException;
67
import java.util.concurrent.CompletableFuture;
68
import java.util.concurrent.ConcurrentHashMap;
69
import java.util.concurrent.ConcurrentMap;
70
import java.util.concurrent.Executor;
71
import java.util.concurrent.ForkJoinPool;
72
import java.util.concurrent.ForkJoinTask;
73
import java.util.concurrent.ThreadLocalRandom;
74
import java.util.concurrent.TimeUnit;
75
import java.util.concurrent.TimeoutException;
76
import java.util.concurrent.locks.ReentrantLock;
77
import java.util.function.BiConsumer;
78
import java.util.function.BiFunction;
79
import java.util.function.Consumer;
80
import java.util.function.Function;
81
import java.util.function.Predicate;
82
import java.util.stream.Stream;
83
import java.util.stream.StreamSupport;
84

85
import org.jspecify.annotations.NonNull;
86
import org.jspecify.annotations.Nullable;
87

88
import com.github.benmanes.caffeine.cache.Async.AsyncExpiry;
89
import com.github.benmanes.caffeine.cache.LinkedDeque.PeekingIterator;
90
import com.github.benmanes.caffeine.cache.Policy.CacheEntry;
91
import com.github.benmanes.caffeine.cache.References.InternalReference;
92
import com.github.benmanes.caffeine.cache.stats.StatsCounter;
93
import com.google.errorprone.annotations.CanIgnoreReturnValue;
94
import com.google.errorprone.annotations.Var;
95
import com.google.errorprone.annotations.concurrent.GuardedBy;
96

97
/**
98
 * An in-memory cache implementation that supports full concurrency of retrievals, a high expected
99
 * concurrency for updates, and multiple ways to bound the cache.
100
 * <p>
101
 * This class is abstract and code generated subclasses provide the complete implementation for a
102
 * particular configuration. This is to ensure that only the fields and execution paths necessary
103
 * for a given configuration are used.
104
 *
105
 * @author ben.manes@gmail.com (Ben Manes)
106
 * @param <K> the type of keys maintained by this cache
107
 * @param <V> the type of mapped values
108
 */
109
@SuppressWarnings("serial")
110
abstract class BoundedLocalCache<K, V> extends BLCHeader.DrainStatusRef
111
    implements LocalCache<K, V> {
112

113
  /*
114
   * This class performs a best-effort bounding of a ConcurrentHashMap using a page-replacement
115
   * algorithm to determine which entries to evict when the capacity is exceeded.
116
   *
117
   * Concurrency:
118
   * ------------
119
   * The page replacement algorithms are kept eventually consistent with the map. An update to the
120
   * map and recording of reads may not be immediately reflected in the policy's data structures.
121
   * These structures are guarded by a lock, and operations are applied in batches to avoid lock
122
   * contention. The penalty of applying the batches is spread across threads, so that the amortized
123
   * cost is slightly higher than performing just the ConcurrentHashMap operation [1].
124
   *
125
   * A memento of the reads and writes that were performed on the map is recorded in buffers. These
126
   * buffers are drained at the first opportunity after a write or when a read buffer is full. The
127
   * reads are offered to a buffer that will reject additions if contended on or if it is full. Due
128
   * to the concurrent nature of the read and write operations, a strict policy ordering is not
129
   * possible, but it may be observably strict when single-threaded. The buffers are drained
130
   * asynchronously to minimize the request latency and uses a state machine to determine when to
131
   * schedule this work on an executor.
132
   *
133
   * Due to a lack of a strict ordering guarantee, a task can be executed out-of-order, such as a
134
   * removal followed by its addition. The state of the entry is encoded using the key field to
135
   * avoid additional memory usage. An entry is "alive" if it is in both the hash table and the page
136
   * replacement policy. It is "retired" if it is not in the hash table and is pending removal from
137
   * the page replacement policy. Finally, an entry transitions to the "dead" state when it is
138
   * neither in the hash table nor the page replacement policy. Both the retired and dead states are
139
   * represented by a sentinel key that should not be used for map operations.
140
   *
141
   * Eviction:
142
   * ---------
143
   * Maximum size is implemented using the Window TinyLfu policy [2] due to its high hit rate, O(1)
144
   * time complexity, and small footprint. A new entry starts in the admission window and remains
145
   * there as long as it has high temporal locality (recency). Eventually an entry will slip from
146
   * the window into the main space. If the main space is already full, then a historic frequency
147
   * filter determines whether to evict the newly admitted entry or the victim entry chosen by the
148
   * eviction policy. This process ensures that the entries in the window were very recently used,
149
   * while entries in the main space are accessed very frequently and remain moderately recent. The
150
   * windowing allows the policy to have a high hit rate when entries exhibit a bursty access
151
   * pattern, while the filter ensures that popular items are retained. The admission window uses
152
   * LRU and the main space uses Segmented LRU.
153
   *
154
   * The optimal size of the window vs. main spaces is workload dependent [3]. A large admission
155
   * window is favored by recency-biased workloads, while a small one favors frequency-biased
156
   * workloads. When the window is too small, then recent arrivals are prematurely evicted, but when
157
   * it is too large, then they pollute the cache and force the eviction of more popular entries.
158
   * The optimal configuration is dynamically determined by using hill climbing to walk the hit rate
159
   * curve. This is achieved by sampling the hit rate and adjusting the window size in the direction
160
   * that is improving (making positive or negative steps). At each interval, the step size is
161
   * decreased until the hit rate climber converges at the optimal setting. The process is restarted
162
   * when the hit rate changes over a threshold, indicating that the workload altered, and a new
163
   * setting may be required.
164
   *
165
   * The historic usage is retained in a compact popularity sketch, which uses hashing to
166
   * probabilistically estimate an item's frequency. This exposes a flaw where an adversary could
167
   * use hash flooding [4] to artificially raise the frequency of the main space's victim and cause
168
   * all candidates to be rejected. In the worst case, by exploiting hash collisions, an attacker
169
   * could cause the cache to never hit and hold only worthless items, resulting in a
170
   * denial-of-service attack against the underlying resource. This is mitigated by introducing
171
   * jitter, allowing candidates that are at least moderately popular to have a small, random chance
172
   * of being admitted. This causes the victim to be evicted, but in a way that marginally impacts
173
   * the hit rate.
174
   *
175
   * Expiration:
176
   * -----------
177
   * Expiration is implemented in O(1) time complexity. The time-to-idle policy uses an access-order
178
   * queue, the time-to-live policy uses a write-order queue, and variable expiration uses a
179
   * hierarchical timer wheel [5]. The queuing policies allow for peeking at the oldest entry to
180
   * determine if it has expired. If it has not, then the younger entries must not have expired
181
   * either. If a maximum size is set, then expiration will share the queues, minimizing the
182
   * per-entry footprint. The timer wheel based policy uses hashing and cascading in a manner that
183
   * amortizes the penalty of sorting to achieve a similar algorithmic cost.
184
   *
185
   * The expiration updates are applied in a best effort fashion. The reordering of variable or
186
   * access-order expiration may be discarded by the read buffer if it is full or contended.
187
   * Similarly, the reordering of write expiration may be ignored for an entry if the last update
188
   * was within a short time window. This is done to avoid overwhelming the write buffer.
189
   *
190
   * [1] BP-Wrapper: A Framework Making Any Replacement Algorithms (Almost) Lock Contention Free
191
   * https://web.njit.edu/~dingxn/papers/BP-Wrapper.pdf
192
   * [2] TinyLFU: A Highly Efficient Cache Admission Policy
193
   * https://dl.acm.org/citation.cfm?id=3149371
194
   * [3] Adaptive Software Cache Management
195
   * https://dl.acm.org/citation.cfm?id=3274816
196
   * [4] Denial of Service via Algorithmic Complexity Attack
197
   * https://www.usenix.org/legacy/events/sec03/tech/full_papers/crosby/crosby.pdf
198
   * [5] Hashed and Hierarchical Timing Wheels
199
   * http://www.cs.columbia.edu/~nahum/w6998/papers/ton97-timing-wheels.pdf
200
   */
201

202
  static final Logger logger = System.getLogger(BoundedLocalCache.class.getName());
×
203

204
  /** The number of CPUs */
205
  static final int NCPU = Runtime.getRuntime().availableProcessors();
×
206
  /** The initial capacity of the write buffer. */
207
  static final int WRITE_BUFFER_MIN = 4;
208
  /** The maximum capacity of the write buffer. */
209
  static final int WRITE_BUFFER_MAX = 128 * ceilingPowerOfTwo(NCPU);
×
210
  /** The number of attempts to insert into the write buffer before yielding. */
211
  static final int WRITE_BUFFER_RETRIES = 100;
212
  /** The maximum weighted capacity of the map. */
213
  static final long MAXIMUM_CAPACITY = Long.MAX_VALUE - Integer.MAX_VALUE;
214
  /** The initial percent of the maximum weighted capacity dedicated to the main space. */
215
  static final double PERCENT_MAIN = 0.99d;
216
  /** The percent of the maximum weighted capacity dedicated to the main's protected space. */
217
  static final double PERCENT_MAIN_PROTECTED = 0.80d;
218
  /** The difference in hit rates that restarts the climber. */
219
  static final double HILL_CLIMBER_RESTART_THRESHOLD = 0.05d;
220
  /** The percent of the total size to adapt the window by. */
221
  static final double HILL_CLIMBER_STEP_PERCENT = 0.0625d;
222
  /** The rate to decrease the step size to adapt by. */
223
  static final double HILL_CLIMBER_STEP_DECAY_RATE = 0.98d;
224
  /** The minimum popularity for allowing randomized admission. */
225
  static final int ADMIT_HASHDOS_THRESHOLD = 6;
226
  /** The maximum number of entries that can be transferred between queues. */
227
  static final int QUEUE_TRANSFER_THRESHOLD = 1_000;
228
  /** The maximum time window between entry updates before the expiration must be reordered. */
229
  static final long EXPIRE_WRITE_TOLERANCE = TimeUnit.SECONDS.toNanos(1);
×
230
  /** The maximum duration before an entry expires. */
231
  static final long MAXIMUM_EXPIRY = (Long.MAX_VALUE >> 1); // 150 years
232
  /** The duration to wait on the eviction lock before warning of a possible misuse. */
233
  static final long WARN_AFTER_LOCK_WAIT_NANOS = TimeUnit.SECONDS.toNanos(30);
×
234
  /** The number of retries before computing to validate the entry's integrity; pow2 modulus. */
235
  static final int MAX_PUT_SPIN_WAIT_ATTEMPTS = 1024 - 1;
236
  /** The handle for the in-flight refresh operations. */
237
  static final VarHandle REFRESHES = findVarHandle(
×
238
      BoundedLocalCache.class, "refreshes", ConcurrentMap.class);
239

240
  final @Nullable RemovalListener<K, V> evictionListener;
241
  final @Nullable AsyncCacheLoader<K, V> cacheLoader;
242

243
  final MpscGrowableArrayQueue<Runnable> writeBuffer;
244
  final ConcurrentHashMap<Object, Node<K, V>> data;
245
  final PerformCleanupTask drainBuffersTask;
246
  final Consumer<Node<K, V>> accessPolicy;
247
  final Buffer<Node<K, V>> readBuffer;
248
  final NodeFactory<K, V> nodeFactory;
249
  final ReentrantLock evictionLock;
250
  final Weigher<K, V> weigher;
251
  final Executor executor;
252

253
  final boolean isWeighted;
254
  final boolean isAsync;
255

256
  @Nullable Set<K> keySet;
257
  @Nullable Collection<V> values;
258
  @Nullable Set<Entry<K, V>> entrySet;
259
  volatile @Nullable ConcurrentMap<Object, CompletableFuture<?>> refreshes;
260

261
  /**
   * Creates an instance based on the builder's configuration.
   *
   * @param builder the user-supplied configuration
   * @param cacheLoader the loader used to compute absent values, or {@code null} if none
   * @param isAsync whether the cache holds {@code CompletableFuture} values for an async facade
   */
  @SuppressWarnings("GuardedBy")
  protected BoundedLocalCache(Caffeine<K, V> builder,
      @Nullable AsyncCacheLoader<K, V> cacheLoader, boolean isAsync) {
    this.isAsync = isAsync;
    this.cacheLoader = cacheLoader;
    executor = builder.getExecutor();
    isWeighted = builder.isWeighted();
    evictionLock = new ReentrantLock();
    weigher = builder.getWeigher(isAsync);
    drainBuffersTask = new PerformCleanupTask(this);
    nodeFactory = NodeFactory.newFactory(builder, isAsync);
    evictionListener = builder.getEvictionListener(isAsync);
    data = new ConcurrentHashMap<>(builder.getInitialCapacity());
    // Reads are only recorded when they can influence the policy: size/weight eviction,
    // reference collection, or access-based expiration. Otherwise the buffer is disabled.
    readBuffer = evicts() || collectKeys() || collectValues() || expiresAfterAccess()
        ? new BoundedBuffer<>()
        : Buffer.disabled();
    accessPolicy = (evicts() || expiresAfterAccess()) ? this::onAccess : e -> {};
    writeBuffer = new MpscGrowableArrayQueue<>(WRITE_BUFFER_MIN, WRITE_BUFFER_MAX);

    if (evicts()) {
      // Seeds the window/main space sizes and the hill-climber state from the configured maximum
      setMaximumSize(builder.getMaximum());
    }
  }
285

286
  /**
   * Ensures that the node is alive during the map operation.
   *
   * @param key the key used to look up the node
   * @param node the node being validated
   * @throws IllegalStateException if the node is retired or dead, indicating that the key's
   *         equals/hashCode was mutated while the entry resided in the cache
   */
  void requireIsAlive(Object key, Node<?, ?> node) {
    if (!node.isAlive()) {
      throw new IllegalStateException(brokenEqualityMessage(key, node));
    }
  }

  /**
   * Logs if the node cannot be found in the map but is still alive. An alive node that is absent
   * from the map implies a broken equality contract, so an error is logged (rather than thrown)
   * with a synthetic exception to capture the stack trace.
   */
  void logIfAlive(Node<?, ?> node) {
    if (node.isAlive()) {
      String message = brokenEqualityMessage(node.getKeyReference(), node);
      logger.log(Level.ERROR, message, new IllegalStateException());
    }
  }

  /**
   * Returns the formatted broken equality error message, identifying the offending key and the
   * cache/node types to aid in diagnosing the misbehaving key class.
   */
  String brokenEqualityMessage(Object key, Node<?, ?> node) {
    return String.format(US, "An invalid state was detected, occurring when the key's equals or "
        + "hashCode was modified while residing in the cache. This violation of the Map "
        + "contract can lead to non-deterministic behavior (key: %s, key type: %s, "
        + "node type: %s, cache type: %s).", key, key.getClass().getName(),
        node.getClass().getSimpleName(), getClass().getSimpleName());
  }
309

310
  /* --------------- Shared --------------- */
311

312
  @Override
  public boolean isAsync() {
    return isAsync;
  }

  /**
   * Returns if the node's value is currently being computed asynchronously. Only meaningful for
   * async caches, where values are {@code CompletableFuture}s; the unchecked cast relies on that
   * invariant.
   */
  final boolean isComputingAsync(@Nullable V value) {
    return isAsync && !Async.isReady((CompletableFuture<?>) value);
  }
321

322
  // The deque accessors below are hooks that throw by default; per the class contract, the
  // code-generated subclasses for configurations that need them provide the actual structures.
  // All must be accessed under the eviction lock.

  /** Returns the LRU deque for the window (admission) space. */
  @GuardedBy("evictionLock")
  protected AccessOrderDeque<Node<K, V>> accessOrderWindowDeque() {
    throw new UnsupportedOperationException();
  }

  /** Returns the LRU deque for the main space's probation segment. */
  @GuardedBy("evictionLock")
  protected AccessOrderDeque<Node<K, V>> accessOrderProbationDeque() {
    throw new UnsupportedOperationException();
  }

  /** Returns the LRU deque for the main space's protected segment. */
  @GuardedBy("evictionLock")
  protected AccessOrderDeque<Node<K, V>> accessOrderProtectedDeque() {
    throw new UnsupportedOperationException();
  }

  /** Returns the deque ordered by write time, used by the time-to-live policy. */
  @GuardedBy("evictionLock")
  protected WriteOrderDeque<Node<K, V>> writeOrderDeque() {
    throw new UnsupportedOperationException();
  }
341

342
  /** Returns the executor used for asynchronous work (maintenance, listeners, refreshes). */
  @Override
  public final Executor executor() {
    return executor;
  }
346

347
  /**
   * Returns the map of in-flight refresh operations, lazily created on first use. Racing threads
   * may both allocate a map; the CAS on {@code REFRESHES} picks a single winner and the loser
   * discards its copy and adopts the published instance.
   */
  @Override
  public ConcurrentMap<Object, CompletableFuture<?>> refreshes() {
    @Var var pending = refreshes;
    if (pending == null) {
      pending = new ConcurrentHashMap<>();
      if (!REFRESHES.compareAndSet(this, null, pending)) {
        // Lost the race; the field is now non-null and set by the winning thread
        pending = requireNonNull(refreshes);
      }
    }
    return pending;
  }
358

359
  /** Invalidate the in-flight refresh. */
  @SuppressWarnings("RedundantCollectionOperation")
  void discardRefresh(Object keyReference) {
    var pending = refreshes;
    // The containsKey precheck before remove is deliberate (note the suppression above);
    // NOTE(review): presumably it avoids the write path of remove() for absent keys — confirm
    if ((pending != null) && pending.containsKey(keyReference)) {
      pending.remove(keyReference);
    }
  }
367

368
  /** Returns the key in the form used for hash-table lookups (may wrap in a reference). */
  @Override
  public Object referenceKey(K key) {
    return nodeFactory.newLookupKey(key);
  }
372

373
  @Override
374
  public boolean isPendingEviction(K key) {
375
    Node<K, V> node = data.get(nodeFactory.newLookupKey(key));
×
376
    return (node != null)
×
377
        && ((node.getValue() == null) || hasExpired(node, expirationTicker().read()));
×
378
  }
379

380
  /* --------------- Stats Support --------------- */
381

382
  // Statistics hooks: disabled by default and overridden by the code-generated subclasses for
  // configurations that record stats.

  @Override
  public boolean isRecordingStats() {
    return false;
  }

  @Override
  public StatsCounter statsCounter() {
    return StatsCounter.disabledStatsCounter();
  }

  @Override
  public Ticker statsTicker() {
    return Ticker.disabledTicker();
  }
396

397
  /* --------------- Removal Listener Support --------------- */
398

399
  /**
   * Returns the user-supplied removal listener. Returns {@code null} in the base class; callers
   * must guard with {@link #hasRemovalListener()} (as {@code notifyRemoval} does below).
   */
  @SuppressWarnings({"DataFlowIssue", "NullAway"})
  protected RemovalListener<K, V> removalListener() {
    return null;
  }

  /** Returns if a removal listener was configured; overridden when one is present. */
  protected boolean hasRemovalListener() {
    return false;
  }

  /**
   * Asynchronously notifies the removal listener of the entry's removal. The listener's own
   * exceptions are logged and swallowed; if the executor rejects the task then the notification
   * is logged and the listener is run synchronously on the caller's thread as a fallback.
   */
  @Override
  public void notifyRemoval(@Nullable K key, @Nullable V value, RemovalCause cause) {
    if (!hasRemovalListener()) {
      return;
    }
    Runnable task = () -> {
      try {
        removalListener().onRemoval(key, value, cause);
      } catch (Throwable t) {
        logger.log(Level.WARNING, "Exception thrown by removal listener", t);
      }
    };
    try {
      executor.execute(task);
    } catch (Throwable t) {
      logger.log(Level.ERROR, "Exception thrown when submitting removal listener", t);
      // Degrade to synchronous delivery so the notification is not lost
      task.run();
    }
  }
427

428
  /* --------------- Eviction Listener Support --------------- */
429

430
  void notifyEviction(@Nullable K key, @Nullable V value, RemovalCause cause) {
431
    if (evictionListener == null) {
×
432
      return;
×
433
    }
434
    try {
435
      evictionListener.onRemoval(key, value, cause);
×
436
    } catch (Throwable t) {
×
437
      logger.log(Level.WARNING, "Exception thrown by eviction listener", t);
×
438
    }
×
439
  }
×
440

441
  /* --------------- Reference Support --------------- */
442

443
  // Reference-collection hooks: disabled/null by default and overridden by the code-generated
  // subclasses for weak-key and weak/soft-value configurations.

  /** Returns if the keys are weak reference garbage collected. */
  protected boolean collectKeys() {
    return false;
  }

  /** Returns if the values are weak or soft reference garbage collected. */
  protected boolean collectValues() {
    return false;
  }

  /** Returns the queue of collected keys; {@code null} here, so only call when collectKeys(). */
  @SuppressWarnings({"DataFlowIssue", "NullAway"})
  protected ReferenceQueue<K> keyReferenceQueue() {
    return null;
  }

  /** Returns the queue of collected values; {@code null} here, so only call when collectValues(). */
  @SuppressWarnings({"DataFlowIssue", "NullAway"})
  protected ReferenceQueue<V> valueReferenceQueue() {
    return null;
  }
462

463
  /* --------------- Expiration Support --------------- */
464

465
  // Expiration hooks: defaults below, overridden by the code-generated subclasses for the
  // configurations that enable each policy. The setters/getters for durations throw when the
  // corresponding policy was not configured.

  /** Returns the {@link Pacer} used to schedule the maintenance task. */
  protected @Nullable Pacer pacer() {
    return null;
  }

  /** Returns if the cache expires entries after a variable time threshold. */
  protected boolean expiresVariable() {
    return false;
  }

  /** Returns if the cache expires entries after an access time threshold. */
  protected boolean expiresAfterAccess() {
    return false;
  }

  /** Returns how long after the last access to an entry the map will retain that entry. */
  protected long expiresAfterAccessNanos() {
    throw new UnsupportedOperationException();
  }

  /** Sets the time-to-idle duration; unsupported unless that policy is configured. */
  protected void setExpiresAfterAccessNanos(long expireAfterAccessNanos) {
    throw new UnsupportedOperationException();
  }

  /** Returns if the cache expires entries after a write time threshold. */
  protected boolean expiresAfterWrite() {
    return false;
  }

  /** Returns how long after the last write to an entry the map will retain that entry. */
  protected long expiresAfterWriteNanos() {
    throw new UnsupportedOperationException();
  }

  /** Sets the time-to-live duration; unsupported unless that policy is configured. */
  protected void setExpiresAfterWriteNanos(long expireAfterWriteNanos) {
    throw new UnsupportedOperationException();
  }

  /** Returns if the cache refreshes entries after a write time threshold. */
  protected boolean refreshAfterWrite() {
    return false;
  }

  /** Returns how long after the last write an entry becomes a candidate for refresh. */
  protected long refreshAfterWriteNanos() {
    throw new UnsupportedOperationException();
  }

  /** Sets the refresh-after-write duration; unsupported unless that policy is configured. */
  protected void setRefreshAfterWriteNanos(long refreshAfterWriteNanos) {
    throw new UnsupportedOperationException();
  }

  /** Returns the variable expiration policy; {@code null} here, only valid when configured. */
  @Override
  @SuppressWarnings({"DataFlowIssue", "NullAway"})
  public Expiry<K, V> expiry() {
    return null;
  }

  /** Returns the {@link Ticker} used by this cache for expiration. */
  public Ticker expirationTicker() {
    return Ticker.disabledTicker();
  }

  /** Returns the hierarchical timer wheel used by variable expiration. */
  protected TimerWheel<K, V> timerWheel() {
    throw new UnsupportedOperationException();
  }
531

532
  /* --------------- Eviction Support --------------- */
533

534
  // Eviction hooks: defaults below, overridden by the code-generated subclasses when a maximum
  // size or weight is configured. The throwing accessors back the Window TinyLfu policy state
  // (window/probation/protected maxima and weights) and the hill-climber's adaptive sample state.

  /** Returns if the cache evicts entries due to a maximum size or weight threshold. */
  protected boolean evicts() {
    return false;
  }

  /** Returns if entries may be assigned different weights. */
  protected boolean isWeighted() {
    return (weigher != Weigher.singletonWeigher());
  }

  /** Returns the popularity sketch used by the TinyLfu admission filter. */
  protected FrequencySketch frequencySketch() {
    throw new UnsupportedOperationException();
  }

  /** Returns if an access to an entry can skip notifying the eviction policy. */
  protected boolean fastpath() {
    return false;
  }

  /** Returns the maximum weighted size. */
  protected long maximum() {
    throw new UnsupportedOperationException();
  }

  /** Returns the maximum weighted size (acquire-mode read). */
  protected long maximumAcquire() {
    throw new UnsupportedOperationException();
  }

  /** Returns the maximum weighted size of the window space. */
  protected long windowMaximum() {
    throw new UnsupportedOperationException();
  }

  /** Returns the maximum weighted size of the main's protected space. */
  protected long mainProtectedMaximum() {
    throw new UnsupportedOperationException();
  }

  @GuardedBy("evictionLock")
  protected void setMaximum(long maximum) {
    throw new UnsupportedOperationException();
  }

  @GuardedBy("evictionLock")
  protected void setWindowMaximum(long maximum) {
    throw new UnsupportedOperationException();
  }

  @GuardedBy("evictionLock")
  protected void setMainProtectedMaximum(long maximum) {
    throw new UnsupportedOperationException();
  }

  /** Returns the combined weight of the values in the cache (may be negative). */
  protected long weightedSize() {
    throw new UnsupportedOperationException();
  }

  /** Returns the combined weight of the values in the cache (may be negative). */
  protected long weightedSizeAcquire() {
    throw new UnsupportedOperationException();
  }

  /** Returns the uncorrected combined weight of the values in the window space. */
  protected long windowWeightedSize() {
    throw new UnsupportedOperationException();
  }

  /** Returns the uncorrected combined weight of the values in the main's protected space. */
  protected long mainProtectedWeightedSize() {
    throw new UnsupportedOperationException();
  }

  @GuardedBy("evictionLock")
  protected void setWeightedSize(long weightedSize) {
    throw new UnsupportedOperationException();
  }

  @GuardedBy("evictionLock")
  protected void setWindowWeightedSize(long weightedSize) {
    throw new UnsupportedOperationException();
  }

  @GuardedBy("evictionLock")
  protected void setMainProtectedWeightedSize(long weightedSize) {
    throw new UnsupportedOperationException();
  }

  /** Returns the number of cache hits in the current hill-climber sample. */
  protected int hitsInSample() {
    throw new UnsupportedOperationException();
  }

  /** Returns the number of cache misses in the current hill-climber sample. */
  protected int missesInSample() {
    throw new UnsupportedOperationException();
  }

  /** Returns the hill-climber's current step size. */
  protected double stepSize() {
    throw new UnsupportedOperationException();
  }

  /** Returns the hit rate observed in the previous hill-climber sample. */
  protected double previousSampleHitRate() {
    throw new UnsupportedOperationException();
  }

  /** Returns the pending adjustment to the window size. */
  protected long adjustment() {
    throw new UnsupportedOperationException();
  }

  @GuardedBy("evictionLock")
  protected void setHitsInSample(int hitCount) {
    throw new UnsupportedOperationException();
  }

  @GuardedBy("evictionLock")
  protected void setMissesInSample(int missCount) {
    throw new UnsupportedOperationException();
  }

  @GuardedBy("evictionLock")
  protected void setStepSize(double stepSize) {
    throw new UnsupportedOperationException();
  }

  @GuardedBy("evictionLock")
  protected void setPreviousSampleHitRate(double hitRate) {
    throw new UnsupportedOperationException();
  }

  @GuardedBy("evictionLock")
  protected void setAdjustment(long amount) {
    throw new UnsupportedOperationException();
  }
667

668
  /**
   * Sets the maximum weighted size of the cache. The caller may need to perform a maintenance cycle
   * to eagerly evicts entries until the cache shrinks to the appropriate size.
   *
   * @param maximum the new maximum weighted size; must not be negative
   * @throws IllegalArgumentException if {@code maximum} is negative
   */
  @GuardedBy("evictionLock")
  @SuppressWarnings({"ConstantValue", "Varifier"})
  void setMaximumSize(long maximum) {
    requireArgument(maximum >= 0, "maximum must not be negative");
    if (maximum == maximum()) {
      return;
    }

    // Split the capacity per the Window TinyLfu policy: PERCENT_MAIN to the main space (so the
    // window gets the remainder) and PERCENT_MAIN_PROTECTED of the main space to protected
    long max = Math.min(maximum, MAXIMUM_CAPACITY);
    long window = max - (long) (PERCENT_MAIN * max);
    long mainProtected = (long) (PERCENT_MAIN_PROTECTED * (max - window));

    setMaximum(max);
    setWindowMaximum(window);
    setMainProtectedMaximum(mainProtected);

    // Restart the hill climber's sampling with a step proportional to the new capacity
    setHitsInSample(0);
    setMissesInSample(0);
    setStepSize(-HILL_CLIMBER_STEP_PERCENT * max);

    if ((frequencySketch() != null) && !isWeighted() && (weightedSize() >= (max >>> 1))) {
      // Lazily initialize when close to the maximum size
      frequencySketch().ensureCapacity(max);
    }
  }
697

698
  /**
   * Evicts entries if the cache exceeds the maximum. First demotes window entries into the main
   * space, then lets the main space arbitrate between those candidates and its own victims.
   */
  @GuardedBy("evictionLock")
  void evictEntries() {
    if (!evicts()) {
      return;
    }
    var candidate = evictFromWindow();
    evictFromMain(candidate);
  }
707

708
  /**
   * Evicts entries from the window space into the main space while the window size exceeds a
   * maximum.
   *
   * @return the first candidate promoted into the probation space
   */
  @GuardedBy("evictionLock")
  @Nullable Node<K, V> evictFromWindow() {
    @Var Node<K, V> first = null;
    @Var Node<K, V> node = accessOrderWindowDeque().peekFirst();
    while (windowWeightedSize() > windowMaximum()) {
      // The pending operations will adjust the size to reflect the correct weight
      if (node == null) {
        break;
      }

      // Capture the successor before unlinking the node from the window deque
      Node<K, V> next = node.getNextInAccessOrder();
      if (node.getPolicyWeight() != 0) {
        // Promote to the probation space's MRU position; zero-weight nodes are skipped and
        // remain in the window
        node.makeMainProbation();
        accessOrderWindowDeque().remove(node);
        accessOrderProbationDeque().offerLast(node);
        if (first == null) {
          first = node;
        }

        setWindowWeightedSize(windowWeightedSize() - node.getPolicyWeight());
      }
      node = next;
    }

    return first;
  }
740

741
  /**
   * Evicts entries from the main space if the cache exceeds the maximum capacity. The main space
   * determines whether admitting an entry (coming from the window space) is preferable to retaining
   * the eviction policy's victim. This decision is made using a frequency filter so that the
   * least frequently used entry is removed.
   * <p>
   * The window space's candidates were previously promoted to the probation space at its MRU
   * position and the eviction policy's victim starts at the LRU position. The candidates are
   * evaluated in promotion order while an eviction is required, and if exhausted then additional
   * entries are retrieved from the window space. Likewise, if the victim selection exhausts the
   * probation space then additional entries are retrieved from the protected space. The queues are
   * consumed in LRU order and the evicted entry is the one with a lower relative frequency, where
   * the preference is to retain the main space's victims versus the window space's candidates on a
   * tie.
   *
   * @param candidate the first candidate promoted into the probation space
   */
  @GuardedBy("evictionLock")
  void evictFromMain(@Var @Nullable Node<K, V> candidate) {
    @Var int victimQueue = PROBATION;
    @Var int candidateQueue = PROBATION;
    @Var Node<K, V> victim = accessOrderProbationDeque().peekFirst();
    while (weightedSize() > maximum()) {
      // Search the admission window for additional candidates
      if ((candidate == null) && (candidateQueue == PROBATION)) {
        candidate = accessOrderWindowDeque().peekFirst();
        candidateQueue = WINDOW;
      }

      // Try evicting from the protected and window queues
      if ((candidate == null) && (victim == null)) {
        if (victimQueue == PROBATION) {
          victim = accessOrderProtectedDeque().peekFirst();
          victimQueue = PROTECTED;
          continue;
        } else if (victimQueue == PROTECTED) {
          victim = accessOrderWindowDeque().peekFirst();
          victimQueue = WINDOW;
          continue;
        }

        // The pending operations will adjust the size to reflect the correct weight
        break;
      }

      // Skip over entries with zero weight
      if ((victim != null) && (victim.getPolicyWeight() == 0)) {
        victim = victim.getNextInAccessOrder();
        continue;
      } else if ((candidate != null) && (candidate.getPolicyWeight() == 0)) {
        candidate = candidate.getNextInAccessOrder();
        continue;
      }

      // Evict immediately if only one of the entries is present
      // (the successor is captured before evictEntry unlinks the node from the deque)
      if (victim == null) {
        requireNonNull(candidate);
        Node<K, V> previous = candidate.getNextInAccessOrder();
        Node<K, V> evict = candidate;
        candidate = previous;
        evictEntry(evict, RemovalCause.SIZE, 0L);
        continue;
      } else if (candidate == null) {
        Node<K, V> evict = victim;
        victim = victim.getNextInAccessOrder();
        evictEntry(evict, RemovalCause.SIZE, 0L);
        continue;
      }

      // Evict immediately if both selected the same entry
      if (candidate == victim) {
        victim = victim.getNextInAccessOrder();
        evictEntry(candidate, RemovalCause.SIZE, 0L);
        candidate = null;
        continue;
      }

      // Evict immediately if an entry was collected
      var victimKeyRef = victim.getKeyReferenceOrNull();
      var candidateKeyRef = candidate.getKeyReferenceOrNull();
      if (victimKeyRef == null) {
        Node<K, V> evict = victim;
        victim = victim.getNextInAccessOrder();
        evictEntry(evict, RemovalCause.COLLECTED, 0L);
        continue;
      } else if (candidateKeyRef == null) {
        Node<K, V> evict = candidate;
        candidate = candidate.getNextInAccessOrder();
        evictEntry(evict, RemovalCause.COLLECTED, 0L);
        continue;
      }

      // Evict immediately if an entry was removed
      if (!victim.isAlive()) {
        Node<K, V> evict = victim;
        victim = victim.getNextInAccessOrder();
        evictEntry(evict, RemovalCause.SIZE, 0L);
        continue;
      } else if (!candidate.isAlive()) {
        Node<K, V> evict = candidate;
        candidate = candidate.getNextInAccessOrder();
        evictEntry(evict, RemovalCause.SIZE, 0L);
        continue;
      }

      // Evict immediately if the candidate's weight exceeds the maximum
      if (candidate.getPolicyWeight() > maximum()) {
        Node<K, V> evict = candidate;
        candidate = candidate.getNextInAccessOrder();
        evictEntry(evict, RemovalCause.SIZE, 0L);
        continue;
      }

      // Evict the entry with the lowest frequency; on admission the victim is removed and the
      // candidate is retained, otherwise the candidate itself is evicted
      if (admit(candidateKeyRef, victimKeyRef)) {
        Node<K, V> evict = victim;
        victim = victim.getNextInAccessOrder();
        evictEntry(evict, RemovalCause.SIZE, 0L);
        candidate = candidate.getNextInAccessOrder();
      } else {
        Node<K, V> evict = candidate;
        candidate = candidate.getNextInAccessOrder();
        evictEntry(evict, RemovalCause.SIZE, 0L);
      }
    }
  }
×
867

868
  /**
869
   * Determines if the candidate should be accepted into the main space, as determined by its
870
   * frequency relative to the victim. A small amount of randomness is used to protect against hash
871
   * collision attacks, where the victim's frequency is artificially raised so that no new entries
872
   * are admitted.
873
   *
874
   * @param candidateKeyRef the keyRef for the entry being proposed for long term retention
875
   * @param victimKeyRef the keyRef for the entry chosen by the eviction policy for replacement
876
   * @return if the candidate should be admitted and the victim ejected
877
   */
878
  @GuardedBy("evictionLock")
879
  boolean admit(Object candidateKeyRef, Object victimKeyRef) {
880
    int candidateFreq = frequencySketch().frequency(candidateKeyRef);
×
881
    int victimFreq = frequencySketch().frequency(victimKeyRef);
×
882
    if (candidateFreq > victimFreq) {
×
883
      return true;
×
884
    } else if (candidateFreq >= ADMIT_HASHDOS_THRESHOLD) {
×
885
      // The maximum frequency is 15 and halved to 7 after a reset to age the history. An attack
886
      // exploits that a hot candidate is rejected in favor of a hot victim. The threshold of a warm
887
      // candidate reduces the number of random acceptances to minimize the impact on the hit rate.
888
      int random = ThreadLocalRandom.current().nextInt();
×
889
      return ((random & 127) == 0);
×
890
    }
891
    return false;
×
892
  }
893

894
  /** Expires entries that have expired by access, write, or variable. */
895
  @GuardedBy("evictionLock")
896
  void expireEntries() {
897
    long now = expirationTicker().read();
×
898
    expireAfterAccessEntries(now);
×
899
    expireAfterWriteEntries(now);
×
900
    expireVariableEntries(now);
×
901

902
    Pacer pacer = pacer();
×
903
    if (pacer != null) {
×
904
      long delay = getExpirationDelay(now);
×
905
      if (delay == Long.MAX_VALUE) {
×
906
        pacer.cancel();
×
907
      } else {
908
        pacer.schedule(executor, drainBuffersTask, now, delay);
×
909
      }
910
    }
911
  }
×
912

913
  /** Expires entries in the access-order queue. */
914
  @GuardedBy("evictionLock")
915
  void expireAfterAccessEntries(long now) {
916
    if (!expiresAfterAccess()) {
×
917
      return;
×
918
    }
919

920
    expireAfterAccessEntries(now, accessOrderWindowDeque());
×
921
    if (evicts()) {
×
922
      expireAfterAccessEntries(now, accessOrderProbationDeque());
×
923
      expireAfterAccessEntries(now, accessOrderProtectedDeque());
×
924
    }
925
  }
×
926

927
  /** Expires entries in an access-order queue. */
928
  @GuardedBy("evictionLock")
929
  void expireAfterAccessEntries(long now, AccessOrderDeque<Node<K, V>> accessOrderDeque) {
930
    long duration = expiresAfterAccessNanos();
×
931
    for (;;) {
932
      Node<K, V> node = accessOrderDeque.peekFirst();
×
933
      if ((node == null) || ((now - node.getAccessTime()) < duration)
×
934
          || !evictEntry(node, RemovalCause.EXPIRED, now)) {
×
935
        return;
×
936
      }
937
    }
×
938
  }
939

940
  /** Expires entries on the write-order queue. */
941
  @GuardedBy("evictionLock")
942
  void expireAfterWriteEntries(long now) {
943
    if (!expiresAfterWrite()) {
×
944
      return;
×
945
    }
946
    long duration = expiresAfterWriteNanos();
×
947
    for (;;) {
948
      Node<K, V> node = writeOrderDeque().peekFirst();
×
949
      if ((node == null) || ((now - node.getWriteTime()) < duration)
×
950
          || !evictEntry(node, RemovalCause.EXPIRED, now)) {
×
951
        break;
×
952
      }
953
    }
×
954
  }
×
955

956
  /** Expires entries in the timer wheel. */
957
  @GuardedBy("evictionLock")
958
  void expireVariableEntries(long now) {
959
    if (expiresVariable()) {
×
960
      timerWheel().advance(this, now);
×
961
    }
962
  }
×
963

964
  /** Returns the duration until the next item expires, or {@link Long#MAX_VALUE} if none. */
965
  @GuardedBy("evictionLock")
966
  long getExpirationDelay(long now) {
967
    @Var long delay = Long.MAX_VALUE;
×
968
    if (expiresAfterAccess()) {
×
969
      @Var Node<K, V> node = accessOrderWindowDeque().peekFirst();
×
970
      if (node != null) {
×
971
        delay = Math.min(delay, expiresAfterAccessNanos() - (now - node.getAccessTime()));
×
972
      }
973
      if (evicts()) {
×
974
        node = accessOrderProbationDeque().peekFirst();
×
975
        if (node != null) {
×
976
          delay = Math.min(delay, expiresAfterAccessNanos() - (now - node.getAccessTime()));
×
977
        }
978
        node = accessOrderProtectedDeque().peekFirst();
×
979
        if (node != null) {
×
980
          delay = Math.min(delay, expiresAfterAccessNanos() - (now - node.getAccessTime()));
×
981
        }
982
      }
983
    }
984
    if (expiresAfterWrite()) {
×
985
      Node<K, V> node = writeOrderDeque().peekFirst();
×
986
      if (node != null) {
×
987
        delay = Math.min(delay, expiresAfterWriteNanos() - (now - node.getWriteTime()));
×
988
      }
989
    }
990
    if (expiresVariable()) {
×
991
      delay = Math.min(delay, timerWheel().getExpirationDelay());
×
992
    }
993
    return delay;
×
994
  }
995

996
  /** Returns if the entry has expired. */
997
  @SuppressWarnings("ShortCircuitBoolean")
998
  boolean hasExpired(Node<K, V> node, long now) {
999
    if (isComputingAsync(node.getValue())) {
×
1000
      return false;
×
1001
    }
1002
    return (expiresAfterAccess() && (now - node.getAccessTime() >= expiresAfterAccessNanos()))
×
1003
        | (expiresAfterWrite() && (now - node.getWriteTime() >= expiresAfterWriteNanos()))
×
1004
        | (expiresVariable() && (now - node.getVariableTime() >= 0));
×
1005
  }
1006

1007
  /**
   * Attempts to evict the entry based on the given removal cause. A removal may be ignored if the
   * entry was updated and is no longer eligible for eviction.
   *
   * @param node the entry to evict
   * @param cause the reason to evict
   * @param now the current time, used only if expiring
   * @return if the entry was evicted
   */
  @GuardedBy("evictionLock")
  @SuppressWarnings({"GuardedByChecker", "SynchronizationOnLocalVariableOrMethodParameter"})
  boolean evictEntry(Node<K, V> node, RemovalCause cause, long now) {
    K key = node.getKey();
    // Single-element arrays act as mutable captures for the computeIfPresent lambda below
    @SuppressWarnings({"unchecked", "Varifier"})
    @Nullable V[] value = (V[]) new Object[1];
    var removed = new boolean[1];
    var resurrect = new boolean[1];
    var actualCause = new RemovalCause[1];
    var keyReference = node.getKeyReference();

    data.computeIfPresent(keyReference, (k, n) -> {
      // A different mapping means the node was already replaced; leave it untouched
      if (n != node) {
        return n;
      }
      synchronized (n) {
        value[0] = n.getValue();

        if ((key == null) || (value[0] == null)) {
          actualCause[0] = RemovalCause.COLLECTED;
        } else if (cause == RemovalCause.COLLECTED) {
          // The references were cleared but a value is now present, so cancel the eviction
          resurrect[0] = true;
          return n;
        } else {
          actualCause[0] = cause;
        }

        if (actualCause[0] == RemovalCause.EXPIRED) {
          // Re-validate expiration under the lock; a concurrent write may have refreshed the entry
          @Var boolean expired = false;
          if (expiresAfterAccess()) {
            expired |= ((now - n.getAccessTime()) >= expiresAfterAccessNanos());
          }
          if (expiresAfterWrite()) {
            expired |= ((now - n.getWriteTime()) >= expiresAfterWriteNanos());
          }
          if (expiresVariable()) {
            // node == n here, guaranteed by the identity check above
            expired |= ((now - node.getVariableTime()) >= 0);
          }
          if (!expired) {
            resurrect[0] = true;
            return n;
          }
        } else if (actualCause[0] == RemovalCause.SIZE) {
          int weight = node.getWeight();
          if (weight == 0) {
            // Zero-weight entries do not contribute to the size, so cancel the eviction
            resurrect[0] = true;
            return n;
          }
        }

        notifyEviction(key, value[0], actualCause[0]);
        discardRefresh(keyReference);
        removed[0] = true;
        node.retire();
        return null;
      }
    });

    // The entry is no longer eligible for eviction
    if (resurrect[0]) {
      return false;
    }

    // If the eviction fails due to a concurrent removal of the victim, that removal may cancel out
    // the addition that triggered this eviction. The victim is eagerly unlinked and the size
    // decremented before the removal task so that if an eviction is still required then a new
    // victim will be chosen for removal.
    if (node.inWindow() && (evicts() || expiresAfterAccess())) {
      accessOrderWindowDeque().remove(node);
    } else if (evicts()) {
      if (node.inMainProbation()) {
        accessOrderProbationDeque().remove(node);
      } else {
        accessOrderProtectedDeque().remove(node);
      }
    }
    if (expiresAfterWrite()) {
      writeOrderDeque().remove(node);
    } else if (expiresVariable()) {
      timerWheel().deschedule(node);
    }

    synchronized (node) {
      logIfAlive(node);
      makeDead(node);
    }

    // Only notify when this call performed the removal from the data map
    if (removed[0]) {
      statsCounter().recordEviction(node.getWeight(), actualCause[0]);
      notifyRemoval(key, value[0], actualCause[0]);
    }

    return true;
  }
1110

1111
  /** Adapts the eviction policy to towards the optimal recency / frequency configuration. */
1112
  @GuardedBy("evictionLock")
1113
  void climb() {
1114
    if (!evicts()) {
×
1115
      return;
×
1116
    }
1117

1118
    determineAdjustment();
×
1119
    demoteFromMainProtected();
×
1120
    long amount = adjustment();
×
1121
    if (amount == 0) {
×
1122
      return;
×
1123
    } else if (amount > 0) {
×
1124
      increaseWindow();
×
1125
    } else {
1126
      decreaseWindow();
×
1127
    }
1128
  }
×
1129

1130
  /** Calculates the amount to adapt the window by and sets {@link #adjustment()} accordingly. */
1131
  @GuardedBy("evictionLock")
1132
  void determineAdjustment() {
1133
    if (frequencySketch().isNotInitialized()) {
×
1134
      setPreviousSampleHitRate(0.0);
×
1135
      setMissesInSample(0);
×
1136
      setHitsInSample(0);
×
1137
      return;
×
1138
    }
1139

1140
    int requestCount = hitsInSample() + missesInSample();
×
1141
    if (requestCount < frequencySketch().sampleSize) {
×
1142
      return;
×
1143
    }
1144

1145
    double hitRate = (double) hitsInSample() / requestCount;
×
1146
    double hitRateChange = hitRate - previousSampleHitRate();
×
1147
    double amount = (hitRateChange >= 0) ? stepSize() : -stepSize();
×
1148
    double nextStepSize = (Math.abs(hitRateChange) >= HILL_CLIMBER_RESTART_THRESHOLD)
×
1149
        ? HILL_CLIMBER_STEP_PERCENT * maximum() * (amount >= 0 ? 1 : -1)
×
1150
        : HILL_CLIMBER_STEP_DECAY_RATE * amount;
×
1151
    setPreviousSampleHitRate(hitRate);
×
1152
    setAdjustment((long) amount);
×
1153
    setStepSize(nextStepSize);
×
1154
    setMissesInSample(0);
×
1155
    setHitsInSample(0);
×
1156
  }
×
1157

1158
  /**
   * Increases the size of the admission window by shrinking the portion allocated to the main
   * space. As the main space is partitioned into probation and protected regions (80% / 20%), for
   * simplicity only the protected is reduced. If the regions exceed their maximums, this may cause
   * protected items to be demoted to the probation region and probation items to be demoted to the
   * admission window.
   */
  @GuardedBy("evictionLock")
  void increaseWindow() {
    if (mainProtectedMaximum() == 0) {
      return;
    }

    // Tentatively shift capacity from the protected region to the window
    @Var long quota = Math.min(adjustment(), mainProtectedMaximum());
    setMainProtectedMaximum(mainProtectedMaximum() - quota);
    setWindowMaximum(windowMaximum() + quota);
    demoteFromMainProtected();

    for (int i = 0; i < QUEUE_TRANSFER_THRESHOLD; i++) {
      // Prefer moving a probation entry; fall back to protected when it is absent or too heavy
      @Var Node<K, V> candidate = accessOrderProbationDeque().peekFirst();
      @Var boolean probation = true;
      if ((candidate == null) || (quota < candidate.getPolicyWeight())) {
        candidate = accessOrderProtectedDeque().peekFirst();
        probation = false;
      }
      if (candidate == null) {
        break;
      }

      int weight = candidate.getPolicyWeight();
      if (quota < weight) {
        break;
      }

      quota -= weight;
      if (probation) {
        accessOrderProbationDeque().remove(candidate);
      } else {
        setMainProtectedWeightedSize(mainProtectedWeightedSize() - weight);
        accessOrderProtectedDeque().remove(candidate);
      }
      setWindowWeightedSize(windowWeightedSize() + weight);
      accessOrderWindowDeque().offerLast(candidate);
      candidate.makeWindow();
    }

    // Return any unspent quota and record the amount actually adapted
    setMainProtectedMaximum(mainProtectedMaximum() + quota);
    setWindowMaximum(windowMaximum() - quota);
    setAdjustment(quota);
  }
×
1208

1209
  /** Decreases the size of the admission window and increases the main's protected region. */
  @GuardedBy("evictionLock")
  void decreaseWindow() {
    if (windowMaximum() <= 1) {
      return;
    }

    // Tentatively shift capacity from the window to the protected region; adjustment() is
    // negative on this path so its negation is the requested amount
    @Var long quota = Math.min(-adjustment(), Math.max(0, windowMaximum() - 1));
    setMainProtectedMaximum(mainProtectedMaximum() + quota);
    setWindowMaximum(windowMaximum() - quota);

    for (int i = 0; i < QUEUE_TRANSFER_THRESHOLD; i++) {
      Node<K, V> candidate = accessOrderWindowDeque().peekFirst();
      if (candidate == null) {
        break;
      }

      int weight = candidate.getPolicyWeight();
      if (quota < weight) {
        break;
      }

      quota -= weight;
      setWindowWeightedSize(windowWeightedSize() - weight);
      accessOrderWindowDeque().remove(candidate);
      accessOrderProbationDeque().offerLast(candidate);
      candidate.makeMainProbation();
    }

    // Return any unspent quota and record the (negative) amount actually adapted
    setMainProtectedMaximum(mainProtectedMaximum() - quota);
    setWindowMaximum(windowMaximum() + quota);
    setAdjustment(-quota);
  }
×
1242

1243
  /** Transfers the nodes from the protected to the probation region if it exceeds the maximum. */
1244
  @GuardedBy("evictionLock")
1245
  void demoteFromMainProtected() {
1246
    long mainProtectedMaximum = mainProtectedMaximum();
×
1247
    @Var long mainProtectedWeightedSize = mainProtectedWeightedSize();
×
1248
    if (mainProtectedWeightedSize <= mainProtectedMaximum) {
×
1249
      return;
×
1250
    }
1251

1252
    for (int i = 0; i < QUEUE_TRANSFER_THRESHOLD; i++) {
×
1253
      if (mainProtectedWeightedSize <= mainProtectedMaximum) {
×
1254
        break;
×
1255
      }
1256

1257
      Node<K, V> demoted = accessOrderProtectedDeque().poll();
×
1258
      if (demoted == null) {
×
1259
        break;
×
1260
      }
1261
      demoted.makeMainProbation();
×
1262
      accessOrderProbationDeque().offerLast(demoted);
×
1263
      mainProtectedWeightedSize -= demoted.getPolicyWeight();
×
1264
    }
1265
    setMainProtectedWeightedSize(mainProtectedWeightedSize);
×
1266
  }
×
1267

1268
  /**
1269
   * Performs the post-processing work required after a read.
1270
   *
1271
   * @param node the entry in the page replacement policy
1272
   * @param now the current time, in nanoseconds
1273
   * @param recordHit if the hit count should be incremented
1274
   * @return the refreshed value if immediately loaded, else null
1275
   */
1276
  @Nullable V afterRead(Node<K, V> node, long now, boolean recordHit) {
1277
    if (recordHit) {
×
1278
      statsCounter().recordHits(1);
×
1279
    }
1280

1281
    boolean delayable = skipReadBuffer() || (readBuffer.offer(node) != Buffer.FULL);
×
1282
    if (shouldDrainBuffers(delayable)) {
×
1283
      scheduleDrainBuffers();
×
1284
    }
1285
    return refreshIfNeeded(node, now);
×
1286
  }
1287

1288
  /** Returns if the cache should bypass the read buffer. */
1289
  boolean skipReadBuffer() {
1290
    return fastpath() && frequencySketch().isNotInitialized();
×
1291
  }
1292

1293
  /**
   * Asynchronously refreshes the entry if eligible.
   *
   * @param node the entry in the cache to refresh
   * @param now the current time, in nanoseconds
   * @return the refreshed value if immediately loaded, else null
   */
  @SuppressWarnings("FutureReturnValueIgnored")
  @Nullable V refreshIfNeeded(Node<K, V> node, long now) {
    if (!refreshAfterWrite()) {
      return null;
    }

    K key;
    V oldValue;
    // The low bit of the write time serves as a flag marking an in-flight refresh; it is set via
    // CAS to claim the refresh and restored in the finally block below
    long writeTime = node.getWriteTime();
    long refreshWriteTime = writeTime | 1L;
    Object keyReference = node.getKeyReference();
    ConcurrentMap<Object, CompletableFuture<?>> refreshes;
    if (((now - writeTime) > refreshAfterWriteNanos())
        && ((key = node.getKey()) != null) && ((oldValue = node.getValue()) != null)
        && !isComputingAsync(oldValue) && ((writeTime & 1L) == 0L)
        && !(refreshes = refreshes()).containsKey(keyReference)
        && node.isAlive() && node.casWriteTime(writeTime, refreshWriteTime)) {
      // Single-element arrays act as mutable captures for the lambdas below
      long[] startTime = new long[1];
      @SuppressWarnings({"rawtypes", "unchecked"})
      @Nullable CompletableFuture<? extends @Nullable V>[] refreshFuture = new CompletableFuture[1];
      try {
        refreshes.computeIfAbsent(keyReference, k -> {
          try {
            startTime[0] = statsTicker().read();
            if (isAsync) {
              @SuppressWarnings("unchecked")
              var future = (CompletableFuture<V>) oldValue;
              if (Async.isReady(future)) {
                requireNonNull(cacheLoader);
                var refresh = cacheLoader.asyncReload(key, future.join(), executor);
                refreshFuture[0] = requireNonNull(refresh, "Null future");
              } else {
                // no-op if the future's completion state was modified (e.g. obtrude methods)
                return null;
              }
            } else {
              requireNonNull(cacheLoader);
              var refresh = cacheLoader.asyncReload(key, oldValue, executor);
              refreshFuture[0] = requireNonNull(refresh, "Null future");
            }
            return refreshFuture[0];
          } catch (InterruptedException e) {
            // Restore the interrupt status so the caller can observe it
            Thread.currentThread().interrupt();
            logger.log(Level.WARNING, "Exception thrown when submitting refresh task", e);
            return null;
          } catch (Throwable e) {
            logger.log(Level.WARNING, "Exception thrown when submitting refresh task", e);
            return null;
          }
        });
      } finally {
        // Release the refresh flag regardless of whether submission succeeded
        node.casWriteTime(refreshWriteTime, writeTime);
      }

      if (refreshFuture[0] == null) {
        return null;
      }

      var refreshed = refreshFuture[0].handle((newValue, error) -> {
        long loadTime = statsTicker().read() - startTime[0];
        if (error != null) {
          if (!(error instanceof CancellationException) && !(error instanceof TimeoutException)) {
            logger.log(Level.WARNING, "Exception thrown during refresh", error);
          }
          refreshes.remove(keyReference, refreshFuture[0]);
          statsCounter().recordLoadFailure(loadTime);
          return null;
        }

        // In async caches the mapping stores the future itself rather than the loaded value
        @SuppressWarnings("unchecked")
        V value = (isAsync && (newValue != null)) ? (V) refreshFuture[0] : newValue;

        @Nullable RemovalCause[] cause = new RemovalCause[1];
        V result = compute(key, (K k, @Nullable V currentValue) -> {
          boolean removed = refreshes.remove(keyReference, refreshFuture[0]);
          if (currentValue == null) {
            // If the entry is absent then discard the refresh and maybe notifying the listener
            if (value != null) {
              cause[0] = RemovalCause.EXPLICIT;
            }
            return null;
          } else if (currentValue == value) {
            // If the reloaded value is the same instance then no-op
            return currentValue;
          } else if (isAsync &&
              (newValue == Async.getIfReady((CompletableFuture<?>) currentValue))) {
            // If the completed futures hold the same value instance then no-op
            return currentValue;
          } else if (removed && (currentValue == oldValue) && (node.getWriteTime() == writeTime)) {
            // If the entry was not modified while in-flight (no ABA) then replace
            return value;
          }
          // Otherwise, a write invalidated the refresh so discard it and notify the listener
          cause[0] = RemovalCause.REPLACED;
          return currentValue;
        }, expiry(), /* recordLoad= */ false, /* recordLoadFailure= */ true);

        if (cause[0] != null) {
          notifyRemoval(key, value, cause[0]);
        }
        if (newValue == null) {
          statsCounter().recordLoadFailure(loadTime);
        } else {
          statsCounter().recordLoadSuccess(loadTime);
        }
        return result;
      });
      return Async.getIfReady(refreshed);
    }

    return null;
  }
1412

1413
  /**
1414
   * Returns the expiration time for the entry after being created.
1415
   *
1416
   * @param key the key of the entry that was created
1417
   * @param value the value of the entry that was created
1418
   * @param expiry the calculator for the expiration time
1419
   * @param now the current time, in nanoseconds
1420
   * @return the expiration time
1421
   */
1422
  long expireAfterCreate(K key, V value, @Nullable Expiry<? super K, ? super V> expiry, long now) {
1423
    if (expiresVariable()) {
×
1424
      requireNonNull(expiry);
×
1425
      long duration = Math.max(0L, expiry.expireAfterCreate(key, value, now));
×
1426
      return isAsync ? (now + duration) : (now + Math.min(duration, MAXIMUM_EXPIRY));
×
1427
    }
1428
    return 0L;
×
1429
  }
1430

1431
  /**
1432
   * Returns the expiration time for the entry after being updated.
1433
   *
1434
   * @param node the entry in the page replacement policy
1435
   * @param key the key of the entry that was updated
1436
   * @param value the value of the entry that was updated
1437
   * @param expiry the calculator for the expiration time
1438
   * @param now the current time, in nanoseconds
1439
   * @return the expiration time
1440
   */
1441
  long expireAfterUpdate(Node<K, V> node, K key, V value,
1442
      @Nullable Expiry<? super K, ? super V> expiry, long now) {
1443
    if (expiresVariable()) {
×
1444
      requireNonNull(expiry);
×
1445
      long currentDuration = Math.max(1, node.getVariableTime() - now);
×
1446
      long duration = Math.max(0L, expiry.expireAfterUpdate(key, value, now, currentDuration));
×
1447
      return isAsync ? (now + duration) : (now + Math.min(duration, MAXIMUM_EXPIRY));
×
1448
    }
1449
    return 0L;
×
1450
  }
1451

1452
  /**
1453
   * Returns the access time for the entry after a read.
1454
   *
1455
   * @param node the entry in the page replacement policy
1456
   * @param key the key of the entry that was read
1457
   * @param value the value of the entry that was read
1458
   * @param expiry the calculator for the expiration time
1459
   * @param now the current time, in nanoseconds
1460
   * @return the expiration time
1461
   */
1462
  long expireAfterRead(Node<K, V> node, K key, V value, Expiry<K, V> expiry, long now) {
1463
    if (expiresVariable()) {
×
1464
      long currentDuration = Math.max(0L, node.getVariableTime() - now);
×
1465
      long duration = Math.max(0L, expiry.expireAfterRead(key, value, now, currentDuration));
×
1466
      return isAsync ? (now + duration) : (now + Math.min(duration, MAXIMUM_EXPIRY));
×
1467
    }
1468
    return 0L;
×
1469
  }
1470

1471
  /**
   * Attempts to update the access time for the entry after a read.
   *
   * @param node the entry in the page replacement policy
   * @param key the key of the entry that was read
   * @param value the value of the entry that was read
   * @param expiry the calculator for the expiration time
   * @param now the current time, in nanoseconds
   */
  void tryExpireAfterRead(Node<K, V> node, K key, V value, Expiry<K, V> expiry, long now) {
    if (!expiresVariable()) {
      return;
    }

    // The snapshot of the variable time is reused for the CAS below so that a concurrent update
    // causes this attempt to be silently discarded rather than clobbering the newer value
    long variableTime = node.getVariableTime();
    long currentDuration = Math.max(1, variableTime - now);
    if (isAsync && (currentDuration > MAXIMUM_EXPIRY)) {
      // expireAfterCreate has not yet set the duration after completion
      return;
    }

    long duration = Math.max(0L, expiry.expireAfterRead(key, value, now, currentDuration));
    if (duration != currentDuration) {
      long expirationTime = isAsync ? (now + duration) : (now + Math.min(duration, MAXIMUM_EXPIRY));
      // Best-effort: a losing race leaves the competitor's expiration time in place
      node.casVariableTime(variableTime, expirationTime);
    }
  }
×
1498

1499
  void setVariableTime(Node<K, V> node, long expirationTime) {
1500
    if (expiresVariable()) {
×
1501
      node.setVariableTime(expirationTime);
×
1502
    }
1503
  }
×
1504

1505
  void setWriteTime(Node<K, V> node, long now) {
1506
    if (expiresAfterWrite() || refreshAfterWrite()) {
×
1507
      node.setWriteTime(now & ~1L);
×
1508
    }
1509
  }
×
1510

1511
  void setAccessTime(Node<K, V> node, long now) {
1512
    if (expiresAfterAccess()) {
×
1513
      node.setAccessTime(now);
×
1514
    }
1515
  }
×
1516

1517
  /** Returns if the entry's write time would exceed the minimum expiration reorder threshold. */
  boolean exceedsWriteTimeTolerance(Node<K, V> node, long varTime, long now) {
    // Snapshot the node's timestamps once so each policy check sees a consistent view
    long variableTime = node.getVariableTime();
    long tolerance = EXPIRE_WRITE_TOLERANCE;
    long writeTime = node.getWriteTime();
    // A policy whose configured duration is at or below the tolerance must always reorder,
    // since skipping the update could delay expiration by a meaningful fraction of its lifetime
    return
        (expiresAfterWrite()
            && ((expiresAfterWriteNanos() <= tolerance) || (Math.abs(now - writeTime) > tolerance)))
        || (refreshAfterWrite()
            && ((refreshAfterWriteNanos() <= tolerance) || (Math.abs(now - writeTime) > tolerance)))
        || (expiresVariable() && (Math.abs(varTime - variableTime) > tolerance));
  }
1529

1530
  /**
   * Performs the post-processing work required after a write.
   *
   * @param task the pending operation to be applied
   */
  void afterWrite(Runnable task) {
    // Optimistically offer the task; if the buffer is full then nudge the maintenance thread
    // and spin briefly before retrying
    for (int i = 0; i < WRITE_BUFFER_RETRIES; i++) {
      if (writeBuffer.offer(task)) {
        scheduleAfterWrite();
        return;
      }
      scheduleDrainBuffers();
      Thread.onSpinWait();
    }

    // In scenarios where the writing threads cannot make progress then they attempt to provide
    // assistance by performing the eviction work directly. This can resolve cases where the
    // maintenance task is scheduled but not running. That might occur due to all of the executor's
    // threads being busy (perhaps writing into this cache), the write rate greatly exceeds the
    // consuming rate, priority inversion, or if the executor silently discarded the maintenance
    // task. Unfortunately this cannot resolve when the eviction is blocked waiting on a long-
    // running computation due to an eviction listener, the victim is being computed on by a writer,
    // or the victim residing in the same hash bin as a computing entry. In those cases a warning is
    // logged to encourage the application to decouple these computations from the map operations.
    lock();
    try {
      maintenance(task);
    } catch (RuntimeException e) {
      logger.log(Level.ERROR, "Exception thrown when performing the maintenance task", e);
    } finally {
      evictionLock.unlock();
    }
    rescheduleCleanUpIfIncomplete();
  }
1564

1565
  /**
   * Acquires the eviction lock, logging a warning if the wait exceeds the configured threshold.
   * If interrupted while waiting, the acquisition is retried and the interrupt status is
   * restored before returning.
   */
  void lock() {
    @Var long remainingNanos = WARN_AFTER_LOCK_WAIT_NANOS;
    long end = System.nanoTime() + remainingNanos;
    @Var boolean interrupted = false;
    try {
      for (;;) {
        try {
          if (evictionLock.tryLock(remainingNanos, TimeUnit.NANOSECONDS)) {
            return;
          }
          // The timed attempt expired; warn once and then block uninterruptibly
          logger.log(Level.WARNING, "The cache is experiencing excessive wait times for acquiring "
              + "the eviction lock. This may indicate that a long-running computation has halted "
              + "eviction when trying to remove the victim entry. Consider using AsyncCache to "
              + "decouple the computation from the map operation.", new TimeoutException());
          evictionLock.lock();
          return;
        } catch (InterruptedException e) {
          // Keep waiting only for the time that remains of the original budget
          remainingNanos = end - System.nanoTime();
          interrupted = true;
        }
      }
    } finally {
      if (interrupted) {
        // Restore the interrupt status swallowed above
        Thread.currentThread().interrupt();
      }
    }
  }
1593

1594
  /**
   * Conditionally schedules the asynchronous maintenance task after a write operation. If the
   * task status was IDLE or REQUIRED then the maintenance task is scheduled immediately. If it
   * is already processing then it is set to transition to REQUIRED upon completion so that a new
   * execution is triggered by the next operation.
   */
  void scheduleAfterWrite() {
    @Var int drainStatus = drainStatusOpaque();
    for (;;) {
      switch (drainStatus) {
        case IDLE:
          // A failed CAS is benign: some thread advanced the status, and the schedule below
          // is an idempotent request
          casDrainStatus(IDLE, REQUIRED);
          scheduleDrainBuffers();
          return;
        case REQUIRED:
          scheduleDrainBuffers();
          return;
        case PROCESSING_TO_IDLE:
          if (casDrainStatus(PROCESSING_TO_IDLE, PROCESSING_TO_REQUIRED)) {
            return;
          }
          // The status changed concurrently; re-read and re-dispatch
          drainStatus = drainStatusAcquire();
          continue;
        case PROCESSING_TO_REQUIRED:
          // A follow-up run is already queued up behind the in-flight one
          return;
        default:
          throw new IllegalStateException("Invalid drain status: " + drainStatus);
      }
    }
  }
1624

1625
  /**
   * Attempts to schedule an asynchronous task to apply the pending operations to the page
   * replacement policy. If the executor rejects the task then it is run directly.
   */
  void scheduleDrainBuffers() {
    if (drainStatusOpaque() >= PROCESSING_TO_IDLE) {
      // A drain is already in flight
      return;
    }
    // Non-blocking acquire: if another thread holds the lock it is (or will be) draining
    if (evictionLock.tryLock()) {
      try {
        // Re-check under the lock to avoid double-submitting
        int drainStatus = drainStatusOpaque();
        if (drainStatus >= PROCESSING_TO_IDLE) {
          return;
        }
        setDrainStatusRelease(PROCESSING_TO_IDLE);
        executor.execute(drainBuffersTask);
      } catch (Throwable t) {
        // The executor rejected or failed; fall back to draining on the caller's thread
        logger.log(Level.WARNING, "Exception thrown when submitting maintenance task", t);
        maintenance(/* ignored */ null);
      } finally {
        evictionLock.unlock();
      }
    }
  }
1649

1650
  @Override
  public void cleanUp() {
    try {
      performCleanUp(/* ignored */ null);
    } catch (RuntimeException e) {
      // Never propagate maintenance failures to the caller of a best-effort cleanup
      logger.log(Level.ERROR, "Exception thrown when performing the maintenance task", e);
    }
  }
1658

1659
  /**
   * Performs the maintenance work, blocking until the lock is acquired.
   *
   * @param task an additional pending task to run, or {@code null} if not present
   */
  void performCleanUp(@Nullable Runnable task) {
    evictionLock.lock();
    try {
      maintenance(task);
    } finally {
      evictionLock.unlock();
    }
    // Deliberately outside the lock so that any rescheduling does not extend the hold time
    rescheduleCleanUpIfIncomplete();
  }
1673

1674
  /**
   * If there remains pending operations that were not handled by the prior clean up then try to
   * schedule an asynchronous maintenance task. This may occur due to a concurrent write after the
   * maintenance work had started or if the amortized threshold of work per clean up was reached.
   */
  @SuppressWarnings("resource")
  void rescheduleCleanUpIfIncomplete() {
    if (drainStatusOpaque() != REQUIRED) {
      return;
    }

    // An immediate scheduling cannot be performed on a custom executor because it may use a
    // caller-runs policy. This could cause the caller's penalty to exceed the amortized threshold,
    // e.g. repeated concurrent writes could result in a retry loop.
    if (executor == ForkJoinPool.commonPool()) {
      scheduleDrainBuffers();
      return;
    }

    // If a scheduler was configured then the maintenance can be deferred onto the custom executor
    // and run in the near future. Otherwise, it will be handled due to other cache activity.
    var pacer = pacer();
    if ((pacer != null) && !pacer.isScheduled() && evictionLock.tryLock()) {
      try {
        // Re-check under the lock: the status or a pending schedule may have changed
        if ((drainStatusOpaque() == REQUIRED) && !pacer.isScheduled()) {
          pacer.schedule(executor, drainBuffersTask, expirationTicker().read(), Pacer.TOLERANCE);
        }
      } finally {
        evictionLock.unlock();
      }
    }
  }
1706

1707
  /**
   * Performs the pending maintenance work and sets the state flags during processing to avoid
   * excess scheduling attempts. The read buffer, write buffer, and reference queues are drained,
   * followed by expiration, and size-based eviction.
   *
   * @param task an additional pending task to run, or {@code null} if not present
   */
  @GuardedBy("evictionLock")
  void maintenance(@Nullable Runnable task) {
    setDrainStatusRelease(PROCESSING_TO_IDLE);

    try {
      drainReadBuffer();

      // Writes (and the caller's extra task) are applied before reclamation so that the policy
      // reflects the latest mutations
      drainWriteBuffer();
      if (task != null) {
        task.run();
      }

      drainKeyReferences();
      drainValueReferences();

      expireEntries();
      evictEntries();

      // Adapt the window/protected region sizes based on the observed hit rates
      climb();
    } finally {
      // If new work arrived while processing (status advanced past PROCESSING_TO_IDLE, or the
      // CAS back to IDLE lost a race) then flag that another maintenance cycle is required
      if ((drainStatusOpaque() != PROCESSING_TO_IDLE)
          || !casDrainStatus(PROCESSING_TO_IDLE, IDLE)) {
        setDrainStatusOpaque(REQUIRED);
      }
    }
  }
1740

1741
  /** Drains the weak key references queue. */
1742
  @GuardedBy("evictionLock")
1743
  void drainKeyReferences() {
1744
    if (!collectKeys()) {
×
1745
      return;
×
1746
    }
1747
    @Var Reference<? extends K> keyRef;
1748
    while ((keyRef = keyReferenceQueue().poll()) != null) {
×
1749
      Node<K, V> node = data.get(keyRef);
×
1750
      if (node != null) {
×
1751
        evictEntry(node, RemovalCause.COLLECTED, 0L);
×
1752
      }
1753
    }
×
1754
  }
×
1755

1756
  /** Drains the weak / soft value references queue. */
1757
  @GuardedBy("evictionLock")
1758
  void drainValueReferences() {
1759
    if (!collectValues()) {
×
1760
      return;
×
1761
    }
1762
    @Var Reference<? extends V> valueRef;
1763
    while ((valueRef = valueReferenceQueue().poll()) != null) {
×
1764
      @SuppressWarnings("unchecked")
1765
      var ref = (InternalReference<V>) valueRef;
×
1766
      Node<K, V> node = data.get(ref.getKeyReference());
×
1767
      if ((node != null) && (valueRef == node.getValueReference())) {
×
1768
        evictEntry(node, RemovalCause.COLLECTED, 0L);
×
1769
      }
1770
    }
×
1771
  }
×
1772

1773
  /** Drains the read buffer. */
1774
  @GuardedBy("evictionLock")
1775
  void drainReadBuffer() {
1776
    if (!skipReadBuffer()) {
×
1777
      readBuffer.drainTo(accessPolicy);
×
1778
    }
1779
  }
×
1780

1781
  /** Updates the node's location in the page replacement policy. */
  @GuardedBy("evictionLock")
  void onAccess(Node<K, V> node) {
    if (evicts()) {
      var keyRef = node.getKeyReferenceOrNull();
      if (keyRef == null) {
        // The key was garbage collected; the entry will be reclaimed separately
        return;
      }
      frequencySketch().increment(keyRef);
      // Reorder within the region the node currently occupies; a probation hit may also
      // promote the node into the protected region
      if (node.inWindow()) {
        reorder(accessOrderWindowDeque(), node);
      } else if (node.inMainProbation()) {
        reorderProbation(node);
      } else {
        reorder(accessOrderProtectedDeque(), node);
      }
      // Feeds the hit-rate sample used by the adaptive sizing (climb)
      setHitsInSample(hitsInSample() + 1);
    } else if (expiresAfterAccess()) {
      reorder(accessOrderWindowDeque(), node);
    }
    if (expiresVariable()) {
      timerWheel().reschedule(node);
    }
  }
1805

1806
  /** Promote the node from probation to protected on an access. */
  @GuardedBy("evictionLock")
  void reorderProbation(Node<K, V> node) {
    if (!accessOrderProbationDeque().contains(node)) {
      // Ignore stale accesses for an entry that is no longer present
      return;
    } else if (node.getPolicyWeight() > mainProtectedMaximum()) {
      // Too heavy to ever fit in protected; keep it hot within probation instead
      reorder(accessOrderProbationDeque(), node);
      return;
    }

    // If the protected space exceeds its maximum, the LRU items are demoted to the probation space.
    // This is deferred to the adaption phase at the end of the maintenance cycle.
    setMainProtectedWeightedSize(mainProtectedWeightedSize() + node.getPolicyWeight());
    accessOrderProbationDeque().remove(node);
    accessOrderProtectedDeque().offerLast(node);
    node.makeMainProtected();
  }
1824

1825
  /** Updates the node's location in the policy's deque. */
1826
  static <K, V> void reorder(LinkedDeque<Node<K, V>> deque, Node<K, V> node) {
1827
    // An entry may be scheduled for reordering despite having been removed. This can occur when the
1828
    // entry was concurrently read while a writer was removing it. If the entry is no longer linked
1829
    // then it does not need to be processed.
1830
    if (deque.contains(node)) {
×
1831
      deque.moveToBack(node);
×
1832
    }
1833
  }
×
1834

1835
  /** Drains the write buffer. */
  @GuardedBy("evictionLock")
  void drainWriteBuffer() {
    // Amortized bound: process at most WRITE_BUFFER_MAX + 1 tasks per drain so a single pass
    // cannot monopolize the lock under a sustained write load
    for (int i = 0; i <= WRITE_BUFFER_MAX; i++) {
      Runnable task = writeBuffer.poll();
      if (task == null) {
        return;
      }
      task.run();
    }
    // The bound was hit with tasks possibly remaining, so flag that another pass is required
    setDrainStatusOpaque(PROCESSING_TO_REQUIRED);
  }
1847

1848
  /**
   * Atomically transitions the node to the <code>dead</code> state and decrements the
   * <code>weightedSize</code>.
   *
   * @param node the entry in the page replacement policy
   */
  @GuardedBy("evictionLock")
  @SuppressWarnings("SynchronizationOnLocalVariableOrMethodParameter")
  void makeDead(Node<K, V> node) {
    synchronized (node) {
      if (node.isDead()) {
        // Already transitioned by a competing task
        return;
      }
      if (evicts()) {
        // The node's policy weight may be out of sync due to a pending update waiting to be
        // processed. At this point the node's weight is finalized, so the weight can be safely
        // taken from the node's perspective and the sizes will be adjusted correctly.
        if (node.inWindow()) {
          setWindowWeightedSize(windowWeightedSize() - node.getWeight());
        } else if (node.inMainProtected()) {
          setMainProtectedWeightedSize(mainProtectedWeightedSize() - node.getWeight());
        }
        setWeightedSize(weightedSize() - node.getWeight());
      }
      node.die();
    }
  }
1875

1876
  /** Adds the node to the page replacement policy. */
  final class AddTask implements Runnable {
    final Node<K, V> node;
    final int weight;

    AddTask(Node<K, V> node, int weight) {
      this.weight = weight;
      this.node = node;
    }

    @Override
    @GuardedBy("evictionLock")
    public void run() {
      if (evicts()) {
        setWeightedSize(weightedSize() + weight);
        setWindowWeightedSize(windowWeightedSize() + weight);
        node.setPolicyWeight(node.getPolicyWeight() + weight);

        long maximum = maximum();
        if (weightedSize() >= (maximum >>> 1)) {
          if (weightedSize() > MAXIMUM_CAPACITY) {
            evictEntries();
          } else {
            // Lazily initialize when close to the maximum
            long capacity = isWeighted() ? data.mappingCount() : maximum;
            frequencySketch().ensureCapacity(capacity);
          }
        }

        // The key may have been garbage collected between insertion and this task running
        var keyRef = node.getKeyReferenceOrNull();
        if (keyRef != null) {
          frequencySketch().increment(keyRef);
        }

        // Feeds the miss-rate sample used by the adaptive sizing (climb)
        setMissesInSample(missesInSample() + 1);
      }

      // ignore out-of-order write operations
      boolean isAlive;
      synchronized (node) {
        isAlive = node.isAlive();
      }
      if (isAlive) {
        if (expiresAfterWrite()) {
          writeOrderDeque().offerLast(node);
        }
        if (expiresVariable()) {
          timerWheel().schedule(node);
        }
        if (evicts()) {
          if (weight > maximum()) {
            // Cannot fit at all: evict immediately
            evictEntry(node, RemovalCause.SIZE, expirationTicker().read());
          } else if (weight > windowMaximum()) {
            // Oversized for the window: insert at the LRU end so it is the first candidate
            accessOrderWindowDeque().offerFirst(node);
          } else {
            accessOrderWindowDeque().offerLast(node);
          }
        } else if (expiresAfterAccess()) {
          accessOrderWindowDeque().offerLast(node);
        }
      }
    }
  }
1939

1940
  /** Removes a node from the page replacement policy. */
  final class RemovalTask implements Runnable {
    final Node<K, V> node;

    RemovalTask(Node<K, V> node) {
      this.node = node;
    }

    @Override
    @GuardedBy("evictionLock")
    public void run() {
      // add may not have been processed yet, in which case the node is still flagged as being
      // in the window region and is absent from the main deques
      if (node.inWindow() && (evicts() || expiresAfterAccess())) {
        accessOrderWindowDeque().remove(node);
      } else if (evicts()) {
        if (node.inMainProbation()) {
          accessOrderProbationDeque().remove(node);
        } else {
          accessOrderProtectedDeque().remove(node);
        }
      }
      if (expiresAfterWrite()) {
        writeOrderDeque().remove(node);
      } else if (expiresVariable()) {
        timerWheel().deschedule(node);
      }
      makeDead(node);
    }
  }
1969

1970
  /** Updates the weighted size. */
  final class UpdateTask implements Runnable {
    final int weightDifference;
    final Node<K, V> node;

    public UpdateTask(Node<K, V> node, int weightDifference) {
      this.weightDifference = weightDifference;
      this.node = node;
    }

    @Override
    @GuardedBy("evictionLock")
    public void run() {
      if (expiresAfterWrite()) {
        reorder(writeOrderDeque(), node);
      } else if (expiresVariable()) {
        timerWheel().reschedule(node);
      }
      if (evicts()) {
        // Apply the weight delta to the node first so the region checks below observe it
        int oldWeightedSize = node.getPolicyWeight();
        node.setPolicyWeight(oldWeightedSize + weightDifference);
        if (node.inWindow()) {
          setWindowWeightedSize(windowWeightedSize() + weightDifference);
          if (node.getPolicyWeight() > maximum()) {
            // Grew beyond the total capacity: evict immediately
            evictEntry(node, RemovalCause.SIZE, expirationTicker().read());
          } else if (node.getPolicyWeight() <= windowMaximum()) {
            onAccess(node);
          } else if (accessOrderWindowDeque().contains(node)) {
            // Too large for the window but still cacheable: make it the first eviction candidate
            accessOrderWindowDeque().moveToFront(node);
          }
        } else if (node.inMainProbation()) {
          if (node.getPolicyWeight() <= maximum()) {
            onAccess(node);
          } else {
            evictEntry(node, RemovalCause.SIZE, expirationTicker().read());
          }
        } else {
          setMainProtectedWeightedSize(mainProtectedWeightedSize() + weightDifference);
          if (node.getPolicyWeight() <= maximum()) {
            onAccess(node);
          } else {
            evictEntry(node, RemovalCause.SIZE, expirationTicker().read());
          }
        }

        setWeightedSize(weightedSize() + weightDifference);
        if (weightedSize() > MAXIMUM_CAPACITY) {
          evictEntries();
        }
      } else if (expiresAfterAccess()) {
        onAccess(node);
      }
    }
  }
2024

2025
  /* --------------- Concurrent Map Support --------------- */
2026

2027
  @Override
  public boolean isEmpty() {
    // Reflects only the backing map; pending policy operations are not applied first
    return data.isEmpty();
  }
2031

2032
  @Override
  public int size() {
    // Reflects only the backing map; pending policy operations are not applied first
    return data.size();
  }
2036

2037
  @Override
  public long estimatedSize() {
    // mappingCount() avoids the int saturation of size() for very large maps
    return data.mappingCount();
  }
2041

2042
  @Override
  public void clear() {
    Deque<Node<K, V>> entries;
    evictionLock.lock();
    try {
      // Discard all pending reads
      readBuffer.drainTo(e -> {});

      // Apply all pending writes
      @Var Runnable task;
      while ((task = writeBuffer.poll()) != null) {
        task.run();
      }

      // Cancel the scheduled cleanup
      Pacer pacer = pacer();
      if (pacer != null) {
        pacer.cancel();
      }

      // Discard all entries, falling back to one-by-one to avoid excessive lock hold times
      long now = expirationTicker().read();
      int threshold = (WRITE_BUFFER_MAX / 2);
      entries = new ArrayDeque<>(data.values());
      // Stop the bulk pass once the write buffer half-fills, releasing the lock so that the
      // blocked writers can make progress
      while (!entries.isEmpty() && (writeBuffer.size() < threshold)) {
        removeNode(entries.poll(), now);
      }
    } finally {
      evictionLock.unlock();
    }

    // Remove any stragglers if released early to more aggressively flush incoming writes
    @Var boolean cleanUp = false;
    for (var node : entries) {
      @SuppressWarnings("DataFlowIssue")
      var key = node.getKey();
      if (key == null) {
        // The key was garbage collected; a maintenance cycle is needed to reclaim the entry
        cleanUp = true;
      } else {
        remove(key);
      }
    }
    if (collectKeys() && cleanUp) {
      cleanUp();
    }
  }
2088

2089
  /**
   * Removes the node from the data store and the page replacement policy, determining the
   * removal cause (collected, expired, or explicit) under the node's lock and notifying the
   * eviction/removal listeners as appropriate.
   *
   * @param node the entry to remove
   * @param now the current time, in nanoseconds
   */
  @GuardedBy("evictionLock")
  @SuppressWarnings({"GuardedByChecker", "SynchronizationOnLocalVariableOrMethodParameter"})
  void removeNode(Node<K, V> node, long now) {
    K key = node.getKey();
    // Single-element arrays act as mutable out-parameters for the compute lambda
    var cause = new RemovalCause[1];
    var keyReference = node.getKeyReference();
    @SuppressWarnings({"unchecked", "Varifier"})
    @Nullable V[] value = (V[]) new Object[1];

    data.computeIfPresent(keyReference, (k, n) -> {
      if (n != node) {
        // The mapping was concurrently replaced; leave the new entry untouched
        return n;
      }
      synchronized (n) {
        value[0] = n.getValue();

        // Classify the removal: a collected key/value, a lapsed expiration, or an explicit call
        if ((key == null) || (value[0] == null)) {
          cause[0] = RemovalCause.COLLECTED;
        } else if (hasExpired(n, now)) {
          cause[0] = RemovalCause.EXPIRED;
        } else {
          cause[0] = RemovalCause.EXPLICIT;
        }

        if (cause[0].wasEvicted()) {
          notifyEviction(key, value[0], cause[0]);
        }

        discardRefresh(node.getKeyReference());
        node.retire();
        return null;
      }
    });

    // Unlink from the policy structures regardless of whether the map removal happened here
    if (node.inWindow() && (evicts() || expiresAfterAccess())) {
      accessOrderWindowDeque().remove(node);
    } else if (evicts()) {
      if (node.inMainProbation()) {
        accessOrderProbationDeque().remove(node);
      } else {
        accessOrderProtectedDeque().remove(node);
      }
    }
    if (expiresAfterWrite()) {
      writeOrderDeque().remove(node);
    } else if (expiresVariable()) {
      timerWheel().deschedule(node);
    }

    synchronized (node) {
      logIfAlive(node);
      makeDead(node);
    }

    if (cause[0] != null) {
      notifyRemoval(key, value[0], cause[0]);
    }
  }
2147

2148
  @Override
2149
  public boolean containsKey(Object key) {
2150
    Node<K, V> node = data.get(nodeFactory.newLookupKey(key));
×
2151
    return (node != null) && (node.getValue() != null)
×
2152
        && !hasExpired(node, expirationTicker().read());
×
2153
  }
2154

2155
  @Override
2156
  public boolean containsValue(Object value) {
2157
    requireNonNull(value);
×
2158

2159
    long now = expirationTicker().read();
×
2160
    for (Node<K, V> node : data.values()) {
×
2161
      if (node.containsValue(value) && !hasExpired(node, now) && (node.getKey() != null)) {
×
2162
        return true;
×
2163
      }
2164
    }
×
2165
    return false;
×
2166
  }
2167

2168
  @Override
  public @Nullable V get(Object key) {
    // The plain Map read does not record hit/miss statistics
    return getIfPresent(key, /* recordStats= */ false);
  }
2172

2173
  @Override
  public @Nullable V getIfPresent(Object key, boolean recordStats) {
    Node<K, V> node = data.get(nodeFactory.newLookupKey(key));
    if (node == null) {
      if (recordStats) {
        statsCounter().recordMisses(1);
      }
      // Piggyback on the miss to kick off any outstanding maintenance
      if (drainStatusOpaque() == REQUIRED) {
        scheduleDrainBuffers();
      }
      return null;
    }

    V value = node.getValue();
    long now = expirationTicker().read();
    if (hasExpired(node, now) || (collectValues() && (value == null))) {
      if (recordStats) {
        statsCounter().recordMisses(1);
      }
      // Trigger maintenance so the stale entry is reclaimed promptly
      scheduleDrainBuffers();
      return null;
    }

    // Skip the read-time bookkeeping for in-flight async computations; their expiration is
    // finalized upon completion
    if ((value != null) && !isComputingAsync(value)) {
      @SuppressWarnings("unchecked")
      var castedKey = (K) key;
      setAccessTime(node, now);
      tryExpireAfterRead(node, castedKey, value, expiry(), now);
    }
    V refreshed = afterRead(node, now, recordStats);
    return (refreshed == null) ? value : refreshed;
  }
2205

2206
  @Override
2207
  public @Nullable V getIfPresentQuietly(Object key) {
2208
    V value;
2209
    Node<K, V> node = data.get(nodeFactory.newLookupKey(key));
×
2210
    if ((node == null) || ((value = node.getValue()) == null)
×
2211
        || hasExpired(node, expirationTicker().read())) {
×
2212
      return null;
×
2213
    }
2214
    return value;
×
2215
  }
2216

2217
  /**
   * Returns the key associated with the mapping in this cache, or {@code null} if there is none.
   *
   * @param key the key whose canonical instance is to be returned
   * @return the key used by the mapping, or {@code null} if this cache does not contain a mapping
   *         for the key
   * @throws NullPointerException if the specified key is null
   */
  public @Nullable K getKey(K key) {
    Node<K, V> node = data.get(nodeFactory.newLookupKey(key));
    if (node == null) {
      // Piggyback on the miss to kick off any outstanding maintenance
      if (drainStatusOpaque() == REQUIRED) {
        scheduleDrainBuffers();
      }
      return null;
    }
    // Note: no expiration check here; the lookup still counts as a policy read
    afterRead(node, /* now= */ 0L, /* recordHit= */ false);
    return node.getKey();
  }
2236

2237
  @Override
  public Map<K, V> getAllPresent(Iterable<? extends K> keys) {
    // Seed the result with null placeholders to deduplicate keys while preserving order
    var result = new LinkedHashMap<K, @Nullable V>(calculateHashMapCapacity(keys));
    for (K key : keys) {
      result.put(key, null);
    }

    int uniqueKeys = result.size();
    long now = expirationTicker().read();
    for (var iter = result.entrySet().iterator(); iter.hasNext();) {
      V value;
      var entry = iter.next();
      Node<K, V> node = data.get(nodeFactory.newLookupKey(entry.getKey()));
      if ((node == null) || ((value = node.getValue()) == null) || hasExpired(node, now)) {
        // Absent, collected, or expired: drop the placeholder
        iter.remove();
      } else {
        setAccessTime(node, now);
        tryExpireAfterRead(node, entry.getKey(), value, expiry(), now);
        V refreshed = afterRead(node, now, /* recordHit= */ false);
        entry.setValue((refreshed == null) ? value : refreshed);
      }
    }
    // Record the statistics in bulk: remaining entries are hits, removed placeholders are misses
    statsCounter().recordHits(result.size());
    statsCounter().recordMisses(uniqueKeys - result.size());

    @SuppressWarnings("NullableProblems")
    Map<K, V> unmodifiable = Collections.unmodifiableMap(result);
    return unmodifiable;
  }
2266

2267
  @Override
2268
  public void putAll(Map<? extends K, ? extends V> map) {
2269
    map.forEach(this::put);
×
2270
  }
×
2271

2272
  @Override
  public @Nullable V put(K key, V value) {
    // An unconditional write using the cache's configured expiration policy
    return put(key, value, expiry(), /* onlyIfAbsent= */ false);
  }
2276

2277
  @Override
  public @Nullable V putIfAbsent(K key, V value) {
    // A conditional write that only succeeds when no live mapping exists
    return put(key, value, expiry(), /* onlyIfAbsent= */ true);
  }
2281

2282
  /**
2283
   * Adds a node to the policy and the data store. If an existing node is found, then its value is
2284
   * updated if allowed.
2285
   *
2286
   * @param key key with which the specified value is to be associated
2287
   * @param value value to be associated with the specified key
2288
   * @param expiry the calculator for the write expiration time
2289
   * @param onlyIfAbsent a write is performed only if the key is not already associated with a value
2290
   * @return the prior value in or null if no mapping was found
2291
   */
2292
  @Nullable V put(K key, V value, Expiry<K, V> expiry, boolean onlyIfAbsent) {
2293
    requireNonNull(key);
×
2294
    requireNonNull(value);
×
2295

2296
    @Var Node<K, V> node = null;
×
2297
    long now = expirationTicker().read();
×
2298
    int newWeight = weigher.weigh(key, value);
×
2299
    Object lookupKey = nodeFactory.newLookupKey(key);
×
2300
    for (int attempts = 1; ; attempts++) {
×
2301
      @Var Node<K, V> prior = data.get(lookupKey);
×
2302
      if (prior == null) {
×
2303
        if (node == null) {
×
2304
          node = nodeFactory.newNode(key, keyReferenceQueue(),
×
2305
              value, valueReferenceQueue(), newWeight, now);
×
2306
          long expirationTime = isComputingAsync(value) ? (now + ASYNC_EXPIRY) : now;
×
2307
          setVariableTime(node, expireAfterCreate(key, value, expiry, now));
×
2308
          setAccessTime(node, expirationTime);
×
2309
          setWriteTime(node, expirationTime);
×
2310
        }
2311
        prior = data.putIfAbsent(node.getKeyReference(), node);
×
2312
        if (prior == null) {
×
2313
          afterWrite(new AddTask(node, newWeight));
×
2314
          return null;
×
2315
        } else if (onlyIfAbsent) {
×
2316
          // An optimistic fast path to avoid unnecessary locking
2317
          V currentValue = prior.getValue();
×
2318
          if ((currentValue != null) && !hasExpired(prior, now)) {
×
2319
            if (!isComputingAsync(currentValue)) {
×
2320
              tryExpireAfterRead(prior, key, currentValue, expiry(), now);
×
2321
              setAccessTime(prior, now);
×
2322
            }
2323
            afterRead(prior, now, /* recordHit= */ false);
×
2324
            return currentValue;
×
2325
          }
2326
        }
×
2327
      } else if (onlyIfAbsent) {
×
2328
        // An optimistic fast path to avoid unnecessary locking
2329
        V currentValue = prior.getValue();
×
2330
        if ((currentValue != null) && !hasExpired(prior, now)) {
×
2331
          if (!isComputingAsync(currentValue)) {
×
2332
            tryExpireAfterRead(prior, key, currentValue, expiry(), now);
×
2333
            setAccessTime(prior, now);
×
2334
          }
2335
          afterRead(prior, now, /* recordHit= */ false);
×
2336
          return currentValue;
×
2337
        }
2338
      }
2339

2340
      // A read may race with the entry's removal, so that after the entry is acquired it may no
2341
      // longer be usable. A retry will reread from the map and either find an absent mapping, a
2342
      // new entry, or a stale entry.
2343
      if (!prior.isAlive()) {
×
2344
        // A reread of the stale entry may occur if the state transition occurred but the map
2345
        // removal was delayed by a context switch, so that this thread spin waits until resolved.
2346
        if ((attempts & MAX_PUT_SPIN_WAIT_ATTEMPTS) != 0) {
×
2347
          Thread.onSpinWait();
×
2348
          continue;
×
2349
        }
2350

2351
        // If the spin wait attempts are exhausted then fallback to a map computation in order to
2352
        // deschedule this thread until the entry's removal completes. If the key was modified
2353
        // while in the map so that its equals or hashCode changed then the contents may be
2354
        // corrupted, where the cache holds an evicted (dead) entry that could not be removed.
2355
        // That is a violation of the Map contract, so we check that the mapping is in the "alive"
2356
        // state while in the computation.
2357
        data.computeIfPresent(lookupKey, (k, n) -> {
×
2358
          requireIsAlive(key, n);
×
2359
          return n;
×
2360
        });
2361
        continue;
×
2362
      }
2363

2364
      V oldValue;
2365
      long varTime;
2366
      int oldWeight;
2367
      @Var boolean expired = false;
×
2368
      @Var boolean mayUpdate = true;
×
2369
      @Var boolean exceedsTolerance = false;
×
2370
      synchronized (prior) {
×
2371
        if (!prior.isAlive()) {
×
2372
          continue;
×
2373
        }
2374
        oldValue = prior.getValue();
×
2375
        oldWeight = prior.getWeight();
×
2376
        if (oldValue == null) {
×
2377
          varTime = expireAfterCreate(key, value, expiry, now);
×
2378
          notifyEviction(key, null, RemovalCause.COLLECTED);
×
2379
        } else if (hasExpired(prior, now)) {
×
2380
          expired = true;
×
2381
          varTime = expireAfterCreate(key, value, expiry, now);
×
2382
          notifyEviction(key, oldValue, RemovalCause.EXPIRED);
×
2383
        } else if (onlyIfAbsent) {
×
2384
          mayUpdate = false;
×
2385
          varTime = expireAfterRead(prior, key, value, expiry, now);
×
2386
        } else {
2387
          varTime = expireAfterUpdate(prior, key, value, expiry, now);
×
2388
        }
2389

2390
        long expirationTime = isComputingAsync(value) ? (now + ASYNC_EXPIRY) : now;
×
2391
        if (mayUpdate) {
×
2392
          exceedsTolerance = exceedsWriteTimeTolerance(prior, varTime, now);
×
2393
          if (expired || exceedsTolerance) {
×
2394
            setWriteTime(prior, isComputingAsync(value) ? (now + ASYNC_EXPIRY) : now);
×
2395
          }
2396

2397
          prior.setValue(value, valueReferenceQueue());
×
2398
          prior.setWeight(newWeight);
×
2399

2400
          discardRefresh(prior.getKeyReference());
×
2401
        }
2402

2403
        setVariableTime(prior, varTime);
×
2404
        setAccessTime(prior, expirationTime);
×
2405
      }
×
2406

2407
      if (expired) {
×
2408
        notifyRemoval(key, oldValue, RemovalCause.EXPIRED);
×
2409
      } else if (oldValue == null) {
×
2410
        notifyRemoval(key, /* value= */ null, RemovalCause.COLLECTED);
×
2411
      } else if (mayUpdate) {
×
2412
        notifyOnReplace(key, oldValue, value);
×
2413
      }
2414

2415
      int weightedDifference = mayUpdate ? (newWeight - oldWeight) : 0;
×
2416
      if ((oldValue == null) || (weightedDifference != 0) || expired) {
×
2417
        afterWrite(new UpdateTask(prior, weightedDifference));
×
2418
      } else if (!onlyIfAbsent && exceedsTolerance) {
×
2419
        afterWrite(new UpdateTask(prior, weightedDifference));
×
2420
      } else {
2421
        afterRead(prior, now, /* recordHit= */ false);
×
2422
      }
2423

2424
      return expired ? null : oldValue;
×
2425
    }
2426
  }
2427

2428
  @Override
2429
  @SuppressWarnings("SynchronizationOnLocalVariableOrMethodParameter")
2430
  public @Nullable V remove(Object key) {
2431
    @SuppressWarnings({"rawtypes", "unchecked"})
2432
    Node<K, V>[] node = new Node[1];
×
2433
    @SuppressWarnings({"unchecked", "Varifier"})
2434
    @Nullable K[] oldKey = (K[]) new Object[1];
×
2435
    @SuppressWarnings({"unchecked", "Varifier"})
2436
    @Nullable V[] oldValue = (V[]) new Object[1];
×
2437
    @Nullable RemovalCause[] cause = new RemovalCause[1];
×
2438
    Object lookupKey = nodeFactory.newLookupKey(key);
×
2439

2440
    data.computeIfPresent(lookupKey, (k, n) -> {
×
2441
      synchronized (n) {
×
2442
        requireIsAlive(key, n);
×
2443
        oldKey[0] = n.getKey();
×
2444
        oldValue[0] = n.getValue();
×
2445
        RemovalCause actualCause;
2446
        if ((oldKey[0] == null) || (oldValue[0] == null)) {
×
2447
          actualCause = RemovalCause.COLLECTED;
×
2448
        } else if (hasExpired(n, expirationTicker().read())) {
×
2449
          actualCause = RemovalCause.EXPIRED;
×
2450
        } else {
2451
          actualCause= RemovalCause.EXPLICIT;
×
2452
        }
2453
        if (actualCause.wasEvicted()) {
×
2454
          notifyEviction(oldKey[0], oldValue[0], actualCause);
×
2455
        }
2456
        cause[0] = actualCause;
×
2457
        discardRefresh(k);
×
2458
        node[0] = n;
×
2459
        n.retire();
×
2460
        return null;
×
2461
      }
2462
    });
2463

2464
    if (cause[0] != null) {
×
2465
      afterWrite(new RemovalTask(node[0]));
×
2466
      notifyRemoval(oldKey[0], oldValue[0], cause[0]);
×
2467
    }
2468
    return (cause[0] == RemovalCause.EXPLICIT) ? oldValue[0] : null;
×
2469
  }
2470

2471
  @Override
  @SuppressWarnings("SynchronizationOnLocalVariableOrMethodParameter")
  public boolean remove(Object key, @Nullable Object value) {
    requireNonNull(key);
    if (value == null) {
      // Map.remove(key, value) never matches a null value
      return false;
    }

    // Single-element arrays capture state from inside the computeIfPresent lambda so that
    // listener notification and write-buffer work can run after the map bin unlocks
    @SuppressWarnings({"rawtypes", "unchecked"})
    @Nullable Node<K, V>[] removed = new Node[1];
    @SuppressWarnings({"unchecked", "Varifier"})
    @Nullable K[] oldKey = (K[]) new Object[1];
    @SuppressWarnings({"unchecked", "Varifier"})
    @Nullable V[] oldValue = (V[]) new Object[1];
    RemovalCause[] cause = new RemovalCause[1];
    Object lookupKey = nodeFactory.newLookupKey(key);

    data.computeIfPresent(lookupKey, (kR, node) -> {
      synchronized (node) {
        requireIsAlive(key, node);
        oldKey[0] = node.getKey();
        oldValue[0] = node.getValue();
        // Dead or expired entries are evicted rather than explicitly removed; a live entry
        // whose value does not match is left untouched (return node keeps the mapping)
        if ((oldKey[0] == null) || (oldValue[0] == null)) {
          cause[0] = RemovalCause.COLLECTED;
        } else if (hasExpired(node, expirationTicker().read())) {
          cause[0] = RemovalCause.EXPIRED;
        } else if (node.containsValue(value)) {
          cause[0] = RemovalCause.EXPLICIT;
        } else {
          return node;
        }
        if (cause[0].wasEvicted()) {
          notifyEviction(oldKey[0], oldValue[0], cause[0]);
        }
        discardRefresh(kR);
        removed[0] = node;
        node.retire();
        // Returning null removes the mapping from the backing map
        return null;
      }
    });

    if (removed[0] == null) {
      return false;
    }
    afterWrite(new RemovalTask(removed[0]));
    notifyRemoval(oldKey[0], oldValue[0], cause[0]);

    // Only a matching, live entry counts as a successful conditional removal
    return (cause[0] == RemovalCause.EXPLICIT);
  }
2520

2521
  @Override
  @SuppressWarnings("SynchronizationOnLocalVariableOrMethodParameter")
  public @Nullable V replace(K key, V value) {
    requireNonNull(key);
    requireNonNull(value);

    // Single-element arrays capture state from inside the computeIfPresent lambda so that
    // policy bookkeeping and listener notification can run after the map bin unlocks
    var now = new long[1];
    var oldWeight = new int[1];
    var exceedsTolerance = new boolean[1];
    @SuppressWarnings({"unchecked", "Varifier"})
    @Nullable K[] nodeKey = (K[]) new Object[1];
    @SuppressWarnings({"unchecked", "Varifier"})
    @Nullable V[] oldValue = (V[]) new Object[1];
    int weight = weigher.weigh(key, value);
    Node<K, V> node = data.computeIfPresent(nodeFactory.newLookupKey(key), (k, n) -> {
      synchronized (n) {
        requireIsAlive(key, n);
        nodeKey[0] = n.getKey();
        oldValue[0] = n.getValue();
        oldWeight[0] = n.getWeight();
        // A collected or expired entry is treated as absent: clear oldValue so the caller
        // path below returns null without replacing
        if ((nodeKey[0] == null) || (oldValue[0] == null)
            || hasExpired(n, now[0] = expirationTicker().read())) {
          oldValue[0] = null;
          return n;
        }

        long varTime = expireAfterUpdate(n, key, value, expiry(), now[0]);
        n.setValue(value, valueReferenceQueue());
        n.setWeight(weight);

        long expirationTime = isComputingAsync(value) ? (now[0] + ASYNC_EXPIRY) : now[0];
        // Only refresh the write time when it moved beyond the tolerance, to reduce churn
        exceedsTolerance[0] = exceedsWriteTimeTolerance(n, varTime, expirationTime);
        if (exceedsTolerance[0]) {
          setWriteTime(n, expirationTime);
        }
        setAccessTime(n, expirationTime);
        setVariableTime(n, varTime);

        discardRefresh(k);
        return n;
      }
    });

    if ((node == null) || (nodeKey[0] == null) || (oldValue[0] == null)) {
      // Absent, collected, or expired: nothing was replaced
      return null;
    }

    // A weight change or a write-time change requires eviction-policy maintenance;
    // otherwise treat the replacement as a policy read
    int weightedDifference = (weight - oldWeight[0]);
    if (exceedsTolerance[0] || (weightedDifference != 0)) {
      afterWrite(new UpdateTask(node, weightedDifference));
    } else {
      afterRead(node, now[0], /* recordHit= */ false);
    }

    notifyOnReplace(nodeKey[0], oldValue[0], value);
    return oldValue[0];
  }
2578

2579
  @Override
  public boolean replace(K key, V oldValue, V newValue) {
    // Delegate to the extended overload; an in-flight refresh for this key is discarded
    // since the caller is directly replacing the value
    return replace(key, oldValue, newValue, /* shouldDiscardRefresh= */ true);
  }
2583

2584
  @Override
  @SuppressWarnings("SynchronizationOnLocalVariableOrMethodParameter")
  public boolean replace(K key, V oldValue, V newValue, boolean shouldDiscardRefresh) {
    requireNonNull(key);
    requireNonNull(oldValue);
    requireNonNull(newValue);

    // Single-element arrays capture state from inside the computeIfPresent lambda so that
    // policy bookkeeping and listener notification can run after the map bin unlocks
    var now = new long[1];
    var oldWeight = new int[1];
    var exceedsTolerance = new boolean[1];
    @SuppressWarnings({"unchecked", "Varifier"})
    @Nullable K[] nodeKey = (K[]) new Object[1];
    @SuppressWarnings({"unchecked", "Varifier"})
    @Nullable V[] prevValue = (V[]) new Object[1];

    int weight = weigher.weigh(key, newValue);
    Node<K, V> node = data.computeIfPresent(nodeFactory.newLookupKey(key), (k, n) -> {
      synchronized (n) {
        requireIsAlive(key, n);
        nodeKey[0] = n.getKey();
        prevValue[0] = n.getValue();
        oldWeight[0] = n.getWeight();
        // Treat a collected, expired, or non-matching entry as a failed replacement by
        // clearing prevValue so the caller path below returns false
        if ((nodeKey[0] == null) || (prevValue[0] == null) || !n.containsValue(oldValue)
            || hasExpired(n, now[0] = expirationTicker().read())) {
          prevValue[0] = null;
          return n;
        }

        long varTime = expireAfterUpdate(n, key, newValue, expiry(), now[0]);
        n.setValue(newValue, valueReferenceQueue());
        n.setWeight(weight);

        long expirationTime = isComputingAsync(newValue) ? (now[0] + ASYNC_EXPIRY) : now[0];
        // Only refresh the write time when it moved beyond the tolerance, to reduce churn
        exceedsTolerance[0] = exceedsWriteTimeTolerance(n, varTime, expirationTime);
        if (exceedsTolerance[0]) {
          setWriteTime(n, expirationTime);
        }
        setAccessTime(n, expirationTime);
        setVariableTime(n, varTime);

        // A refresh replacing the value keeps its own bookkeeping, so it may opt out
        if (shouldDiscardRefresh) {
          discardRefresh(k);
        }
      }
      return n;
    });

    if ((node == null) || (nodeKey[0] == null) || (prevValue[0] == null)) {
      return false;
    }

    // A weight change or a write-time change requires eviction-policy maintenance;
    // otherwise treat the replacement as a policy read
    int weightedDifference = (weight - oldWeight[0]);
    if (exceedsTolerance[0] || (weightedDifference != 0)) {
      afterWrite(new UpdateTask(node, weightedDifference));
    } else {
      afterRead(node, now[0], /* recordHit= */ false);
    }

    notifyOnReplace(nodeKey[0], prevValue[0], newValue);
    return true;
  }
2645

2646
  @Override
2647
  public void replaceAll(BiFunction<? super K, ? super V, ? extends V> function) {
2648
    requireNonNull(function);
×
2649

2650
    BiFunction<K, V, V> remappingFunction = (key, oldValue) ->
×
2651
        requireNonNull(function.apply(key, oldValue));
×
2652
    for (K key : keySet()) {
×
2653
      long[] now = { expirationTicker().read() };
×
2654
      Object lookupKey = nodeFactory.newLookupKey(key);
×
2655
      remap(key, lookupKey, remappingFunction, expiry(), now, /* computeIfAbsent= */ false);
×
2656
    }
×
2657
  }
×
2658

2659
  @Override
  public @Nullable V computeIfAbsent(K key, @Var Function<? super K, ? extends V> mappingFunction,
      boolean recordStats, boolean recordLoad) {
    requireNonNull(key);
    requireNonNull(mappingFunction);
    long now = expirationTicker().read();

    // An optimistic fast path to avoid unnecessary locking
    Node<K, V> node = data.get(nodeFactory.newLookupKey(key));
    if (node != null) {
      V value = node.getValue();
      if ((value != null) && !hasExpired(node, now)) {
        if (!isComputingAsync(value)) {
          tryExpireAfterRead(node, key, value, expiry(), now);
          setAccessTime(node, now);
        }
        // afterRead may trigger an automatic refresh; prefer the refreshed value if present
        @Nullable V refreshed = afterRead(node, now, /* recordHit= */ recordStats);
        return (refreshed == null) ? value : refreshed;
      }
    }
    if (recordStats) {
      // Wrap so that load success/failure statistics are recorded by the computation
      mappingFunction = statsAware(mappingFunction, recordLoad);
    }
    Object keyRef = nodeFactory.newReferenceKey(key, keyReferenceQueue());
    return doComputeIfAbsent(key, keyRef, mappingFunction, new long[] { now }, recordStats);
  }
2685

2686
  /** Returns the current value from a computeIfAbsent invocation. */
  @SuppressWarnings("SynchronizationOnLocalVariableOrMethodParameter")
  @Nullable V doComputeIfAbsent(K key, Object keyRef,
      Function<? super K, ? extends @Nullable V> mappingFunction, long[/* 1 */] now,
      boolean recordStats) {
    // Single-element arrays capture state from inside the compute lambda so that listener
    // notification and write-buffer work can run after the map bin unlocks
    @SuppressWarnings({"unchecked", "Varifier"})
    @Nullable V[] oldValue = (V[]) new Object[1];
    @SuppressWarnings({"unchecked", "Varifier"})
    @Nullable V[] newValue = (V[]) new Object[1];
    @SuppressWarnings({"unchecked", "Varifier"})
    @Nullable K[] nodeKey = (K[]) new Object[1];
    @SuppressWarnings({"rawtypes", "unchecked"})
    @Nullable Node<K, V>[] removed = new Node[1];

    int[] weight = new int[2]; // old, new
    @Nullable RemovalCause[] cause = new RemovalCause[1];
    Node<K, V> node = data.compute(keyRef, (k, n) -> {
      if (n == null) {
        // Absent mapping: run the loader and install a new entry if it produced a value
        newValue[0] = mappingFunction.apply(key);
        if (newValue[0] == null) {
          discardRefresh(k);
          return null;
        }
        now[0] = expirationTicker().read();
        weight[1] = weigher.weigh(key, newValue[0]);
        var created = nodeFactory.newNode(key, keyReferenceQueue(),
            newValue[0], valueReferenceQueue(), weight[1], now[0]);
        long expirationTime = isComputingAsync(newValue[0]) ? (now[0] + ASYNC_EXPIRY) : now[0];
        setVariableTime(created, expireAfterCreate(key, newValue[0], expiry(), now[0]));
        setAccessTime(created, expirationTime);
        setWriteTime(created, expirationTime);
        discardRefresh(k);
        return created;
      }

      synchronized (n) {
        requireIsAlive(key, n);
        nodeKey[0] = n.getKey();
        weight[0] = n.getWeight();
        oldValue[0] = n.getValue();
        // A live, unexpired entry is returned as-is; a dead one is evicted and recomputed
        RemovalCause actualCause;
        if ((nodeKey[0] == null) || (oldValue[0] == null)) {
          actualCause = RemovalCause.COLLECTED;
        } else if (hasExpired(n, now[0])) {
          actualCause = RemovalCause.EXPIRED;
        } else {
          return n;
        }

        cause[0] = actualCause;
        notifyEviction(nodeKey[0], oldValue[0], actualCause);
        newValue[0] = mappingFunction.apply(key);
        if (newValue[0] == null) {
          // No replacement value: retire the dead entry and drop the mapping
          discardRefresh(k);
          removed[0] = n;
          n.retire();
          return null;
        }
        now[0] = expirationTicker().read();
        weight[1] = weigher.weigh(key, newValue[0]);
        long varTime = expireAfterCreate(key, newValue[0], expiry(), now[0]);

        // Reuse the existing node for the newly computed value
        n.setValue(newValue[0], valueReferenceQueue());
        n.setWeight(weight[1]);

        long expirationTime = isComputingAsync(newValue[0]) ? (now[0] + ASYNC_EXPIRY) : now[0];
        setAccessTime(n, expirationTime);
        setWriteTime(n, expirationTime);
        setVariableTime(n, varTime);

        discardRefresh(k);
        return n;
      }
    });

    if (cause[0] != null) {
      statsCounter().recordEviction(weight[0], cause[0]);
      notifyRemoval(nodeKey[0], oldValue[0], cause[0]);
    }
    if (node == null) {
      if (removed[0] != null) {
        afterWrite(new RemovalTask(removed[0]));
      }
      return null;
    }
    if ((oldValue[0] != null) && (newValue[0] == null)) {
      // The existing live value was returned: record a policy read, not a write
      if (!isComputingAsync(oldValue[0])) {
        tryExpireAfterRead(node, key, oldValue[0], expiry(), now[0]);
        setAccessTime(node, now[0]);
      }

      afterRead(node, now[0], /* recordHit= */ recordStats);
      return oldValue[0];
    }
    if ((oldValue[0] == null) && (cause[0] == null)) {
      // Freshly created entry
      afterWrite(new AddTask(node, weight[1]));
    } else {
      // Dead entry replaced in place
      int weightedDifference = (weight[1] - weight[0]);
      afterWrite(new UpdateTask(node, weightedDifference));
    }

    return newValue[0];
  }
2789

2790
  @Override
  public @Nullable V computeIfPresent(K key,
      BiFunction<? super K, ? super V, ? extends V> remappingFunction) {
    requireNonNull(key);
    requireNonNull(remappingFunction);

    // An optimistic fast path to avoid unnecessary locking
    Object lookupKey = nodeFactory.newLookupKey(key);
    @Nullable Node<K, V> node = data.get(lookupKey);
    long now;
    if (node == null) {
      return null;
    } else if ((node.getValue() == null) || hasExpired(node, (now = expirationTicker().read()))) {
      // A dead entry was observed: trigger maintenance to remove it, then report absence
      scheduleDrainBuffers();
      return null;
    }

    BiFunction<? super K, ? super V, ? extends V> statsAwareRemappingFunction =
        statsAware(remappingFunction, /* recordLoad= */ true, /* recordLoadFailure= */ true);
    return remap(key, lookupKey, statsAwareRemappingFunction,
        expiry(), new long[] { now }, /* computeIfAbsent= */ false);
  }
2812

2813
  @Override
2814
  @SuppressWarnings("NullAway")
2815
  public @Nullable V compute(K key,
2816
      BiFunction<? super K, ? super V, ? extends @Nullable V> remappingFunction,
2817
      @Nullable Expiry<? super K, ? super V> expiry, boolean recordLoad,
2818
      boolean recordLoadFailure) {
2819
    requireNonNull(key);
×
2820
    requireNonNull(remappingFunction);
×
2821

2822
    long[] now = { expirationTicker().read() };
×
2823
    Object keyRef = nodeFactory.newReferenceKey(key, keyReferenceQueue());
×
2824
    BiFunction<? super K, ? super V, ? extends V> statsAwareRemappingFunction =
×
2825
        statsAware(remappingFunction, recordLoad, recordLoadFailure);
×
2826
    return remap(key, keyRef, statsAwareRemappingFunction,
×
2827
        expiry, now, /* computeIfAbsent= */ true);
2828
  }
2829

2830
  @Override
2831
  public @Nullable V merge(K key, V value,
2832
      BiFunction<? super V, ? super V, ? extends V> remappingFunction) {
2833
    requireNonNull(key);
×
2834
    requireNonNull(value);
×
2835
    requireNonNull(remappingFunction);
×
2836

2837
    long[] now = { expirationTicker().read() };
×
2838
    Object keyRef = nodeFactory.newReferenceKey(key, keyReferenceQueue());
×
2839
    BiFunction<? super K, ? super @Nullable V, ? extends @Nullable V> mergeFunction =
×
2840
        (k, oldValue) -> (oldValue == null)
×
2841
          ? value
×
2842
          : statsAware(remappingFunction).apply(oldValue, value);
×
2843
    return remap(key, keyRef, mergeFunction, expiry(), now, /* computeIfAbsent= */ true);
×
2844
  }
2845

2846
  /**
   * Attempts to compute a mapping for the specified key and its current mapped value (or
   * {@code null} if there is no current mapping).
   * <p>
   * An entry that has expired or been reference collected is evicted and the computation continues
   * as if the entry had not been present. This method does not pre-screen and does not wrap the
   * remappingFunction to be statistics aware.
   *
   * @param key key with which the specified value is to be associated
   * @param keyRef the key to associate with or a lookup only key if not {@code computeIfAbsent}
   * @param remappingFunction the function to compute a value
   * @param expiry the calculator for the expiration time
   * @param now the current time, according to the ticker
   * @param computeIfAbsent if an absent entry can be computed
   * @return the new value associated with the specified key, or null if none
   */
  @SuppressWarnings("SynchronizationOnLocalVariableOrMethodParameter")
  @Nullable V remap(K key, Object keyRef,
      BiFunction<? super K, ? super V, ? extends @Nullable V> remappingFunction,
      @Nullable Expiry<? super K, ? super V> expiry, long[/* 1 */] now, boolean computeIfAbsent) {
    // Single-element arrays capture state from inside the compute lambda so that listener
    // notification and write-buffer work can run after the map bin unlocks
    @SuppressWarnings({"unchecked", "Varifier"})
    @Nullable K[] nodeKey = (K[]) new Object[1];
    @SuppressWarnings({"unchecked", "Varifier"})
    @Nullable V[] oldValue = (V[]) new Object[1];
    @SuppressWarnings({"unchecked", "Varifier"})
    @Nullable V[] newValue = (V[]) new Object[1];
    @SuppressWarnings({"rawtypes", "unchecked"})
    @Nullable Node<K, V>[] removed = new Node[1];

    var weight = new int[2]; // old, new
    var cause = new RemovalCause[1];
    var exceedsTolerance = new boolean[1];

    Node<K, V> node = data.compute(keyRef, (kr, n) -> {
      if (n == null) {
        // Absent mapping: compute and install a new entry when permitted
        if (!computeIfAbsent) {
          return null;
        }
        newValue[0] = remappingFunction.apply(key, null);
        if (newValue[0] == null) {
          return null;
        }
        now[0] = expirationTicker().read();
        weight[1] = weigher.weigh(key, newValue[0]);
        long varTime = expireAfterCreate(key, newValue[0], expiry, now[0]);
        var created = nodeFactory.newNode(keyRef, newValue[0],
            valueReferenceQueue(), weight[1], now[0]);

        long expirationTime = isComputingAsync(newValue[0]) ? (now[0] + ASYNC_EXPIRY) : now[0];
        setAccessTime(created, expirationTime);
        setWriteTime(created, expirationTime);
        setVariableTime(created, varTime);
        discardRefresh(kr);
        return created;
      }

      synchronized (n) {
        requireIsAlive(key, n);
        nodeKey[0] = n.getKey();
        oldValue[0] = n.getValue();
        // A collected or expired entry is evicted; the computation then proceeds as if the
        // entry had been absent (when computeIfAbsent is allowed)
        if ((nodeKey[0] == null) || (oldValue[0] == null)) {
          cause[0] = RemovalCause.COLLECTED;
        } else if (hasExpired(n, expirationTicker().read())) {
          cause[0] = RemovalCause.EXPIRED;
        }
        if (cause[0] != null) {
          notifyEviction(nodeKey[0], oldValue[0], cause[0]);
          if (!computeIfAbsent) {
            removed[0] = n;
            n.retire();
            return null;
          }
        }

        // The function sees the current value only if the entry is still live
        newValue[0] = remappingFunction.apply(nodeKey[0],
            (cause[0] == null) ? oldValue[0] : null);
        if (newValue[0] == null) {
          if (cause[0] == null) {
            cause[0] = RemovalCause.EXPLICIT;
            discardRefresh(kr);
          }
          removed[0] = n;
          n.retire();
          return null;
        }

        long varTime;
        weight[0] = n.getWeight();
        weight[1] = weigher.weigh(key, newValue[0]);
        now[0] = expirationTicker().read();
        if (cause[0] == null) {
          // Live entry updated in place; identity change is reported as a replacement
          if (newValue[0] != oldValue[0]) {
            cause[0] = RemovalCause.REPLACED;
          }
          varTime = expireAfterUpdate(n, key, newValue[0], expiry, now[0]);
        } else {
          // Dead entry recreated: use creation-time expiry semantics
          varTime = expireAfterCreate(key, newValue[0], expiry, now[0]);
        }

        n.setValue(newValue[0], valueReferenceQueue());
        n.setWeight(weight[1]);

        long expirationTime = isComputingAsync(newValue[0]) ? (now[0] + ASYNC_EXPIRY) : now[0];
        // Only refresh the write time after eviction or when beyond the tolerance
        exceedsTolerance[0] = exceedsWriteTimeTolerance(n, varTime, expirationTime);
        if (((cause[0] != null) && cause[0].wasEvicted()) || exceedsTolerance[0]) {
          setWriteTime(n, expirationTime);
        }
        setAccessTime(n, expirationTime);
        setVariableTime(n, varTime);

        discardRefresh(kr);
        return n;
      }
    });

    if (cause[0] != null) {
      if (cause[0] == RemovalCause.REPLACED) {
        requireNonNull(newValue[0]);
        notifyOnReplace(key, oldValue[0], newValue[0]);
      } else {
        if (cause[0].wasEvicted()) {
          statsCounter().recordEviction(weight[0], cause[0]);
        }
        notifyRemoval(nodeKey[0], oldValue[0], cause[0]);
      }
    }

    if (removed[0] != null) {
      afterWrite(new RemovalTask(removed[0]));
    } else if (node == null) {
      // absent and not computable
    } else if ((oldValue[0] == null) && (cause[0] == null)) {
      afterWrite(new AddTask(node, weight[1]));
    } else {
      // A weight change or a write-time change requires eviction-policy maintenance;
      // otherwise treat as a policy read
      int weightedDifference = weight[1] - weight[0];
      if (exceedsTolerance[0] || (weightedDifference != 0)) {
        afterWrite(new UpdateTask(node, weightedDifference));
      } else {
        afterRead(node, now[0], /* recordHit= */ false);
        if ((cause[0] != null) && cause[0].wasEvicted()) {
          scheduleDrainBuffers();
        }
      }
    }

    return newValue[0];
  }
2993

2994
  @Override
2995
  public void forEach(BiConsumer<? super K, ? super V> action) {
2996
    requireNonNull(action);
×
2997

2998
    for (var iterator = new EntryIterator<>(this); iterator.hasNext();) {
×
2999
      action.accept(iterator.key, iterator.value);
×
3000
      iterator.advance();
×
3001
    }
3002
  }
×
3003

3004
  @Override
3005
  public Set<K> keySet() {
3006
    Set<K> ks = keySet;
×
3007
    return (ks == null) ? (keySet = new KeySetView<>(this)) : ks;
×
3008
  }
3009

3010
  @Override
3011
  public Collection<V> values() {
3012
    Collection<V> vs = values;
×
3013
    return (vs == null) ? (values = new ValuesView<>(this)) : vs;
×
3014
  }
3015

3016
  @Override
3017
  public Set<Entry<K, V>> entrySet() {
3018
    Set<Entry<K, V>> es = entrySet;
×
3019
    return (es == null) ? (entrySet = new EntrySetView<>(this)) : es;
×
3020
  }
3021

3022
  /**
   * Object equality requires reflexive, symmetric, transitive, and consistency properties. Of
   * these, symmetry and consistency require further clarification for how they are upheld.
   * <p>
   * The <i>consistency</i> property between invocations requires that the results are the same if
   * there are no modifications to the information used. Therefore, usages should expect that this
   * operation may return misleading results if either the maps or the data held by them is modified
   * during the execution of this method. This characteristic allows for comparing the map sizes and
   * assuming stable mappings, as done by {@link java.util.AbstractMap}-based maps.
   * <p>
   * The <i>symmetric</i> property requires that the result is the same for all implementations of
   * {@link Map#equals(Object)}. That contract is defined in terms of the stable mappings provided
   * by {@link #entrySet()}, meaning that the {@link #size()} optimization forces that the count is
   * consistent with the mappings when used for an equality check.
   * <p>
   * The cache's {@link #size()} method may include entries that have expired or have been reference
   * collected, but have not yet been removed from the backing map. An iteration over the map may
   * trigger the removal of these dead entries when skipped over during traversal. To ensure
   * consistency and symmetry, usages should call {@link #cleanUp()} before this method while no
   * other concurrent operations are being performed on this cache. This is not done implicitly by
   * {@link #size()} as many usages assume it to be instantaneous and lock-free.
   */
  @Override
  public boolean equals(@Nullable Object o) {
    if (o == this) {
      return true;
    } else if (!(o instanceof Map)) {
      return false;
    }

    var map = (Map<?, ?>) o;
    if (size() != map.size()) {
      return false;
    }

    long now = expirationTicker().read();
    for (var node : data.values()) {
      K key = node.getKey();
      V value = node.getValue();
      if ((key == null) || (value == null)
          || !node.isAlive() || hasExpired(node, now)) {
        // A dead entry makes the counted size inconsistent with the live mappings, so the
        // maps cannot be equal; trigger maintenance to purge it
        scheduleDrainBuffers();
        return false;
      } else {
        var val = map.get(key);
        // Reference check first as a cheap shortcut before the equals comparison
        if ((val == null) || ((val != value) && !val.equals(value))) {
          return false;
        }
      }
    }
    return true;
  }
3074

3075
  /**
   * Returns the hash code of the live mappings, as defined by {@link Map#hashCode()}. Entries
   * that are pending removal (reference collected, retired, or expired) are skipped, and a
   * maintenance cycle is scheduled to evict them.
   */
  @Override
  public int hashCode() {
    @Var int hash = 0;
    long now = expirationTicker().read();
    for (var node : data.values()) {
      K key = node.getKey();
      V value = node.getValue();
      if ((key == null) || (value == null)
          || !node.isAlive() || hasExpired(node, now)) {
        scheduleDrainBuffers();
      } else {
        // Per the Map contract: the sum over entries of (key hash xor value hash)
        hash += key.hashCode() ^ value.hashCode();
      }
    }
    return hash;
  }
3091

3092
  /**
   * Returns a string representation of the live mappings, in the {@code {k1=v1, k2=v2}} format
   * of {@code AbstractMap#toString()}. Entries pending removal are skipped, and a maintenance
   * cycle is scheduled to evict them.
   */
  @Override
  public String toString() {
    var result = new StringBuilder().append('{');
    long now = expirationTicker().read();
    for (var node : data.values()) {
      K key = node.getKey();
      V value = node.getValue();
      if ((key == null) || (value == null)
          || !node.isAlive() || hasExpired(node, now)) {
        scheduleDrainBuffers();
      } else {
        // Separator only between entries; length 1 means only '{' has been written so far
        if (result.length() != 1) {
          result.append(',').append(' ');
        }
        // Guard against a self-referential mapping, mirroring AbstractMap's formatting
        result.append((key == this) ? "(this Map)" : key);
        result.append('=');
        result.append((value == this) ? "(this Map)" : value);
      }
    }
    return result.append('}').toString();
  }
3113

3114
  /**
   * Returns the computed result from the ordered traversal of the cache entries.
   *
   * @param hottest the coldest or hottest iteration order
   * @param transformer a function that unwraps the value
   * @param mappingFunction the mapping function to compute a value
   * @return the computed value
   */
  @SuppressWarnings("GuardedByChecker")
  <T> T evictionOrder(boolean hottest, Function<@Nullable V, @Nullable V> transformer,
      Function<Stream<CacheEntry<K, V>>, T> mappingFunction) {
    // Ranks nodes by their frequency sketch estimate; a node whose key was reference
    // collected ranks as zero
    Comparator<Node<K, V>> comparator = Comparator.comparingInt(node -> {
      var keyRef = node.getKeyReferenceOrNull();
      return (keyRef == null) ? 0 : frequencySketch().frequency(keyRef);
    });
    Iterable<Node<K, V>> iterable;
    if (hottest) {
      // Hottest first: the protected entries, followed by the probation and window entries
      // merged by the frequency comparator (descending deque order)
      iterable = () -> {
        var secondary = PeekingIterator.comparing(
            accessOrderProbationDeque().descendingIterator(),
            accessOrderWindowDeque().descendingIterator(), comparator);
        return PeekingIterator.concat(
            accessOrderProtectedDeque().descendingIterator(), secondary);
      };
    } else {
      // Coldest first: the window and probation entries merged by the reversed frequency
      // comparator, followed by the protected entries
      iterable = () -> {
        var primary = PeekingIterator.comparing(
            accessOrderWindowDeque().iterator(), accessOrderProbationDeque().iterator(),
            comparator.reversed());
        return PeekingIterator.concat(primary, accessOrderProtectedDeque().iterator());
      };
    }
    return snapshot(iterable, transformer, mappingFunction);
  }
3148

3149
  /**
   * Returns the computed result from the ordered traversal of the cache entries.
   *
   * @param oldest the youngest or oldest iteration order
   * @param transformer a function that unwraps the value
   * @param mappingFunction the mapping function to compute a value
   * @return the computed value
   */
  @SuppressWarnings("GuardedByChecker")
  <T> T expireAfterAccessOrder(boolean oldest, Function<@Nullable V, @Nullable V> transformer,
      Function<Stream<CacheEntry<K, V>>, T> mappingFunction) {
    Iterable<Node<K, V>> iterable;
    if (evicts()) {
      // With size eviction the entries are split across the window, probation, and protected
      // deques, so the traversal merges all three by access time
      iterable = () -> {
        @Var Comparator<Node<K, V>> comparator = Comparator.comparingLong(Node::getAccessTime);
        PeekingIterator<Node<K, V>> first;
        PeekingIterator<Node<K, V>> second;
        PeekingIterator<Node<K, V>> third;
        if (oldest) {
          first = accessOrderWindowDeque().iterator();
          second = accessOrderProbationDeque().iterator();
          third = accessOrderProtectedDeque().iterator();
        } else {
          // Youngest first: walk the deques backwards and invert the comparison
          comparator = comparator.reversed();
          first = accessOrderWindowDeque().descendingIterator();
          second = accessOrderProbationDeque().descendingIterator();
          third = accessOrderProtectedDeque().descendingIterator();
        }
        return PeekingIterator.comparing(
            PeekingIterator.comparing(first, second, comparator), third, comparator);
      };
    } else {
      // Without size eviction every entry resides in the window deque
      iterable = oldest
          ? accessOrderWindowDeque()
          : accessOrderWindowDeque()::descendingIterator;
    }
    return snapshot(iterable, transformer, mappingFunction);
  }
3187

3188
  /**
   * Returns the computed result from the ordered traversal of the cache entries.
   * <p>
   * The eviction lock is held for the duration so that the policy deques are stable, and a
   * maintenance cycle is run first so that pending reads/writes are reflected in the ordering.
   *
   * @param iterable the supplier of the entries in the cache
   * @param transformer a function that unwraps the value
   * @param mappingFunction the mapping function to compute a value
   * @return the computed value
   */
  <T> T snapshot(Iterable<Node<K, V>> iterable, Function<@Nullable V, @Nullable V> transformer,
      Function<Stream<CacheEntry<K, V>>, T> mappingFunction) {
    requireNonNull(mappingFunction);
    requireNonNull(transformer);
    requireNonNull(iterable);

    evictionLock.lock();
    try {
      maintenance(/* ignored */ null);

      // Obtain the iterator as late as possible for modification count checking
      try (var stream = StreamSupport.stream(Spliterators.spliteratorUnknownSize(
           iterable.iterator(), DISTINCT | ORDERED | NONNULL | IMMUTABLE), /* parallel= */ false)) {
        return mappingFunction.apply(stream
            .map(node -> nodeToCacheEntry(node, transformer))
            .filter(Objects::nonNull));
      }
    } finally {
      evictionLock.unlock();
      // Maintenance performed under the lock may have been capped; finish it asynchronously
      rescheduleCleanUpIfIncomplete();
    }
  }
3218

3219
  /**
   * Returns an entry for the given node if it can be used externally, else null.
   * <p>
   * The snapshot captures the key, value, weight, and the projected expiration and refresh
   * timestamps under the policies that are enabled.
   */
  @Nullable CacheEntry<K, V> nodeToCacheEntry(
      Node<K, V> node, Function<@Nullable V, @Nullable V> transformer) {
    V value = transformer.apply(node.getValue());
    K key = node.getKey();
    long now;
    // Skip entries that were reference collected, retired, or have already expired
    if ((key == null) || (value == null) || !node.isAlive()
        || hasExpired(node, (now = expirationTicker().read()))) {
      return null;
    }

    @Var long expiresAfter = Long.MAX_VALUE;
    if (expiresAfterAccess()) {
      // NOTE(review): the age term is added ((now - accessTime) + ttl) rather than subtracted
      // (ttl - age) as a remaining-lifetime would suggest — verify the intended semantics of
      // SnapshotEntry's expiresAt argument
      expiresAfter = Math.min(expiresAfter, now - node.getAccessTime() + expiresAfterAccessNanos());
    }
    if (expiresAfterWrite()) {
      // The low bit of both timestamps is masked off; presumably the write time's low bit is
      // reserved as a flag elsewhere — confirm against the node's setWriteTime usage
      expiresAfter = Math.min(expiresAfter,
          (now & ~1L) - (node.getWriteTime() & ~1L) + expiresAfterWriteNanos());
    }
    if (expiresVariable()) {
      // Variable expiration overrides the fixed policies rather than combining with them
      expiresAfter = node.getVariableTime() - now;
    }

    // When refresh is disabled, (now + Long.MAX_VALUE) yields a far-future (possibly wrapped)
    // timestamp; NOTE(review): assumed tolerated by SnapshotEntry — confirm
    long refreshableAt = refreshAfterWrite()
        ? node.getWriteTime() + refreshAfterWriteNanos()
        : now + Long.MAX_VALUE;
    int weight = node.getPolicyWeight();
    return SnapshotEntry.forEntry(key, value, now, weight, now + expiresAfter, refreshableAt);
  }
3248

3249
  /** A function that produces an unmodifiable map up to the limit in stream order. */
3250
  static final class SizeLimiter<K, V> implements Function<Stream<CacheEntry<K, V>>, Map<K, V>> {
3251
    private final int expectedSize;
3252
    private final long limit;
3253

3254
    SizeLimiter(int expectedSize, long limit) {
×
3255
      requireArgument(limit >= 0);
×
3256
      this.expectedSize = expectedSize;
×
3257
      this.limit = limit;
×
3258
    }
×
3259

3260
    @Override
3261
    public Map<K, V> apply(Stream<CacheEntry<K, V>> stream) {
3262
      var map = new LinkedHashMap<K, V>(calculateHashMapCapacity(expectedSize));
×
3263
      stream.limit(limit).forEach(entry -> map.put(entry.getKey(), entry.getValue()));
×
3264
      return Collections.unmodifiableMap(map);
×
3265
    }
3266
  }
3267

3268
  /** A function that produces an unmodifiable map up to the weighted limit in stream order. */
3269
  static final class WeightLimiter<K, V> implements Function<Stream<CacheEntry<K, V>>, Map<K, V>> {
3270
    private final long weightLimit;
3271

3272
    private long weightedSize;
3273

3274
    WeightLimiter(long weightLimit) {
×
3275
      requireArgument(weightLimit >= 0);
×
3276
      this.weightLimit = weightLimit;
×
3277
    }
×
3278

3279
    @Override
3280
    public Map<K, V> apply(Stream<CacheEntry<K, V>> stream) {
3281
      var map = new LinkedHashMap<K, V>();
×
3282
      stream.takeWhile(entry -> {
×
3283
        weightedSize = Math.addExact(weightedSize, entry.weight());
×
3284
        return (weightedSize <= weightLimit);
×
3285
      }).forEach(entry -> map.put(entry.getKey(), entry.getValue()));
×
3286
      return Collections.unmodifiableMap(map);
×
3287
    }
3288
  }
3289

3290
  /** An adapter to safely externalize the keys. */
3291
  static final class KeySetView<K, V> extends AbstractSet<K> {
3292
    final BoundedLocalCache<K, V> cache;
3293

3294
    KeySetView(BoundedLocalCache<K, V> cache) {
×
3295
      this.cache = requireNonNull(cache);
×
3296
    }
×
3297

3298
    @Override
3299
    public int size() {
3300
      return cache.size();
×
3301
    }
3302

3303
    @Override
3304
    public void clear() {
3305
      cache.clear();
×
3306
    }
×
3307

3308
    @Override
3309
    @SuppressWarnings("SuspiciousMethodCalls")
3310
    public boolean contains(Object o) {
3311
      return cache.containsKey(o);
×
3312
    }
3313

3314
    @Override
3315
    public boolean removeAll(Collection<?> collection) {
3316
      requireNonNull(collection);
×
3317
      @Var boolean modified = false;
×
3318
      if ((collection instanceof Set<?>) && (collection.size() > size())) {
×
3319
        for (K key : this) {
×
3320
          if (collection.contains(key)) {
×
3321
            modified |= remove(key);
×
3322
          }
3323
        }
×
3324
      } else {
3325
        for (var item : collection) {
×
3326
          modified |= (item != null) && remove(item);
×
3327
        }
×
3328
      }
3329
      return modified;
×
3330
    }
3331

3332
    @Override
3333
    public boolean remove(Object o) {
3334
      return (cache.remove(o) != null);
×
3335
    }
3336

3337
    @Override
3338
    public boolean removeIf(Predicate<? super K> filter) {
3339
      requireNonNull(filter);
×
3340
      @Var boolean modified = false;
×
3341
      for (K key : this) {
×
3342
        if (filter.test(key) && remove(key)) {
×
3343
          modified = true;
×
3344
        }
3345
      }
×
3346
      return modified;
×
3347
    }
3348

3349
    @Override
3350
    public boolean retainAll(Collection<?> collection) {
3351
      requireNonNull(collection);
×
3352
      @Var boolean modified = false;
×
3353
      for (K key : this) {
×
3354
        if (!collection.contains(key) && remove(key)) {
×
3355
          modified = true;
×
3356
        }
3357
      }
×
3358
      return modified;
×
3359
    }
3360

3361
    @Override
3362
    public Iterator<K> iterator() {
3363
      return new KeyIterator<>(cache);
×
3364
    }
3365

3366
    @Override
3367
    public Spliterator<K> spliterator() {
3368
      return new KeySpliterator<>(cache);
×
3369
    }
3370
  }
3371

3372
  /** An adapter to safely externalize the key iterator. */
3373
  static final class KeyIterator<K, V> implements Iterator<K> {
3374
    final EntryIterator<K, V> iterator;
3375

3376
    KeyIterator(BoundedLocalCache<K, V> cache) {
×
3377
      this.iterator = new EntryIterator<>(cache);
×
3378
    }
×
3379

3380
    @Override
3381
    public boolean hasNext() {
3382
      return iterator.hasNext();
×
3383
    }
3384

3385
    @Override
3386
    public K next() {
3387
      return iterator.nextKey();
×
3388
    }
3389

3390
    @Override
3391
    public void remove() {
3392
      iterator.remove();
×
3393
    }
×
3394
  }
3395

3396
  /** An adapter to safely externalize the key spliterator. */
3397
  static final class KeySpliterator<K, V> implements Spliterator<K> {
3398
    final Spliterator<Node<K, V>> spliterator;
3399
    final BoundedLocalCache<K, V> cache;
3400

3401
    KeySpliterator(BoundedLocalCache<K, V> cache) {
3402
      this(cache, cache.data.values().spliterator());
×
3403
    }
×
3404

3405
    KeySpliterator(BoundedLocalCache<K, V> cache, Spliterator<Node<K, V>> spliterator) {
×
3406
      this.spliterator = requireNonNull(spliterator);
×
3407
      this.cache = requireNonNull(cache);
×
3408
    }
×
3409

3410
    @Override
3411
    public void forEachRemaining(Consumer<? super K> action) {
3412
      requireNonNull(action);
×
3413
      Consumer<Node<K, V>> consumer = node -> {
×
3414
        K key = node.getKey();
×
3415
        V value = node.getValue();
×
3416
        long now = cache.expirationTicker().read();
×
3417
        if ((key != null) && (value != null) && node.isAlive() && !cache.hasExpired(node, now)) {
×
3418
          action.accept(key);
×
3419
        }
3420
      };
×
3421
      spliterator.forEachRemaining(consumer);
×
3422
    }
×
3423

3424
    @Override
3425
    public boolean tryAdvance(Consumer<? super K> action) {
3426
      requireNonNull(action);
×
3427
      boolean[] advanced = { false };
×
3428
      Consumer<Node<K, V>> consumer = node -> {
×
3429
        K key = node.getKey();
×
3430
        V value = node.getValue();
×
3431
        long now = cache.expirationTicker().read();
×
3432
        if ((key != null) && (value != null) && node.isAlive() && !cache.hasExpired(node, now)) {
×
3433
          action.accept(key);
×
3434
          advanced[0] = true;
×
3435
        }
3436
      };
×
3437
      while (spliterator.tryAdvance(consumer)) {
×
3438
        if (advanced[0]) {
×
3439
          return true;
×
3440
        }
3441
      }
3442
      return false;
×
3443
    }
3444

3445
    @Override
3446
    public @Nullable Spliterator<K> trySplit() {
3447
      Spliterator<Node<K, V>> split = spliterator.trySplit();
×
3448
      return (split == null) ? null : new KeySpliterator<>(cache, split);
×
3449
    }
3450

3451
    @Override
3452
    public long estimateSize() {
3453
      return spliterator.estimateSize();
×
3454
    }
3455

3456
    @Override
3457
    public int characteristics() {
3458
      return DISTINCT | CONCURRENT | NONNULL;
×
3459
    }
3460
  }
3461

3462
  /** An adapter to safely externalize the values. */
3463
  static final class ValuesView<K, V> extends AbstractCollection<V> {
3464
    final BoundedLocalCache<K, V> cache;
3465

3466
    ValuesView(BoundedLocalCache<K, V> cache) {
×
3467
      this.cache = requireNonNull(cache);
×
3468
    }
×
3469

3470
    @Override
3471
    public int size() {
3472
      return cache.size();
×
3473
    }
3474

3475
    @Override
3476
    public void clear() {
3477
      cache.clear();
×
3478
    }
×
3479

3480
    @Override
3481
    @SuppressWarnings("SuspiciousMethodCalls")
3482
    public boolean contains(Object o) {
3483
      return cache.containsValue(o);
×
3484
    }
3485

3486
    @Override
3487
    public boolean removeAll(Collection<?> collection) {
3488
      requireNonNull(collection);
×
3489
      @Var boolean modified = false;
×
3490
      for (var iterator = new EntryIterator<>(cache); iterator.hasNext();) {
×
3491
        var key = requireNonNull(iterator.key);
×
3492
        var value = requireNonNull(iterator.value);
×
3493
        if (collection.contains(value) && cache.remove(key, value)) {
×
3494
          modified = true;
×
3495
        }
3496
        iterator.advance();
×
3497
      }
×
3498
      return modified;
×
3499
    }
3500

3501
    @Override
3502
    public boolean remove(@Nullable Object o) {
3503
      if (o == null) {
×
3504
        return false;
×
3505
      }
3506
      for (var iterator = new EntryIterator<>(cache); iterator.hasNext();) {
×
3507
        var key = requireNonNull(iterator.key);
×
3508
        var value = requireNonNull(iterator.value);
×
3509
        if (o.equals(value) && cache.remove(key, value)) {
×
3510
          return true;
×
3511
        }
3512
        iterator.advance();
×
3513
      }
×
3514
      return false;
×
3515
    }
3516

3517
    @Override
3518
    public boolean removeIf(Predicate<? super V> filter) {
3519
      requireNonNull(filter);
×
3520
      @Var boolean modified = false;
×
3521
      for (var iterator = new EntryIterator<>(cache); iterator.hasNext();) {
×
3522
        var value = requireNonNull(iterator.value);
×
3523
        if (filter.test(value)) {
×
3524
          var key = requireNonNull(iterator.key);
×
3525
          modified |= cache.remove(key, value);
×
3526
        }
3527
        iterator.advance();
×
3528
      }
×
3529
      return modified;
×
3530
    }
3531

3532
    @Override
3533
    public boolean retainAll(Collection<?> collection) {
3534
      requireNonNull(collection);
×
3535
      @Var boolean modified = false;
×
3536
      for (var iterator = new EntryIterator<>(cache); iterator.hasNext();) {
×
3537
        var key = requireNonNull(iterator.key);
×
3538
        var value = requireNonNull(iterator.value);
×
3539
        if (!collection.contains(value) && cache.remove(key, value)) {
×
3540
          modified = true;
×
3541
        }
3542
        iterator.advance();
×
3543
      }
×
3544
      return modified;
×
3545
    }
3546

3547
    @Override
3548
    public Iterator<V> iterator() {
3549
      return new ValueIterator<>(cache);
×
3550
    }
3551

3552
    @Override
3553
    public Spliterator<V> spliterator() {
3554
      return new ValueSpliterator<>(cache);
×
3555
    }
3556
  }
3557

3558
  /** An adapter to safely externalize the value iterator. */
3559
  static final class ValueIterator<K, V> implements Iterator<V> {
3560
    final EntryIterator<K, V> iterator;
3561

3562
    ValueIterator(BoundedLocalCache<K, V> cache) {
×
3563
      this.iterator = new EntryIterator<>(cache);
×
3564
    }
×
3565

3566
    @Override
3567
    public boolean hasNext() {
3568
      return iterator.hasNext();
×
3569
    }
3570

3571
    @Override
3572
    public V next() {
3573
      return iterator.nextValue();
×
3574
    }
3575

3576
    @Override
3577
    public void remove() {
3578
      iterator.remove();
×
3579
    }
×
3580
  }
3581

3582
  /** An adapter to safely externalize the value spliterator. */
3583
  static final class ValueSpliterator<K, V> implements Spliterator<V> {
3584
    final Spliterator<Node<K, V>> spliterator;
3585
    final BoundedLocalCache<K, V> cache;
3586

3587
    ValueSpliterator(BoundedLocalCache<K, V> cache) {
3588
      this(cache, cache.data.values().spliterator());
×
3589
    }
×
3590

3591
    ValueSpliterator(BoundedLocalCache<K, V> cache, Spliterator<Node<K, V>> spliterator) {
×
3592
      this.spliterator = requireNonNull(spliterator);
×
3593
      this.cache = requireNonNull(cache);
×
3594
    }
×
3595

3596
    @Override
3597
    public void forEachRemaining(Consumer<? super V> action) {
3598
      requireNonNull(action);
×
3599
      Consumer<Node<K, V>> consumer = node -> {
×
3600
        K key = node.getKey();
×
3601
        V value = node.getValue();
×
3602
        long now = cache.expirationTicker().read();
×
3603
        if ((key != null) && (value != null) && node.isAlive() && !cache.hasExpired(node, now)) {
×
3604
          action.accept(value);
×
3605
        }
3606
      };
×
3607
      spliterator.forEachRemaining(consumer);
×
3608
    }
×
3609

3610
    @Override
3611
    public boolean tryAdvance(Consumer<? super V> action) {
3612
      requireNonNull(action);
×
3613
      boolean[] advanced = { false };
×
3614
      long now = cache.expirationTicker().read();
×
3615
      Consumer<Node<K, V>> consumer = node -> {
×
3616
        K key = node.getKey();
×
3617
        V value = node.getValue();
×
3618
        if ((key != null) && (value != null) && !cache.hasExpired(node, now) && node.isAlive()) {
×
3619
          action.accept(value);
×
3620
          advanced[0] = true;
×
3621
        }
3622
      };
×
3623
      while (spliterator.tryAdvance(consumer)) {
×
3624
        if (advanced[0]) {
×
3625
          return true;
×
3626
        }
3627
      }
3628
      return false;
×
3629
    }
3630

3631
    @Override
3632
    public @Nullable Spliterator<V> trySplit() {
3633
      Spliterator<Node<K, V>> split = spliterator.trySplit();
×
3634
      return (split == null) ? null : new ValueSpliterator<>(cache, split);
×
3635
    }
3636

3637
    @Override
3638
    public long estimateSize() {
3639
      return spliterator.estimateSize();
×
3640
    }
3641

3642
    @Override
3643
    public int characteristics() {
3644
      return CONCURRENT | NONNULL;
×
3645
    }
3646
  }
3647

3648
  /** An adapter to safely externalize the entries. */
3649
  static final class EntrySetView<K, V> extends AbstractSet<Entry<K, V>> {
3650
    final BoundedLocalCache<K, V> cache;
3651

3652
    EntrySetView(BoundedLocalCache<K, V> cache) {
×
3653
      this.cache = requireNonNull(cache);
×
3654
    }
×
3655

3656
    @Override
3657
    public int size() {
3658
      return cache.size();
×
3659
    }
3660

3661
    @Override
3662
    public void clear() {
3663
      cache.clear();
×
3664
    }
×
3665

3666
    @Override
3667
    public boolean contains(Object o) {
3668
      if (!(o instanceof Entry<?, ?>)) {
×
3669
        return false;
×
3670
      }
3671
      var entry = (Entry<?, ?>) o;
×
3672
      var key = entry.getKey();
×
3673
      var value = entry.getValue();
×
3674
      if ((key == null) || (value == null)) {
×
3675
        return false;
×
3676
      }
3677
      Node<K, V> node = cache.data.get(cache.nodeFactory.newLookupKey(key));
×
3678
      return (node != null) && node.containsValue(value);
×
3679
    }
3680

3681
    @Override
3682
    public boolean removeAll(Collection<?> collection) {
3683
      requireNonNull(collection);
×
3684
      @Var boolean modified = false;
×
3685
      if ((collection instanceof Set<?>) && (collection.size() > size())) {
×
3686
        for (var entry : this) {
×
3687
          if (collection.contains(entry)) {
×
3688
            modified |= remove(entry);
×
3689
          }
3690
        }
×
3691
      } else {
3692
        for (var item : collection) {
×
3693
          modified |= (item != null) && remove(item);
×
3694
        }
×
3695
      }
3696
      return modified;
×
3697
    }
3698

3699
    @Override
3700
    @SuppressWarnings("SuspiciousMethodCalls")
3701
    public boolean remove(Object o) {
3702
      if (!(o instanceof Entry<?, ?>)) {
×
3703
        return false;
×
3704
      }
3705
      var entry = (Entry<?, ?>) o;
×
3706
      var key = entry.getKey();
×
3707
      return (key != null) && cache.remove(key, entry.getValue());
×
3708
    }
3709

3710
    @Override
3711
    public boolean removeIf(Predicate<? super Entry<K, V>> filter) {
3712
      requireNonNull(filter);
×
3713
      @Var boolean modified = false;
×
3714
      for (Entry<K, V> entry : this) {
×
3715
        if (filter.test(entry)) {
×
3716
          modified |= cache.remove(entry.getKey(), entry.getValue());
×
3717
        }
3718
      }
×
3719
      return modified;
×
3720
    }
3721

3722
    @Override
3723
    public boolean retainAll(Collection<?> collection) {
3724
      requireNonNull(collection);
×
3725
      @Var boolean modified = false;
×
3726
      for (var entry : this) {
×
3727
        if (!collection.contains(entry) && remove(entry)) {
×
3728
          modified = true;
×
3729
        }
3730
      }
×
3731
      return modified;
×
3732
    }
3733

3734
    @Override
3735
    public Iterator<Entry<K, V>> iterator() {
3736
      return new EntryIterator<>(cache);
×
3737
    }
3738

3739
    @Override
3740
    public Spliterator<Entry<K, V>> spliterator() {
3741
      return new EntrySpliterator<>(cache);
×
3742
    }
3743
  }
3744

3745
  /** An adapter to safely externalize the entry iterator. */
  static final class EntryIterator<K, V> implements Iterator<Entry<K, V>> {
    final BoundedLocalCache<K, V> cache;
    final Iterator<Node<K, V>> iterator;

    // The current position's key and value, cached by hasNext() so that a concurrent
    // reference collection cannot null them between hasNext() and next()
    @Nullable K key;
    @Nullable V value;
    // The key of the last returned element, retained so that remove() can act on it
    @Nullable K removalKey;
    // The node at the current position; non-null means hasNext() already found a live entry
    @Nullable Node<K, V> next;

    EntryIterator(BoundedLocalCache<K, V> cache) {
      this.iterator = cache.data.values().iterator();
      this.cache = cache;
    }

    @Override
    public boolean hasNext() {
      if (next != null) {
        return true;
      }

      long now = cache.expirationTicker().read();
      while (iterator.hasNext()) {
        next = iterator.next();
        value = next.getValue();
        key = next.getKey();

        // Skip collected or expired entries (scheduling their eviction), as well as entries
        // that were retired by a concurrent removal
        boolean evictable = (key == null) || (value == null) || cache.hasExpired(next, now);
        if (evictable || !next.isAlive()) {
          if (evictable) {
            cache.scheduleDrainBuffers();
          }
          advance();
          continue;
        }
        return true;
      }
      return false;
    }

    /** Invalidates the current position so that the iterator may compute the next position. */
    void advance() {
      value = null;
      next = null;
      key = null;
    }

    /** Returns the next key; used by {@link KeyIterator}. */
    K nextKey() {
      if (!hasNext()) {
        throw new NoSuchElementException();
      }
      removalKey = key;
      advance();
      return requireNonNull(removalKey);
    }

    /** Returns the next value; used by {@link ValueIterator}. */
    V nextValue() {
      if (!hasNext()) {
        throw new NoSuchElementException();
      }
      removalKey = key;
      V val = value;
      advance();
      return requireNonNull(val);
    }

    @Override
    public Entry<K, V> next() {
      if (!hasNext()) {
        throw new NoSuchElementException();
      }
      // A write-through entry so that Entry.setValue updates the cache
      var entry = new WriteThroughEntry<K, @NonNull V>(
          cache, requireNonNull(key), requireNonNull(value));
      removalKey = key;
      advance();
      return entry;
    }

    @Override
    public void remove() {
      // Per the Iterator contract, remove() is only valid after a next*() call
      if (removalKey == null) {
        throw new IllegalStateException();
      }
      cache.remove(removalKey);
      removalKey = null;
    }
  }
3832

3833
  /** An adapter to safely externalize the entry spliterator. */
3834
  static final class EntrySpliterator<K, V> implements Spliterator<Entry<K, V>> {
3835
    final Spliterator<Node<K, V>> spliterator;
3836
    final BoundedLocalCache<K, V> cache;
3837

3838
    EntrySpliterator(BoundedLocalCache<K, V> cache) {
3839
      this(cache, cache.data.values().spliterator());
×
3840
    }
×
3841

3842
    EntrySpliterator(BoundedLocalCache<K, V> cache, Spliterator<Node<K, V>> spliterator) {
×
3843
      this.spliterator = requireNonNull(spliterator);
×
3844
      this.cache = requireNonNull(cache);
×
3845
    }
×
3846

3847
    @Override
3848
    public void forEachRemaining(Consumer<? super Entry<K, V>> action) {
3849
      requireNonNull(action);
×
3850
      Consumer<Node<K, V>> consumer = node -> {
×
3851
        K key = node.getKey();
×
3852
        V value = node.getValue();
×
3853
        long now = cache.expirationTicker().read();
×
3854
        if ((key != null) && (value != null) && node.isAlive() && !cache.hasExpired(node, now)) {
×
3855
          action.accept(new WriteThroughEntry<>(cache, key, value));
×
3856
        }
3857
      };
×
3858
      spliterator.forEachRemaining(consumer);
×
3859
    }
×
3860

3861
    @Override
3862
    public boolean tryAdvance(Consumer<? super Entry<K, V>> action) {
3863
      requireNonNull(action);
×
3864
      boolean[] advanced = { false };
×
3865
      Consumer<Node<K, V>> consumer = node -> {
×
3866
        K key = node.getKey();
×
3867
        V value = node.getValue();
×
3868
        long now = cache.expirationTicker().read();
×
3869
        if ((key != null) && (value != null) && node.isAlive() && !cache.hasExpired(node, now)) {
×
3870
          action.accept(new WriteThroughEntry<>(cache, key, value));
×
3871
          advanced[0] = true;
×
3872
        }
3873
      };
×
3874
      while (spliterator.tryAdvance(consumer)) {
×
3875
        if (advanced[0]) {
×
3876
          return true;
×
3877
        }
3878
      }
3879
      return false;
×
3880
    }
3881

3882
    @Override
3883
    public @Nullable Spliterator<Entry<K, V>> trySplit() {
3884
      Spliterator<Node<K, V>> split = spliterator.trySplit();
×
3885
      return (split == null) ? null : new EntrySpliterator<>(cache, split);
×
3886
    }
3887

3888
    @Override
3889
    public long estimateSize() {
3890
      return spliterator.estimateSize();
×
3891
    }
3892

3893
    @Override
3894
    public int characteristics() {
3895
      return DISTINCT | CONCURRENT | NONNULL;
×
3896
    }
3897
  }
3898

3899
  /** A reusable task that performs the maintenance work; used to avoid wrapping by ForkJoinPool. */
  static final class PerformCleanupTask extends ForkJoinTask<@Nullable Void> implements Runnable {
    private static final long serialVersionUID = 1L;

    // Held weakly so an abandoned cache can be garbage collected even while this task is
    // still queued on an executor
    final WeakReference<BoundedLocalCache<?, ?>> reference;

    PerformCleanupTask(BoundedLocalCache<?, ?> cache) {
      reference = new WeakReference<>(cache);
    }

    @Override
    public boolean exec() {
      try {
        run();
      } catch (Throwable t) {
        // Maintenance is best-effort: log and swallow so the task never completes
        // exceptionally and can be resubmitted
        logger.log(Level.ERROR, "Exception thrown when performing the maintenance task", t);
      }

      // Indicates that the task has not completed to allow subsequent submissions to execute
      return false;
    }

    @Override
    public void run() {
      BoundedLocalCache<?, ?> cache = reference.get();
      if (cache != null) {
        cache.performCleanUp(/* ignored */ null);
      }
    }

    /**
     * This method cannot be ignored due to being final, so a hostile user supplied Executor could
     * forcibly complete the task and halt future executions. There are easier ways to intentionally
     * harm a system, so this is assumed to not happen in practice.
     */
    // public final void quietlyComplete() {}

    // The task is reused indefinitely, so all completion and cancellation transitions are no-ops
    @Override public void complete(@Nullable Void value) {}
    @Override public void setRawResult(@Nullable Void value) {}
    @Override public @Nullable Void getRawResult() { return null; }
    @Override public void completeExceptionally(@Nullable Throwable t) {}
    @Override public boolean cancel(boolean mayInterruptIfRunning) { return false; }
  }
3942

3943
  /** Creates a serialization proxy based on the common configuration shared by all cache types. */
  static <K, V> SerializationProxy<K, V> makeSerializationProxy(BoundedLocalCache<?, ?> cache) {
    var proxy = new SerializationProxy<K, V>();
    proxy.weakKeys = cache.collectKeys();
    proxy.weakValues = cache.nodeFactory.weakValues();
    proxy.softValues = cache.nodeFactory.softValues();
    proxy.isRecordingStats = cache.isRecordingStats();
    proxy.evictionListener = cache.evictionListener;
    proxy.removalListener = cache.removalListener();
    proxy.ticker = cache.expirationTicker();
    // Capture only the policy settings that are enabled, so deserialization rebuilds an
    // equivalently configured cache
    if (cache.expiresAfterAccess()) {
      proxy.expiresAfterAccessNanos = cache.expiresAfterAccessNanos();
    }
    if (cache.expiresAfterWrite()) {
      proxy.expiresAfterWriteNanos = cache.expiresAfterWriteNanos();
    }
    if (cache.expiresVariable()) {
      proxy.expiry = cache.expiry();
    }
    if (cache.refreshAfterWrite()) {
      proxy.refreshAfterWriteNanos = cache.refreshAfterWriteNanos();
    }
    if (cache.evicts()) {
      // A maximum weight (with its weigher) and a maximum size are mutually exclusive
      if (cache.isWeighted) {
        proxy.weigher = cache.weigher;
        proxy.maximumWeight = cache.maximum();
      } else {
        proxy.maximumSize = cache.maximum();
      }
    }
    proxy.cacheLoader = cache.cacheLoader;
    proxy.async = cache.isAsync;
    return proxy;
  }
3977

3978
  /* --------------- Manual Cache --------------- */
3979

3980
  /**
   * A bounded cache with no automatic loading. Serialization always goes through a configuration
   * proxy (see {@code writeReplace}/{@code readObject}), so entries are never serialized.
   */
  static class BoundedLocalManualCache<K, V> implements LocalManualCache<K, V>, Serializable {
    private static final long serialVersionUID = 1;

    final BoundedLocalCache<K, V> cache;

    // Lazily created view; the field is not volatile, so racing first callers may each construct
    // an instance (NOTE(review): presumably benign since the instances are interchangeable —
    // confirm)
    @Nullable Policy<K, V> policy;

    BoundedLocalManualCache(Caffeine<K, V> builder) {
      this(builder, null);
    }

    BoundedLocalManualCache(Caffeine<K, V> builder, @Nullable CacheLoader<? super K, V> loader) {
      cache = LocalCacheFactory.newBoundedLocalCache(builder, loader, /* isAsync= */ false);
    }

    @Override
    public final BoundedLocalCache<K, V> cache() {
      return cache;
    }

    @Override
    public final Policy<K, V> policy() {
      if (policy == null) {
        // A synchronous cache stores values directly, so no unwrapping transform is needed
        Function<@Nullable V, @Nullable V> identity = v -> v;
        policy = new BoundedPolicy<>(cache, identity, cache.isWeighted);
      }
      return policy;
    }

    // Direct deserialization is rejected; the stream must contain the serialization proxy
    private void readObject(ObjectInputStream stream) throws InvalidObjectException {
      throw new InvalidObjectException("Proxy required");
    }

    private Object writeReplace() {
      return makeSerializationProxy(cache);
    }
  }
4017

4018
  /**
   * The inspection and modification {@link Policy} view of a bounded cache. The per-feature views
   * ({@code eviction}, the expiration and refresh policies) are created lazily and memoized in
   * non-volatile fields, where {@code null} means "not yet created" (hence the suppressed
   * Optional-as-field warnings). Racing first callers may each construct a view
   * (NOTE(review): presumably benign since the views hold no mutable state — confirm).
   */
  @SuppressWarnings({"NullableOptional",
    "OptionalAssignedToNull", "OptionalUsedAsFieldOrParameterType"})
  static final class BoundedPolicy<K, V> implements Policy<K, V> {
    // Decodes a stored value before exposing it: the identity function for synchronous caches and
    // Async::getIfReady for async caches (see the policy() factories in the cache classes)
    final Function<@Nullable V, @Nullable V> transformer;
    final BoundedLocalCache<K, V> cache;
    final boolean isWeighted;

    // Memoized policy views; null until first requested
    @Nullable Optional<Eviction<K, V>> eviction;
    @Nullable Optional<FixedRefresh<K, V>> refreshes;
    @Nullable Optional<FixedExpiration<K, V>> afterWrite;
    @Nullable Optional<FixedExpiration<K, V>> afterAccess;
    @Nullable Optional<VarExpiration<K, V>> variable;

    BoundedPolicy(BoundedLocalCache<K, V> cache,
        Function<@Nullable V, @Nullable V> transformer, boolean isWeighted) {
      this.transformer = transformer;
      this.isWeighted = isWeighted;
      this.cache = cache;
    }

    @Override public boolean isRecordingStats() {
      return cache.isRecordingStats();
    }
    @Override public @Nullable V getIfPresentQuietly(K key) {
      return transformer.apply(cache.getIfPresentQuietly(key));
    }
    @Override public @Nullable CacheEntry<K, V> getEntryIfPresentQuietly(K key) {
      Node<K, V> node = cache.data.get(cache.nodeFactory.newLookupKey(key));
      return (node == null) ? null : cache.nodeToCacheEntry(node, transformer);
    }
    /** Returns an unmodifiable snapshot of the in-flight refreshes keyed by the original key. */
    @SuppressWarnings("Java9CollectionFactory")
    @Override public Map<K, CompletableFuture<V>> refreshes() {
      var refreshes = cache.refreshes;
      if ((refreshes == null) || refreshes.isEmpty()) {
        @SuppressWarnings({"ImmutableMapOf", "RedundantUnmodifiable"})
        Map<K, CompletableFuture<V>> emptyMap = Collections.unmodifiableMap(Collections.emptyMap());
        return emptyMap;
      } else if (cache.collectKeys()) {
        // The keys are held by references, so dereference them and skip any that were collected;
        // an IdentityHashMap is used since such keys are presumably compared by identity — confirm
        var inFlight = new IdentityHashMap<K, CompletableFuture<V>>(refreshes.size());
        for (var entry : refreshes.entrySet()) {
          @SuppressWarnings("unchecked")
          @Nullable K key = ((InternalReference<K>) entry.getKey()).get();
          @SuppressWarnings("unchecked")
          var future = (CompletableFuture<V>) entry.getValue();
          if (key != null) {
            inFlight.put(key, future);
          }
        }
        return Collections.unmodifiableMap(inFlight);
      }
      // Strongly referenced keys are stored directly, so a plain copy suffices
      @SuppressWarnings("unchecked")
      var castedRefreshes = (Map<K, CompletableFuture<V>>) (Object) refreshes;
      return Collections.unmodifiableMap(new HashMap<>(castedRefreshes));
    }
    @Override public Optional<Eviction<K, V>> eviction() {
      return cache.evicts()
          ? (eviction == null) ? (eviction = Optional.of(new BoundedEviction())) : eviction
          : Optional.empty();
    }
    @Override public Optional<FixedExpiration<K, V>> expireAfterAccess() {
      if (!cache.expiresAfterAccess()) {
        return Optional.empty();
      }
      return (afterAccess == null)
          ? (afterAccess = Optional.of(new BoundedExpireAfterAccess()))
          : afterAccess;
    }
    @Override public Optional<FixedExpiration<K, V>> expireAfterWrite() {
      if (!cache.expiresAfterWrite()) {
        return Optional.empty();
      }
      return (afterWrite == null)
          ? (afterWrite = Optional.of(new BoundedExpireAfterWrite()))
          : afterWrite;
    }
    @Override public Optional<VarExpiration<K, V>> expireVariably() {
      if (!cache.expiresVariable()) {
        return Optional.empty();
      }
      return (variable == null)
          ? (variable = Optional.of(new BoundedVarExpiration()))
          : variable;
    }
    @Override public Optional<FixedRefresh<K, V>> refreshAfterWrite() {
      if (!cache.refreshAfterWrite()) {
        return Optional.empty();
      }
      return (refreshes == null)
          ? (refreshes = Optional.of(new BoundedRefreshAfterWrite()))
          : refreshes;
    }

    /** The size- or weight-based eviction policy view. */
    final class BoundedEviction implements Eviction<K, V> {
      @Override public boolean isWeighted() {
        return isWeighted;
      }
      @Override public OptionalInt weightOf(K key) {
        requireNonNull(key);
        if (!isWeighted) {
          return OptionalInt.empty();
        }
        Node<K, V> node = cache.data.get(cache.nodeFactory.newLookupKey(key));
        if ((node == null) || cache.hasExpired(node, cache.expirationTicker().read())) {
          return OptionalInt.empty();
        }
        // Lock the node so that a concurrent update cannot expose a stale or dead weight
        synchronized (node) {
          return node.isAlive() ? OptionalInt.of(node.getWeight()) : OptionalInt.empty();
        }
      }
      @Override public OptionalLong weightedSize() {
        return isWeighted
            ? OptionalLong.of(Math.max(0, cache.weightedSizeAcquire()))
            : OptionalLong.empty();
      }
      @Override public long getMaximum() {
        return cache.maximumAcquire();
      }
      @Override public void setMaximum(long maximum) {
        // The maximum is changed and maintenance run under the eviction lock so that entries are
        // evicted immediately if the cache now exceeds its bound
        cache.evictionLock.lock();
        try {
          cache.setMaximumSize(maximum);
          cache.maintenance(/* ignored */ null);
        } finally {
          cache.evictionLock.unlock();
          cache.rescheduleCleanUpIfIncomplete();
        }
      }
      @Override public Map<K, V> coldest(int limit) {
        int expectedSize = Math.min(limit, cache.size());
        var limiter = new SizeLimiter<K, V>(expectedSize, limit);
        return cache.evictionOrder(/* hottest= */ false, transformer, limiter);
      }
      @Override public Map<K, V> coldestWeighted(long weightLimit) {
        // An unweighted cache treats each entry as weight 1, so a size limit is equivalent
        var limiter = isWeighted()
            ? new WeightLimiter<K, V>(weightLimit)
            : new SizeLimiter<K, V>((int) Math.min(weightLimit, cache.size()), weightLimit);
        return cache.evictionOrder(/* hottest= */ false, transformer, limiter);
      }
      @Override
      public <T> T coldest(Function<Stream<CacheEntry<K, V>>, T> mappingFunction) {
        requireNonNull(mappingFunction);
        return cache.evictionOrder(/* hottest= */ false, transformer, mappingFunction);
      }
      @Override public Map<K, V> hottest(int limit) {
        int expectedSize = Math.min(limit, cache.size());
        var limiter = new SizeLimiter<K, V>(expectedSize, limit);
        return cache.evictionOrder(/* hottest= */ true, transformer, limiter);
      }
      @Override public Map<K, V> hottestWeighted(long weightLimit) {
        var limiter = isWeighted()
            ? new WeightLimiter<K, V>(weightLimit)
            : new SizeLimiter<K, V>((int) Math.min(weightLimit, cache.size()), weightLimit);
        return cache.evictionOrder(/* hottest= */ true, transformer, limiter);
      }
      @Override
      public <T> T hottest(Function<Stream<CacheEntry<K, V>>, T> mappingFunction) {
        requireNonNull(mappingFunction);
        return cache.evictionOrder(/* hottest= */ true, transformer, mappingFunction);
      }
    }

    /** The fixed expire-after-access policy view. */
    @SuppressWarnings("PreferJavaTimeOverload")
    final class BoundedExpireAfterAccess implements FixedExpiration<K, V> {
      @Override public OptionalLong ageOf(K key, TimeUnit unit) {
        requireNonNull(key);
        requireNonNull(unit);
        Object lookupKey = cache.nodeFactory.newLookupKey(key);
        Node<K, V> node = cache.data.get(lookupKey);
        if (node == null) {
          return OptionalLong.empty();
        }
        long now = cache.expirationTicker().read();
        return cache.hasExpired(node, now)
            ? OptionalLong.empty()
            : OptionalLong.of(unit.convert(now - node.getAccessTime(), TimeUnit.NANOSECONDS));
      }
      @Override public long getExpiresAfter(TimeUnit unit) {
        return unit.convert(cache.expiresAfterAccessNanos(), TimeUnit.NANOSECONDS);
      }
      @Override public void setExpiresAfter(long duration, TimeUnit unit) {
        requireArgument(duration >= 0);
        cache.setExpiresAfterAccessNanos(unit.toNanos(duration));
        // Trigger maintenance so entries expired under the new duration are removed promptly
        cache.scheduleAfterWrite();
      }
      @Override public Map<K, V> oldest(int limit) {
        return oldest(new SizeLimiter<>(Math.min(limit, cache.size()), limit));
      }
      @Override public <T> T oldest(Function<Stream<CacheEntry<K, V>>, T> mappingFunction) {
        return cache.expireAfterAccessOrder(/* oldest= */ true, transformer, mappingFunction);
      }
      @Override public Map<K, V> youngest(int limit) {
        return youngest(new SizeLimiter<>(Math.min(limit, cache.size()), limit));
      }
      @Override public <T> T youngest(Function<Stream<CacheEntry<K, V>>, T> mappingFunction) {
        return cache.expireAfterAccessOrder(/* oldest= */ false, transformer, mappingFunction);
      }
    }

    /** The fixed expire-after-write policy view. */
    @SuppressWarnings("PreferJavaTimeOverload")
    final class BoundedExpireAfterWrite implements FixedExpiration<K, V> {
      @Override public OptionalLong ageOf(K key, TimeUnit unit) {
        requireNonNull(key);
        requireNonNull(unit);
        Object lookupKey = cache.nodeFactory.newLookupKey(key);
        Node<K, V> node = cache.data.get(lookupKey);
        if (node == null) {
          return OptionalLong.empty();
        }
        long now = cache.expirationTicker().read();
        return cache.hasExpired(node, now)
            ? OptionalLong.empty()
            : OptionalLong.of(unit.convert(now - node.getWriteTime(), TimeUnit.NANOSECONDS));
      }
      @Override public long getExpiresAfter(TimeUnit unit) {
        return unit.convert(cache.expiresAfterWriteNanos(), TimeUnit.NANOSECONDS);
      }
      @Override public void setExpiresAfter(long duration, TimeUnit unit) {
        requireArgument(duration >= 0);
        cache.setExpiresAfterWriteNanos(unit.toNanos(duration));
        // Trigger maintenance so entries expired under the new duration are removed promptly
        cache.scheduleAfterWrite();
      }
      @Override public Map<K, V> oldest(int limit) {
        return oldest(new SizeLimiter<>(Math.min(limit, cache.size()), limit));
      }
      @SuppressWarnings("GuardedByChecker")
      @Override public <T> T oldest(Function<Stream<CacheEntry<K, V>>, T> mappingFunction) {
        return cache.snapshot(cache.writeOrderDeque(), transformer, mappingFunction);
      }
      @Override public Map<K, V> youngest(int limit) {
        return youngest(new SizeLimiter<>(Math.min(limit, cache.size()), limit));
      }
      @SuppressWarnings("GuardedByChecker")
      @Override public <T> T youngest(Function<Stream<CacheEntry<K, V>>, T> mappingFunction) {
        return cache.snapshot(cache.writeOrderDeque()::descendingIterator,
            transformer, mappingFunction);
      }
    }

    /** The per-entry (variable) expiration policy view. */
    @SuppressWarnings("PreferJavaTimeOverload")
    final class BoundedVarExpiration implements VarExpiration<K, V> {
      @Override public OptionalLong getExpiresAfter(K key, TimeUnit unit) {
        requireNonNull(key);
        requireNonNull(unit);
        Object lookupKey = cache.nodeFactory.newLookupKey(key);
        Node<K, V> node = cache.data.get(lookupKey);
        if (node == null) {
          return OptionalLong.empty();
        }
        long now = cache.expirationTicker().read();
        // Remaining (not elapsed) time until the entry's variable expiration
        return cache.hasExpired(node, now)
            ? OptionalLong.empty()
            : OptionalLong.of(unit.convert(node.getVariableTime() - now, TimeUnit.NANOSECONDS));
      }
      @Override public void setExpiresAfter(K key, long duration, TimeUnit unit) {
        requireNonNull(key);
        requireNonNull(unit);
        requireArgument(duration >= 0);
        Object lookupKey = cache.nodeFactory.newLookupKey(key);
        Node<K, V> node = cache.data.get(lookupKey);
        if (node != null) {
          long now;
          long durationNanos = TimeUnit.NANOSECONDS.convert(duration, unit);
          // Lock the node so the deadline is not updated while the entry is being computed or
          // after it has already expired
          synchronized (node) {
            now = cache.expirationTicker().read();
            if (cache.isComputingAsync(node.getValue()) || cache.hasExpired(node, now)) {
              return;
            }
            node.setVariableTime(now + Math.min(durationNanos, MAXIMUM_EXPIRY));
          }
          // Reorder the entry in the timer wheel without counting this as a cache hit
          cache.afterRead(node, now, /* recordHit= */ false);
        }
      }
      @Override public @Nullable V put(K key, V value, long duration, TimeUnit unit) {
        requireNonNull(unit);
        requireNonNull(value);
        requireArgument(duration >= 0);
        return cache.isAsync
            ? putAsync(key, value, duration, unit)
            : putSync(key, value, duration, unit, /* onlyIfAbsent= */ false);
      }
      @Override public @Nullable V putIfAbsent(K key, V value, long duration, TimeUnit unit) {
        requireNonNull(unit);
        requireNonNull(value);
        requireArgument(duration >= 0);
        return cache.isAsync
            ? putIfAbsentAsync(key, value, duration, unit)
            : putSync(key, value, duration, unit, /* onlyIfAbsent= */ true);
      }
      @Nullable V putSync(K key, V value, long duration, TimeUnit unit, boolean onlyIfAbsent) {
        var expiry = new FixedExpireAfterWrite<K, V>(duration, unit);
        return cache.put(key, value, expiry, onlyIfAbsent);
      }
      /**
       * Atomically inserts if absent into an async cache, treating a failed or null-completed
       * in-flight future as absent. Retries until a settled mapping is observed.
       */
      @SuppressWarnings("unchecked")
      @Nullable V putIfAbsentAsync(K key, V value, long duration, TimeUnit unit) {
        // Keep in sync with LocalAsyncCache.AsMapView#putIfAbsent(key, value)
        var expiry = (Expiry<K, V>) new AsyncExpiry<>(new FixedExpireAfterWrite<>(duration, unit));
        var asyncValue = (V) CompletableFuture.completedFuture(value);

        for (;;) {
          var priorFuture = (CompletableFuture<V>) cache.getIfPresent(
              key, /* recordStats= */ false);
          if (priorFuture != null) {
            if (!priorFuture.isDone()) {
              // Wait for the in-flight computation to settle before deciding absence
              Async.getWhenSuccessful(priorFuture);
              continue;
            }

            V prior = Async.getWhenSuccessful(priorFuture);
            if (prior != null) {
              return prior;
            }
          }

          boolean[] added = { false };
          var computed = (CompletableFuture<V>) cache.compute(key, (K k, @Nullable V oldValue) -> {
            var oldValueFuture = (CompletableFuture<V>) oldValue;
            // Absent if unmapped, or mapped to a settled future with no successful value
            added[0] = (oldValueFuture == null)
                || (oldValueFuture.isDone() && (Async.getIfReady(oldValueFuture) == null));
            return added[0] ? asyncValue : oldValue;
          }, expiry, /* recordLoad= */ false, /* recordLoadFailure= */ false);

          if (added[0]) {
            return null;
          } else {
            V prior = Async.getWhenSuccessful(computed);
            if (prior != null) {
              return prior;
            }
          }
        }
      }
      @SuppressWarnings("unchecked")
      @Nullable V putAsync(K key, V value, long duration, TimeUnit unit) {
        var expiry = (Expiry<K, V>) new AsyncExpiry<>(new FixedExpireAfterWrite<>(duration, unit));
        var asyncValue = (V) CompletableFuture.completedFuture(value);

        var oldValueFuture = (CompletableFuture<V>) cache.put(
            key, asyncValue, expiry, /* onlyIfAbsent= */ false);
        return Async.getWhenSuccessful(oldValueFuture);
      }
      @Override public @Nullable V compute(K key,
          BiFunction<? super K, ? super V, ? extends V> remappingFunction,
          Duration duration) {
        requireNonNull(key);
        requireNonNull(duration);
        requireNonNull(remappingFunction);
        requireArgument(!duration.isNegative(), "duration cannot be negative: %s", duration);
        var expiry = new FixedExpireAfterWrite<K, V>(
            toNanosSaturated(duration), TimeUnit.NANOSECONDS);

        return cache.isAsync
            ? computeAsync(key, remappingFunction, expiry)
            : cache.compute(key, remappingFunction, expiry,
                /* recordLoad= */ true, /* recordLoadFailure= */ true);
      }
      /**
       * Computes against an async cache by waiting for any in-flight future to settle and then
       * remapping the settled value; retries until the remapping is applied to a settled mapping.
       */
      @Nullable V computeAsync(K key,
          BiFunction<? super K, ? super V, ? extends V> remappingFunction,
          Expiry<? super K, ? super V> expiry) {
        // Keep in sync with LocalAsyncCache.AsMapView#compute(key, remappingFunction)
        @SuppressWarnings("unchecked")
        var delegate = (LocalCache<K, CompletableFuture<V>>) cache;

        // Single-element array carries the computed value out of the remapping lambda
        @SuppressWarnings({"rawtypes", "unchecked", "Varifier"})
        @Nullable V[] newValue = (@Nullable V[]) new Object[1];
        for (;;) {
          Async.getWhenSuccessful(delegate.getIfPresentQuietly(key));

          CompletableFuture<V> valueFuture = delegate.compute(
              key, (K k, @Nullable CompletableFuture<V> oldValueFuture) -> {
                if ((oldValueFuture != null) && !oldValueFuture.isDone()) {
                  // Still computing; keep the mapping and retry after awaiting it above
                  return oldValueFuture;
                }

                V oldValue = Async.getIfReady(oldValueFuture);
                BiFunction<? super K, ? super V, ? extends @Nullable V> function =
                    delegate.statsAware(remappingFunction,
                        /* recordLoad= */ true, /* recordLoadFailure= */ true);
                newValue[0] = function.apply(key, oldValue);
                return (newValue[0] == null) ? null
                    : CompletableFuture.completedFuture(newValue[0]);
              }, new AsyncExpiry<>(expiry), /* recordLoad= */ false,
              /* recordLoadFailure= */ false);

          if (newValue[0] != null) {
            return newValue[0];
          } else if (valueFuture == null) {
            return null;
          }
        }
      }
      @Override public Map<K, V> oldest(int limit) {
        return oldest(new SizeLimiter<>(Math.min(limit, cache.size()), limit));
      }
      @Override public <T> T oldest(Function<Stream<CacheEntry<K, V>>, T> mappingFunction) {
        return cache.snapshot(cache.timerWheel(), transformer, mappingFunction);
      }
      @Override public Map<K, V> youngest(int limit) {
        return youngest(new SizeLimiter<>(Math.min(limit, cache.size()), limit));
      }
      @Override public <T> T youngest(Function<Stream<CacheEntry<K, V>>, T> mappingFunction) {
        return cache.snapshot(cache.timerWheel()::descendingIterator, transformer, mappingFunction);
      }
    }

    /**
     * An {@link Expiry} that applies a fixed duration on creation and update, and leaves the
     * remaining duration unchanged on read.
     */
    static final class FixedExpireAfterWrite<K, V> implements Expiry<K, V> {
      final long duration;
      final TimeUnit unit;

      FixedExpireAfterWrite(long duration, TimeUnit unit) {
        this.duration = duration;
        this.unit = unit;
      }
      @Override public long expireAfterCreate(K key, V value, long currentTime) {
        return unit.toNanos(duration);
      }
      @Override public long expireAfterUpdate(
          K key, V value, long currentTime, long currentDuration) {
        return unit.toNanos(duration);
      }
      @CanIgnoreReturnValue
      @Override public long expireAfterRead(
          K key, V value, long currentTime, long currentDuration) {
        return currentDuration;
      }
    }

    /** The fixed refresh-after-write policy view. */
    @SuppressWarnings("PreferJavaTimeOverload")
    final class BoundedRefreshAfterWrite implements FixedRefresh<K, V> {
      @Override public OptionalLong ageOf(K key, TimeUnit unit) {
        requireNonNull(key);
        requireNonNull(unit);
        Object lookupKey = cache.nodeFactory.newLookupKey(key);
        Node<K, V> node = cache.data.get(lookupKey);
        if (node == null) {
          return OptionalLong.empty();
        }
        long now = cache.expirationTicker().read();
        return cache.hasExpired(node, now)
            ? OptionalLong.empty()
            : OptionalLong.of(unit.convert(now - node.getWriteTime(), TimeUnit.NANOSECONDS));
      }
      @Override public long getRefreshesAfter(TimeUnit unit) {
        return unit.convert(cache.refreshAfterWriteNanos(), TimeUnit.NANOSECONDS);
      }
      @Override public void setRefreshesAfter(long duration, TimeUnit unit) {
        requireArgument(duration >= 0);
        cache.setRefreshAfterWriteNanos(unit.toNanos(duration));
        cache.scheduleAfterWrite();
      }
    }
  }
4469

4470
  /* --------------- Loading Cache --------------- */
4471

4472
  /**
   * A bounded cache that populates missing entries via a {@link CacheLoader}. The loader is
   * adapted into single-key and (optionally) bulk mapping functions at construction time.
   */
  static final class BoundedLocalLoadingCache<K, V>
      extends BoundedLocalManualCache<K, V> implements LocalLoadingCache<K, V> {
    private static final long serialVersionUID = 1;

    // Adapter that invokes the loader for a single key
    final Function<K, @Nullable V> mappingFunction;
    // Adapter for bulk loading; null when the loader does not support loadAll
    final @Nullable Function<Set<? extends K>, Map<K, V>> bulkMappingFunction;

    BoundedLocalLoadingCache(Caffeine<K, V> builder, CacheLoader<? super K, V> loader) {
      super(builder, loader);
      requireNonNull(loader);
      mappingFunction = newMappingFunction(loader);
      bulkMappingFunction = newBulkMappingFunction(loader);
    }

    @Override
    @SuppressWarnings({"DataFlowIssue", "NullAway"})
    public AsyncCacheLoader<? super K, V> cacheLoader() {
      // Non-null here because the constructor required a loader
      return cache.cacheLoader;
    }

    @Override
    public Function<K, @Nullable V> mappingFunction() {
      return mappingFunction;
    }

    @Override
    public @Nullable Function<Set<? extends K>, Map<K, V>> bulkMappingFunction() {
      return bulkMappingFunction;
    }

    // Direct deserialization is rejected; the stream must contain the serialization proxy
    private void readObject(ObjectInputStream stream) throws InvalidObjectException {
      throw new InvalidObjectException("Proxy required");
    }

    private Object writeReplace() {
      return makeSerializationProxy(cache);
    }
  }
4510

4511
  /* --------------- Async Cache --------------- */
4512

4513
  /**
   * A bounded asynchronous cache. Values are stored as {@link CompletableFuture}s; the
   * synchronous and map views adapt over the shared underlying cache.
   */
  static final class BoundedLocalAsyncCache<K, V> implements LocalAsyncCache<K, V>, Serializable {
    private static final long serialVersionUID = 1;

    final BoundedLocalCache<K, CompletableFuture<V>> cache;
    final boolean isWeighted;

    // Lazily created views; the fields are not volatile, so racing first callers may each build
    // an instance (NOTE(review): presumably benign as the views are interchangeable — confirm)
    @Nullable ConcurrentMap<K, CompletableFuture<V>> mapView;
    @Nullable CacheView<K, V> cacheView;
    @Nullable Policy<K, V> policy;

    @SuppressWarnings("unchecked")
    BoundedLocalAsyncCache(Caffeine<K, V> builder) {
      cache = (BoundedLocalCache<K, CompletableFuture<V>>) LocalCacheFactory
          .newBoundedLocalCache(builder, /* cacheLoader= */ null, /* isAsync= */ true);
      isWeighted = builder.isWeighted();
    }

    @Override
    public BoundedLocalCache<K, CompletableFuture<V>> cache() {
      return cache;
    }

    @Override
    public ConcurrentMap<K, CompletableFuture<V>> asMap() {
      return (mapView == null) ? (mapView = new AsyncAsMapView<>(this)) : mapView;
    }

    @Override
    public Cache<K, V> synchronous() {
      return (cacheView == null) ? (cacheView = new CacheView<>(this)) : cacheView;
    }

    @Override
    public Policy<K, V> policy() {
      if (policy == null) {
        // The policy exposes plain values, so the transformer unwraps a stored future into its
        // completed value (or null when not yet complete)
        @SuppressWarnings("unchecked")
        var castCache = (BoundedLocalCache<K, V>) cache;
        Function<CompletableFuture<V>, @Nullable V> transformer = Async::getIfReady;
        @SuppressWarnings("unchecked")
        var castTransformer = (Function<@Nullable V, @Nullable V>) transformer;
        policy = new BoundedPolicy<>(castCache, castTransformer, isWeighted);
      }
      return policy;
    }

    // Direct deserialization is rejected; the stream must contain the serialization proxy
    private void readObject(ObjectInputStream stream) throws InvalidObjectException {
      throw new InvalidObjectException("Proxy required");
    }

    private Object writeReplace() {
      return makeSerializationProxy(cache);
    }
  }
4566

4567
  /* --------------- Async Loading Cache --------------- */
4568

4569
  /**
   * A bounded asynchronous cache that populates missing entries via an {@link AsyncCacheLoader}.
   * Values are stored as {@link CompletableFuture}s.
   */
  static final class BoundedLocalAsyncLoadingCache<K, V>
      extends LocalAsyncLoadingCache<K, V> implements Serializable {
    private static final long serialVersionUID = 1;

    final BoundedLocalCache<K, CompletableFuture<V>> cache;
    final boolean isWeighted;

    // Lazily created views; not volatile, so racing first callers may each build an instance
    // (NOTE(review): presumably benign as the views are interchangeable — confirm)
    @Nullable ConcurrentMap<K, CompletableFuture<V>> mapView;
    @Nullable Policy<K, V> policy;

    @SuppressWarnings("unchecked")
    BoundedLocalAsyncLoadingCache(Caffeine<K, V> builder, AsyncCacheLoader<? super K, V> loader) {
      super(loader);
      isWeighted = builder.isWeighted();
      cache = (BoundedLocalCache<K, CompletableFuture<V>>) LocalCacheFactory
          .newBoundedLocalCache(builder, loader, /* isAsync= */ true);
    }

    @Override
    public BoundedLocalCache<K, CompletableFuture<V>> cache() {
      return cache;
    }

    @Override
    public ConcurrentMap<K, CompletableFuture<V>> asMap() {
      return (mapView == null) ? (mapView = new AsyncAsMapView<>(this)) : mapView;
    }

    @Override
    public Policy<K, V> policy() {
      if (policy == null) {
        // The policy exposes plain values, so the transformer unwraps a stored future into its
        // completed value (or null when not yet complete)
        @SuppressWarnings("unchecked")
        var castCache = (BoundedLocalCache<K, V>) cache;
        Function<CompletableFuture<V>, @Nullable V> transformer = Async::getIfReady;
        @SuppressWarnings("unchecked")
        var castTransformer = (Function<@Nullable V, @Nullable V>) transformer;
        policy = new BoundedPolicy<>(castCache, castTransformer, isWeighted);
      }
      return policy;
    }

    // Direct deserialization is rejected; the stream must contain the serialization proxy
    private void readObject(ObjectInputStream stream) throws InvalidObjectException {
      throw new InvalidObjectException("Proxy required");
    }

    private Object writeReplace() {
      return makeSerializationProxy(cache);
    }
  }
4618
}
4619

4620
/** The namespace for field padding through inheritance. */
@SuppressWarnings({"MemberName", "MultiVariableDeclaration"})
final class BLCHeader {

  private BLCHeader() {}

  /** Pads the drain status away from neighboring fields' cache lines. */
  static class PadDrainStatus {
    byte p000, p001, p002, p003, p004, p005, p006, p007;
    byte p008, p009, p010, p011, p012, p013, p014, p015;
    byte p016, p017, p018, p019, p020, p021, p022, p023;
    byte p024, p025, p026, p027, p028, p029, p030, p031;
    byte p032, p033, p034, p035, p036, p037, p038, p039;
    byte p040, p041, p042, p043, p044, p045, p046, p047;
    byte p048, p049, p050, p051, p052, p053, p054, p055;
    byte p056, p057, p058, p059, p060, p061, p062, p063;
    byte p064, p065, p066, p067, p068, p069, p070, p071;
    byte p072, p073, p074, p075, p076, p077, p078, p079;
    byte p080, p081, p082, p083, p084, p085, p086, p087;
    byte p088, p089, p090, p091, p092, p093, p094, p095;
    byte p096, p097, p098, p099, p100, p101, p102, p103;
    byte p104, p105, p106, p107, p108, p109, p110, p111;
    byte p112, p113, p114, p115, p116, p117, p118, p119;
  }

  /** Enforces a memory layout to avoid false sharing by padding the drain status. */
  abstract static class DrainStatusRef extends PadDrainStatus {
    static final VarHandle DRAIN_STATUS = findVarHandle(
        DrainStatusRef.class, "drainStatus", int.class);

    /** A drain is not taking place. */
    static final int IDLE = 0;
    /** A drain is required due to a pending write modification. */
    static final int REQUIRED = 1;
    /** A drain is in progress and will transition to idle. */
    static final int PROCESSING_TO_IDLE = 2;
    /** A drain is in progress and will transition to required. */
    static final int PROCESSING_TO_REQUIRED = 3;

    /** The draining status of the buffers. */
    volatile int drainStatus = IDLE;

    /**
     * Returns whether maintenance work is needed.
     *
     * @param delayable if draining the read buffer can be delayed
     */
    boolean shouldDrainBuffers(boolean delayable) {
      int status = drainStatusOpaque();
      if (status == IDLE) {
        // No writes are pending, so a drain is only needed if reads cannot wait
        return !delayable;
      } else if (status == REQUIRED) {
        return true;
      } else if ((status == PROCESSING_TO_IDLE) || (status == PROCESSING_TO_REQUIRED)) {
        // Another thread is already draining
        return false;
      }
      throw new IllegalStateException("Invalid drain status: " + drainStatus);
    }

    /** Returns the status using opaque memory ordering. */
    int drainStatusOpaque() {
      return (int) DRAIN_STATUS.getOpaque(this);
    }

    /** Returns the status using acquire memory ordering. */
    int drainStatusAcquire() {
      return (int) DRAIN_STATUS.getAcquire(this);
    }

    /** Sets the status using opaque memory ordering. */
    void setDrainStatusOpaque(int status) {
      DRAIN_STATUS.setOpaque(this, status);
    }

    /** Sets the status using release memory ordering. */
    void setDrainStatusRelease(int status) {
      DRAIN_STATUS.setRelease(this, status);
    }

    /** Atomically transitions the status from the expected value to the update. */
    boolean casDrainStatus(int expect, int update) {
      return DRAIN_STATUS.compareAndSet(this, expect, update);
    }

    /** Resolves the {@link VarHandle} for the named instance field on the receiver class. */
    static VarHandle findVarHandle(Class<?> recv, String name, Class<?> type) {
      try {
        return MethodHandles.lookup().findVarHandle(recv, name, type);
      } catch (ReflectiveOperationException e) {
        throw new ExceptionInInitializerError(e);
      }
    }
  }
}
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc