• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

ben-manes / caffeine / #5157

03 Dec 2025 06:30AM UTC coverage: 0.0% (-100.0%) from 100.0%
#5157

push

github

ben-manes
add loading type to parameterized test dimensions to reduce task size

0 of 3834 branches covered (0.0%)

0 of 7848 relevant lines covered (0.0%)

0.0 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

0.0
/caffeine/src/main/java/com/github/benmanes/caffeine/cache/BoundedLocalCache.java
1
/*
2
 * Copyright 2014 Ben Manes. All Rights Reserved.
3
 *
4
 * Licensed under the Apache License, Version 2.0 (the "License");
5
 * you may not use this file except in compliance with the License.
6
 * You may obtain a copy of the License at
7
 *
8
 *     http://www.apache.org/licenses/LICENSE-2.0
9
 *
10
 * Unless required by applicable law or agreed to in writing, software
11
 * distributed under the License is distributed on an "AS IS" BASIS,
12
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
 * See the License for the specific language governing permissions and
14
 * limitations under the License.
15
 */
16
package com.github.benmanes.caffeine.cache;
17

18
import static com.github.benmanes.caffeine.cache.Async.ASYNC_EXPIRY;
19
import static com.github.benmanes.caffeine.cache.Caffeine.calculateHashMapCapacity;
20
import static com.github.benmanes.caffeine.cache.Caffeine.ceilingPowerOfTwo;
21
import static com.github.benmanes.caffeine.cache.Caffeine.requireArgument;
22
import static com.github.benmanes.caffeine.cache.Caffeine.toNanosSaturated;
23
import static com.github.benmanes.caffeine.cache.LocalLoadingCache.newBulkMappingFunction;
24
import static com.github.benmanes.caffeine.cache.LocalLoadingCache.newMappingFunction;
25
import static com.github.benmanes.caffeine.cache.Node.PROBATION;
26
import static com.github.benmanes.caffeine.cache.Node.PROTECTED;
27
import static com.github.benmanes.caffeine.cache.Node.WINDOW;
28
import static java.util.Locale.US;
29
import static java.util.Objects.requireNonNull;
30
import static java.util.Spliterator.DISTINCT;
31
import static java.util.Spliterator.IMMUTABLE;
32
import static java.util.Spliterator.NONNULL;
33
import static java.util.Spliterator.ORDERED;
34

35
import java.io.InvalidObjectException;
36
import java.io.ObjectInputStream;
37
import java.io.Serializable;
38
import java.lang.System.Logger;
39
import java.lang.System.Logger.Level;
40
import java.lang.invoke.MethodHandles;
41
import java.lang.invoke.VarHandle;
42
import java.lang.ref.Reference;
43
import java.lang.ref.ReferenceQueue;
44
import java.lang.ref.WeakReference;
45
import java.time.Duration;
46
import java.util.AbstractCollection;
47
import java.util.AbstractSet;
48
import java.util.ArrayDeque;
49
import java.util.Collection;
50
import java.util.Collections;
51
import java.util.Comparator;
52
import java.util.Deque;
53
import java.util.HashMap;
54
import java.util.IdentityHashMap;
55
import java.util.Iterator;
56
import java.util.LinkedHashMap;
57
import java.util.Map;
58
import java.util.NoSuchElementException;
59
import java.util.Objects;
60
import java.util.Optional;
61
import java.util.OptionalInt;
62
import java.util.OptionalLong;
63
import java.util.Set;
64
import java.util.Spliterator;
65
import java.util.Spliterators;
66
import java.util.concurrent.CancellationException;
67
import java.util.concurrent.CompletableFuture;
68
import java.util.concurrent.ConcurrentHashMap;
69
import java.util.concurrent.ConcurrentMap;
70
import java.util.concurrent.Executor;
71
import java.util.concurrent.ForkJoinPool;
72
import java.util.concurrent.ForkJoinTask;
73
import java.util.concurrent.ThreadLocalRandom;
74
import java.util.concurrent.TimeUnit;
75
import java.util.concurrent.TimeoutException;
76
import java.util.concurrent.locks.ReentrantLock;
77
import java.util.function.BiConsumer;
78
import java.util.function.BiFunction;
79
import java.util.function.Consumer;
80
import java.util.function.Function;
81
import java.util.function.Predicate;
82
import java.util.stream.Stream;
83
import java.util.stream.StreamSupport;
84

85
import org.jspecify.annotations.Nullable;
86

87
import com.github.benmanes.caffeine.cache.Async.AsyncExpiry;
88
import com.github.benmanes.caffeine.cache.LinkedDeque.PeekingIterator;
89
import com.github.benmanes.caffeine.cache.Policy.CacheEntry;
90
import com.github.benmanes.caffeine.cache.References.InternalReference;
91
import com.github.benmanes.caffeine.cache.stats.StatsCounter;
92
import com.google.errorprone.annotations.CanIgnoreReturnValue;
93
import com.google.errorprone.annotations.Var;
94
import com.google.errorprone.annotations.concurrent.GuardedBy;
95

96
/**
97
 * An in-memory cache implementation that supports full concurrency of retrievals, a high expected
98
 * concurrency for updates, and multiple ways to bound the cache.
99
 * <p>
100
 * This class is abstract and code generated subclasses provide the complete implementation for a
101
 * particular configuration. This is to ensure that only the fields and execution paths necessary
102
 * for a given configuration are used.
103
 *
104
 * @author ben.manes@gmail.com (Ben Manes)
105
 * @param <K> the type of keys maintained by this cache
106
 * @param <V> the type of mapped values
107
 */
108
@SuppressWarnings("serial")
109
abstract class BoundedLocalCache<K, V> extends BLCHeader.DrainStatusRef
110
    implements LocalCache<K, V> {
111

112
  /*
113
   * This class performs a best-effort bounding of a ConcurrentHashMap using a page-replacement
114
   * algorithm to determine which entries to evict when the capacity is exceeded.
115
   *
116
   * Concurrency:
117
   * ------------
118
   * The page replacement algorithms are kept eventually consistent with the map. An update to the
119
   * map and recording of reads may not be immediately reflected in the policy's data structures.
120
   * These structures are guarded by a lock, and operations are applied in batches to avoid lock
121
   * contention. The penalty of applying the batches is spread across threads, so that the amortized
122
   * cost is slightly higher than performing just the ConcurrentHashMap operation [1].
123
   *
124
   * A memento of the reads and writes that were performed on the map is recorded in buffers. These
125
   * buffers are drained at the first opportunity after a write or when a read buffer is full. The
126
   * reads are offered to a buffer that will reject additions if contended on or if it is full. Due
127
   * to the concurrent nature of the read and write operations, a strict policy ordering is not
128
   * possible, but it may be observably strict when single-threaded. The buffers are drained
129
   * asynchronously to minimize the request latency and uses a state machine to determine when to
130
   * schedule this work on an executor.
131
   *
132
   * Due to a lack of a strict ordering guarantee, a task can be executed out-of-order, such as a
133
   * removal followed by its addition. The state of the entry is encoded using the key field to
134
   * avoid additional memory usage. An entry is "alive" if it is in both the hash table and the page
135
   * replacement policy. It is "retired" if it is not in the hash table and is pending removal from
136
   * the page replacement policy. Finally, an entry transitions to the "dead" state when it is
137
   * neither in the hash table nor the page replacement policy. Both the retired and dead states are
138
   * represented by a sentinel key that should not be used for map operations.
139
   *
140
   * Eviction:
141
   * ---------
142
   * Maximum size is implemented using the Window TinyLfu policy [2] due to its high hit rate, O(1)
143
   * time complexity, and small footprint. A new entry starts in the admission window and remains
144
   * there as long as it has high temporal locality (recency). Eventually an entry will slip from
145
   * the window into the main space. If the main space is already full, then a historic frequency
146
   * filter determines whether to evict the newly admitted entry or the victim entry chosen by the
147
   * eviction policy. This process ensures that the entries in the window were very recently used,
148
   * while entries in the main space are accessed very frequently and remain moderately recent. The
149
   * windowing allows the policy to have a high hit rate when entries exhibit a bursty access
150
   * pattern, while the filter ensures that popular items are retained. The admission window uses
151
   * LRU and the main space uses Segmented LRU.
152
   *
153
   * The optimal size of the window vs. main spaces is workload dependent [3]. A large admission
154
   * window is favored by recency-biased workloads, while a small one favors frequency-biased
155
   * workloads. When the window is too small, then recent arrivals are prematurely evicted, but when
156
   * it is too large, then they pollute the cache and force the eviction of more popular entries.
157
   * The optimal configuration is dynamically determined by using hill climbing to walk the hit rate
158
   * curve. This is achieved by sampling the hit rate and adjusting the window size in the direction
159
   * that is improving (making positive or negative steps). At each interval, the step size is
160
   * decreased until the hit rate climber converges at the optimal setting. The process is restarted
161
   * when the hit rate changes over a threshold, indicating that the workload altered, and a new
162
   * setting may be required.
163
   *
164
   * The historic usage is retained in a compact popularity sketch, which uses hashing to
165
   * probabilistically estimate an item's frequency. This exposes a flaw where an adversary could
166
   * use hash flooding [4] to artificially raise the frequency of the main space's victim and cause
167
   * all candidates to be rejected. In the worst case, by exploiting hash collisions, an attacker
168
   * could cause the cache to never hit and hold only worthless items, resulting in a
169
   * denial-of-service attack against the underlying resource. This is mitigated by introducing
170
   * jitter, allowing candidates that are at least moderately popular to have a small, random chance
171
   * of being admitted. This causes the victim to be evicted, but in a way that marginally impacts
172
   * the hit rate.
173
   *
174
   * Expiration:
175
   * -----------
176
   * Expiration is implemented in O(1) time complexity. The time-to-idle policy uses an access-order
177
   * queue, the time-to-live policy uses a write-order queue, and variable expiration uses a
178
   * hierarchical timer wheel [5]. The queuing policies allow for peeking at the oldest entry to
179
   * determine if it has expired. If it has not, then the younger entries must not have expired
180
   * either. If a maximum size is set, then expiration will share the queues, minimizing the
181
   * per-entry footprint. The timer wheel based policy uses hashing and cascading in a manner that
182
   * amortizes the penalty of sorting to achieve a similar algorithmic cost.
183
   *
184
   * The expiration updates are applied in a best effort fashion. The reordering of variable or
185
   * access-order expiration may be discarded by the read buffer if it is full or contended.
186
   * Similarly, the reordering of write expiration may be ignored for an entry if the last update
187
   * was within a short time window. This is done to avoid overwhelming the write buffer.
188
   *
189
   * [1] BP-Wrapper: A Framework Making Any Replacement Algorithms (Almost) Lock Contention Free
190
   * https://web.njit.edu/~dingxn/papers/BP-Wrapper.pdf
191
   * [2] TinyLFU: A Highly Efficient Cache Admission Policy
192
   * https://dl.acm.org/citation.cfm?id=3149371
193
   * [3] Adaptive Software Cache Management
194
   * https://dl.acm.org/citation.cfm?id=3274816
195
   * [4] Denial of Service via Algorithmic Complexity Attack
196
   * https://www.usenix.org/legacy/events/sec03/tech/full_papers/crosby/crosby.pdf
197
   * [5] Hashed and Hierarchical Timing Wheels
198
   * http://www.cs.columbia.edu/~nahum/w6998/papers/ton97-timing-wheels.pdf
199
   */
200

201
  static final Logger logger = System.getLogger(BoundedLocalCache.class.getName());
×
202

203
  /** The number of CPUs */
204
  static final int NCPU = Runtime.getRuntime().availableProcessors();
×
205
  /** The initial capacity of the write buffer. */
206
  static final int WRITE_BUFFER_MIN = 4;
207
  /** The maximum capacity of the write buffer. */
208
  static final int WRITE_BUFFER_MAX = 128 * ceilingPowerOfTwo(NCPU);
×
209
  /** The number of attempts to insert into the write buffer before yielding. */
210
  static final int WRITE_BUFFER_RETRIES = 100;
211
  /** The maximum weighted capacity of the map. */
212
  static final long MAXIMUM_CAPACITY = Long.MAX_VALUE - Integer.MAX_VALUE;
213
  /** The initial percent of the maximum weighted capacity dedicated to the main space. */
214
  static final double PERCENT_MAIN = 0.99d;
215
  /** The percent of the maximum weighted capacity dedicated to the main's protected space. */
216
  static final double PERCENT_MAIN_PROTECTED = 0.80d;
217
  /** The difference in hit rates that restarts the climber. */
218
  static final double HILL_CLIMBER_RESTART_THRESHOLD = 0.05d;
219
  /** The percent of the total size to adapt the window by. */
220
  static final double HILL_CLIMBER_STEP_PERCENT = 0.0625d;
221
  /** The rate to decrease the step size to adapt by. */
222
  static final double HILL_CLIMBER_STEP_DECAY_RATE = 0.98d;
223
  /** The minimum popularity for allowing randomized admission. */
224
  static final int ADMIT_HASHDOS_THRESHOLD = 6;
225
  /** The maximum number of entries that can be transferred between queues. */
226
  static final int QUEUE_TRANSFER_THRESHOLD = 1_000;
227
  /** The maximum time window between entry updates before the expiration must be reordered. */
228
  static final long EXPIRE_WRITE_TOLERANCE = TimeUnit.SECONDS.toNanos(1);
×
229
  /** The maximum duration before an entry expires. */
230
  static final long MAXIMUM_EXPIRY = (Long.MAX_VALUE >> 1); // 150 years
231
  /** The duration to wait on the eviction lock before warning of a possible misuse. */
232
  static final long WARN_AFTER_LOCK_WAIT_NANOS = TimeUnit.SECONDS.toNanos(30);
×
233
  /** The number of retries before computing to validate the entry's integrity; pow2 modulus. */
234
  static final int MAX_PUT_SPIN_WAIT_ATTEMPTS = 1024 - 1;
235
  /** The handle for the in-flight refresh operations. */
236
  static final VarHandle REFRESHES = findVarHandle(
×
237
      BoundedLocalCache.class, "refreshes", ConcurrentMap.class);
238

239
  // Listener invoked synchronously when an entry is evicted; null when not configured
  final @Nullable RemovalListener<K, V> evictionListener;
  // Loader used by loading/refreshing caches; null for a manual cache
  final @Nullable AsyncCacheLoader<K, V> cacheLoader;

  // Buffer of pending policy updates caused by writes; drained under the eviction lock
  final MpscGrowableArrayQueue<Runnable> writeBuffer;
  // The backing hash table holding the cache's entries
  final ConcurrentHashMap<Object, Node<K, V>> data;
  // Reusable task that drains the buffers on the executor
  final PerformCleanupTask drainBuffersTask;
  // Applied on reads to update the eviction policy (no-op when not needed)
  final Consumer<Node<K, V>> accessPolicy;
  // Buffer of reads pending replay into the policy; disabled when reads are irrelevant
  final Buffer<Node<K, V>> readBuffer;
  final NodeFactory<K, V> nodeFactory;
  // Guards the page-replacement policy's data structures
  final ReentrantLock evictionLock;
  final Weigher<K, V> weigher;
  final Executor executor;

  final boolean isWeighted;
  final boolean isAsync;

  // Lazily created view collections
  @Nullable Set<K> keySet;
  @Nullable Collection<V> values;
  @Nullable Set<Entry<K, V>> entrySet;
  // Lazily created map of in-flight refreshes; published via the REFRESHES VarHandle
  volatile @Nullable ConcurrentMap<Object, CompletableFuture<?>> refreshes;
259

260
  /** Creates an instance based on the builder's configuration. */
  @SuppressWarnings("GuardedBy")
  protected BoundedLocalCache(Caffeine<K, V> builder,
      @Nullable AsyncCacheLoader<K, V> cacheLoader, boolean isAsync) {
    this.isAsync = isAsync;
    this.cacheLoader = cacheLoader;
    executor = builder.getExecutor();
    isWeighted = builder.isWeighted();
    evictionLock = new ReentrantLock();
    weigher = builder.getWeigher(isAsync);
    drainBuffersTask = new PerformCleanupTask(this);
    nodeFactory = NodeFactory.newFactory(builder, isAsync);
    evictionListener = builder.getEvictionListener(isAsync);
    data = new ConcurrentHashMap<>(builder.getInitialCapacity());
    // Reads only need to be recorded when they influence eviction, expiration,
    // or reference collection; otherwise use a no-op buffer
    readBuffer = evicts() || collectKeys() || collectValues() || expiresAfterAccess()
        ? new BoundedBuffer<>()
        : Buffer.disabled();
    accessPolicy = (evicts() || expiresAfterAccess()) ? this::onAccess : e -> {};
    writeBuffer = new MpscGrowableArrayQueue<>(WRITE_BUFFER_MIN, WRITE_BUFFER_MAX);

    if (evicts()) {
      setMaximumSize(builder.getMaximum());
    }
  }
284

285
  /** Ensures that the node is alive during the map operation. */
286
  void requireIsAlive(Object key, Node<?, ?> node) {
287
    if (!node.isAlive()) {
×
288
      throw new IllegalStateException(brokenEqualityMessage(key, node));
×
289
    }
290
  }
×
291

292
  /** Logs if the node cannot be found in the map but is still alive. */
293
  void logIfAlive(Node<?, ?> node) {
294
    if (node.isAlive()) {
×
295
      String message = brokenEqualityMessage(node.getKeyReference(), node);
×
296
      logger.log(Level.ERROR, message, new IllegalStateException());
×
297
    }
298
  }
×
299

300
  /** Returns the formatted broken equality error message. */
301
  String brokenEqualityMessage(Object key, Node<?, ?> node) {
302
    return String.format(US, "An invalid state was detected, occurring when the key's equals or "
×
303
        + "hashCode was modified while residing in the cache. This violation of the Map "
304
        + "contract can lead to non-deterministic behavior (key: %s, key type: %s, "
305
        + "node type: %s, cache type: %s).", key, key.getClass().getName(),
×
306
        node.getClass().getSimpleName(), getClass().getSimpleName());
×
307
  }
308

309
  /* --------------- Shared --------------- */
310

311
  @Override
  public boolean isAsync() {
    return isAsync;
  }

  /** Returns if the node's value is currently being computed asynchronously. */
  final boolean isComputingAsync(@Nullable V value) {
    // In async mode every value is a CompletableFuture, so an incomplete future
    // indicates an in-flight computation
    return isAsync && !Async.isReady((CompletableFuture<?>) value);
  }
320

321
  /** Returns the access-order deque for the window space; provided by a generated subclass. */
  @GuardedBy("evictionLock")
  protected AccessOrderDeque<Node<K, V>> accessOrderWindowDeque() {
    throw new UnsupportedOperationException();
  }

  /** Returns the access-order deque for the main probation space. */
  @GuardedBy("evictionLock")
  protected AccessOrderDeque<Node<K, V>> accessOrderProbationDeque() {
    throw new UnsupportedOperationException();
  }

  /** Returns the access-order deque for the main protected space. */
  @GuardedBy("evictionLock")
  protected AccessOrderDeque<Node<K, V>> accessOrderProtectedDeque() {
    throw new UnsupportedOperationException();
  }

  /** Returns the write-order deque used by the time-to-live policy. */
  @GuardedBy("evictionLock")
  protected WriteOrderDeque<Node<K, V>> writeOrderDeque() {
    throw new UnsupportedOperationException();
  }

  @Override
  public final Executor executor() {
    return executor;
  }
345

346
  @Override
347
  public ConcurrentMap<Object, CompletableFuture<?>> refreshes() {
348
    @Var var pending = refreshes;
×
349
    if (pending == null) {
×
350
      pending = new ConcurrentHashMap<>();
×
351
      if (!REFRESHES.compareAndSet(this, null, pending)) {
×
352
        pending = requireNonNull(refreshes);
×
353
      }
354
    }
355
    return pending;
×
356
  }
357

358
  /** Invalidate the in-flight refresh. */
359
  @SuppressWarnings("RedundantCollectionOperation")
360
  void discardRefresh(Object keyReference) {
361
    var pending = refreshes;
×
362
    if ((pending != null) && pending.containsKey(keyReference)) {
×
363
      pending.remove(keyReference);
×
364
    }
365
  }
×
366

367
  @Override
  public Object referenceKey(K key) {
    // The lookup key may differ from the key itself when reference caching is
    // used — the node factory decides
    return nodeFactory.newLookupKey(key);
  }
371

372
  @Override
373
  public boolean isPendingEviction(K key) {
374
    Node<K, V> node = data.get(nodeFactory.newLookupKey(key));
×
375
    return (node != null)
×
376
        && ((node.getValue() == null) || hasExpired(node, expirationTicker().read()));
×
377
  }
378

379
  /* --------------- Stats Support --------------- */
380

381
  @Override
  public boolean isRecordingStats() {
    // Overridden by a code generated subclass when statistics are enabled
    return false;
  }

  @Override
  public StatsCounter statsCounter() {
    return StatsCounter.disabledStatsCounter();
  }

  @Override
  public Ticker statsTicker() {
    return Ticker.disabledTicker();
  }
395

396
  /* --------------- Removal Listener Support --------------- */
397

398
  /** Returns the removal listener; null here, guarded by {@link #hasRemovalListener()}. */
  @SuppressWarnings("NullAway")
  protected RemovalListener<K, V> removalListener() {
    return null;
  }

  /** Returns whether a removal listener was configured. */
  protected boolean hasRemovalListener() {
    return false;
  }
406

407
  @Override
  public void notifyRemoval(@Nullable K key, @Nullable V value, RemovalCause cause) {
    if (!hasRemovalListener()) {
      return;
    }
    // The listener runs asynchronously on the executor; its failures are logged
    // rather than propagated
    Runnable task = () -> {
      try {
        removalListener().onRemoval(key, value, cause);
      } catch (Throwable t) {
        logger.log(Level.WARNING, "Exception thrown by removal listener", t);
      }
    };
    try {
      executor.execute(task);
    } catch (Throwable t) {
      // If the executor rejects the task then run it on the caller's thread so
      // the notification is not lost
      logger.log(Level.ERROR, "Exception thrown when submitting removal listener", t);
      task.run();
    }
  }
426

427
  /* --------------- Eviction Listener Support --------------- */
428

429
  /** Notifies the eviction listener synchronously, if one was configured. */
  void notifyEviction(@Nullable K key, @Nullable V value, RemovalCause cause) {
    if (evictionListener == null) {
      return;
    }
    try {
      evictionListener.onRemoval(key, value, cause);
    } catch (Throwable t) {
      // Listener failures are logged rather than propagated to the cache operation
      logger.log(Level.WARNING, "Exception thrown by eviction listener", t);
    }
  }
439

440
  /* --------------- Reference Support --------------- */
441

442
  /** Returns if the keys are weak reference garbage collected. */
  protected boolean collectKeys() {
    return false;
  }

  /** Returns if the values are weak or soft reference garbage collected. */
  protected boolean collectValues() {
    return false;
  }

  /** Returns the queue of reclaimed keys; null in this non-collecting default. */
  @SuppressWarnings("NullAway")
  protected ReferenceQueue<K> keyReferenceQueue() {
    return null;
  }

  /** Returns the queue of reclaimed values; null in this non-collecting default. */
  @SuppressWarnings("NullAway")
  protected ReferenceQueue<V> valueReferenceQueue() {
    return null;
  }
461

462
  /* --------------- Expiration Support --------------- */
463

464
  /** Returns the {@link Pacer} used to schedule the maintenance task. */
  protected @Nullable Pacer pacer() {
    return null;
  }

  /** Returns if the cache expires entries after a variable time threshold. */
  protected boolean expiresVariable() {
    return false;
  }

  /** Returns if the cache expires entries after an access time threshold. */
  protected boolean expiresAfterAccess() {
    return false;
  }

  /** Returns how long after the last access to an entry the map will retain that entry. */
  protected long expiresAfterAccessNanos() {
    throw new UnsupportedOperationException();
  }

  /** Sets how long after the last access to an entry the map will retain that entry. */
  protected void setExpiresAfterAccessNanos(long expireAfterAccessNanos) {
    throw new UnsupportedOperationException();
  }

  /** Returns if the cache expires entries after a write time threshold. */
  protected boolean expiresAfterWrite() {
    return false;
  }

  /** Returns how long after the last write to an entry the map will retain that entry. */
  protected long expiresAfterWriteNanos() {
    throw new UnsupportedOperationException();
  }

  /** Sets how long after the last write to an entry the map will retain that entry. */
  protected void setExpiresAfterWriteNanos(long expireAfterWriteNanos) {
    throw new UnsupportedOperationException();
  }

  /** Returns if the cache refreshes entries after a write time threshold. */
  protected boolean refreshAfterWrite() {
    return false;
  }

  /** Returns how long after the last write an entry becomes a candidate for refresh. */
  protected long refreshAfterWriteNanos() {
    throw new UnsupportedOperationException();
  }

  /** Sets how long after the last write an entry becomes a candidate for refresh. */
  protected void setRefreshAfterWriteNanos(long refreshAfterWriteNanos) {
    throw new UnsupportedOperationException();
  }

  /** Returns the {@link Expiry}; null here, when variable expiration is not used. */
  @Override
  @SuppressWarnings("NullAway")
  public Expiry<K, V> expiry() {
    return null;
  }

  /** Returns the {@link Ticker} used by this cache for expiration. */
  public Ticker expirationTicker() {
    return Ticker.disabledTicker();
  }

  /** Returns the timer wheel used by the variable expiration policy. */
  protected TimerWheel<K, V> timerWheel() {
    throw new UnsupportedOperationException();
  }
530

531
  /* --------------- Eviction Support --------------- */
532

533
  /** Returns if the cache evicts entries due to a maximum size or weight threshold. */
  protected boolean evicts() {
    return false;
  }

  /** Returns if entries may be assigned different weights. */
  protected boolean isWeighted() {
    return (weigher != Weigher.singletonWeigher());
  }

  /** Returns the popularity sketch used by the admission filter. */
  protected FrequencySketch frequencySketch() {
    throw new UnsupportedOperationException();
  }

  /** Returns if an access to an entry can skip notifying the eviction policy. */
  protected boolean fastpath() {
    return false;
  }

  /** Returns the maximum weighted size. */
  protected long maximum() {
    throw new UnsupportedOperationException();
  }

  /** Returns the maximum weighted size. */
  protected long maximumAcquire() {
    throw new UnsupportedOperationException();
  }

  /** Returns the maximum weighted size of the window space. */
  protected long windowMaximum() {
    throw new UnsupportedOperationException();
  }

  /** Returns the maximum weighted size of the main's protected space. */
  protected long mainProtectedMaximum() {
    throw new UnsupportedOperationException();
  }

  /** Sets the maximum weighted size. */
  @GuardedBy("evictionLock")
  protected void setMaximum(long maximum) {
    throw new UnsupportedOperationException();
  }

  /** Sets the maximum weighted size of the window space. */
  @GuardedBy("evictionLock")
  protected void setWindowMaximum(long maximum) {
    throw new UnsupportedOperationException();
  }

  /** Sets the maximum weighted size of the main's protected space. */
  @GuardedBy("evictionLock")
  protected void setMainProtectedMaximum(long maximum) {
    throw new UnsupportedOperationException();
  }
586

587
  /** Returns the combined weight of the values in the cache (may be negative). */
  protected long weightedSize() {
    throw new UnsupportedOperationException();
  }

  /** Returns the combined weight of the values in the cache (may be negative). */
  protected long weightedSizeAcquire() {
    throw new UnsupportedOperationException();
  }

  /** Returns the uncorrected combined weight of the values in the window space. */
  protected long windowWeightedSize() {
    throw new UnsupportedOperationException();
  }

  /** Returns the uncorrected combined weight of the values in the main's protected space. */
  protected long mainProtectedWeightedSize() {
    throw new UnsupportedOperationException();
  }

  /** Sets the combined weight of the values in the cache. */
  @GuardedBy("evictionLock")
  protected void setWeightedSize(long weightedSize) {
    throw new UnsupportedOperationException();
  }

  /** Sets the combined weight of the values in the window space. */
  @GuardedBy("evictionLock")
  protected void setWindowWeightedSize(long weightedSize) {
    throw new UnsupportedOperationException();
  }

  /** Sets the combined weight of the values in the main's protected space. */
  @GuardedBy("evictionLock")
  protected void setMainProtectedWeightedSize(long weightedSize) {
    throw new UnsupportedOperationException();
  }

  /** Returns the number of hits in the hill climber's current sample period. */
  protected int hitsInSample() {
    throw new UnsupportedOperationException();
  }

  /** Returns the number of misses in the hill climber's current sample period. */
  protected int missesInSample() {
    throw new UnsupportedOperationException();
  }

  /** Returns the hill climber's sample count. */
  protected int sampleCount() {
    throw new UnsupportedOperationException();
  }

  /** Returns the hill climber's current step size. */
  protected double stepSize() {
    throw new UnsupportedOperationException();
  }

  /** Returns the hit rate observed in the previous sample period. */
  protected double previousSampleHitRate() {
    throw new UnsupportedOperationException();
  }

  /** Returns the pending adjustment to the window size. */
  protected long adjustment() {
    throw new UnsupportedOperationException();
  }

  /** Sets the number of hits in the hill climber's current sample period. */
  @GuardedBy("evictionLock")
  protected void setHitsInSample(int hitCount) {
    throw new UnsupportedOperationException();
  }

  /** Sets the number of misses in the hill climber's current sample period. */
  @GuardedBy("evictionLock")
  protected void setMissesInSample(int missCount) {
    throw new UnsupportedOperationException();
  }

  /** Sets the hill climber's sample count. */
  @GuardedBy("evictionLock")
  protected void setSampleCount(int sampleCount) {
    throw new UnsupportedOperationException();
  }

  /** Sets the hill climber's step size. */
  @GuardedBy("evictionLock")
  protected void setStepSize(double stepSize) {
    throw new UnsupportedOperationException();
  }

  /** Sets the hit rate observed in the previous sample period. */
  @GuardedBy("evictionLock")
  protected void setPreviousSampleHitRate(double hitRate) {
    throw new UnsupportedOperationException();
  }

  /** Sets the pending adjustment to the window size. */
  @GuardedBy("evictionLock")
  protected void setAdjustment(long amount) {
    throw new UnsupportedOperationException();
  }
675

676
  /**
   * Sets the maximum weighted size of the cache. The caller may need to perform a maintenance cycle
   * to eagerly evict entries until the cache shrinks to the appropriate size.
   */
  @GuardedBy("evictionLock")
  @SuppressWarnings("Varifier")
  void setMaximumSize(long maximum) {
    requireArgument(maximum >= 0, "maximum must not be negative");
    if (maximum == maximum()) {
      return;
    }

    // Partition the capacity between the admission window and the main space, then
    // reserve the protected portion of the main space
    long max = Math.min(maximum, MAXIMUM_CAPACITY);
    long window = max - (long) (PERCENT_MAIN * max);
    long mainProtected = (long) (PERCENT_MAIN_PROTECTED * (max - window));

    setMaximum(max);
    setWindowMaximum(window);
    setMainProtectedMaximum(mainProtected);

    // Reset the hill climber's sample so adaptation restarts from this configuration
    setHitsInSample(0);
    setMissesInSample(0);
    setStepSize(-HILL_CLIMBER_STEP_PERCENT * max);

    if ((frequencySketch() != null) && !isWeighted() && (weightedSize() >= (max >>> 1))) {
      // Lazily initialize when close to the maximum size
      frequencySketch().ensureCapacity(max);
    }
  }
705

706
  /** Evicts entries if the cache exceeds the maximum. */
707
  @GuardedBy("evictionLock")
708
  void evictEntries() {
709
    if (!evicts()) {
×
710
      return;
×
711
    }
712
    var candidate = evictFromWindow();
×
713
    evictFromMain(candidate);
×
714
  }
×
715

716
  /**
   * Evicts entries from the window space into the main space while the window size exceeds a
   * maximum.
   *
   * @return the first candidate promoted into the probation space
   */
  @GuardedBy("evictionLock")
  @Nullable Node<K, V> evictFromWindow() {
    @Var Node<K, V> first = null;
    @Var Node<K, V> node = accessOrderWindowDeque().peekFirst();
    while (windowWeightedSize() > windowMaximum()) {
      // The pending operations will adjust the size to reflect the correct weight
      if (node == null) {
        break;
      }

      Node<K, V> next = node.getNextInAccessOrder();
      // Zero-weight entries are skipped; they do not contribute to the weighted size
      if (node.getPolicyWeight() != 0) {
        node.makeMainProbation();
        accessOrderWindowDeque().remove(node);
        accessOrderProbationDeque().offerLast(node);
        if (first == null) {
          first = node;
        }

        setWindowWeightedSize(windowWeightedSize() - node.getPolicyWeight());
      }
      node = next;
    }

    return first;
  }
748

749
  /**
   * Evicts entries from the main space if the cache exceeds the maximum capacity. The main space
   * determines whether admitting an entry (coming from the window space) is preferable to retaining
   * the eviction policy's victim. This decision is made using a frequency filter so that the
   * least frequently used entry is removed.
   * <p>
   * The window space's candidates were previously promoted to the probation space at its MRU
   * position and the eviction policy's victim starts at the LRU position. The candidates are
   * evaluated in promotion order while an eviction is required, and if exhausted then additional
   * entries are retrieved from the window space. Likewise, if the victim selection exhausts the
   * probation space then additional entries are retrieved from the protected space. The queues are
   * consumed in LRU order and the evicted entry is the one with a lower relative frequency, where
   * the preference is to retain the main space's victims versus the window space's candidates on a
   * tie.
   *
   * @param candidate the first candidate promoted into the probation space
   */
  @GuardedBy("evictionLock")
  void evictFromMain(@Var @Nullable Node<K, V> candidate) {
    // Two cursors walk the queues: the victim from the LRU end and the candidate in promotion
    // order; the queue constants track which deque each cursor is currently traversing.
    @Var int victimQueue = PROBATION;
    @Var int candidateQueue = PROBATION;
    @Var Node<K, V> victim = accessOrderProbationDeque().peekFirst();
    while (weightedSize() > maximum()) {
      // Search the admission window for additional candidates
      if ((candidate == null) && (candidateQueue == PROBATION)) {
        candidate = accessOrderWindowDeque().peekFirst();
        candidateQueue = WINDOW;
      }

      // Try evicting from the protected and window queues
      if ((candidate == null) && (victim == null)) {
        if (victimQueue == PROBATION) {
          victim = accessOrderProtectedDeque().peekFirst();
          victimQueue = PROTECTED;
          continue;
        } else if (victimQueue == PROTECTED) {
          victim = accessOrderWindowDeque().peekFirst();
          victimQueue = WINDOW;
          continue;
        }

        // The pending operations will adjust the size to reflect the correct weight
        break;
      }

      // Skip over entries with zero weight
      if ((victim != null) && (victim.getPolicyWeight() == 0)) {
        victim = victim.getNextInAccessOrder();
        continue;
      } else if ((candidate != null) && (candidate.getPolicyWeight() == 0)) {
        candidate = candidate.getNextInAccessOrder();
        continue;
      }

      // Evict immediately if only one of the entries is present
      if (victim == null) {
        requireNonNull(candidate);
        Node<K, V> previous = candidate.getNextInAccessOrder();
        Node<K, V> evict = candidate;
        candidate = previous;
        evictEntry(evict, RemovalCause.SIZE, 0L);
        continue;
      } else if (candidate == null) {
        Node<K, V> evict = victim;
        victim = victim.getNextInAccessOrder();
        evictEntry(evict, RemovalCause.SIZE, 0L);
        continue;
      }

      // Evict immediately if both selected the same entry
      if (candidate == victim) {
        victim = victim.getNextInAccessOrder();
        evictEntry(candidate, RemovalCause.SIZE, 0L);
        candidate = null;
        continue;
      }

      // Evict immediately if an entry was collected
      var victimKeyRef = victim.getKeyReferenceOrNull();
      var candidateKeyRef = candidate.getKeyReferenceOrNull();
      if (victimKeyRef == null) {
        Node<K, V> evict = victim;
        victim = victim.getNextInAccessOrder();
        evictEntry(evict, RemovalCause.COLLECTED, 0L);
        continue;
      } else if (candidateKeyRef == null) {
        Node<K, V> evict = candidate;
        candidate = candidate.getNextInAccessOrder();
        evictEntry(evict, RemovalCause.COLLECTED, 0L);
        continue;
      }

      // Evict immediately if an entry was removed
      if (!victim.isAlive()) {
        Node<K, V> evict = victim;
        victim = victim.getNextInAccessOrder();
        evictEntry(evict, RemovalCause.SIZE, 0L);
        continue;
      } else if (!candidate.isAlive()) {
        Node<K, V> evict = candidate;
        candidate = candidate.getNextInAccessOrder();
        evictEntry(evict, RemovalCause.SIZE, 0L);
        continue;
      }

      // Evict immediately if the candidate's weight exceeds the maximum
      if (candidate.getPolicyWeight() > maximum()) {
        Node<K, V> evict = candidate;
        candidate = candidate.getNextInAccessOrder();
        evictEntry(evict, RemovalCause.SIZE, 0L);
        continue;
      }

      // Evict the entry with the lowest frequency; admit(...) returning true means the
      // candidate won and the victim is removed, else the candidate is removed
      if (admit(candidateKeyRef, victimKeyRef)) {
        Node<K, V> evict = victim;
        victim = victim.getNextInAccessOrder();
        evictEntry(evict, RemovalCause.SIZE, 0L);
        candidate = candidate.getNextInAccessOrder();
      } else {
        Node<K, V> evict = candidate;
        candidate = candidate.getNextInAccessOrder();
        evictEntry(evict, RemovalCause.SIZE, 0L);
      }
    }
  }
×
875

876
  /**
877
   * Determines if the candidate should be accepted into the main space, as determined by its
878
   * frequency relative to the victim. A small amount of randomness is used to protect against hash
879
   * collision attacks, where the victim's frequency is artificially raised so that no new entries
880
   * are admitted.
881
   *
882
   * @param candidateKeyRef the keyRef for the entry being proposed for long term retention
883
   * @param victimKeyRef the keyRef for the entry chosen by the eviction policy for replacement
884
   * @return if the candidate should be admitted and the victim ejected
885
   */
886
  @GuardedBy("evictionLock")
887
  boolean admit(Object candidateKeyRef, Object victimKeyRef) {
888
    int candidateFreq = frequencySketch().frequency(candidateKeyRef);
×
889
    int victimFreq = frequencySketch().frequency(victimKeyRef);
×
890
    if (candidateFreq > victimFreq) {
×
891
      return true;
×
892
    } else if (candidateFreq >= ADMIT_HASHDOS_THRESHOLD) {
×
893
      // The maximum frequency is 15 and halved to 7 after a reset to age the history. An attack
894
      // exploits that a hot candidate is rejected in favor of a hot victim. The threshold of a warm
895
      // candidate reduces the number of random acceptances to minimize the impact on the hit rate.
896
      int random = ThreadLocalRandom.current().nextInt();
×
897
      return ((random & 127) == 0);
×
898
    }
899
    return false;
×
900
  }
901

902
  /** Expires entries that have expired by access, write, or variable. */
903
  @GuardedBy("evictionLock")
904
  void expireEntries() {
905
    long now = expirationTicker().read();
×
906
    expireAfterAccessEntries(now);
×
907
    expireAfterWriteEntries(now);
×
908
    expireVariableEntries(now);
×
909

910
    Pacer pacer = pacer();
×
911
    if (pacer != null) {
×
912
      long delay = getExpirationDelay(now);
×
913
      if (delay == Long.MAX_VALUE) {
×
914
        pacer.cancel();
×
915
      } else {
916
        pacer.schedule(executor, drainBuffersTask, now, delay);
×
917
      }
918
    }
919
  }
×
920

921
  /** Expires entries in the access-order queue. */
922
  @GuardedBy("evictionLock")
923
  void expireAfterAccessEntries(long now) {
924
    if (!expiresAfterAccess()) {
×
925
      return;
×
926
    }
927

928
    expireAfterAccessEntries(now, accessOrderWindowDeque());
×
929
    if (evicts()) {
×
930
      expireAfterAccessEntries(now, accessOrderProbationDeque());
×
931
      expireAfterAccessEntries(now, accessOrderProtectedDeque());
×
932
    }
933
  }
×
934

935
  /** Expires entries in an access-order queue. */
936
  @GuardedBy("evictionLock")
937
  void expireAfterAccessEntries(long now, AccessOrderDeque<Node<K, V>> accessOrderDeque) {
938
    long duration = expiresAfterAccessNanos();
×
939
    for (;;) {
940
      Node<K, V> node = accessOrderDeque.peekFirst();
×
941
      if ((node == null) || ((now - node.getAccessTime()) < duration)
×
942
          || !evictEntry(node, RemovalCause.EXPIRED, now)) {
×
943
        return;
×
944
      }
945
    }
×
946
  }
947

948
  /** Expires entries on the write-order queue. */
949
  @GuardedBy("evictionLock")
950
  void expireAfterWriteEntries(long now) {
951
    if (!expiresAfterWrite()) {
×
952
      return;
×
953
    }
954
    long duration = expiresAfterWriteNanos();
×
955
    for (;;) {
956
      Node<K, V> node = writeOrderDeque().peekFirst();
×
957
      if ((node == null) || ((now - node.getWriteTime()) < duration)
×
958
          || !evictEntry(node, RemovalCause.EXPIRED, now)) {
×
959
        break;
×
960
      }
961
    }
×
962
  }
×
963

964
  /** Expires entries in the timer wheel. */
965
  @GuardedBy("evictionLock")
966
  void expireVariableEntries(long now) {
967
    if (expiresVariable()) {
×
968
      timerWheel().advance(this, now);
×
969
    }
970
  }
×
971

972
  /** Returns the duration until the next item expires, or {@link Long#MAX_VALUE} if none. */
973
  @GuardedBy("evictionLock")
974
  long getExpirationDelay(long now) {
975
    @Var long delay = Long.MAX_VALUE;
×
976
    if (expiresAfterAccess()) {
×
977
      @Var Node<K, V> node = accessOrderWindowDeque().peekFirst();
×
978
      if (node != null) {
×
979
        delay = Math.min(delay, expiresAfterAccessNanos() - (now - node.getAccessTime()));
×
980
      }
981
      if (evicts()) {
×
982
        node = accessOrderProbationDeque().peekFirst();
×
983
        if (node != null) {
×
984
          delay = Math.min(delay, expiresAfterAccessNanos() - (now - node.getAccessTime()));
×
985
        }
986
        node = accessOrderProtectedDeque().peekFirst();
×
987
        if (node != null) {
×
988
          delay = Math.min(delay, expiresAfterAccessNanos() - (now - node.getAccessTime()));
×
989
        }
990
      }
991
    }
992
    if (expiresAfterWrite()) {
×
993
      Node<K, V> node = writeOrderDeque().peekFirst();
×
994
      if (node != null) {
×
995
        delay = Math.min(delay, expiresAfterWriteNanos() - (now - node.getWriteTime()));
×
996
      }
997
    }
998
    if (expiresVariable()) {
×
999
      delay = Math.min(delay, timerWheel().getExpirationDelay());
×
1000
    }
1001
    return delay;
×
1002
  }
1003

1004
  /** Returns if the entry has expired. */
  @SuppressWarnings("ShortCircuitBoolean")
  boolean hasExpired(Node<K, V> node, long now) {
    // An in-flight async computation is never treated as expired
    if (isComputingAsync(node.getValue())) {
      return false;
    }
    // NOTE(review): the non-short-circuiting '|' is deliberate (ShortCircuitBoolean is
    // suppressed above), so every enabled policy's clause is evaluated
    return (expiresAfterAccess() && (now - node.getAccessTime() >= expiresAfterAccessNanos()))
        | (expiresAfterWrite() && (now - node.getWriteTime() >= expiresAfterWriteNanos()))
        | (expiresVariable() && (now - node.getVariableTime() >= 0));
  }
1014

1015
  /**
   * Attempts to evict the entry based on the given removal cause. A removal may be ignored if the
   * entry was updated and is no longer eligible for eviction.
   *
   * @param node the entry to evict
   * @param cause the reason to evict
   * @param now the current time, used only if expiring
   * @return if the entry was evicted
   */
  @GuardedBy("evictionLock")
  @SuppressWarnings({"GuardedByChecker", "SynchronizationOnLocalVariableOrMethodParameter"})
  boolean evictEntry(Node<K, V> node, RemovalCause cause, long now) {
    K key = node.getKey();
    // Single-element arrays capture results produced inside the computeIfPresent lambda
    @SuppressWarnings({"unchecked", "Varifier"})
    @Nullable V[] value = (V[]) new Object[1];
    var removed = new boolean[1];
    var resurrect = new boolean[1];
    var actualCause = new RemovalCause[1];
    var keyReference = node.getKeyReference();

    data.computeIfPresent(keyReference, (k, n) -> {
      // A different mapping means the node was already replaced; leave it untouched
      if (n != node) {
        return n;
      }
      synchronized (n) {
        value[0] = n.getValue();

        if ((key == null) || (value[0] == null)) {
          actualCause[0] = RemovalCause.COLLECTED;
        } else if (cause == RemovalCause.COLLECTED) {
          resurrect[0] = true;
          return n;
        } else {
          actualCause[0] = cause;
        }

        // Re-validate the cause under the lock; a concurrent update may have made the entry
        // ineligible, in which case it is resurrected (kept) instead of removed
        if (actualCause[0] == RemovalCause.EXPIRED) {
          @Var boolean expired = false;
          if (expiresAfterAccess()) {
            expired |= ((now - n.getAccessTime()) >= expiresAfterAccessNanos());
          }
          if (expiresAfterWrite()) {
            expired |= ((now - n.getWriteTime()) >= expiresAfterWriteNanos());
          }
          if (expiresVariable()) {
            expired |= ((now - node.getVariableTime()) >= 0);
          }
          if (!expired) {
            resurrect[0] = true;
            return n;
          }
        } else if (actualCause[0] == RemovalCause.SIZE) {
          int weight = node.getWeight();
          if (weight == 0) {
            resurrect[0] = true;
            return n;
          }
        }

        notifyEviction(key, value[0], actualCause[0]);
        discardRefresh(keyReference);
        removed[0] = true;
        node.retire();
        return null;
      }
    });

    // The entry is no longer eligible for eviction
    if (resurrect[0]) {
      return false;
    }

    // If the eviction fails due to a concurrent removal of the victim, that removal may cancel out
    // the addition that triggered this eviction. The victim is eagerly unlinked and the size
    // decremented before the removal task so that if an eviction is still required then a new
    // victim will be chosen for removal.
    if (node.inWindow() && (evicts() || expiresAfterAccess())) {
      accessOrderWindowDeque().remove(node);
    } else if (evicts()) {
      if (node.inMainProbation()) {
        accessOrderProbationDeque().remove(node);
      } else {
        accessOrderProtectedDeque().remove(node);
      }
    }
    if (expiresAfterWrite()) {
      writeOrderDeque().remove(node);
    } else if (expiresVariable()) {
      timerWheel().deschedule(node);
    }

    synchronized (node) {
      logIfAlive(node);
      makeDead(node);
    }

    if (removed[0]) {
      statsCounter().recordEviction(node.getWeight(), actualCause[0]);
      notifyRemoval(key, value[0], actualCause[0]);
    }

    return true;
  }
1118

1119
  /** Adapts the eviction policy to towards the optimal recency / frequency configuration. */
1120
  @GuardedBy("evictionLock")
1121
  void climb() {
1122
    if (!evicts()) {
×
1123
      return;
×
1124
    }
1125

1126
    determineAdjustment();
×
1127
    demoteFromMainProtected();
×
1128
    long amount = adjustment();
×
1129
    if (amount == 0) {
×
1130
      return;
×
1131
    } else if (amount > 0) {
×
1132
      increaseWindow();
×
1133
    } else {
1134
      decreaseWindow();
×
1135
    }
1136
  }
×
1137

1138
  /** Calculates the amount to adapt the window by and sets {@link #adjustment()} accordingly. */
  @GuardedBy("evictionLock")
  void determineAdjustment() {
    if (frequencySketch().isNotInitialized()) {
      // No frequency history yet; reset the sample and wait for the sketch to warm up
      setPreviousSampleHitRate(0.0);
      setMissesInSample(0);
      setHitsInSample(0);
      return;
    }

    // Only adapt once a full sample has been observed
    int requestCount = hitsInSample() + missesInSample();
    if (requestCount < frequencySketch().sampleSize) {
      return;
    }

    // Hill climbing: continue in the same direction while the hit rate improves, otherwise
    // reverse. A drastic change restarts with a large step; otherwise the step size decays.
    double hitRate = (double) hitsInSample() / requestCount;
    double hitRateChange = hitRate - previousSampleHitRate();
    double amount = (hitRateChange >= 0) ? stepSize() : -stepSize();
    double nextStepSize = (Math.abs(hitRateChange) >= HILL_CLIMBER_RESTART_THRESHOLD)
        ? HILL_CLIMBER_STEP_PERCENT * maximum() * (amount >= 0 ? 1 : -1)
        : HILL_CLIMBER_STEP_DECAY_RATE * amount;
    setPreviousSampleHitRate(hitRate);
    setAdjustment((long) amount);
    setStepSize(nextStepSize);
    setMissesInSample(0);
    setHitsInSample(0);
  }
×
1165

1166
  /**
   * Increases the size of the admission window by shrinking the portion allocated to the main
   * space. As the main space is partitioned into probation and protected regions (80% / 20%), for
   * simplicity only the protected is reduced. If the regions exceed their maximums, this may cause
   * protected items to be demoted to the probation region and probation items to be demoted to the
   * admission window.
   */
  @GuardedBy("evictionLock")
  void increaseWindow() {
    if (mainProtectedMaximum() == 0) {
      return;
    }

    // Optimistically move the quota from the protected region to the window, then transfer
    // entries to fill it; any unconsumed quota is returned at the end
    @Var long quota = Math.min(adjustment(), mainProtectedMaximum());
    setMainProtectedMaximum(mainProtectedMaximum() - quota);
    setWindowMaximum(windowMaximum() + quota);
    demoteFromMainProtected();

    // Bounded by QUEUE_TRANSFER_THRESHOLD so a single maintenance run does limited work
    for (int i = 0; i < QUEUE_TRANSFER_THRESHOLD; i++) {
      @Var Node<K, V> candidate = accessOrderProbationDeque().peekFirst();
      @Var boolean probation = true;
      if ((candidate == null) || (quota < candidate.getPolicyWeight())) {
        candidate = accessOrderProtectedDeque().peekFirst();
        probation = false;
      }
      if (candidate == null) {
        break;
      }

      int weight = candidate.getPolicyWeight();
      if (quota < weight) {
        break;
      }

      quota -= weight;
      if (probation) {
        accessOrderProbationDeque().remove(candidate);
      } else {
        setMainProtectedWeightedSize(mainProtectedWeightedSize() - weight);
        accessOrderProtectedDeque().remove(candidate);
      }
      setWindowWeightedSize(windowWeightedSize() + weight);
      accessOrderWindowDeque().offerLast(candidate);
      candidate.makeWindow();
    }

    // Return the unconsumed quota to the protected region and record it as the adjustment
    setMainProtectedMaximum(mainProtectedMaximum() + quota);
    setWindowMaximum(windowMaximum() - quota);
    setAdjustment(quota);
  }
×
1216

1217
  /** Decreases the size of the admission window and increases the main's protected region. */
  @GuardedBy("evictionLock")
  void decreaseWindow() {
    if (windowMaximum() <= 1) {
      return;
    }

    // Optimistically move the quota from the window to the protected region, keeping at least
    // one unit of window capacity; entries are then transferred to honor the new maximums
    @Var long quota = Math.min(-adjustment(), Math.max(0, windowMaximum() - 1));
    setMainProtectedMaximum(mainProtectedMaximum() + quota);
    setWindowMaximum(windowMaximum() - quota);

    // Bounded by QUEUE_TRANSFER_THRESHOLD so a single maintenance run does limited work
    for (int i = 0; i < QUEUE_TRANSFER_THRESHOLD; i++) {
      Node<K, V> candidate = accessOrderWindowDeque().peekFirst();
      if (candidate == null) {
        break;
      }

      int weight = candidate.getPolicyWeight();
      if (quota < weight) {
        break;
      }

      quota -= weight;
      setWindowWeightedSize(windowWeightedSize() - weight);
      accessOrderWindowDeque().remove(candidate);
      accessOrderProbationDeque().offerLast(candidate);
      candidate.makeMainProbation();
    }

    // Return the unconsumed quota to the window and record it as the (negative) adjustment
    setMainProtectedMaximum(mainProtectedMaximum() - quota);
    setWindowMaximum(windowMaximum() + quota);
    setAdjustment(-quota);
  }
×
1250

1251
  /** Transfers the nodes from the protected to the probation region if it exceeds the maximum. */
  @GuardedBy("evictionLock")
  void demoteFromMainProtected() {
    long mainProtectedMaximum = mainProtectedMaximum();
    @Var long mainProtectedWeightedSize = mainProtectedWeightedSize();
    if (mainProtectedWeightedSize <= mainProtectedMaximum) {
      return;
    }

    // Bounded by QUEUE_TRANSFER_THRESHOLD so a single maintenance run does limited work;
    // the running size is tracked locally and written back once after the loop
    for (int i = 0; i < QUEUE_TRANSFER_THRESHOLD; i++) {
      if (mainProtectedWeightedSize <= mainProtectedMaximum) {
        break;
      }

      Node<K, V> demoted = accessOrderProtectedDeque().poll();
      if (demoted == null) {
        break;
      }
      demoted.makeMainProbation();
      accessOrderProbationDeque().offerLast(demoted);
      mainProtectedWeightedSize -= demoted.getPolicyWeight();
    }
    setMainProtectedWeightedSize(mainProtectedWeightedSize);
  }
×
1275

1276
  /**
1277
   * Performs the post-processing work required after a read.
1278
   *
1279
   * @param node the entry in the page replacement policy
1280
   * @param now the current time, in nanoseconds
1281
   * @param recordHit if the hit count should be incremented
1282
   * @return the refreshed value if immediately loaded, else null
1283
   */
1284
  @Nullable V afterRead(Node<K, V> node, long now, boolean recordHit) {
1285
    if (recordHit) {
×
1286
      statsCounter().recordHits(1);
×
1287
    }
1288

1289
    boolean delayable = skipReadBuffer() || (readBuffer.offer(node) != Buffer.FULL);
×
1290
    if (shouldDrainBuffers(delayable)) {
×
1291
      scheduleDrainBuffers();
×
1292
    }
1293
    return refreshIfNeeded(node, now);
×
1294
  }
1295

1296
  /** Returns if the cache should bypass the read buffer. */
1297
  boolean skipReadBuffer() {
1298
    return fastpath() && frequencySketch().isNotInitialized();
×
1299
  }
1300

1301
  /**
   * Asynchronously refreshes the entry if eligible.
   *
   * @param node the entry in the cache to refresh
   * @param now the current time, in nanoseconds
   * @return the refreshed value if immediately loaded, else null
   */
  @SuppressWarnings("FutureReturnValueIgnored")
  @Nullable V refreshIfNeeded(Node<K, V> node, long now) {
    if (!refreshAfterWrite()) {
      return null;
    }

    K key;
    V oldValue;
    // The low bit of the write time marks an in-flight refresh: it is set via CAS before
    // submitting the reload and restored in the finally block below
    long writeTime = node.getWriteTime();
    long refreshWriteTime = writeTime | 1L;
    Object keyReference = node.getKeyReference();
    ConcurrentMap<Object, CompletableFuture<?>> refreshes;
    if (((now - writeTime) > refreshAfterWriteNanos())
        && ((key = node.getKey()) != null) && ((oldValue = node.getValue()) != null)
        && !isComputingAsync(oldValue) && ((writeTime & 1L) == 0L)
        && !(refreshes = refreshes()).containsKey(keyReference)
        && node.isAlive() && node.casWriteTime(writeTime, refreshWriteTime)) {
      // Single-element arrays capture results produced inside the computeIfAbsent lambda
      long[] startTime = new long[1];
      @SuppressWarnings({"rawtypes", "unchecked"})
      CompletableFuture<? extends V>[] refreshFuture = new CompletableFuture[1];
      try {
        refreshes.computeIfAbsent(keyReference, k -> {
          try {
            startTime[0] = statsTicker().read();
            if (isAsync) {
              @SuppressWarnings("unchecked")
              var future = (CompletableFuture<V>) oldValue;
              if (Async.isReady(future)) {
                requireNonNull(cacheLoader);
                var refresh = cacheLoader.asyncReload(key, future.join(), executor);
                refreshFuture[0] = requireNonNull(refresh, "Null future");
              } else {
                // no-op if the future's completion state was modified (e.g. obtrude methods)
                return null;
              }
            } else {
              requireNonNull(cacheLoader);
              var refresh = cacheLoader.asyncReload(key, oldValue, executor);
              refreshFuture[0] = requireNonNull(refresh, "Null future");
            }
            return refreshFuture[0];
          } catch (InterruptedException e) {
            // Preserve the interrupt status for the caller
            Thread.currentThread().interrupt();
            logger.log(Level.WARNING, "Exception thrown when submitting refresh task", e);
            return null;
          } catch (Throwable e) {
            logger.log(Level.WARNING, "Exception thrown when submitting refresh task", e);
            return null;
          }
        });
      } finally {
        node.casWriteTime(refreshWriteTime, writeTime);
      }

      if (refreshFuture[0] == null) {
        return null;
      }

      var refreshed = refreshFuture[0].handle((newValue, error) -> {
        long loadTime = statsTicker().read() - startTime[0];
        if (error != null) {
          if (!(error instanceof CancellationException) && !(error instanceof TimeoutException)) {
            logger.log(Level.WARNING, "Exception thrown during refresh", error);
          }
          refreshes.remove(keyReference, refreshFuture[0]);
          statsCounter().recordLoadFailure(loadTime);
          return null;
        }

        @SuppressWarnings("unchecked")
        V value = (isAsync && (newValue != null)) ? (V) refreshFuture[0] : newValue;

        RemovalCause[] cause = new RemovalCause[1];
        V result = compute(key, (k, currentValue) -> {
          boolean removed = refreshes.remove(keyReference, refreshFuture[0]);
          if (currentValue == null) {
            // If the entry is absent then discard the refresh and maybe notify the listener
            if (value != null) {
              cause[0] = RemovalCause.EXPLICIT;
            }
            return null;
          } else if (currentValue == value) {
            // If the reloaded value is the same instance then no-op
            return currentValue;
          } else if (isAsync &&
              (newValue == Async.getIfReady((CompletableFuture<?>) currentValue))) {
            // If the completed futures hold the same value instance then no-op
            return currentValue;
          } else if (removed && (currentValue == oldValue) && (node.getWriteTime() == writeTime)) {
            // If the entry was not modified while in-flight (no ABA) then replace
            return value;
          }
          // Otherwise, a write invalidated the refresh so discard it and notify the listener
          cause[0] = RemovalCause.REPLACED;
          return currentValue;
        }, expiry(), /* recordLoad= */ false, /* recordLoadFailure= */ true);

        if (cause[0] != null) {
          notifyRemoval(key, value, cause[0]);
        }
        if (newValue == null) {
          statsCounter().recordLoadFailure(loadTime);
        } else {
          statsCounter().recordLoadSuccess(loadTime);
        }
        return result;
      });
      return Async.getIfReady(refreshed);
    }

    return null;
  }
1420

1421
  /**
1422
   * Returns the expiration time for the entry after being created.
1423
   *
1424
   * @param key the key of the entry that was created
1425
   * @param value the value of the entry that was created
1426
   * @param expiry the calculator for the expiration time
1427
   * @param now the current time, in nanoseconds
1428
   * @return the expiration time
1429
   */
1430
  long expireAfterCreate(K key, V value, Expiry<? super K, ? super V> expiry, long now) {
1431
    if (expiresVariable()) {
×
1432
      long duration = Math.max(0L, expiry.expireAfterCreate(key, value, now));
×
1433
      return isAsync ? (now + duration) : (now + Math.min(duration, MAXIMUM_EXPIRY));
×
1434
    }
1435
    return 0L;
×
1436
  }
1437

1438
  /**
1439
   * Returns the expiration time for the entry after being updated.
1440
   *
1441
   * @param node the entry in the page replacement policy
1442
   * @param key the key of the entry that was updated
1443
   * @param value the value of the entry that was updated
1444
   * @param expiry the calculator for the expiration time
1445
   * @param now the current time, in nanoseconds
1446
   * @return the expiration time
1447
   */
1448
  long expireAfterUpdate(Node<K, V> node, K key, V value,
1449
      Expiry<? super K, ? super V> expiry, long now) {
1450
    if (expiresVariable()) {
×
1451
      long currentDuration = Math.max(1, node.getVariableTime() - now);
×
1452
      long duration = Math.max(0L, expiry.expireAfterUpdate(key, value, now, currentDuration));
×
1453
      return isAsync ? (now + duration) : (now + Math.min(duration, MAXIMUM_EXPIRY));
×
1454
    }
1455
    return 0L;
×
1456
  }
1457

1458
  /**
1459
   * Returns the access time for the entry after a read.
1460
   *
1461
   * @param node the entry in the page replacement policy
1462
   * @param key the key of the entry that was read
1463
   * @param value the value of the entry that was read
1464
   * @param expiry the calculator for the expiration time
1465
   * @param now the current time, in nanoseconds
1466
   * @return the expiration time
1467
   */
1468
  long expireAfterRead(Node<K, V> node, K key, V value, Expiry<K, V> expiry, long now) {
1469
    if (expiresVariable()) {
×
1470
      long currentDuration = Math.max(0L, node.getVariableTime() - now);
×
1471
      long duration = Math.max(0L, expiry.expireAfterRead(key, value, now, currentDuration));
×
1472
      return isAsync ? (now + duration) : (now + Math.min(duration, MAXIMUM_EXPIRY));
×
1473
    }
1474
    return 0L;
×
1475
  }
1476

1477
  /**
   * Attempts to update the access time for the entry after a read. The update is performed with a
   * compare-and-swap so that a concurrent writer's newer expiration time is not clobbered.
   *
   * @param node the entry in the page replacement policy
   * @param key the key of the entry that was read
   * @param value the value of the entry that was read
   * @param expiry the calculator for the expiration time
   * @param now the current time, in nanoseconds
   */
  void tryExpireAfterRead(Node<K, V> node, K key, V value, Expiry<K, V> expiry, long now) {
    if (!expiresVariable()) {
      return;
    }

    long variableTime = node.getVariableTime();
    long currentDuration = Math.max(1, variableTime - now);
    if (isAsync && (currentDuration > MAXIMUM_EXPIRY)) {
      // expireAfterCreate has not yet set the duration after completion
      return;
    }

    long duration = Math.max(0L, expiry.expireAfterRead(key, value, now, currentDuration));
    if (duration != currentDuration) {
      long expirationTime = isAsync ? (now + duration) : (now + Math.min(duration, MAXIMUM_EXPIRY));
      // Best effort: a CAS failure means another thread already refreshed the time
      node.casVariableTime(variableTime, expirationTime);
    }
  }
×
1504

1505
  void setVariableTime(Node<K, V> node, long expirationTime) {
1506
    if (expiresVariable()) {
×
1507
      node.setVariableTime(expirationTime);
×
1508
    }
1509
  }
×
1510

1511
  void setWriteTime(Node<K, V> node, long now) {
1512
    if (expiresAfterWrite() || refreshAfterWrite()) {
×
1513
      node.setWriteTime(now & ~1L);
×
1514
    }
1515
  }
×
1516

1517
  void setAccessTime(Node<K, V> node, long now) {
1518
    if (expiresAfterAccess()) {
×
1519
      node.setAccessTime(now);
×
1520
    }
1521
  }
×
1522

1523
  /** Returns if the entry's write time would exceed the minimum expiration reorder threshold. */
  boolean exceedsWriteTimeTolerance(Node<K, V> node, long varTime, long now) {
    long variableTime = node.getVariableTime();
    long tolerance = EXPIRE_WRITE_TOLERANCE;
    long writeTime = node.getWriteTime();
    // For each enabled write-based policy: always reorder when the configured duration is within
    // the tolerance itself, or when the recorded time drifted by more than the tolerance
    return
        (expiresAfterWrite()
            && ((expiresAfterWriteNanos() <= tolerance) || (Math.abs(now - writeTime) > tolerance)))
        || (refreshAfterWrite()
            && ((refreshAfterWriteNanos() <= tolerance) || (Math.abs(now - writeTime) > tolerance)))
        || (expiresVariable() && (Math.abs(varTime - variableTime) > tolerance));
  }
1535

1536
  /**
   * Performs the post-processing work required after a write. The task is offered to the bounded
   * write buffer; if the buffer stays full after a bounded number of retries then this thread
   * assists by running the maintenance work itself under the eviction lock.
   *
   * @param task the pending operation to be applied
   */
  void afterWrite(Runnable task) {
    // Briefly spin, nudging the drain forward on each failed offer, before falling back
    for (int i = 0; i < WRITE_BUFFER_RETRIES; i++) {
      if (writeBuffer.offer(task)) {
        scheduleAfterWrite();
        return;
      }
      scheduleDrainBuffers();
      Thread.onSpinWait();
    }

    // In scenarios where the writing threads cannot make progress then they attempt to provide
    // assistance by performing the eviction work directly. This can resolve cases where the
    // maintenance task is scheduled but not running. That might occur due to all of the executor's
    // threads being busy (perhaps writing into this cache), the write rate greatly exceeds the
    // consuming rate, priority inversion, or if the executor silently discarded the maintenance
    // task. Unfortunately this cannot resolve when the eviction is blocked waiting on a long-
    // running computation due to an eviction listener, the victim is being computed on by a writer,
    // or the victim residing in the same hash bin as a computing entry. In those cases a warning is
    // logged to encourage the application to decouple these computations from the map operations.
    lock();
    try {
      maintenance(task);
    } catch (RuntimeException e) {
      logger.log(Level.ERROR, "Exception thrown when performing the maintenance task", e);
    } finally {
      evictionLock.unlock();
    }
    rescheduleCleanUpIfIncomplete();
  }
×
1570

1571
  /**
   * Acquires the eviction lock, logging a warning (with a capturing stack trace) if the wait
   * exceeds the threshold. The caller is responsible for releasing {@code evictionLock}. If
   * interrupted while waiting, the interrupt status is restored before returning.
   */
  void lock() {
    @Var long remainingNanos = WARN_AFTER_LOCK_WAIT_NANOS;
    long end = System.nanoTime() + remainingNanos;
    @Var boolean interrupted = false;
    try {
      for (;;) {
        try {
          if (evictionLock.tryLock(remainingNanos, TimeUnit.NANOSECONDS)) {
            return;
          }
          // Timed out: warn once, then block uninterruptibly until the lock is acquired
          logger.log(Level.WARNING, "The cache is experiencing excessive wait times for acquiring "
              + "the eviction lock. This may indicate that a long-running computation has halted "
              + "eviction when trying to remove the victim entry. Consider using AsyncCache to "
              + "decouple the computation from the map operation.", new TimeoutException());
          evictionLock.lock();
          return;
        } catch (InterruptedException e) {
          // Retry with whatever wait budget remains; the interrupt is restored on exit
          remainingNanos = end - System.nanoTime();
          interrupted = true;
        }
      }
    } finally {
      if (interrupted) {
        Thread.currentThread().interrupt();
      }
    }
  }
1599

1600
  /**
   * Conditionally schedules the asynchronous maintenance task after a write operation. If the
   * task status was IDLE or REQUIRED then the maintenance task is scheduled immediately. If it
   * is already processing then it is set to transition to REQUIRED upon completion so that a new
   * execution is triggered by the next operation.
   */
  void scheduleAfterWrite() {
    @Var int drainStatus = drainStatusOpaque();
    for (;;) {
      switch (drainStatus) {
        case IDLE:
          // A CAS failure is benign: some thread advanced the status, and scheduling is still safe
          casDrainStatus(IDLE, REQUIRED);
          scheduleDrainBuffers();
          return;
        case REQUIRED:
          scheduleDrainBuffers();
          return;
        case PROCESSING_TO_IDLE:
          if (casDrainStatus(PROCESSING_TO_IDLE, PROCESSING_TO_REQUIRED)) {
            return;
          }
          // Lost the race; reread the status and retry the transition
          drainStatus = drainStatusAcquire();
          continue;
        case PROCESSING_TO_REQUIRED:
          // A follow-up run is already demanded; nothing more to do
          return;
        default:
          throw new IllegalStateException("Invalid drain status: " + drainStatus);
      }
    }
  }
1630

1631
  /**
   * Attempts to schedule an asynchronous task to apply the pending operations to the page
   * replacement policy. If the executor rejects the task then it is run directly.
   */
  void scheduleDrainBuffers() {
    if (drainStatusOpaque() >= PROCESSING_TO_IDLE) {
      // A drain is already in flight (or demanded); avoid redundant submissions
      return;
    }
    if (evictionLock.tryLock()) {
      try {
        // Recheck under the lock in case another thread scheduled the drain first
        int drainStatus = drainStatusOpaque();
        if (drainStatus >= PROCESSING_TO_IDLE) {
          return;
        }
        setDrainStatusRelease(PROCESSING_TO_IDLE);
        executor.execute(drainBuffersTask);
      } catch (Throwable t) {
        // The executor rejected or failed the submission; run the maintenance inline instead
        logger.log(Level.WARNING, "Exception thrown when submitting maintenance task", t);
        maintenance(/* ignored */ null);
      } finally {
        evictionLock.unlock();
      }
    }
  }
×
1655

1656
  @Override
  public void cleanUp() {
    try {
      performCleanUp(/* ignored */ null);
    } catch (RuntimeException e) {
      // The public API must not propagate maintenance failures to the caller
      logger.log(Level.ERROR, "Exception thrown when performing the maintenance task", e);
    }
  }
×
1664

1665
  /**
   * Performs the maintenance work, blocking until the lock is acquired.
   *
   * @param task an additional pending task to run, or {@code null} if not present
   */
  void performCleanUp(@Nullable Runnable task) {
    evictionLock.lock();
    try {
      maintenance(task);
    } finally {
      evictionLock.unlock();
    }
    // Deliberately outside the lock: may schedule a follow-up pass for leftover work
    rescheduleCleanUpIfIncomplete();
  }
×
1679

1680
  /**
   * If there remains pending operations that were not handled by the prior clean up then try to
   * schedule an asynchronous maintenance task. This may occur due to a concurrent write after the
   * maintenance work had started or if the amortized threshold of work per clean up was reached.
   */
  @SuppressWarnings("resource")
  void rescheduleCleanUpIfIncomplete() {
    if (drainStatusOpaque() != REQUIRED) {
      return;
    }

    // An immediate scheduling cannot be performed on a custom executor because it may use a
    // caller-runs policy. This could cause the caller's penalty to exceed the amortized threshold,
    // e.g. repeated concurrent writes could result in a retry loop.
    if (executor == ForkJoinPool.commonPool()) {
      scheduleDrainBuffers();
      return;
    }

    // If a scheduler was configured then the maintenance can be deferred onto the custom executor
    // and run in the near future. Otherwise, it will be handled due to other cache activity.
    var pacer = pacer();
    if ((pacer != null) && !pacer.isScheduled() && evictionLock.tryLock()) {
      try {
        // Recheck under the lock so a concurrent drain or schedule is not duplicated
        if ((drainStatusOpaque() == REQUIRED) && !pacer.isScheduled()) {
          pacer.schedule(executor, drainBuffersTask, expirationTicker().read(), Pacer.TOLERANCE);
        }
      } finally {
        evictionLock.unlock();
      }
    }
  }
×
1712

1713
  /**
   * Performs the pending maintenance work and sets the state flags during processing to avoid
   * excess scheduling attempts. The read buffer, write buffer, and reference queues are drained,
   * followed by expiration, and size-based eviction. The ordering of these steps is significant
   * and must not be rearranged.
   *
   * @param task an additional pending task to run, or {@code null} if not present
   */
  @GuardedBy("evictionLock")
  void maintenance(@Nullable Runnable task) {
    setDrainStatusRelease(PROCESSING_TO_IDLE);

    try {
      drainReadBuffer();

      drainWriteBuffer();
      if (task != null) {
        task.run();
      }

      drainKeyReferences();
      drainValueReferences();

      expireEntries();
      evictEntries();

      climb();
    } finally {
      // If new work arrived during processing (status advanced past PROCESSING_TO_IDLE, or the
      // CAS back to IDLE fails) then flag that another maintenance pass is required
      if ((drainStatusOpaque() != PROCESSING_TO_IDLE)
          || !casDrainStatus(PROCESSING_TO_IDLE, IDLE)) {
        setDrainStatusOpaque(REQUIRED);
      }
    }
  }
×
1746

1747
  /** Drains the weak key references queue. */
1748
  @GuardedBy("evictionLock")
1749
  void drainKeyReferences() {
1750
    if (!collectKeys()) {
×
1751
      return;
×
1752
    }
1753
    @Var Reference<? extends K> keyRef;
1754
    while ((keyRef = keyReferenceQueue().poll()) != null) {
×
1755
      Node<K, V> node = data.get(keyRef);
×
1756
      if (node != null) {
×
1757
        evictEntry(node, RemovalCause.COLLECTED, 0L);
×
1758
      }
1759
    }
×
1760
  }
×
1761

1762
  /** Drains the weak / soft value references queue. */
1763
  @GuardedBy("evictionLock")
1764
  void drainValueReferences() {
1765
    if (!collectValues()) {
×
1766
      return;
×
1767
    }
1768
    @Var Reference<? extends V> valueRef;
1769
    while ((valueRef = valueReferenceQueue().poll()) != null) {
×
1770
      @SuppressWarnings("unchecked")
1771
      var ref = (InternalReference<V>) valueRef;
×
1772
      Node<K, V> node = data.get(ref.getKeyReference());
×
1773
      if ((node != null) && (valueRef == node.getValueReference())) {
×
1774
        evictEntry(node, RemovalCause.COLLECTED, 0L);
×
1775
      }
1776
    }
×
1777
  }
×
1778

1779
  /** Drains the read buffer. */
1780
  @GuardedBy("evictionLock")
1781
  void drainReadBuffer() {
1782
    if (!skipReadBuffer()) {
×
1783
      readBuffer.drainTo(accessPolicy);
×
1784
    }
1785
  }
×
1786

1787
  /** Updates the node's location in the page replacement policy. */
  @GuardedBy("evictionLock")
  void onAccess(Node<K, V> node) {
    if (evicts()) {
      var keyRef = node.getKeyReferenceOrNull();
      if (keyRef == null) {
        // The key was garbage collected; the entry will be evicted separately
        return;
      }
      frequencySketch().increment(keyRef);
      // Reorder within whichever region (window, probation, protected) the node resides in
      if (node.inWindow()) {
        reorder(accessOrderWindowDeque(), node);
      } else if (node.inMainProbation()) {
        reorderProbation(node);
      } else {
        reorder(accessOrderProtectedDeque(), node);
      }
      setHitsInSample(hitsInSample() + 1);
    } else if (expiresAfterAccess()) {
      reorder(accessOrderWindowDeque(), node);
    }
    if (expiresVariable()) {
      timerWheel().reschedule(node);
    }
  }
×
1811

1812
  /** Promote the node from probation to protected on an access. */
  @GuardedBy("evictionLock")
  void reorderProbation(Node<K, V> node) {
    if (!accessOrderProbationDeque().contains(node)) {
      // Ignore stale accesses for an entry that is no longer present
      return;
    } else if (node.getPolicyWeight() > mainProtectedMaximum()) {
      // Too heavy for the protected region; just refresh its recency within probation
      reorder(accessOrderProbationDeque(), node);
      return;
    }

    // If the protected space exceeds its maximum, the LRU items are demoted to the probation space.
    // This is deferred to the adaption phase at the end of the maintenance cycle.
    setMainProtectedWeightedSize(mainProtectedWeightedSize() + node.getPolicyWeight());
    accessOrderProbationDeque().remove(node);
    accessOrderProtectedDeque().offerLast(node);
    node.makeMainProtected();
  }
1830

1831
  /** Updates the node's location in the policy's deque. */
1832
  static <K, V> void reorder(LinkedDeque<Node<K, V>> deque, Node<K, V> node) {
1833
    // An entry may be scheduled for reordering despite having been removed. This can occur when the
1834
    // entry was concurrently read while a writer was removing it. If the entry is no longer linked
1835
    // then it does not need to be processed.
1836
    if (deque.contains(node)) {
×
1837
      deque.moveToBack(node);
×
1838
    }
1839
  }
×
1840

1841
  /** Drains the write buffer. */
1842
  @GuardedBy("evictionLock")
1843
  void drainWriteBuffer() {
1844
    for (int i = 0; i <= WRITE_BUFFER_MAX; i++) {
×
1845
      Runnable task = writeBuffer.poll();
×
1846
      if (task == null) {
×
1847
        return;
×
1848
      }
1849
      task.run();
×
1850
    }
1851
    setDrainStatusOpaque(PROCESSING_TO_REQUIRED);
×
1852
  }
×
1853

1854
  /**
   * Atomically transitions the node to the <code>dead</code> state and decrements the
   * <code>weightedSize</code>. Safe to call on an already-dead node (no-op).
   *
   * @param node the entry in the page replacement policy
   */
  @GuardedBy("evictionLock")
  @SuppressWarnings("SynchronizationOnLocalVariableOrMethodParameter")
  void makeDead(Node<K, V> node) {
    synchronized (node) {
      if (node.isDead()) {
        return;
      }
      if (evicts()) {
        // The node's policy weight may be out of sync due to a pending update waiting to be
        // processed. At this point the node's weight is finalized, so the weight can be safely
        // taken from the node's perspective and the sizes will be adjusted correctly.
        if (node.inWindow()) {
          setWindowWeightedSize(windowWeightedSize() - node.getWeight());
        } else if (node.inMainProtected()) {
          setMainProtectedWeightedSize(mainProtectedWeightedSize() - node.getWeight());
        }
        setWeightedSize(weightedSize() - node.getWeight());
      }
      node.die();
    }
  }
×
1881

1882
  /** Adds the node to the page replacement policy. */
  final class AddTask implements Runnable {
    final Node<K, V> node;
    final int weight;

    AddTask(Node<K, V> node, int weight) {
      this.weight = weight;
      this.node = node;
    }

    @Override
    @GuardedBy("evictionLock")
    public void run() {
      if (evicts()) {
        // New entries are admitted through the window region
        setWeightedSize(weightedSize() + weight);
        setWindowWeightedSize(windowWeightedSize() + weight);
        node.setPolicyWeight(node.getPolicyWeight() + weight);

        long maximum = maximum();
        if (weightedSize() >= (maximum >>> 1)) {
          if (weightedSize() > MAXIMUM_CAPACITY) {
            evictEntries();
          } else {
            // Lazily initialize when close to the maximum
            long capacity = isWeighted() ? data.mappingCount() : maximum;
            frequencySketch().ensureCapacity(capacity);
          }
        }

        var keyRef = node.getKeyReferenceOrNull();
        if (keyRef != null) {
          frequencySketch().increment(keyRef);
        }

        setMissesInSample(missesInSample() + 1);
      }

      // ignore out-of-order write operations
      boolean isAlive;
      synchronized (node) {
        isAlive = node.isAlive();
      }
      if (isAlive) {
        if (expiresAfterWrite()) {
          writeOrderDeque().offerLast(node);
        }
        if (expiresVariable()) {
          timerWheel().schedule(node);
        }
        if (evicts()) {
          if (weight > maximum()) {
            // The entry can never fit; evict it immediately
            evictEntry(node, RemovalCause.SIZE, expirationTicker().read());
          } else if (weight > windowMaximum()) {
            // Too large for the window; place at the LRU side so it is the first candidate
            accessOrderWindowDeque().offerFirst(node);
          } else {
            accessOrderWindowDeque().offerLast(node);
          }
        } else if (expiresAfterAccess()) {
          accessOrderWindowDeque().offerLast(node);
        }
      }
    }
  }
1945

1946
  /** Removes a node from the page replacement policy. */
  final class RemovalTask implements Runnable {
    final Node<K, V> node;

    RemovalTask(Node<K, V> node) {
      this.node = node;
    }

    @Override
    @GuardedBy("evictionLock")
    public void run() {
      // add may not have been processed yet
      if (node.inWindow() && (evicts() || expiresAfterAccess())) {
        accessOrderWindowDeque().remove(node);
      } else if (evicts()) {
        if (node.inMainProbation()) {
          accessOrderProbationDeque().remove(node);
        } else {
          accessOrderProtectedDeque().remove(node);
        }
      }
      // Unlink from whichever write-expiration mechanism is in use
      if (expiresAfterWrite()) {
        writeOrderDeque().remove(node);
      } else if (expiresVariable()) {
        timerWheel().deschedule(node);
      }
      makeDead(node);
    }
  }
1975

1976
  /** Updates the weighted size. */
  final class UpdateTask implements Runnable {
    final int weightDifference;
    final Node<K, V> node;

    public UpdateTask(Node<K, V> node, int weightDifference) {
      this.weightDifference = weightDifference;
      this.node = node;
    }

    @Override
    @GuardedBy("evictionLock")
    public void run() {
      if (expiresAfterWrite()) {
        reorder(writeOrderDeque(), node);
      } else if (expiresVariable()) {
        timerWheel().reschedule(node);
      }
      if (evicts()) {
        int oldWeightedSize = node.getPolicyWeight();
        node.setPolicyWeight(oldWeightedSize + weightDifference);
        if (node.inWindow()) {
          setWindowWeightedSize(windowWeightedSize() + weightDifference);
          if (node.getPolicyWeight() > maximum()) {
            // Can never fit in the cache at its new weight
            evictEntry(node, RemovalCause.SIZE, expirationTicker().read());
          } else if (node.getPolicyWeight() <= windowMaximum()) {
            onAccess(node);
          } else if (accessOrderWindowDeque().contains(node)) {
            // Too heavy for the window; move to the LRU side as the first eviction candidate
            accessOrderWindowDeque().moveToFront(node);
          }
        } else if (node.inMainProbation()) {
          if (node.getPolicyWeight() <= maximum()) {
            onAccess(node);
          } else {
            evictEntry(node, RemovalCause.SIZE, expirationTicker().read());
          }
        } else {
          setMainProtectedWeightedSize(mainProtectedWeightedSize() + weightDifference);
          if (node.getPolicyWeight() <= maximum()) {
            onAccess(node);
          } else {
            evictEntry(node, RemovalCause.SIZE, expirationTicker().read());
          }
        }

        setWeightedSize(weightedSize() + weightDifference);
        if (weightedSize() > MAXIMUM_CAPACITY) {
          evictEntries();
        }
      } else if (expiresAfterAccess()) {
        onAccess(node);
      }
    }
  }
2030

2031
  /* --------------- Concurrent Map Support --------------- */
2032

2033
  @Override
  public boolean isEmpty() {
    // Reflects the backing map directly; pending policy work is not consulted
    return data.isEmpty();
  }
2037

2038
  @Override
  public int size() {
    // Map contract: an int count (see estimatedSize() for the long-valued variant)
    return data.size();
  }
2042

2043
  @Override
  public long estimatedSize() {
    // mappingCount() returns a long and is preferred over size() for concurrent maps
    return data.mappingCount();
  }
2047

2048
  @Override
  public void clear() {
    Deque<Node<K, V>> entries;
    evictionLock.lock();
    try {
      // Discard all pending reads
      readBuffer.drainTo(e -> {});

      // Apply all pending writes
      @Var Runnable task;
      while ((task = writeBuffer.poll()) != null) {
        task.run();
      }

      // Cancel the scheduled cleanup
      Pacer pacer = pacer();
      if (pacer != null) {
        pacer.cancel();
      }

      // Discard all entries, falling back to one-by-one to avoid excessive lock hold times
      long now = expirationTicker().read();
      int threshold = (WRITE_BUFFER_MAX / 2);
      entries = new ArrayDeque<>(data.values());
      while (!entries.isEmpty() && (writeBuffer.size() < threshold)) {
        removeNode(entries.poll(), now);
      }
    } finally {
      evictionLock.unlock();
    }

    // Remove any stragglers if released early to more aggressively flush incoming writes
    @Var boolean cleanUp = false;
    for (var node : entries) {
      var key = node.getKey();
      if (key == null) {
        // The key was garbage collected; a maintenance pass is needed to purge the entry
        cleanUp = true;
      } else {
        remove(key);
      }
    }
    if (collectKeys() && cleanUp) {
      cleanUp();
    }
  }
×
2093

2094
  // Removes the node from the map and the policy, classifying the removal cause and notifying
  // listeners. Must be invoked while holding the eviction lock.
  @GuardedBy("evictionLock")
  @SuppressWarnings({"GuardedByChecker", "SynchronizationOnLocalVariableOrMethodParameter"})
  void removeNode(Node<K, V> node, long now) {
    K key = node.getKey();
    // Single-element arrays let the computeIfPresent lambda write results back to this frame
    var cause = new RemovalCause[1];
    var keyReference = node.getKeyReference();
    @SuppressWarnings({"unchecked", "Varifier"})
    @Nullable V[] value = (V[]) new Object[1];

    data.computeIfPresent(keyReference, (k, n) -> {
      if (n != node) {
        // A different node now owns this key; leave it untouched
        return n;
      }
      synchronized (n) {
        value[0] = n.getValue();

        // Classify: collected (key or value GC'd), expired, or an explicit removal
        if ((key == null) || (value[0] == null)) {
          cause[0] = RemovalCause.COLLECTED;
        } else if (hasExpired(n, now)) {
          cause[0] = RemovalCause.EXPIRED;
        } else {
          cause[0] = RemovalCause.EXPLICIT;
        }

        if (cause[0].wasEvicted()) {
          notifyEviction(key, value[0], cause[0]);
        }

        discardRefresh(node.getKeyReference());
        node.retire();
        return null;
      }
    });

    // Unlink from the access ordering structures
    if (node.inWindow() && (evicts() || expiresAfterAccess())) {
      accessOrderWindowDeque().remove(node);
    } else if (evicts()) {
      if (node.inMainProbation()) {
        accessOrderProbationDeque().remove(node);
      } else {
        accessOrderProtectedDeque().remove(node);
      }
    }
    // Unlink from the write ordering structures
    if (expiresAfterWrite()) {
      writeOrderDeque().remove(node);
    } else if (expiresVariable()) {
      timerWheel().deschedule(node);
    }

    synchronized (node) {
      logIfAlive(node);
      makeDead(node);
    }

    if (cause[0] != null) {
      notifyRemoval(key, value[0], cause[0]);
    }
  }
2152

2153
  @Override
2154
  public boolean containsKey(Object key) {
2155
    Node<K, V> node = data.get(nodeFactory.newLookupKey(key));
×
2156
    return (node != null) && (node.getValue() != null)
×
2157
        && !hasExpired(node, expirationTicker().read());
×
2158
  }
2159

2160
  @Override
2161
  @SuppressWarnings("SuspiciousMethodCalls")
2162
  public boolean containsValue(Object value) {
2163
    requireNonNull(value);
×
2164

2165
    long now = expirationTicker().read();
×
2166
    for (Node<K, V> node : data.values()) {
×
2167
      if (node.containsValue(value) && !hasExpired(node, now) && (node.getKey() != null)) {
×
2168
        return true;
×
2169
      }
2170
    }
×
2171
    return false;
×
2172
  }
2173

2174
  @Override
  public @Nullable V get(Object key) {
    // The Map view does not record hit/miss statistics
    return getIfPresent(key, /* recordStats= */ false);
  }
2178

2179
  @Override
  public @Nullable V getIfPresent(Object key, boolean recordStats) {
    Node<K, V> node = data.get(nodeFactory.newLookupKey(key));
    if (node == null) {
      if (recordStats) {
        statsCounter().recordMisses(1);
      }
      // Opportunistically kick off pending maintenance on a miss
      if (drainStatusOpaque() == REQUIRED) {
        scheduleDrainBuffers();
      }
      return null;
    }

    V value = node.getValue();
    long now = expirationTicker().read();
    if (hasExpired(node, now) || (collectValues() && (value == null))) {
      // Expired or the value was garbage collected; treated as a miss
      if (recordStats) {
        statsCounter().recordMisses(1);
      }
      scheduleDrainBuffers();
      return null;
    }

    if ((value != null) && !isComputingAsync(value)) {
      @SuppressWarnings("unchecked")
      var castedKey = (K) key;
      setAccessTime(node, now);
      tryExpireAfterRead(node, castedKey, value, expiry(), now);
    }
    // afterRead may trigger an in-place refresh and return the refreshed value
    V refreshed = afterRead(node, now, recordStats);
    return (refreshed == null) ? value : refreshed;
  }
2211

2212
  @Override
2213
  public @Nullable V getIfPresentQuietly(Object key) {
2214
    V value;
2215
    Node<K, V> node = data.get(nodeFactory.newLookupKey(key));
×
2216
    if ((node == null) || ((value = node.getValue()) == null)
×
2217
        || hasExpired(node, expirationTicker().read())) {
×
2218
      return null;
×
2219
    }
2220
    return value;
×
2221
  }
2222

2223
  /**
2224
   * Returns the key associated with the mapping in this cache, or {@code null} if there is none.
2225
   *
2226
   * @param key the key whose canonical instance is to be returned
2227
   * @return the key used by the mapping, or {@code null} if this cache does not contain a mapping
2228
   *         for the key
2229
   * @throws NullPointerException if the specified key is null
2230
   */
2231
  public @Nullable K getKey(K key) {
2232
    Node<K, V> node = data.get(nodeFactory.newLookupKey(key));
×
2233
    if (node == null) {
×
2234
      if (drainStatusOpaque() == REQUIRED) {
×
2235
        scheduleDrainBuffers();
×
2236
      }
2237
      return null;
×
2238
    }
2239
    afterRead(node, /* now= */ 0L, /* recordHit= */ false);
×
2240
    return node.getKey();
×
2241
  }
2242

2243
  @Override
  public Map<K, V> getAllPresent(Iterable<? extends K> keys) {
    // Seed with null placeholders to preserve iteration order and deduplicate the keys
    var result = new LinkedHashMap<K, V>(calculateHashMapCapacity(keys));
    for (K key : keys) {
      result.put(key, null);
    }

    int uniqueKeys = result.size();
    long now = expirationTicker().read();
    for (var iter = result.entrySet().iterator(); iter.hasNext();) {
      V value;
      var entry = iter.next();
      Node<K, V> node = data.get(nodeFactory.newLookupKey(entry.getKey()));
      if ((node == null) || ((value = node.getValue()) == null) || hasExpired(node, now)) {
        // Absent, collected, or expired: drop the placeholder
        iter.remove();
      } else {
        setAccessTime(node, now);
        tryExpireAfterRead(node, entry.getKey(), value, expiry(), now);
        V refreshed = afterRead(node, now, /* recordHit= */ false);
        entry.setValue((refreshed == null) ? value : refreshed);
      }
    }
    // Record statistics in bulk: surviving entries are hits, the rest are misses
    statsCounter().recordHits(result.size());
    statsCounter().recordMisses(uniqueKeys - result.size());

    return Collections.unmodifiableMap(result);
  }
2270

2271
  @Override
2272
  public void putAll(Map<? extends K, ? extends V> map) {
2273
    map.forEach(this::put);
×
2274
  }
×
2275

2276
  @Override
  public @Nullable V put(K key, V value) {
    // Delegates to the expanded form using the cache's configured expiry policy
    return put(key, value, expiry(), /* onlyIfAbsent= */ false);
  }
2280

2281
  @Override
  public @Nullable V putIfAbsent(K key, V value) {
    // Delegates to the expanded form; the write occurs only if no live mapping exists
    return put(key, value, expiry(), /* onlyIfAbsent= */ true);
  }
2285

2286
  /**
2287
   * Adds a node to the policy and the data store. If an existing node is found, then its value is
2288
   * updated if allowed.
2289
   *
2290
   * @param key key with which the specified value is to be associated
2291
   * @param value value to be associated with the specified key
2292
   * @param expiry the calculator for the write expiration time
2293
   * @param onlyIfAbsent a write is performed only if the key is not already associated with a value
2294
   * @return the prior value in or null if no mapping was found
2295
   */
2296
  @Nullable V put(K key, V value, Expiry<K, V> expiry, boolean onlyIfAbsent) {
2297
    requireNonNull(key);
×
2298
    requireNonNull(value);
×
2299

2300
    @Var Node<K, V> node = null;
×
2301
    long now = expirationTicker().read();
×
2302
    int newWeight = weigher.weigh(key, value);
×
2303
    Object lookupKey = nodeFactory.newLookupKey(key);
×
2304
    for (int attempts = 1; ; attempts++) {
×
2305
      @Var Node<K, V> prior = data.get(lookupKey);
×
2306
      if (prior == null) {
×
2307
        if (node == null) {
×
2308
          node = nodeFactory.newNode(key, keyReferenceQueue(),
×
2309
              value, valueReferenceQueue(), newWeight, now);
×
2310
          long expirationTime = isComputingAsync(value) ? (now + ASYNC_EXPIRY) : now;
×
2311
          setVariableTime(node, expireAfterCreate(key, value, expiry, now));
×
2312
          setAccessTime(node, expirationTime);
×
2313
          setWriteTime(node, expirationTime);
×
2314
        }
2315
        prior = data.putIfAbsent(node.getKeyReference(), node);
×
2316
        if (prior == null) {
×
2317
          afterWrite(new AddTask(node, newWeight));
×
2318
          return null;
×
2319
        } else if (onlyIfAbsent) {
×
2320
          // An optimistic fast path to avoid unnecessary locking
2321
          V currentValue = prior.getValue();
×
2322
          if ((currentValue != null) && !hasExpired(prior, now)) {
×
2323
            if (!isComputingAsync(currentValue)) {
×
2324
              tryExpireAfterRead(prior, key, currentValue, expiry(), now);
×
2325
              setAccessTime(prior, now);
×
2326
            }
2327
            afterRead(prior, now, /* recordHit= */ false);
×
2328
            return currentValue;
×
2329
          }
2330
        }
×
2331
      } else if (onlyIfAbsent) {
×
2332
        // An optimistic fast path to avoid unnecessary locking
2333
        V currentValue = prior.getValue();
×
2334
        if ((currentValue != null) && !hasExpired(prior, now)) {
×
2335
          if (!isComputingAsync(currentValue)) {
×
2336
            tryExpireAfterRead(prior, key, currentValue, expiry(), now);
×
2337
            setAccessTime(prior, now);
×
2338
          }
2339
          afterRead(prior, now, /* recordHit= */ false);
×
2340
          return currentValue;
×
2341
        }
2342
      }
2343

2344
      // A read may race with the entry's removal, so that after the entry is acquired it may no
2345
      // longer be usable. A retry will reread from the map and either find an absent mapping, a
2346
      // new entry, or a stale entry.
2347
      if (!prior.isAlive()) {
×
2348
        // A reread of the stale entry may occur if the state transition occurred but the map
2349
        // removal was delayed by a context switch, so that this thread spin waits until resolved.
2350
        if ((attempts & MAX_PUT_SPIN_WAIT_ATTEMPTS) != 0) {
×
2351
          Thread.onSpinWait();
×
2352
          continue;
×
2353
        }
2354

2355
        // If the spin wait attempts are exhausted then fallback to a map computation in order to
2356
        // deschedule this thread until the entry's removal completes. If the key was modified
2357
        // while in the map so that its equals or hashCode changed then the contents may be
2358
        // corrupted, where the cache holds an evicted (dead) entry that could not be removed.
2359
        // That is a violation of the Map contract, so we check that the mapping is in the "alive"
2360
        // state while in the computation.
2361
        data.computeIfPresent(lookupKey, (k, n) -> {
×
2362
          requireIsAlive(key, n);
×
2363
          return n;
×
2364
        });
2365
        continue;
×
2366
      }
2367

2368
      V oldValue;
2369
      long varTime;
2370
      int oldWeight;
2371
      @Var boolean expired = false;
×
2372
      @Var boolean mayUpdate = true;
×
2373
      @Var boolean exceedsTolerance = false;
×
2374
      synchronized (prior) {
×
2375
        if (!prior.isAlive()) {
×
2376
          continue;
×
2377
        }
2378
        oldValue = prior.getValue();
×
2379
        oldWeight = prior.getWeight();
×
2380
        if (oldValue == null) {
×
2381
          varTime = expireAfterCreate(key, value, expiry, now);
×
2382
          notifyEviction(key, null, RemovalCause.COLLECTED);
×
2383
        } else if (hasExpired(prior, now)) {
×
2384
          expired = true;
×
2385
          varTime = expireAfterCreate(key, value, expiry, now);
×
2386
          notifyEviction(key, oldValue, RemovalCause.EXPIRED);
×
2387
        } else if (onlyIfAbsent) {
×
2388
          mayUpdate = false;
×
2389
          varTime = expireAfterRead(prior, key, value, expiry, now);
×
2390
        } else {
2391
          varTime = expireAfterUpdate(prior, key, value, expiry, now);
×
2392
        }
2393

2394
        long expirationTime = isComputingAsync(value) ? (now + ASYNC_EXPIRY) : now;
×
2395
        if (mayUpdate) {
×
2396
          exceedsTolerance = exceedsWriteTimeTolerance(prior, varTime, now);
×
2397
          if (expired || exceedsTolerance) {
×
2398
            setWriteTime(prior, isComputingAsync(value) ? (now + ASYNC_EXPIRY) : now);
×
2399
          }
2400

2401
          prior.setValue(value, valueReferenceQueue());
×
2402
          prior.setWeight(newWeight);
×
2403

2404
          discardRefresh(prior.getKeyReference());
×
2405
        }
2406

2407
        setVariableTime(prior, varTime);
×
2408
        setAccessTime(prior, expirationTime);
×
2409
      }
×
2410

2411
      if (expired) {
×
2412
        notifyRemoval(key, oldValue, RemovalCause.EXPIRED);
×
2413
      } else if (oldValue == null) {
×
2414
        notifyRemoval(key, /* value= */ null, RemovalCause.COLLECTED);
×
2415
      } else if (mayUpdate) {
×
2416
        notifyOnReplace(key, oldValue, value);
×
2417
      }
2418

2419
      int weightedDifference = mayUpdate ? (newWeight - oldWeight) : 0;
×
2420
      if ((oldValue == null) || (weightedDifference != 0) || expired) {
×
2421
        afterWrite(new UpdateTask(prior, weightedDifference));
×
2422
      } else if (!onlyIfAbsent && exceedsTolerance) {
×
2423
        afterWrite(new UpdateTask(prior, weightedDifference));
×
2424
      } else {
2425
        afterRead(prior, now, /* recordHit= */ false);
×
2426
      }
2427

2428
      return expired ? null : oldValue;
×
2429
    }
2430
  }
2431

2432
  @Override
  @SuppressWarnings("SynchronizationOnLocalVariableOrMethodParameter")
  public @Nullable V remove(Object key) {
    // Single-element arrays serve as mutable out-parameters so the compute lambda can
    // communicate the removed node, its key/value, and the removal cause back to this method.
    @SuppressWarnings({"rawtypes", "unchecked"})
    Node<K, V>[] node = new Node[1];
    @SuppressWarnings({"unchecked", "Varifier"})
    @Nullable K[] oldKey = (K[]) new Object[1];
    @SuppressWarnings({"unchecked", "Varifier"})
    @Nullable V[] oldValue = (V[]) new Object[1];
    RemovalCause[] cause = new RemovalCause[1];
    Object lookupKey = nodeFactory.newLookupKey(key);

    // Remove atomically under the map's bin lock, holding the node's monitor so the state
    // transition (retire) and the map removal are observed together.
    data.computeIfPresent(lookupKey, (k, n) -> {
      synchronized (n) {
        requireIsAlive(key, n);
        oldKey[0] = n.getKey();
        oldValue[0] = n.getValue();
        // Classify the removal: a reference-collected or expired entry is an eviction,
        // otherwise it is an explicit caller-initiated removal.
        if ((oldKey[0] == null) || (oldValue[0] == null)) {
          cause[0] = RemovalCause.COLLECTED;
        } else if (hasExpired(n, expirationTicker().read())) {
          cause[0] = RemovalCause.EXPIRED;
        } else {
          cause[0] = RemovalCause.EXPLICIT;
        }
        if (cause[0].wasEvicted()) {
          notifyEviction(oldKey[0], oldValue[0], cause[0]);
        }
        discardRefresh(k);
        node[0] = n;
        n.retire();
        return null;
      }
    });

    // Outside the computation: record the write for policy maintenance and notify listeners
    if (cause[0] != null) {
      afterWrite(new RemovalTask(node[0]));
      notifyRemoval(oldKey[0], oldValue[0], cause[0]);
    }
    // Per the Map contract only an explicit removal returns the previous value; an entry that
    // was already expired or collected reads as absent.
    return (cause[0] == RemovalCause.EXPLICIT) ? oldValue[0] : null;
  }
2472

2473
  @Override
  @SuppressWarnings("SynchronizationOnLocalVariableOrMethodParameter")
  public boolean remove(Object key, Object value) {
    requireNonNull(key);
    if (value == null) {
      // A null value can never match a mapping, so nothing to remove
      return false;
    }

    // Out-parameters populated by the compute lambda (removed node, old key/value, cause)
    @SuppressWarnings({"rawtypes", "unchecked"})
    Node<K, V>[] removed = new Node[1];
    @SuppressWarnings({"unchecked", "Varifier"})
    @Nullable K[] oldKey = (K[]) new Object[1];
    @SuppressWarnings({"unchecked", "Varifier"})
    @Nullable V[] oldValue = (V[]) new Object[1];
    RemovalCause[] cause = new RemovalCause[1];
    Object lookupKey = nodeFactory.newLookupKey(key);

    data.computeIfPresent(lookupKey, (kR, node) -> {
      synchronized (node) {
        requireIsAlive(key, node);
        oldKey[0] = node.getKey();
        oldValue[0] = node.getValue();
        if ((oldKey[0] == null) || (oldValue[0] == null)) {
          // A dead (reference collected) entry is removed regardless of the value match
          cause[0] = RemovalCause.COLLECTED;
        } else if (hasExpired(node, expirationTicker().read())) {
          // An expired entry is likewise removed eagerly while we hold the lock
          cause[0] = RemovalCause.EXPIRED;
        } else if (node.containsValue(value)) {
          cause[0] = RemovalCause.EXPLICIT;
        } else {
          // Live entry whose value does not match: keep the mapping untouched
          return node;
        }
        if (cause[0].wasEvicted()) {
          notifyEviction(oldKey[0], oldValue[0], cause[0]);
        }
        discardRefresh(kR);
        removed[0] = node;
        node.retire();
        return null;
      }
    });

    if (removed[0] == null) {
      return false;
    }
    afterWrite(new RemovalTask(removed[0]));
    notifyRemoval(oldKey[0], oldValue[0], cause[0]);

    // True only for a conditional match; an expired/collected entry reads as absent
    return (cause[0] == RemovalCause.EXPLICIT);
  }
2522

2523
  @Override
  @SuppressWarnings("SynchronizationOnLocalVariableOrMethodParameter")
  public @Nullable V replace(K key, V value) {
    requireNonNull(key);
    requireNonNull(value);

    // Out-parameters populated inside the computeIfPresent lambda
    var now = new long[1];
    var oldWeight = new int[1];
    var exceedsTolerance = new boolean[1];
    @SuppressWarnings({"unchecked", "Varifier"})
    @Nullable K[] nodeKey = (K[]) new Object[1];
    @SuppressWarnings({"unchecked", "Varifier"})
    @Nullable V[] oldValue = (V[]) new Object[1];
    int weight = weigher.weigh(key, value);
    Node<K, V> node = data.computeIfPresent(nodeFactory.newLookupKey(key), (k, n) -> {
      synchronized (n) {
        requireIsAlive(key, n);
        nodeKey[0] = n.getKey();
        oldValue[0] = n.getValue();
        oldWeight[0] = n.getWeight();
        if ((nodeKey[0] == null) || (oldValue[0] == null)
            || hasExpired(n, now[0] = expirationTicker().read())) {
          // Collected or expired entries read as absent; clear oldValue so the caller
          // sees no replacement occurred (the mapping itself is left for maintenance)
          oldValue[0] = null;
          return n;
        }

        long varTime = expireAfterUpdate(n, key, value, expiry(), now[0]);
        n.setValue(value, valueReferenceQueue());
        n.setWeight(weight);

        // Async computations use a sentinel far-future timestamp until completion
        long expirationTime = isComputingAsync(value) ? (now[0] + ASYNC_EXPIRY) : now[0];
        // Only refresh the write timestamp when outside the update tolerance, avoiding
        // needless policy churn for rapid successive writes
        exceedsTolerance[0] = exceedsWriteTimeTolerance(n, varTime, expirationTime);
        if (exceedsTolerance[0]) {
          setWriteTime(n, expirationTime);
        }
        setAccessTime(n, expirationTime);
        setVariableTime(n, varTime);

        discardRefresh(k);
        return n;
      }
    });

    if ((nodeKey[0] == null) || (oldValue[0] == null)) {
      return null;
    }

    // Treat as a write only if the weight changed or the write time was advanced;
    // otherwise it is policy-equivalent to a read
    int weightedDifference = (weight - oldWeight[0]);
    if (exceedsTolerance[0] || (weightedDifference != 0)) {
      afterWrite(new UpdateTask(node, weightedDifference));
    } else {
      afterRead(node, now[0], /* recordHit= */ false);
    }

    notifyOnReplace(nodeKey[0], oldValue[0], value);
    return oldValue[0];
  }
2580

2581
  @Override
  public boolean replace(K key, V oldValue, V newValue) {
    // Delegate with refresh discarding enabled so a successful replacement also drops any
    // pending refresh bookkeeping for this key (see the four-argument overload)
    return replace(key, oldValue, newValue, /* shouldDiscardRefresh= */ true);
  }
2585

2586
  @Override
  @SuppressWarnings("SynchronizationOnLocalVariableOrMethodParameter")
  public boolean replace(K key, V oldValue, V newValue, boolean shouldDiscardRefresh) {
    requireNonNull(key);
    requireNonNull(oldValue);
    requireNonNull(newValue);

    // Out-parameters populated inside the computeIfPresent lambda
    var now = new long[1];
    var oldWeight = new int[1];
    var exceedsTolerance = new boolean[1];
    @SuppressWarnings({"unchecked", "Varifier"})
    @Nullable K[] nodeKey = (K[]) new Object[1];
    @SuppressWarnings({"unchecked", "Varifier"})
    @Nullable V[] prevValue = (V[]) new Object[1];

    int weight = weigher.weigh(key, newValue);
    Node<K, V> node = data.computeIfPresent(nodeFactory.newLookupKey(key), (k, n) -> {
      synchronized (n) {
        requireIsAlive(key, n);
        nodeKey[0] = n.getKey();
        prevValue[0] = n.getValue();
        oldWeight[0] = n.getWeight();
        // The conditional replace also requires the current value to match oldValue;
        // collected or expired entries read as absent
        if ((nodeKey[0] == null) || (prevValue[0] == null) || !n.containsValue(oldValue)
            || hasExpired(n, now[0] = expirationTicker().read())) {
          prevValue[0] = null;
          return n;
        }

        long varTime = expireAfterUpdate(n, key, newValue, expiry(), now[0]);
        n.setValue(newValue, valueReferenceQueue());
        n.setWeight(weight);

        // Async computations use a sentinel far-future timestamp until completion
        long expirationTime = isComputingAsync(newValue) ? (now[0] + ASYNC_EXPIRY) : now[0];
        exceedsTolerance[0] = exceedsWriteTimeTolerance(n, varTime, expirationTime);
        if (exceedsTolerance[0]) {
          setWriteTime(n, expirationTime);
        }
        setAccessTime(n, expirationTime);
        setVariableTime(n, varTime);

        // Callers performing their own refresh bookkeeping (the false case) keep it intact
        if (shouldDiscardRefresh) {
          discardRefresh(k);
        }
      }
      return n;
    });

    if ((nodeKey[0] == null) || (prevValue[0] == null)) {
      return false;
    }

    // Treat as a write only if the weight changed or the write time was advanced;
    // otherwise it is policy-equivalent to a read
    int weightedDifference = (weight - oldWeight[0]);
    if (exceedsTolerance[0] || (weightedDifference != 0)) {
      afterWrite(new UpdateTask(node, weightedDifference));
    } else {
      afterRead(node, now[0], /* recordHit= */ false);
    }

    notifyOnReplace(nodeKey[0], prevValue[0], newValue);
    return true;
  }
2647

2648
  @Override
2649
  public void replaceAll(BiFunction<? super K, ? super V, ? extends V> function) {
2650
    requireNonNull(function);
×
2651

2652
    BiFunction<K, V, V> remappingFunction = (key, oldValue) ->
×
2653
        requireNonNull(function.apply(key, oldValue));
×
2654
    for (K key : keySet()) {
×
2655
      long[] now = { expirationTicker().read() };
×
2656
      Object lookupKey = nodeFactory.newLookupKey(key);
×
2657
      remap(key, lookupKey, remappingFunction, expiry(), now, /* computeIfAbsent= */ false);
×
2658
    }
×
2659
  }
×
2660

2661
  @Override
  public @Nullable V computeIfAbsent(K key, @Var Function<? super K, ? extends V> mappingFunction,
      boolean recordStats, boolean recordLoad) {
    requireNonNull(key);
    requireNonNull(mappingFunction);
    long now = expirationTicker().read();

    // An optimistic fast path to avoid unnecessary locking
    Node<K, V> node = data.get(nodeFactory.newLookupKey(key));
    if (node != null) {
      V value = node.getValue();
      if ((value != null) && !hasExpired(node, now)) {
        // Skip timestamp updates for in-flight async computations
        if (!isComputingAsync(value)) {
          tryExpireAfterRead(node, key, value, expiry(), now);
          setAccessTime(node, now);
        }
        // afterRead may return a freshly refreshed value; prefer it over the stale read
        var refreshed = afterRead(node, now, /* recordHit= */ recordStats);
        return (refreshed == null) ? value : refreshed;
      }
    }
    // Slow path: wrap with stats recording if requested, then compute under the map's lock
    if (recordStats) {
      mappingFunction = statsAware(mappingFunction, recordLoad);
    }
    Object keyRef = nodeFactory.newReferenceKey(key, keyReferenceQueue());
    return doComputeIfAbsent(key, keyRef, mappingFunction, new long[] { now }, recordStats);
  }
2687

2688
  /** Returns the current value from a computeIfAbsent invocation. */
  @SuppressWarnings("SynchronizationOnLocalVariableOrMethodParameter")
  @Nullable V doComputeIfAbsent(K key, Object keyRef,
      Function<? super K, ? extends @Nullable V> mappingFunction, long[/* 1 */] now,
      boolean recordStats) {
    // Out-parameters populated by the compute lambda so the post-processing outside of the
    // map computation can see what happened inside it
    @SuppressWarnings({"unchecked", "Varifier"})
    @Nullable V[] oldValue = (V[]) new Object[1];
    @SuppressWarnings({"unchecked", "Varifier"})
    @Nullable V[] newValue = (V[]) new Object[1];
    @SuppressWarnings({"unchecked", "Varifier"})
    @Nullable K[] nodeKey = (K[]) new Object[1];
    @SuppressWarnings({"rawtypes", "unchecked"})
    Node<K, V>[] removed = new Node[1];

    int[] weight = new int[2]; // old, new
    RemovalCause[] cause = new RemovalCause[1];
    Node<K, V> node = data.compute(keyRef, (k, n) -> {
      if (n == null) {
        // Absent mapping: invoke the loader and, if it yields a value, insert a new node
        newValue[0] = mappingFunction.apply(key);
        if (newValue[0] == null) {
          discardRefresh(k);
          return null;
        }
        // Re-read the clock after the (possibly slow) load so timestamps are accurate
        now[0] = expirationTicker().read();
        weight[1] = weigher.weigh(key, newValue[0]);
        var created = nodeFactory.newNode(key, keyReferenceQueue(),
            newValue[0], valueReferenceQueue(), weight[1], now[0]);
        // Async computations use a sentinel far-future timestamp until completion
        long expirationTime = isComputingAsync(newValue[0]) ? (now[0] + ASYNC_EXPIRY) : now[0];
        setVariableTime(created, expireAfterCreate(key, newValue[0], expiry(), now[0]));
        setAccessTime(created, expirationTime);
        setWriteTime(created, expirationTime);
        discardRefresh(k);
        return created;
      }

      synchronized (n) {
        requireIsAlive(key, n);
        nodeKey[0] = n.getKey();
        weight[0] = n.getWeight();
        oldValue[0] = n.getValue();
        if ((nodeKey[0] == null) || (oldValue[0] == null)) {
          // Entry was reference collected; treat as absent and recompute in place
          cause[0] = RemovalCause.COLLECTED;
        } else if (hasExpired(n, now[0])) {
          cause[0] = RemovalCause.EXPIRED;
        } else {
          // Live value present: computeIfAbsent leaves it untouched
          return n;
        }

        // The dead entry is replaced in place rather than removed and reinserted
        notifyEviction(nodeKey[0], oldValue[0], cause[0]);
        newValue[0] = mappingFunction.apply(key);
        if (newValue[0] == null) {
          discardRefresh(k);
          removed[0] = n;
          n.retire();
          return null;
        }
        now[0] = expirationTicker().read();
        weight[1] = weigher.weigh(key, newValue[0]);
        long varTime = expireAfterCreate(key, newValue[0], expiry(), now[0]);

        n.setValue(newValue[0], valueReferenceQueue());
        n.setWeight(weight[1]);

        long expirationTime = isComputingAsync(newValue[0]) ? (now[0] + ASYNC_EXPIRY) : now[0];
        setAccessTime(n, expirationTime);
        setWriteTime(n, expirationTime);
        setVariableTime(n, varTime);

        discardRefresh(k);
        return n;
      }
    });

    // Post-processing outside the computation: stats, listener notification, and policy work
    if (cause[0] != null) {
      statsCounter().recordEviction(weight[0], cause[0]);
      notifyRemoval(nodeKey[0], oldValue[0], cause[0]);
    }
    if (node == null) {
      if (removed[0] != null) {
        afterWrite(new RemovalTask(removed[0]));
      }
      return null;
    }
    if ((oldValue[0] != null) && (newValue[0] == null)) {
      // Found a live value, so this behaved as a read of the existing mapping
      if (!isComputingAsync(oldValue[0])) {
        tryExpireAfterRead(node, key, oldValue[0], expiry(), now[0]);
        setAccessTime(node, now[0]);
      }

      afterRead(node, now[0], /* recordHit= */ recordStats);
      return oldValue[0];
    }
    if ((oldValue[0] == null) && (cause[0] == null)) {
      // Brand-new mapping
      afterWrite(new AddTask(node, weight[1]));
    } else {
      // A dead entry was replaced in place; account for the weight delta
      int weightedDifference = (weight[1] - weight[0]);
      afterWrite(new UpdateTask(node, weightedDifference));
    }

    return newValue[0];
  }
2789

2790
  @Override
  public @Nullable V computeIfPresent(K key,
      BiFunction<? super K, ? super V, ? extends V> remappingFunction) {
    requireNonNull(key);
    requireNonNull(remappingFunction);

    // An optimistic fast path to avoid unnecessary locking
    Object lookupKey = nodeFactory.newLookupKey(key);
    @Nullable Node<K, V> node = data.get(lookupKey);
    long now;
    if (node == null) {
      return null;
    } else if ((node.getValue() == null) || hasExpired(node, (now = expirationTicker().read()))) {
      // The stale entry reads as absent; trigger maintenance so it gets cleaned up
      scheduleDrainBuffers();
      return null;
    }

    // The entry appeared present, so remap under the lock with stats recording enabled
    BiFunction<? super K, ? super V, ? extends V> statsAwareRemappingFunction =
        statsAware(remappingFunction, /* recordLoad= */ true, /* recordLoadFailure= */ true);
    return remap(key, lookupKey, statsAwareRemappingFunction,
        expiry(), new long[] { now }, /* computeIfAbsent= */ false);
  }
2812

2813
  @Override
2814
  @SuppressWarnings("NullAway")
2815
  public @Nullable V compute(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction,
2816
      @Nullable Expiry<? super K, ? super V> expiry, boolean recordLoad,
2817
      boolean recordLoadFailure) {
2818
    requireNonNull(key);
×
2819
    requireNonNull(remappingFunction);
×
2820

2821
    long[] now = { expirationTicker().read() };
×
2822
    Object keyRef = nodeFactory.newReferenceKey(key, keyReferenceQueue());
×
2823
    BiFunction<? super K, ? super V, ? extends V> statsAwareRemappingFunction =
×
2824
        statsAware(remappingFunction, recordLoad, recordLoadFailure);
×
2825
    return remap(key, keyRef, statsAwareRemappingFunction,
×
2826
        expiry, now, /* computeIfAbsent= */ true);
2827
  }
2828

2829
  @Override
2830
  public @Nullable V merge(K key, V value,
2831
      BiFunction<? super V, ? super V, ? extends V> remappingFunction) {
2832
    requireNonNull(key);
×
2833
    requireNonNull(value);
×
2834
    requireNonNull(remappingFunction);
×
2835

2836
    long[] now = { expirationTicker().read() };
×
2837
    Object keyRef = nodeFactory.newReferenceKey(key, keyReferenceQueue());
×
2838
    BiFunction<? super K, ? super V, ? extends V> mergeFunction = (k, oldValue) ->
×
2839
        (oldValue == null) ? value : statsAware(remappingFunction).apply(oldValue, value);
×
2840
    return remap(key, keyRef, mergeFunction, expiry(), now, /* computeIfAbsent= */ true);
×
2841
  }
2842

2843
  /**
   * Attempts to compute a mapping for the specified key and its current mapped value (or
   * {@code null} if there is no current mapping).
   * <p>
   * An entry that has expired or been reference collected is evicted and the computation continues
   * as if the entry had not been present. This method does not pre-screen and does not wrap the
   * remappingFunction to be statistics aware.
   *
   * @param key key with which the specified value is to be associated
   * @param keyRef the key to associate with or a lookup only key if not {@code computeIfAbsent}
   * @param remappingFunction the function to compute a value
   * @param expiry the calculator for the expiration time
   * @param now the current time, according to the ticker
   * @param computeIfAbsent if an absent entry can be computed
   * @return the new value associated with the specified key, or null if none
   */
  @SuppressWarnings("SynchronizationOnLocalVariableOrMethodParameter")
  @Nullable V remap(K key, Object keyRef,
      BiFunction<? super K, ? super V, ? extends @Nullable V> remappingFunction,
      Expiry<? super K, ? super V> expiry, long[/* 1 */] now, boolean computeIfAbsent) {
    // Out-parameters populated by the compute lambda so the post-processing outside of the
    // map computation can see what happened inside it
    @SuppressWarnings({"unchecked", "Varifier"})
    @Nullable K[] nodeKey = (K[]) new Object[1];
    @SuppressWarnings({"unchecked", "Varifier"})
    @Nullable V[] oldValue = (V[]) new Object[1];
    @SuppressWarnings({"unchecked", "Varifier"})
    @Nullable V[] newValue = (V[]) new Object[1];
    @SuppressWarnings({"rawtypes", "unchecked"})
    Node<K, V>[] removed = new Node[1];

    var weight = new int[2]; // old, new
    var cause = new RemovalCause[1];
    var exceedsTolerance = new boolean[1];

    Node<K, V> node = data.compute(keyRef, (kr, n) -> {
      if (n == null) {
        // Absent mapping: only compute when permitted, inserting a new node on success
        if (!computeIfAbsent) {
          return null;
        }
        newValue[0] = remappingFunction.apply(key, null);
        if (newValue[0] == null) {
          return null;
        }
        // Re-read the clock after the (possibly slow) computation
        now[0] = expirationTicker().read();
        weight[1] = weigher.weigh(key, newValue[0]);
        long varTime = expireAfterCreate(key, newValue[0], expiry, now[0]);
        var created = nodeFactory.newNode(keyRef, newValue[0],
            valueReferenceQueue(), weight[1], now[0]);

        // Async computations use a sentinel far-future timestamp until completion
        long expirationTime = isComputingAsync(newValue[0]) ? (now[0] + ASYNC_EXPIRY) : now[0];
        setAccessTime(created, expirationTime);
        setWriteTime(created, expirationTime);
        setVariableTime(created, varTime);
        discardRefresh(kr);
        return created;
      }

      synchronized (n) {
        requireIsAlive(key, n);
        nodeKey[0] = n.getKey();
        oldValue[0] = n.getValue();
        // Classify a dead entry so the computation continues as if it were absent
        if ((nodeKey[0] == null) || (oldValue[0] == null)) {
          cause[0] = RemovalCause.COLLECTED;
        } else if (hasExpired(n, expirationTicker().read())) {
          cause[0] = RemovalCause.EXPIRED;
        }
        if (cause[0] != null) {
          notifyEviction(nodeKey[0], oldValue[0], cause[0]);
          if (!computeIfAbsent) {
            // Evict the dead entry and stop: an absent mapping may not be computed
            removed[0] = n;
            n.retire();
            return null;
          }
        }

        // A dead entry is presented to the function as an absent mapping (null old value)
        newValue[0] = remappingFunction.apply(nodeKey[0],
            (cause[0] == null) ? oldValue[0] : null);
        if (newValue[0] == null) {
          if (cause[0] == null) {
            cause[0] = RemovalCause.EXPLICIT;
            discardRefresh(kr);
          }
          removed[0] = n;
          n.retire();
          return null;
        }

        long varTime;
        weight[0] = n.getWeight();
        weight[1] = weigher.weigh(key, newValue[0]);
        now[0] = expirationTicker().read();
        if (cause[0] == null) {
          // Live entry: an identity-distinct result counts as a replacement
          if (newValue[0] != oldValue[0]) {
            cause[0] = RemovalCause.REPLACED;
          }
          varTime = expireAfterUpdate(n, key, newValue[0], expiry, now[0]);
        } else {
          // Dead entry recomputed in place behaves like a fresh creation
          varTime = expireAfterCreate(key, newValue[0], expiry, now[0]);
        }

        n.setValue(newValue[0], valueReferenceQueue());
        n.setWeight(weight[1]);

        long expirationTime = isComputingAsync(newValue[0]) ? (now[0] + ASYNC_EXPIRY) : now[0];
        // Only refresh the write timestamp for an eviction-replacement or when outside the
        // update tolerance, avoiding needless policy churn for rapid successive writes
        exceedsTolerance[0] = exceedsWriteTimeTolerance(n, varTime, expirationTime);
        if (((cause[0] != null) && cause[0].wasEvicted()) || exceedsTolerance[0]) {
          setWriteTime(n, expirationTime);
        }
        setAccessTime(n, expirationTime);
        setVariableTime(n, varTime);

        discardRefresh(kr);
        return n;
      }
    });

    // Post-processing outside the computation: listener notification and eviction stats
    if (cause[0] != null) {
      if (cause[0] == RemovalCause.REPLACED) {
        requireNonNull(newValue[0]);
        notifyOnReplace(key, oldValue[0], newValue[0]);
      } else {
        if (cause[0].wasEvicted()) {
          statsCounter().recordEviction(weight[0], cause[0]);
        }
        notifyRemoval(nodeKey[0], oldValue[0], cause[0]);
      }
    }

    // Policy maintenance: removal, addition, update, or (policy-wise) a plain read
    if (removed[0] != null) {
      afterWrite(new RemovalTask(removed[0]));
    } else if (node == null) {
      // absent and not computable
    } else if ((oldValue[0] == null) && (cause[0] == null)) {
      afterWrite(new AddTask(node, weight[1]));
    } else {
      int weightedDifference = weight[1] - weight[0];
      if (exceedsTolerance[0] || (weightedDifference != 0)) {
        afterWrite(new UpdateTask(node, weightedDifference));
      } else {
        afterRead(node, now[0], /* recordHit= */ false);
        if ((cause[0] != null) && cause[0].wasEvicted()) {
          // An in-place eviction-replacement still needs maintenance scheduled
          scheduleDrainBuffers();
        }
      }
    }

    return newValue[0];
  }
2990

2991
  @Override
2992
  public void forEach(BiConsumer<? super K, ? super V> action) {
2993
    requireNonNull(action);
×
2994

2995
    for (var iterator = new EntryIterator<>(this); iterator.hasNext();) {
×
2996
      action.accept(iterator.key, iterator.value);
×
2997
      iterator.advance();
×
2998
    }
2999
  }
×
3000

3001
  @Override
3002
  public Set<K> keySet() {
3003
    Set<K> ks = keySet;
×
3004
    return (ks == null) ? (keySet = new KeySetView<>(this)) : ks;
×
3005
  }
3006

3007
  @Override
3008
  public Collection<V> values() {
3009
    Collection<V> vs = values;
×
3010
    return (vs == null) ? (values = new ValuesView<>(this)) : vs;
×
3011
  }
3012

3013
  @Override
3014
  public Set<Entry<K, V>> entrySet() {
3015
    Set<Entry<K, V>> es = entrySet;
×
3016
    return (es == null) ? (entrySet = new EntrySetView<>(this)) : es;
×
3017
  }
3018

3019
  /**
3020
   * Object equality requires reflexive, symmetric, transitive, and consistency properties. Of
3021
   * these, symmetry and consistency require further clarification for how they are upheld.
3022
   * <p>
3023
   * The <i>consistency</i> property between invocations requires that the results are the same if
3024
   * there are no modifications to the information used. Therefore, usages should expect that this
3025
   * operation may return misleading results if either the maps or the data held by them is modified
3026
   * during the execution of this method. This characteristic allows for comparing the map sizes and
3027
   * assuming stable mappings, as done by {@link java.util.AbstractMap}-based maps.
3028
   * <p>
3029
   * The <i>symmetric</i> property requires that the result is the same for all implementations of
3030
   * {@link Map#equals(Object)}. That contract is defined in terms of the stable mappings provided
3031
   * by {@link #entrySet()}, meaning that the {@link #size()} optimization forces that the count is
3032
   * consistent with the mappings when used for an equality check.
3033
   * <p>
3034
   * The cache's {@link #size()} method may include entries that have expired or have been reference
3035
   * collected, but have not yet been removed from the backing map. An iteration over the map may
3036
   * trigger the removal of these dead entries when skipped over during traversal. To ensure
3037
   * consistency and symmetry, usages should call {@link #cleanUp()} before this method while no
3038
   * other concurrent operations are being performed on this cache. This is not done implicitly by
3039
   * {@link #size()} as many usages assume it to be instantaneous and lock-free.
3040
   */
3041
  @Override
3042
  public boolean equals(@Nullable Object o) {
3043
    if (o == this) {
×
3044
      return true;
×
3045
    } else if (!(o instanceof Map)) {
×
3046
      return false;
×
3047
    }
3048

3049
    var map = (Map<?, ?>) o;
×
3050
    if (size() != map.size()) {
×
3051
      return false;
×
3052
    }
3053

3054
    long now = expirationTicker().read();
×
3055
    for (var node : data.values()) {
×
3056
      K key = node.getKey();
×
3057
      V value = node.getValue();
×
3058
      if ((key == null) || (value == null)
×
3059
          || !node.isAlive() || hasExpired(node, now)) {
×
3060
        scheduleDrainBuffers();
×
3061
        return false;
×
3062
      } else {
3063
        var val = map.get(key);
×
3064
        if ((val == null) || ((val != value) && !val.equals(value))) {
×
3065
          return false;
×
3066
        }
3067
      }
3068
    }
×
3069
    return true;
×
3070
  }
3071

3072
  @Override
3073
  public int hashCode() {
3074
    @Var int hash = 0;
×
3075
    long now = expirationTicker().read();
×
3076
    for (var node : data.values()) {
×
3077
      K key = node.getKey();
×
3078
      V value = node.getValue();
×
3079
      if ((key == null) || (value == null)
×
3080
          || !node.isAlive() || hasExpired(node, now)) {
×
3081
        scheduleDrainBuffers();
×
3082
      } else {
3083
        hash += key.hashCode() ^ value.hashCode();
×
3084
      }
3085
    }
×
3086
    return hash;
×
3087
  }
3088

3089
  @Override
  public String toString() {
    var result = new StringBuilder().append('{');
    long now = expirationTicker().read();
    for (var node : data.values()) {
      K key = node.getKey();
      V value = node.getValue();
      if ((key == null) || (value == null)
          || !node.isAlive() || hasExpired(node, now)) {
        // Skip dead or expired entries (not externally visible) and trigger their eviction
        scheduleDrainBuffers();
      } else {
        // Length 1 means only the opening brace has been written, so no separator is needed yet
        if (result.length() != 1) {
          result.append(',').append(' ');
        }
        // Guard against infinite recursion if this map is used as its own key or value
        result.append((key == this) ? "(this Map)" : key);
        result.append('=');
        result.append((value == this) ? "(this Map)" : value);
      }
    }
    return result.append('}').toString();
  }
3110

3111
  /**
   * Returns the computed result from the ordered traversal of the cache entries.
   *
   * @param hottest the coldest or hottest iteration order
   * @param transformer a function that unwraps the value
   * @param mappingFunction the mapping function to compute a value
   * @return the computed value
   */
  @SuppressWarnings("GuardedByChecker")
  <T> T evictionOrder(boolean hottest, Function<@Nullable V, @Nullable V> transformer,
      Function<Stream<CacheEntry<K, V>>, T> mappingFunction) {
    // Ranks nodes by their estimated frequency; a missing key reference ranks lowest (zero)
    Comparator<Node<K, V>> comparator = Comparator.comparingInt(node -> {
      var keyRef = node.getKeyReferenceOrNull();
      return (keyRef == null) ? 0 : frequencySketch().frequency(keyRef);
    });
    Iterable<Node<K, V>> iterable;
    if (hottest) {
      // Hottest-first: all protected entries (most recently used first), then the probation and
      // window entries merged by descending frequency
      iterable = () -> {
        var secondary = PeekingIterator.comparing(
            accessOrderProbationDeque().descendingIterator(),
            accessOrderWindowDeque().descendingIterator(), comparator);
        return PeekingIterator.concat(
            accessOrderProtectedDeque().descendingIterator(), secondary);
      };
    } else {
      // Coldest-first: the window and probation entries merged by ascending frequency
      // (reversed comparator), then the protected entries
      iterable = () -> {
        var primary = PeekingIterator.comparing(
            accessOrderWindowDeque().iterator(), accessOrderProbationDeque().iterator(),
            comparator.reversed());
        return PeekingIterator.concat(primary, accessOrderProtectedDeque().iterator());
      };
    }
    return snapshot(iterable, transformer, mappingFunction);
  }
3145

3146
  /**
   * Returns the computed result from the ordered traversal of the cache entries.
   *
   * @param oldest the youngest or oldest iteration order
   * @param transformer a function that unwraps the value
   * @param mappingFunction the mapping function to compute a value
   * @return the computed value
   */
  @SuppressWarnings("GuardedByChecker")
  <T> T expireAfterAccessOrder(boolean oldest, Function<@Nullable V, @Nullable V> transformer,
      Function<Stream<CacheEntry<K, V>>, T> mappingFunction) {
    Iterable<Node<K, V>> iterable;
    if (evicts()) {
      // When size eviction is enabled the entries are spread across three access-ordered deques,
      // so a three-way merge by access time is required to produce a global ordering
      iterable = () -> {
        @Var Comparator<Node<K, V>> comparator = Comparator.comparingLong(Node::getAccessTime);
        PeekingIterator<Node<K, V>> first;
        PeekingIterator<Node<K, V>> second;
        PeekingIterator<Node<K, V>> third;
        if (oldest) {
          first = accessOrderWindowDeque().iterator();
          second = accessOrderProbationDeque().iterator();
          third = accessOrderProtectedDeque().iterator();
        } else {
          // Youngest-first: walk each deque in reverse and invert the merge comparator
          comparator = comparator.reversed();
          first = accessOrderWindowDeque().descendingIterator();
          second = accessOrderProbationDeque().descendingIterator();
          third = accessOrderProtectedDeque().descendingIterator();
        }
        return PeekingIterator.comparing(
            PeekingIterator.comparing(first, second, comparator), third, comparator);
      };
    } else {
      // Without size eviction all entries live in the window deque, already in access order
      iterable = oldest
          ? accessOrderWindowDeque()
          : accessOrderWindowDeque()::descendingIterator;
    }
    return snapshot(iterable, transformer, mappingFunction);
  }
3184

3185
  /**
   * Returns the computed result from the ordered traversal of the cache entries.
   *
   * @param iterable the supplier of the entries in the cache
   * @param transformer a function that unwraps the value
   * @param mappingFunction the mapping function to compute a value
   * @return the computed value
   */
  <T> T snapshot(Iterable<Node<K, V>> iterable, Function<@Nullable V, @Nullable V> transformer,
      Function<Stream<CacheEntry<K, V>>, T> mappingFunction) {
    requireNonNull(mappingFunction);
    requireNonNull(transformer);
    requireNonNull(iterable);

    // The eviction lock is held for the whole traversal so the policy deques are stable while
    // the mapping function consumes the stream
    evictionLock.lock();
    try {
      // Flush pending reads/writes first so the traversal reflects the latest policy state
      maintenance(/* ignored */ null);

      // Obtain the iterator as late as possible for modification count checking
      try (var stream = StreamSupport.stream(Spliterators.spliteratorUnknownSize(
           iterable.iterator(), DISTINCT | ORDERED | NONNULL | IMMUTABLE), /* parallel= */ false)) {
        return mappingFunction.apply(stream
            .map(node -> nodeToCacheEntry(node, transformer))
            .filter(Objects::nonNull));
      }
    } finally {
      evictionLock.unlock();
      // If maintenance could not finish under the lock, schedule it to run again
      rescheduleCleanUpIfIncomplete();
    }
  }
3215

3216
  /** Returns an entry for the given node if it can be used externally, else null. */
  @Nullable CacheEntry<K, V> nodeToCacheEntry(
      Node<K, V> node, Function<@Nullable V, @Nullable V> transformer) {
    V value = transformer.apply(node.getValue());
    K key = node.getKey();
    long now;
    // Dead, collected, or expired entries are not externally visible
    if ((key == null) || (value == null) || !node.isAlive()
        || hasExpired(node, (now = expirationTicker().read()))) {
      return null;
    }

    // The soonest of the enabled fixed expiration policies wins; variable expiry overrides both
    @Var long expiresAfter = Long.MAX_VALUE;
    if (expiresAfterAccess()) {
      expiresAfter = Math.min(expiresAfter, now - node.getAccessTime() + expiresAfterAccessNanos());
    }
    if (expiresAfterWrite()) {
      // NOTE(review): the low bit of the write time appears to be reserved as a flag elsewhere,
      // so it is masked out of both timestamps before comparing — confirm against setWriteTime
      expiresAfter = Math.min(expiresAfter,
          (now & ~1L) - (node.getWriteTime() & ~1L) + expiresAfterWriteNanos());
    }
    if (expiresVariable()) {
      expiresAfter = node.getVariableTime() - now;
    }

    // When refresh is disabled, (now + Long.MAX_VALUE) serves as a far-future sentinel
    // (intentional wraparound; consumers are expected to treat it as "never")
    long refreshableAt = refreshAfterWrite()
        ? node.getWriteTime() + refreshAfterWriteNanos()
        : now + Long.MAX_VALUE;
    int weight = node.getPolicyWeight();
    return SnapshotEntry.forEntry(key, value, now, weight, now + expiresAfter, refreshableAt);
  }
3245

3246
  /** A function that produces an unmodifiable map up to the limit in stream order. */
3247
  static final class SizeLimiter<K, V> implements Function<Stream<CacheEntry<K, V>>, Map<K, V>> {
3248
    private final int expectedSize;
3249
    private final long limit;
3250

3251
    SizeLimiter(int expectedSize, long limit) {
×
3252
      requireArgument(limit >= 0);
×
3253
      this.expectedSize = expectedSize;
×
3254
      this.limit = limit;
×
3255
    }
×
3256

3257
    @Override
3258
    public Map<K, V> apply(Stream<CacheEntry<K, V>> stream) {
3259
      var map = new LinkedHashMap<K, V>(calculateHashMapCapacity(expectedSize));
×
3260
      stream.limit(limit).forEach(entry -> map.put(entry.getKey(), entry.getValue()));
×
3261
      return Collections.unmodifiableMap(map);
×
3262
    }
3263
  }
3264

3265
  /** A function that produces an unmodifiable map up to the weighted limit in stream order. */
3266
  static final class WeightLimiter<K, V> implements Function<Stream<CacheEntry<K, V>>, Map<K, V>> {
3267
    private final long weightLimit;
3268

3269
    private long weightedSize;
3270

3271
    WeightLimiter(long weightLimit) {
×
3272
      requireArgument(weightLimit >= 0);
×
3273
      this.weightLimit = weightLimit;
×
3274
    }
×
3275

3276
    @Override
3277
    public Map<K, V> apply(Stream<CacheEntry<K, V>> stream) {
3278
      var map = new LinkedHashMap<K, V>();
×
3279
      stream.takeWhile(entry -> {
×
3280
        weightedSize = Math.addExact(weightedSize, entry.weight());
×
3281
        return (weightedSize <= weightLimit);
×
3282
      }).forEach(entry -> map.put(entry.getKey(), entry.getValue()));
×
3283
      return Collections.unmodifiableMap(map);
×
3284
    }
3285
  }
3286

3287
  /** An adapter to safely externalize the keys. */
3288
  static final class KeySetView<K, V> extends AbstractSet<K> {
3289
    final BoundedLocalCache<K, V> cache;
3290

3291
    KeySetView(BoundedLocalCache<K, V> cache) {
×
3292
      this.cache = requireNonNull(cache);
×
3293
    }
×
3294

3295
    @Override
3296
    public int size() {
3297
      return cache.size();
×
3298
    }
3299

3300
    @Override
3301
    public void clear() {
3302
      cache.clear();
×
3303
    }
×
3304

3305
    @Override
3306
    @SuppressWarnings("SuspiciousMethodCalls")
3307
    public boolean contains(Object o) {
3308
      return cache.containsKey(o);
×
3309
    }
3310

3311
    @Override
3312
    public boolean removeAll(Collection<?> collection) {
3313
      requireNonNull(collection);
×
3314
      @Var boolean modified = false;
×
3315
      if ((collection instanceof Set<?>) && (collection.size() > size())) {
×
3316
        for (K key : this) {
×
3317
          if (collection.contains(key)) {
×
3318
            modified |= remove(key);
×
3319
          }
3320
        }
×
3321
      } else {
3322
        for (var item : collection) {
×
3323
          modified |= (item != null) && remove(item);
×
3324
        }
×
3325
      }
3326
      return modified;
×
3327
    }
3328

3329
    @Override
3330
    public boolean remove(Object o) {
3331
      return (cache.remove(o) != null);
×
3332
    }
3333

3334
    @Override
3335
    public boolean removeIf(Predicate<? super K> filter) {
3336
      requireNonNull(filter);
×
3337
      @Var boolean modified = false;
×
3338
      for (K key : this) {
×
3339
        if (filter.test(key) && remove(key)) {
×
3340
          modified = true;
×
3341
        }
3342
      }
×
3343
      return modified;
×
3344
    }
3345

3346
    @Override
3347
    public boolean retainAll(Collection<?> collection) {
3348
      requireNonNull(collection);
×
3349
      @Var boolean modified = false;
×
3350
      for (K key : this) {
×
3351
        if (!collection.contains(key) && remove(key)) {
×
3352
          modified = true;
×
3353
        }
3354
      }
×
3355
      return modified;
×
3356
    }
3357

3358
    @Override
3359
    public Iterator<K> iterator() {
3360
      return new KeyIterator<>(cache);
×
3361
    }
3362

3363
    @Override
3364
    public Spliterator<K> spliterator() {
3365
      return new KeySpliterator<>(cache);
×
3366
    }
3367
  }
3368

3369
  /** An adapter to safely externalize the key iterator. */
3370
  static final class KeyIterator<K, V> implements Iterator<K> {
3371
    final EntryIterator<K, V> iterator;
3372

3373
    KeyIterator(BoundedLocalCache<K, V> cache) {
×
3374
      this.iterator = new EntryIterator<>(cache);
×
3375
    }
×
3376

3377
    @Override
3378
    public boolean hasNext() {
3379
      return iterator.hasNext();
×
3380
    }
3381

3382
    @Override
3383
    public K next() {
3384
      return iterator.nextKey();
×
3385
    }
3386

3387
    @Override
3388
    public void remove() {
3389
      iterator.remove();
×
3390
    }
×
3391
  }
3392

3393
  /** An adapter to safely externalize the key spliterator. */
3394
  static final class KeySpliterator<K, V> implements Spliterator<K> {
3395
    final Spliterator<Node<K, V>> spliterator;
3396
    final BoundedLocalCache<K, V> cache;
3397

3398
    KeySpliterator(BoundedLocalCache<K, V> cache) {
3399
      this(cache, cache.data.values().spliterator());
×
3400
    }
×
3401

3402
    KeySpliterator(BoundedLocalCache<K, V> cache, Spliterator<Node<K, V>> spliterator) {
×
3403
      this.spliterator = requireNonNull(spliterator);
×
3404
      this.cache = requireNonNull(cache);
×
3405
    }
×
3406

3407
    @Override
3408
    public void forEachRemaining(Consumer<? super K> action) {
3409
      requireNonNull(action);
×
3410
      Consumer<Node<K, V>> consumer = node -> {
×
3411
        K key = node.getKey();
×
3412
        V value = node.getValue();
×
3413
        long now = cache.expirationTicker().read();
×
3414
        if ((key != null) && (value != null) && node.isAlive() && !cache.hasExpired(node, now)) {
×
3415
          action.accept(key);
×
3416
        }
3417
      };
×
3418
      spliterator.forEachRemaining(consumer);
×
3419
    }
×
3420

3421
    @Override
3422
    public boolean tryAdvance(Consumer<? super K> action) {
3423
      requireNonNull(action);
×
3424
      boolean[] advanced = { false };
×
3425
      Consumer<Node<K, V>> consumer = node -> {
×
3426
        K key = node.getKey();
×
3427
        V value = node.getValue();
×
3428
        long now = cache.expirationTicker().read();
×
3429
        if ((key != null) && (value != null) && node.isAlive() && !cache.hasExpired(node, now)) {
×
3430
          action.accept(key);
×
3431
          advanced[0] = true;
×
3432
        }
3433
      };
×
3434
      while (spliterator.tryAdvance(consumer)) {
×
3435
        if (advanced[0]) {
×
3436
          return true;
×
3437
        }
3438
      }
3439
      return false;
×
3440
    }
3441

3442
    @Override
3443
    public @Nullable Spliterator<K> trySplit() {
3444
      Spliterator<Node<K, V>> split = spliterator.trySplit();
×
3445
      return (split == null) ? null : new KeySpliterator<>(cache, split);
×
3446
    }
3447

3448
    @Override
3449
    public long estimateSize() {
3450
      return spliterator.estimateSize();
×
3451
    }
3452

3453
    @Override
3454
    public int characteristics() {
3455
      return DISTINCT | CONCURRENT | NONNULL;
×
3456
    }
3457
  }
3458

3459
  /** An adapter to safely externalize the values. */
  static final class ValuesView<K, V> extends AbstractCollection<V> {
    final BoundedLocalCache<K, V> cache;

    ValuesView(BoundedLocalCache<K, V> cache) {
      this.cache = requireNonNull(cache);
    }

    @Override
    public int size() {
      return cache.size();
    }

    @Override
    public void clear() {
      cache.clear();
    }

    @Override
    @SuppressWarnings("SuspiciousMethodCalls")
    public boolean contains(Object o) {
      return cache.containsValue(o);
    }

    @Override
    public boolean removeAll(Collection<?> collection) {
      requireNonNull(collection);
      @Var boolean modified = false;
      // EntryIterator.hasNext() populates iterator.key/value for the current live entry, and
      // advance() must be called to move past it; this protocol is relied on below
      for (var iterator = new EntryIterator<>(cache); iterator.hasNext();) {
        var key = requireNonNull(iterator.key);
        var value = requireNonNull(iterator.value);
        // remove(key, value) only succeeds when the mapping is still current, so a concurrent
        // replacement is not removed by mistake
        if (collection.contains(value) && cache.remove(key, value)) {
          modified = true;
        }
        iterator.advance();
      }
      return modified;
    }

    @Override
    public boolean remove(Object o) {
      if (o == null) {
        return false;
      }
      for (var iterator = new EntryIterator<>(cache); iterator.hasNext();) {
        var key = requireNonNull(iterator.key);
        var value = requireNonNull(iterator.value);
        // Remove at most one matching mapping, per the Collection.remove contract
        if (o.equals(value) && cache.remove(key, value)) {
          return true;
        }
        iterator.advance();
      }
      return false;
    }

    @Override
    public boolean removeIf(Predicate<? super V> filter) {
      requireNonNull(filter);
      @Var boolean modified = false;
      for (var iterator = new EntryIterator<>(cache); iterator.hasNext();) {
        var value = requireNonNull(iterator.value);
        if (filter.test(value)) {
          var key = requireNonNull(iterator.key);
          modified |= cache.remove(key, value);
        }
        iterator.advance();
      }
      return modified;
    }

    @Override
    public boolean retainAll(Collection<?> collection) {
      requireNonNull(collection);
      @Var boolean modified = false;
      for (var iterator = new EntryIterator<>(cache); iterator.hasNext();) {
        var key = requireNonNull(iterator.key);
        var value = requireNonNull(iterator.value);
        if (!collection.contains(value) && cache.remove(key, value)) {
          modified = true;
        }
        iterator.advance();
      }
      return modified;
    }

    @Override
    public Iterator<V> iterator() {
      return new ValueIterator<>(cache);
    }

    @Override
    public Spliterator<V> spliterator() {
      return new ValueSpliterator<>(cache);
    }
  }
3554

3555
  /** An adapter to safely externalize the value iterator. */
3556
  static final class ValueIterator<K, V> implements Iterator<V> {
3557
    final EntryIterator<K, V> iterator;
3558

3559
    ValueIterator(BoundedLocalCache<K, V> cache) {
×
3560
      this.iterator = new EntryIterator<>(cache);
×
3561
    }
×
3562

3563
    @Override
3564
    public boolean hasNext() {
3565
      return iterator.hasNext();
×
3566
    }
3567

3568
    @Override
3569
    public V next() {
3570
      return iterator.nextValue();
×
3571
    }
3572

3573
    @Override
3574
    public void remove() {
3575
      iterator.remove();
×
3576
    }
×
3577
  }
3578

3579
  /** An adapter to safely externalize the value spliterator. */
3580
  static final class ValueSpliterator<K, V> implements Spliterator<V> {
3581
    final Spliterator<Node<K, V>> spliterator;
3582
    final BoundedLocalCache<K, V> cache;
3583

3584
    ValueSpliterator(BoundedLocalCache<K, V> cache) {
3585
      this(cache, cache.data.values().spliterator());
×
3586
    }
×
3587

3588
    ValueSpliterator(BoundedLocalCache<K, V> cache, Spliterator<Node<K, V>> spliterator) {
×
3589
      this.spliterator = requireNonNull(spliterator);
×
3590
      this.cache = requireNonNull(cache);
×
3591
    }
×
3592

3593
    @Override
3594
    public void forEachRemaining(Consumer<? super V> action) {
3595
      requireNonNull(action);
×
3596
      Consumer<Node<K, V>> consumer = node -> {
×
3597
        K key = node.getKey();
×
3598
        V value = node.getValue();
×
3599
        long now = cache.expirationTicker().read();
×
3600
        if ((key != null) && (value != null) && node.isAlive() && !cache.hasExpired(node, now)) {
×
3601
          action.accept(value);
×
3602
        }
3603
      };
×
3604
      spliterator.forEachRemaining(consumer);
×
3605
    }
×
3606

3607
    @Override
3608
    public boolean tryAdvance(Consumer<? super V> action) {
3609
      requireNonNull(action);
×
3610
      boolean[] advanced = { false };
×
3611
      long now = cache.expirationTicker().read();
×
3612
      Consumer<Node<K, V>> consumer = node -> {
×
3613
        K key = node.getKey();
×
3614
        V value = node.getValue();
×
3615
        if ((key != null) && (value != null) && !cache.hasExpired(node, now) && node.isAlive()) {
×
3616
          action.accept(value);
×
3617
          advanced[0] = true;
×
3618
        }
3619
      };
×
3620
      while (spliterator.tryAdvance(consumer)) {
×
3621
        if (advanced[0]) {
×
3622
          return true;
×
3623
        }
3624
      }
3625
      return false;
×
3626
    }
3627

3628
    @Override
3629
    public @Nullable Spliterator<V> trySplit() {
3630
      Spliterator<Node<K, V>> split = spliterator.trySplit();
×
3631
      return (split == null) ? null : new ValueSpliterator<>(cache, split);
×
3632
    }
3633

3634
    @Override
3635
    public long estimateSize() {
3636
      return spliterator.estimateSize();
×
3637
    }
3638

3639
    @Override
3640
    public int characteristics() {
3641
      return CONCURRENT | NONNULL;
×
3642
    }
3643
  }
3644

3645
  /** An adapter to safely externalize the entries. */
3646
  static final class EntrySetView<K, V> extends AbstractSet<Entry<K, V>> {
3647
    final BoundedLocalCache<K, V> cache;
3648

3649
    EntrySetView(BoundedLocalCache<K, V> cache) {
×
3650
      this.cache = requireNonNull(cache);
×
3651
    }
×
3652

3653
    @Override
3654
    public int size() {
3655
      return cache.size();
×
3656
    }
3657

3658
    @Override
3659
    public void clear() {
3660
      cache.clear();
×
3661
    }
×
3662

3663
    @Override
3664
    public boolean contains(Object o) {
3665
      if (!(o instanceof Entry<?, ?>)) {
×
3666
        return false;
×
3667
      }
3668
      var entry = (Entry<?, ?>) o;
×
3669
      var key = entry.getKey();
×
3670
      var value = entry.getValue();
×
3671
      if ((key == null) || (value == null)) {
×
3672
        return false;
×
3673
      }
3674
      Node<K, V> node = cache.data.get(cache.nodeFactory.newLookupKey(key));
×
3675
      return (node != null) && node.containsValue(value);
×
3676
    }
3677

3678
    @Override
3679
    public boolean removeAll(Collection<?> collection) {
3680
      requireNonNull(collection);
×
3681
      @Var boolean modified = false;
×
3682
      if ((collection instanceof Set<?>) && (collection.size() > size())) {
×
3683
        for (var entry : this) {
×
3684
          if (collection.contains(entry)) {
×
3685
            modified |= remove(entry);
×
3686
          }
3687
        }
×
3688
      } else {
3689
        for (var item : collection) {
×
3690
          modified |= (item != null) && remove(item);
×
3691
        }
×
3692
      }
3693
      return modified;
×
3694
    }
3695

3696
    @Override
3697
    @SuppressWarnings("SuspiciousMethodCalls")
3698
    public boolean remove(Object o) {
3699
      if (!(o instanceof Entry<?, ?>)) {
×
3700
        return false;
×
3701
      }
3702
      var entry = (Entry<?, ?>) o;
×
3703
      var key = entry.getKey();
×
3704
      return (key != null) && cache.remove(key, entry.getValue());
×
3705
    }
3706

3707
    @Override
3708
    public boolean removeIf(Predicate<? super Entry<K, V>> filter) {
3709
      requireNonNull(filter);
×
3710
      @Var boolean modified = false;
×
3711
      for (Entry<K, V> entry : this) {
×
3712
        if (filter.test(entry)) {
×
3713
          modified |= cache.remove(entry.getKey(), entry.getValue());
×
3714
        }
3715
      }
×
3716
      return modified;
×
3717
    }
3718

3719
    @Override
3720
    public boolean retainAll(Collection<?> collection) {
3721
      requireNonNull(collection);
×
3722
      @Var boolean modified = false;
×
3723
      for (var entry : this) {
×
3724
        if (!collection.contains(entry) && remove(entry)) {
×
3725
          modified = true;
×
3726
        }
3727
      }
×
3728
      return modified;
×
3729
    }
3730

3731
    @Override
3732
    public Iterator<Entry<K, V>> iterator() {
3733
      return new EntryIterator<>(cache);
×
3734
    }
3735

3736
    @Override
3737
    public Spliterator<Entry<K, V>> spliterator() {
3738
      return new EntrySpliterator<>(cache);
×
3739
    }
3740
  }
3741

3742
  /** An adapter to safely externalize the entry iterator. */
  static final class EntryIterator<K, V> implements Iterator<Entry<K, V>> {
    final BoundedLocalCache<K, V> cache;
    final Iterator<Node<K, V>> iterator;

    // The current position's key/value, populated by hasNext() and cleared by advance()
    @Nullable K key;
    @Nullable V value;
    // The key of the last returned element, consumed by remove()
    @Nullable K removalKey;
    // The node staged by hasNext() that has not yet been returned
    @Nullable Node<K, V> next;

    EntryIterator(BoundedLocalCache<K, V> cache) {
      this.iterator = cache.data.values().iterator();
      this.cache = cache;
    }

    @Override
    public boolean hasNext() {
      // A previously staged element has not been consumed yet
      if (next != null) {
        return true;
      }

      long now = cache.expirationTicker().read();
      while (iterator.hasNext()) {
        next = iterator.next();
        value = next.getValue();
        key = next.getKey();

        // Skip entries that are collected or expired (evictable) or already retired/dead
        boolean evictable = (key == null) || (value == null) || cache.hasExpired(next, now);
        if (evictable || !next.isAlive()) {
          if (evictable) {
            // Trigger maintenance so the dead entry is actually removed
            cache.scheduleDrainBuffers();
          }
          advance();
          continue;
        }
        return true;
      }
      return false;
    }

    /** Invalidates the current position so that the iterator may compute the next position. */
    void advance() {
      value = null;
      next = null;
      key = null;
    }

    K nextKey() {
      if (!hasNext()) {
        throw new NoSuchElementException();
      }
      removalKey = key;
      advance();
      return requireNonNull(removalKey);
    }

    V nextValue() {
      if (!hasNext()) {
        throw new NoSuchElementException();
      }
      removalKey = key;
      // Capture the value before advance() clears the position
      V val = value;
      advance();
      return requireNonNull(val);
    }

    @Override
    public Entry<K, V> next() {
      if (!hasNext()) {
        throw new NoSuchElementException();
      }
      var entry = new WriteThroughEntry<>(cache, requireNonNull(key), requireNonNull(value));
      removalKey = key;
      advance();
      return entry;
    }

    @Override
    public void remove() {
      // Per the Iterator contract, remove() is only valid after a successful next*() call
      if (removalKey == null) {
        throw new IllegalStateException();
      }
      cache.remove(removalKey);
      removalKey = null;
    }
  }
3828

3829
  /** An adapter to safely externalize the entry spliterator. */
3830
  static final class EntrySpliterator<K, V> implements Spliterator<Entry<K, V>> {
3831
    final Spliterator<Node<K, V>> spliterator;
3832
    final BoundedLocalCache<K, V> cache;
3833

3834
    EntrySpliterator(BoundedLocalCache<K, V> cache) {
3835
      this(cache, cache.data.values().spliterator());
×
3836
    }
×
3837

3838
    EntrySpliterator(BoundedLocalCache<K, V> cache, Spliterator<Node<K, V>> spliterator) {
×
3839
      this.spliterator = requireNonNull(spliterator);
×
3840
      this.cache = requireNonNull(cache);
×
3841
    }
×
3842

3843
    @Override
3844
    public void forEachRemaining(Consumer<? super Entry<K, V>> action) {
3845
      requireNonNull(action);
×
3846
      Consumer<Node<K, V>> consumer = node -> {
×
3847
        K key = node.getKey();
×
3848
        V value = node.getValue();
×
3849
        long now = cache.expirationTicker().read();
×
3850
        if ((key != null) && (value != null) && node.isAlive() && !cache.hasExpired(node, now)) {
×
3851
          action.accept(new WriteThroughEntry<>(cache, key, value));
×
3852
        }
3853
      };
×
3854
      spliterator.forEachRemaining(consumer);
×
3855
    }
×
3856

3857
    @Override
3858
    public boolean tryAdvance(Consumer<? super Entry<K, V>> action) {
3859
      requireNonNull(action);
×
3860
      boolean[] advanced = { false };
×
3861
      Consumer<Node<K, V>> consumer = node -> {
×
3862
        K key = node.getKey();
×
3863
        V value = node.getValue();
×
3864
        long now = cache.expirationTicker().read();
×
3865
        if ((key != null) && (value != null) && node.isAlive() && !cache.hasExpired(node, now)) {
×
3866
          action.accept(new WriteThroughEntry<>(cache, key, value));
×
3867
          advanced[0] = true;
×
3868
        }
3869
      };
×
3870
      while (spliterator.tryAdvance(consumer)) {
×
3871
        if (advanced[0]) {
×
3872
          return true;
×
3873
        }
3874
      }
3875
      return false;
×
3876
    }
3877

3878
    @Override
3879
    public @Nullable Spliterator<Entry<K, V>> trySplit() {
3880
      Spliterator<Node<K, V>> split = spliterator.trySplit();
×
3881
      return (split == null) ? null : new EntrySpliterator<>(cache, split);
×
3882
    }
3883

3884
    @Override
3885
    public long estimateSize() {
3886
      return spliterator.estimateSize();
×
3887
    }
3888

3889
    @Override
3890
    public int characteristics() {
3891
      return DISTINCT | CONCURRENT | NONNULL;
×
3892
    }
3893
  }
3894

3895
  /** A reusable task that performs the maintenance work; used to avoid wrapping by ForkJoinPool. */
  static final class PerformCleanupTask extends ForkJoinTask<@Nullable Void> implements Runnable {
    private static final long serialVersionUID = 1L;

    // Weak so the task does not pin the cache in memory if the executor retains the task
    final WeakReference<BoundedLocalCache<?, ?>> reference;

    PerformCleanupTask(BoundedLocalCache<?, ?> cache) {
      reference = new WeakReference<>(cache);
    }

    @Override
    public boolean exec() {
      try {
        run();
      } catch (Throwable t) {
        logger.log(Level.ERROR, "Exception thrown when performing the maintenance task", t);
      }

      // Indicates that the task has not completed to allow subsequent submissions to execute
      return false;
    }

    @Override
    public void run() {
      // The cache may have been garbage collected; in that case there is nothing to clean up
      BoundedLocalCache<?, ?> cache = reference.get();
      if (cache != null) {
        cache.performCleanUp(/* ignored */ null);
      }
    }

    /**
     * This method cannot be ignored due to being final, so a hostile user supplied Executor could
     * forcibly complete the task and halt future executions. There are easier ways to intentionally
     * harm a system, so this is assumed to not happen in practice.
     */
    // public final void quietlyComplete() {}

    // Intentional no-op overrides: they prevent a user-supplied executor from marking this
    // reusable task as completed or cancelled, which would block future resubmissions
    @Override public void complete(@Nullable Void value) {}
    @Override public void setRawResult(@Nullable Void value) {}
    @Override public @Nullable Void getRawResult() { return null; }
    @Override public void completeExceptionally(@Nullable Throwable t) {}
    @Override public boolean cancel(boolean mayInterruptIfRunning) { return false; }
  }
3938

3939
  /** Creates a serialization proxy based on the common configuration shared by all cache types. */
3940
  static <K, V> SerializationProxy<K, V> makeSerializationProxy(BoundedLocalCache<?, ?> cache) {
3941
    var proxy = new SerializationProxy<K, V>();
×
3942
    proxy.weakKeys = cache.collectKeys();
×
3943
    proxy.weakValues = cache.nodeFactory.weakValues();
×
3944
    proxy.softValues = cache.nodeFactory.softValues();
×
3945
    proxy.isRecordingStats = cache.isRecordingStats();
×
3946
    proxy.evictionListener = cache.evictionListener;
×
3947
    proxy.removalListener = cache.removalListener();
×
3948
    proxy.ticker = cache.expirationTicker();
×
3949
    if (cache.expiresAfterAccess()) {
×
3950
      proxy.expiresAfterAccessNanos = cache.expiresAfterAccessNanos();
×
3951
    }
3952
    if (cache.expiresAfterWrite()) {
×
3953
      proxy.expiresAfterWriteNanos = cache.expiresAfterWriteNanos();
×
3954
    }
3955
    if (cache.expiresVariable()) {
×
3956
      proxy.expiry = cache.expiry();
×
3957
    }
3958
    if (cache.refreshAfterWrite()) {
×
3959
      proxy.refreshAfterWriteNanos = cache.refreshAfterWriteNanos();
×
3960
    }
3961
    if (cache.evicts()) {
×
3962
      if (cache.isWeighted) {
×
3963
        proxy.weigher = cache.weigher;
×
3964
        proxy.maximumWeight = cache.maximum();
×
3965
      } else {
3966
        proxy.maximumSize = cache.maximum();
×
3967
      }
3968
    }
3969
    proxy.cacheLoader = cache.cacheLoader;
×
3970
    proxy.async = cache.isAsync;
×
3971
    return proxy;
×
3972
  }
3973

3974
  /* --------------- Manual Cache --------------- */
3975

3976
  static class BoundedLocalManualCache<K, V> implements LocalManualCache<K, V>, Serializable {
3977
    private static final long serialVersionUID = 1;
3978

3979
    final BoundedLocalCache<K, V> cache;
3980

3981
    @Nullable Policy<K, V> policy;
3982

3983
    BoundedLocalManualCache(Caffeine<K, V> builder) {
3984
      this(builder, null);
×
3985
    }
×
3986

3987
    BoundedLocalManualCache(Caffeine<K, V> builder, @Nullable CacheLoader<? super K, V> loader) {
×
3988
      cache = LocalCacheFactory.newBoundedLocalCache(builder, loader, /* isAsync= */ false);
×
3989
    }
×
3990

3991
    @Override
3992
    public final BoundedLocalCache<K, V> cache() {
3993
      return cache;
×
3994
    }
3995

3996
    @Override
3997
    public final Policy<K, V> policy() {
3998
      if (policy == null) {
×
3999
        Function<@Nullable V, @Nullable V> identity = v -> v;
×
4000
        policy = new BoundedPolicy<>(cache, identity, cache.isWeighted);
×
4001
      }
4002
      return policy;
×
4003
    }
4004

4005
    private void readObject(ObjectInputStream stream) throws InvalidObjectException {
4006
      throw new InvalidObjectException("Proxy required");
×
4007
    }
4008

4009
    private Object writeReplace() {
4010
      return makeSerializationProxy(cache);
×
4011
    }
4012
  }
4013

4014
  @SuppressWarnings({"NullableOptional",
    "OptionalAssignedToNull", "OptionalUsedAsFieldOrParameterType"})
  /**
   * The policy inspection view of a bounded cache. Exposes the eviction, expiration, and refresh
   * settings, plus quiet read operations that do not mutate recency or statistics. The optional
   * views are memoized lazily; a benign race may construct a view twice.
   */
  static final class BoundedPolicy<K, V> implements Policy<K, V> {
    // Unwraps the stored value for the caller (identity for sync caches; future-unwrapping
    // for async caches — see the policy() factories in the cache view classes)
    final Function<@Nullable V, @Nullable V> transformer;
    final BoundedLocalCache<K, V> cache;
    final boolean isWeighted;

    @Nullable Optional<Eviction<K, V>> eviction;
    @Nullable Optional<FixedRefresh<K, V>> refreshes;
    @Nullable Optional<FixedExpiration<K, V>> afterWrite;
    @Nullable Optional<FixedExpiration<K, V>> afterAccess;
    @Nullable Optional<VarExpiration<K, V>> variable;

    BoundedPolicy(BoundedLocalCache<K, V> cache,
        Function<@Nullable V, @Nullable V> transformer, boolean isWeighted) {
      this.transformer = transformer;
      this.isWeighted = isWeighted;
      this.cache = cache;
    }

    @Override public boolean isRecordingStats() {
      return cache.isRecordingStats();
    }
    @Override public @Nullable V getIfPresentQuietly(K key) {
      return transformer.apply(cache.getIfPresentQuietly(key));
    }
    @Override public @Nullable CacheEntry<K, V> getEntryIfPresentQuietly(K key) {
      Node<K, V> node = cache.data.get(cache.nodeFactory.newLookupKey(key));
      return (node == null) ? null : cache.nodeToCacheEntry(node, transformer);
    }
    @SuppressWarnings("Java9CollectionFactory")
    @Override public Map<K, CompletableFuture<V>> refreshes() {
      // Returns an unmodifiable snapshot of the in-flight refreshes, keyed by the cache key
      var refreshes = cache.refreshes;
      if ((refreshes == null) || refreshes.isEmpty()) {
        @SuppressWarnings({"ImmutableMapOf", "RedundantUnmodifiable"})
        Map<K, CompletableFuture<V>> emptyMap = Collections.unmodifiableMap(Collections.emptyMap());
        return emptyMap;
      } else if (cache.collectKeys()) {
        // Keys are held by reference objects; resolve them and drop entries whose key was
        // reclaimed. An identity map mirrors the identity semantics of collected keys.
        var inFlight = new IdentityHashMap<K, CompletableFuture<V>>(refreshes.size());
        for (var entry : refreshes.entrySet()) {
          @SuppressWarnings("unchecked")
          var key = ((InternalReference<K>) entry.getKey()).get();
          @SuppressWarnings("unchecked")
          var future = (CompletableFuture<V>) entry.getValue();
          if (key != null) {
            inFlight.put(key, future);
          }
        }
        return Collections.unmodifiableMap(inFlight);
      }
      // Strong keys: a plain defensive copy suffices
      @SuppressWarnings("unchecked")
      var castedRefreshes = (Map<K, CompletableFuture<V>>) (Object) refreshes;
      return Collections.unmodifiableMap(new HashMap<>(castedRefreshes));
    }
    @Override public Optional<Eviction<K, V>> eviction() {
      // Lazily memoized; present only when a maximum size or weight is configured
      return cache.evicts()
          ? (eviction == null) ? (eviction = Optional.of(new BoundedEviction())) : eviction
          : Optional.empty();
    }
    @Override public Optional<FixedExpiration<K, V>> expireAfterAccess() {
      if (!cache.expiresAfterAccess()) {
        return Optional.empty();
      }
      return (afterAccess == null)
          ? (afterAccess = Optional.of(new BoundedExpireAfterAccess()))
          : afterAccess;
    }
    @Override public Optional<FixedExpiration<K, V>> expireAfterWrite() {
      if (!cache.expiresAfterWrite()) {
        return Optional.empty();
      }
      return (afterWrite == null)
          ? (afterWrite = Optional.of(new BoundedExpireAfterWrite()))
          : afterWrite;
    }
    @Override public Optional<VarExpiration<K, V>> expireVariably() {
      if (!cache.expiresVariable()) {
        return Optional.empty();
      }
      return (variable == null)
          ? (variable = Optional.of(new BoundedVarExpiration()))
          : variable;
    }
    @Override public Optional<FixedRefresh<K, V>> refreshAfterWrite() {
      if (!cache.refreshAfterWrite()) {
        return Optional.empty();
      }
      return (refreshes == null)
          ? (refreshes = Optional.of(new BoundedRefreshAfterWrite()))
          : refreshes;
    }

    /** The eviction policy view: weights, the maximum bound, and hot/cold orderings. */
    final class BoundedEviction implements Eviction<K, V> {
      @Override public boolean isWeighted() {
        return isWeighted;
      }
      @Override public OptionalInt weightOf(K key) {
        requireNonNull(key);
        if (!isWeighted) {
          return OptionalInt.empty();
        }
        Node<K, V> node = cache.data.get(cache.nodeFactory.newLookupKey(key));
        if ((node == null) || cache.hasExpired(node, cache.expirationTicker().read())) {
          return OptionalInt.empty();
        }
        // The node's lock guards its weight while checking that it was not retired/removed
        synchronized (node) {
          return node.isAlive() ? OptionalInt.of(node.getWeight()) : OptionalInt.empty();
        }
      }
      @Override public OptionalLong weightedSize() {
        // Clamped at zero: the counter may be transiently negative under concurrent updates —
        // presumably due to racy pending adjustments; TODO confirm
        return isWeighted
            ? OptionalLong.of(Math.max(0, cache.weightedSizeAcquire()))
            : OptionalLong.empty();
      }
      @Override public long getMaximum() {
        return cache.maximumAcquire();
      }
      @Override public void setMaximum(long maximum) {
        // Resize and immediately run maintenance under the eviction lock so the new bound is
        // enforced before returning
        cache.evictionLock.lock();
        try {
          cache.setMaximumSize(maximum);
          cache.maintenance(/* ignored */ null);
        } finally {
          cache.evictionLock.unlock();
          cache.rescheduleCleanUpIfIncomplete();
        }
      }
      @Override public Map<K, V> coldest(int limit) {
        int expectedSize = Math.min(limit, cache.size());
        var limiter = new SizeLimiter<K, V>(expectedSize, limit);
        return cache.evictionOrder(/* hottest= */ false, transformer, limiter);
      }
      @Override public Map<K, V> coldestWeighted(long weightLimit) {
        // An unweighted cache treats each entry as weight 1, so a size limit is equivalent
        var limiter = isWeighted()
            ? new WeightLimiter<K, V>(weightLimit)
            : new SizeLimiter<K, V>((int) Math.min(weightLimit, cache.size()), weightLimit);
        return cache.evictionOrder(/* hottest= */ false, transformer, limiter);
      }
      @Override
      public <T> T coldest(Function<Stream<CacheEntry<K, V>>, T> mappingFunction) {
        requireNonNull(mappingFunction);
        return cache.evictionOrder(/* hottest= */ false, transformer, mappingFunction);
      }
      @Override public Map<K, V> hottest(int limit) {
        int expectedSize = Math.min(limit, cache.size());
        var limiter = new SizeLimiter<K, V>(expectedSize, limit);
        return cache.evictionOrder(/* hottest= */ true, transformer, limiter);
      }
      @Override public Map<K, V> hottestWeighted(long weightLimit) {
        var limiter = isWeighted()
            ? new WeightLimiter<K, V>(weightLimit)
            : new SizeLimiter<K, V>((int) Math.min(weightLimit, cache.size()), weightLimit);
        return cache.evictionOrder(/* hottest= */ true, transformer, limiter);
      }
      @Override
      public <T> T hottest(Function<Stream<CacheEntry<K, V>>, T> mappingFunction) {
        requireNonNull(mappingFunction);
        return cache.evictionOrder(/* hottest= */ true, transformer, mappingFunction);
      }
    }

    /** The fixed expire-after-access policy view. */
    @SuppressWarnings("PreferJavaTimeOverload")
    final class BoundedExpireAfterAccess implements FixedExpiration<K, V> {
      @Override public OptionalLong ageOf(K key, TimeUnit unit) {
        requireNonNull(key);
        requireNonNull(unit);
        Object lookupKey = cache.nodeFactory.newLookupKey(key);
        Node<K, V> node = cache.data.get(lookupKey);
        if (node == null) {
          return OptionalLong.empty();
        }
        long now = cache.expirationTicker().read();
        // Age is measured from the last access; expired entries report empty
        return cache.hasExpired(node, now)
            ? OptionalLong.empty()
            : OptionalLong.of(unit.convert(now - node.getAccessTime(), TimeUnit.NANOSECONDS));
      }
      @Override public long getExpiresAfter(TimeUnit unit) {
        return unit.convert(cache.expiresAfterAccessNanos(), TimeUnit.NANOSECONDS);
      }
      @Override public void setExpiresAfter(long duration, TimeUnit unit) {
        requireArgument(duration >= 0);
        cache.setExpiresAfterAccessNanos(unit.toNanos(duration));
        // Schedule maintenance so entries past the new duration are expired promptly
        cache.scheduleAfterWrite();
      }
      @Override public Map<K, V> oldest(int limit) {
        return oldest(new SizeLimiter<>(Math.min(limit, cache.size()), limit));
      }
      @Override public <T> T oldest(Function<Stream<CacheEntry<K, V>>, T> mappingFunction) {
        return cache.expireAfterAccessOrder(/* oldest= */ true, transformer, mappingFunction);
      }
      @Override public Map<K, V> youngest(int limit) {
        return youngest(new SizeLimiter<>(Math.min(limit, cache.size()), limit));
      }
      @Override public <T> T youngest(Function<Stream<CacheEntry<K, V>>, T> mappingFunction) {
        return cache.expireAfterAccessOrder(/* oldest= */ false, transformer, mappingFunction);
      }
    }

    /** The fixed expire-after-write policy view. */
    @SuppressWarnings("PreferJavaTimeOverload")
    final class BoundedExpireAfterWrite implements FixedExpiration<K, V> {
      @Override public OptionalLong ageOf(K key, TimeUnit unit) {
        requireNonNull(key);
        requireNonNull(unit);
        Object lookupKey = cache.nodeFactory.newLookupKey(key);
        Node<K, V> node = cache.data.get(lookupKey);
        if (node == null) {
          return OptionalLong.empty();
        }
        long now = cache.expirationTicker().read();
        // Age is measured from the last write; expired entries report empty
        return cache.hasExpired(node, now)
            ? OptionalLong.empty()
            : OptionalLong.of(unit.convert(now - node.getWriteTime(), TimeUnit.NANOSECONDS));
      }
      @Override public long getExpiresAfter(TimeUnit unit) {
        return unit.convert(cache.expiresAfterWriteNanos(), TimeUnit.NANOSECONDS);
      }
      @Override public void setExpiresAfter(long duration, TimeUnit unit) {
        requireArgument(duration >= 0);
        cache.setExpiresAfterWriteNanos(unit.toNanos(duration));
        cache.scheduleAfterWrite();
      }
      @Override public Map<K, V> oldest(int limit) {
        return oldest(new SizeLimiter<>(Math.min(limit, cache.size()), limit));
      }
      @SuppressWarnings("GuardedByChecker")
      @Override public <T> T oldest(Function<Stream<CacheEntry<K, V>>, T> mappingFunction) {
        return cache.snapshot(cache.writeOrderDeque(), transformer, mappingFunction);
      }
      @Override public Map<K, V> youngest(int limit) {
        return youngest(new SizeLimiter<>(Math.min(limit, cache.size()), limit));
      }
      @SuppressWarnings("GuardedByChecker")
      @Override public <T> T youngest(Function<Stream<CacheEntry<K, V>>, T> mappingFunction) {
        return cache.snapshot(cache.writeOrderDeque()::descendingIterator,
            transformer, mappingFunction);
      }
    }

    /** The variable (per-entry) expiration policy view. */
    @SuppressWarnings("PreferJavaTimeOverload")
    final class BoundedVarExpiration implements VarExpiration<K, V> {
      @Override public OptionalLong getExpiresAfter(K key, TimeUnit unit) {
        requireNonNull(key);
        requireNonNull(unit);
        Object lookupKey = cache.nodeFactory.newLookupKey(key);
        Node<K, V> node = cache.data.get(lookupKey);
        if (node == null) {
          return OptionalLong.empty();
        }
        long now = cache.expirationTicker().read();
        // Remaining lifetime rather than age: variable time minus the current time
        return cache.hasExpired(node, now)
            ? OptionalLong.empty()
            : OptionalLong.of(unit.convert(node.getVariableTime() - now, TimeUnit.NANOSECONDS));
      }
      @Override public void setExpiresAfter(K key, long duration, TimeUnit unit) {
        requireNonNull(key);
        requireNonNull(unit);
        requireArgument(duration >= 0);
        Object lookupKey = cache.nodeFactory.newLookupKey(key);
        Node<K, V> node = cache.data.get(lookupKey);
        if (node != null) {
          long now;
          long durationNanos = TimeUnit.NANOSECONDS.convert(duration, unit);
          synchronized (node) {
            now = cache.expirationTicker().read();
            // Skip entries whose future is still computing or that have already expired
            if (cache.isComputingAsync(node.getValue()) || cache.hasExpired(node, now)) {
              return;
            }
            node.setVariableTime(now + Math.min(durationNanos, MAXIMUM_EXPIRY));
          }
          // Reschedule the entry in the timer wheel without counting it as a hit
          cache.afterRead(node, now, /* recordHit= */ false);
        }
      }
      @Override public @Nullable V put(K key, V value, long duration, TimeUnit unit) {
        requireNonNull(unit);
        requireNonNull(value);
        requireArgument(duration >= 0);
        return cache.isAsync
            ? putAsync(key, value, duration, unit)
            : putSync(key, value, duration, unit, /* onlyIfAbsent= */ false);
      }
      @Override public @Nullable V putIfAbsent(K key, V value, long duration, TimeUnit unit) {
        requireNonNull(unit);
        requireNonNull(value);
        requireArgument(duration >= 0);
        return cache.isAsync
            ? putIfAbsentAsync(key, value, duration, unit)
            : putSync(key, value, duration, unit, /* onlyIfAbsent= */ true);
      }
      // Synchronous insert with a one-off fixed duration acting as the entry's expiry
      @Nullable V putSync(K key, V value, long duration, TimeUnit unit, boolean onlyIfAbsent) {
        var expiry = new FixedExpireAfterWrite<K, V>(duration, unit);
        return cache.put(key, value, expiry, onlyIfAbsent);
      }
      @SuppressWarnings("unchecked")
      @Nullable V putIfAbsentAsync(K key, V value, long duration, TimeUnit unit) {
        // Keep in sync with LocalAsyncCache.AsMapView#putIfAbsent(key, value)
        var expiry = (Expiry<K, V>) new AsyncExpiry<>(new FixedExpireAfterWrite<>(duration, unit));
        var asyncValue = (V) CompletableFuture.completedFuture(value);

        // Retry loop: waits out any in-flight future for the key, then attempts the insert
        for (;;) {
          var priorFuture = (CompletableFuture<V>) cache.getIfPresent(
              key, /* recordStats= */ false);
          if (priorFuture != null) {
            if (!priorFuture.isDone()) {
              // Block until the pending computation settles, then re-check
              Async.getWhenSuccessful(priorFuture);
              continue;
            }

            V prior = Async.getWhenSuccessful(priorFuture);
            if (prior != null) {
              return prior;
            }
          }

          // Install the value only if absent or if the prior future resolved to null/failure
          boolean[] added = { false };
          var computed = (CompletableFuture<V>) cache.compute(key, (k, oldValue) -> {
            var oldValueFuture = (CompletableFuture<V>) oldValue;
            added[0] = (oldValueFuture == null)
                || (oldValueFuture.isDone() && (Async.getIfReady(oldValueFuture) == null));
            return added[0] ? asyncValue : oldValue;
          }, expiry, /* recordLoad= */ false, /* recordLoadFailure= */ false);

          if (added[0]) {
            return null;
          } else {
            V prior = Async.getWhenSuccessful(computed);
            if (prior != null) {
              return prior;
            }
            // Lost a race with a failing/removed future; try again
          }
        }
      }
      @SuppressWarnings("unchecked")
      @Nullable V putAsync(K key, V value, long duration, TimeUnit unit) {
        var expiry = (Expiry<K, V>) new AsyncExpiry<>(new FixedExpireAfterWrite<>(duration, unit));
        var asyncValue = (V) CompletableFuture.completedFuture(value);

        var oldValueFuture = (CompletableFuture<V>) cache.put(
            key, asyncValue, expiry, /* onlyIfAbsent= */ false);
        return Async.getWhenSuccessful(oldValueFuture);
      }
      @Override public @Nullable V compute(K key,
          BiFunction<? super K, ? super V, ? extends V> remappingFunction,
          Duration duration) {
        requireNonNull(key);
        requireNonNull(duration);
        requireNonNull(remappingFunction);
        requireArgument(!duration.isNegative(), "duration cannot be negative: %s", duration);
        var expiry = new FixedExpireAfterWrite<K, V>(
            toNanosSaturated(duration), TimeUnit.NANOSECONDS);

        return cache.isAsync
            ? computeAsync(key, remappingFunction, expiry)
            : cache.compute(key, remappingFunction, expiry,
                /* recordLoad= */ true, /* recordLoadFailure= */ true);
      }
      @Nullable V computeAsync(K key,
          BiFunction<? super K, ? super V, ? extends V> remappingFunction,
          Expiry<? super K, ? super V> expiry) {
        // Keep in sync with LocalAsyncCache.AsMapView#compute(key, remappingFunction)
        @SuppressWarnings("unchecked")
        var delegate = (LocalCache<K, CompletableFuture<V>>) cache;

        // Single-element array lets the lambda communicate the computed value outward
        @SuppressWarnings({"rawtypes", "unchecked"})
        var newValue = (V[]) new Object[1];
        for (;;) {
          // Wait for any in-flight computation to settle before attempting the remap
          Async.getWhenSuccessful(delegate.getIfPresentQuietly(key));

          CompletableFuture<V> valueFuture = delegate.compute(key, (k, oldValueFuture) -> {
            if ((oldValueFuture != null) && !oldValueFuture.isDone()) {
              // Still computing: leave the mapping unchanged and retry in the outer loop
              return oldValueFuture;
            }

            V oldValue = Async.getIfReady(oldValueFuture);
            BiFunction<? super K, ? super V, ? extends V> function = delegate.statsAware(
                remappingFunction, /* recordLoad= */ true, /* recordLoadFailure= */ true);
            newValue[0] = function.apply(key, oldValue);
            return (newValue[0] == null) ? null : CompletableFuture.completedFuture(newValue[0]);
          }, new AsyncExpiry<>(expiry), /* recordLoad= */ false, /* recordLoadFailure= */ false);

          if (newValue[0] != null) {
            return newValue[0];
          } else if (valueFuture == null) {
            return null;
          }
          // Otherwise the remap was skipped due to an in-flight future; loop and retry
        }
      }
      @Override public Map<K, V> oldest(int limit) {
        return oldest(new SizeLimiter<>(Math.min(limit, cache.size()), limit));
      }
      @Override public <T> T oldest(Function<Stream<CacheEntry<K, V>>, T> mappingFunction) {
        return cache.snapshot(cache.timerWheel(), transformer, mappingFunction);
      }
      @Override public Map<K, V> youngest(int limit) {
        return youngest(new SizeLimiter<>(Math.min(limit, cache.size()), limit));
      }
      @Override public <T> T youngest(Function<Stream<CacheEntry<K, V>>, T> mappingFunction) {
        return cache.snapshot(cache.timerWheel()::descendingIterator, transformer, mappingFunction);
      }
    }

    /**
     * An expiry that applies a fixed duration on create and update, while leaving the remaining
     * duration untouched on read. Used to emulate expire-after-write for a single operation.
     */
    static final class FixedExpireAfterWrite<K, V> implements Expiry<K, V> {
      final long duration;
      final TimeUnit unit;

      FixedExpireAfterWrite(long duration, TimeUnit unit) {
        this.duration = duration;
        this.unit = unit;
      }
      @Override public long expireAfterCreate(K key, V value, long currentTime) {
        return unit.toNanos(duration);
      }
      @Override public long expireAfterUpdate(
          K key, V value, long currentTime, long currentDuration) {
        return unit.toNanos(duration);
      }
      @CanIgnoreReturnValue
      @Override public long expireAfterRead(
          K key, V value, long currentTime, long currentDuration) {
        // Reads do not extend or shorten the entry's lifetime
        return currentDuration;
      }
    }

    /** The fixed refresh-after-write policy view. */
    @SuppressWarnings("PreferJavaTimeOverload")
    final class BoundedRefreshAfterWrite implements FixedRefresh<K, V> {
      @Override public OptionalLong ageOf(K key, TimeUnit unit) {
        requireNonNull(key);
        requireNonNull(unit);
        Object lookupKey = cache.nodeFactory.newLookupKey(key);
        Node<K, V> node = cache.data.get(lookupKey);
        if (node == null) {
          return OptionalLong.empty();
        }
        long now = cache.expirationTicker().read();
        return cache.hasExpired(node, now)
            ? OptionalLong.empty()
            : OptionalLong.of(unit.convert(now - node.getWriteTime(), TimeUnit.NANOSECONDS));
      }
      @Override public long getRefreshesAfter(TimeUnit unit) {
        return unit.convert(cache.refreshAfterWriteNanos(), TimeUnit.NANOSECONDS);
      }
      @Override public void setRefreshesAfter(long duration, TimeUnit unit) {
        requireArgument(duration >= 0);
        cache.setRefreshAfterWriteNanos(unit.toNanos(duration));
        cache.scheduleAfterWrite();
      }
    }
  }
4461

4462
  /* --------------- Loading Cache --------------- */
4463

4464
  static final class BoundedLocalLoadingCache<K, V>
4465
      extends BoundedLocalManualCache<K, V> implements LocalLoadingCache<K, V> {
4466
    private static final long serialVersionUID = 1;
4467

4468
    final Function<K, @Nullable V> mappingFunction;
4469
    final @Nullable Function<Set<? extends K>, Map<K, V>> bulkMappingFunction;
4470

4471
    BoundedLocalLoadingCache(Caffeine<K, V> builder, CacheLoader<? super K, V> loader) {
4472
      super(builder, loader);
×
4473
      requireNonNull(loader);
×
4474
      mappingFunction = newMappingFunction(loader);
×
4475
      bulkMappingFunction = newBulkMappingFunction(loader);
×
4476
    }
×
4477

4478
    @Override
4479
    @SuppressWarnings("NullAway")
4480
    public AsyncCacheLoader<? super K, V> cacheLoader() {
4481
      return cache.cacheLoader;
×
4482
    }
4483

4484
    @Override
4485
    public Function<K, @Nullable V> mappingFunction() {
4486
      return mappingFunction;
×
4487
    }
4488

4489
    @Override
4490
    public @Nullable Function<Set<? extends K>, Map<K, V>> bulkMappingFunction() {
4491
      return bulkMappingFunction;
×
4492
    }
4493

4494
    private void readObject(ObjectInputStream stream) throws InvalidObjectException {
4495
      throw new InvalidObjectException("Proxy required");
×
4496
    }
4497

4498
    private Object writeReplace() {
4499
      return makeSerializationProxy(cache);
×
4500
    }
4501
  }
4502

4503
  /* --------------- Async Cache --------------- */
4504

4505
  static final class BoundedLocalAsyncCache<K, V> implements LocalAsyncCache<K, V>, Serializable {
4506
    private static final long serialVersionUID = 1;
4507

4508
    final BoundedLocalCache<K, CompletableFuture<V>> cache;
4509
    final boolean isWeighted;
4510

4511
    @Nullable ConcurrentMap<K, CompletableFuture<V>> mapView;
4512
    @Nullable CacheView<K, V> cacheView;
4513
    @Nullable Policy<K, V> policy;
4514

4515
    @SuppressWarnings("unchecked")
4516
    BoundedLocalAsyncCache(Caffeine<K, V> builder) {
×
4517
      cache = (BoundedLocalCache<K, CompletableFuture<V>>) LocalCacheFactory
×
4518
          .newBoundedLocalCache(builder, /* cacheLoader= */ null, /* isAsync= */ true);
×
4519
      isWeighted = builder.isWeighted();
×
4520
    }
×
4521

4522
    @Override
4523
    public BoundedLocalCache<K, CompletableFuture<V>> cache() {
4524
      return cache;
×
4525
    }
4526

4527
    @Override
4528
    public ConcurrentMap<K, CompletableFuture<V>> asMap() {
4529
      return (mapView == null) ? (mapView = new AsyncAsMapView<>(this)) : mapView;
×
4530
    }
4531

4532
    @Override
4533
    public Cache<K, V> synchronous() {
4534
      return (cacheView == null) ? (cacheView = new CacheView<>(this)) : cacheView;
×
4535
    }
4536

4537
    @Override
4538
    public Policy<K, V> policy() {
4539
      if (policy == null) {
×
4540
        @SuppressWarnings("unchecked")
4541
        var castCache = (BoundedLocalCache<K, V>) cache;
×
4542
        Function<CompletableFuture<V>, @Nullable V> transformer = Async::getIfReady;
×
4543
        @SuppressWarnings("unchecked")
4544
        var castTransformer = (Function<@Nullable V, @Nullable V>) transformer;
×
4545
        policy = new BoundedPolicy<>(castCache, castTransformer, isWeighted);
×
4546
      }
4547
      return policy;
×
4548
    }
4549

4550
    private void readObject(ObjectInputStream stream) throws InvalidObjectException {
4551
      throw new InvalidObjectException("Proxy required");
×
4552
    }
4553

4554
    private Object writeReplace() {
4555
      return makeSerializationProxy(cache);
×
4556
    }
4557
  }
4558

4559
  /* --------------- Async Loading Cache --------------- */
4560

4561
  static final class BoundedLocalAsyncLoadingCache<K, V>
4562
      extends LocalAsyncLoadingCache<K, V> implements Serializable {
4563
    private static final long serialVersionUID = 1;
4564

4565
    final BoundedLocalCache<K, CompletableFuture<V>> cache;
4566
    final boolean isWeighted;
4567

4568
    @Nullable ConcurrentMap<K, CompletableFuture<V>> mapView;
4569
    @Nullable Policy<K, V> policy;
4570

4571
    @SuppressWarnings("unchecked")
4572
    BoundedLocalAsyncLoadingCache(Caffeine<K, V> builder, AsyncCacheLoader<? super K, V> loader) {
4573
      super(loader);
×
4574
      isWeighted = builder.isWeighted();
×
4575
      cache = (BoundedLocalCache<K, CompletableFuture<V>>) LocalCacheFactory
×
4576
          .newBoundedLocalCache(builder, loader, /* isAsync= */ true);
×
4577
    }
×
4578

4579
    @Override
4580
    public BoundedLocalCache<K, CompletableFuture<V>> cache() {
4581
      return cache;
×
4582
    }
4583

4584
    @Override
4585
    public ConcurrentMap<K, CompletableFuture<V>> asMap() {
4586
      return (mapView == null) ? (mapView = new AsyncAsMapView<>(this)) : mapView;
×
4587
    }
4588

4589
    @Override
4590
    public Policy<K, V> policy() {
4591
      if (policy == null) {
×
4592
        @SuppressWarnings("unchecked")
4593
        var castCache = (BoundedLocalCache<K, V>) cache;
×
4594
        Function<CompletableFuture<V>, @Nullable V> transformer = Async::getIfReady;
×
4595
        @SuppressWarnings("unchecked")
4596
        var castTransformer = (Function<@Nullable V, @Nullable V>) transformer;
×
4597
        policy = new BoundedPolicy<>(castCache, castTransformer, isWeighted);
×
4598
      }
4599
      return policy;
×
4600
    }
4601

4602
    private void readObject(ObjectInputStream stream) throws InvalidObjectException {
4603
      throw new InvalidObjectException("Proxy required");
×
4604
    }
4605

4606
    private Object writeReplace() {
4607
      return makeSerializationProxy(cache);
×
4608
    }
4609
  }
4610
}
4611

4612
/** The namespace for field padding through inheritance. */
@SuppressWarnings({"MemberName", "MultiVariableDeclaration"})
final class BLCHeader {

  private BLCHeader() {}

  /**
   * 120 bytes of padding fields (p000..p119) that precede {@code drainStatus} in
   * subclasses, separating it from fields of unrelated classes in memory.
   */
  static class PadDrainStatus {
    byte p000, p001, p002, p003, p004, p005, p006, p007;
    byte p008, p009, p010, p011, p012, p013, p014, p015;
    byte p016, p017, p018, p019, p020, p021, p022, p023;
    byte p024, p025, p026, p027, p028, p029, p030, p031;
    byte p032, p033, p034, p035, p036, p037, p038, p039;
    byte p040, p041, p042, p043, p044, p045, p046, p047;
    byte p048, p049, p050, p051, p052, p053, p054, p055;
    byte p056, p057, p058, p059, p060, p061, p062, p063;
    byte p064, p065, p066, p067, p068, p069, p070, p071;
    byte p072, p073, p074, p075, p076, p077, p078, p079;
    byte p080, p081, p082, p083, p084, p085, p086, p087;
    byte p088, p089, p090, p091, p092, p093, p094, p095;
    byte p096, p097, p098, p099, p100, p101, p102, p103;
    byte p104, p105, p106, p107, p108, p109, p110, p111;
    byte p112, p113, p114, p115, p116, p117, p118, p119;
  }

  /** Enforces a memory layout to avoid false sharing by padding the drain status. */
  abstract static class DrainStatusRef extends PadDrainStatus {
    // VarHandle access enables the opaque/acquire/release/CAS operations below,
    // which a plain volatile field alone cannot express.
    static final VarHandle DRAIN_STATUS = findVarHandle(
        DrainStatusRef.class, "drainStatus", int.class);

    /** A drain is not taking place. */
    static final int IDLE = 0;
    /** A drain is required due to a pending write modification. */
    static final int REQUIRED = 1;
    /** A drain is in progress and will transition to idle. */
    static final int PROCESSING_TO_IDLE = 2;
    /** A drain is in progress and will transition to required. */
    static final int PROCESSING_TO_REQUIRED = 3;

    /** The draining status of the buffers. */
    volatile int drainStatus = IDLE;

    /**
     * Returns whether maintenance work is needed.
     *
     * @param delayable if draining the read buffer can be delayed
     */
    @SuppressWarnings("StatementSwitchToExpressionSwitch")
    boolean shouldDrainBuffers(boolean delayable) {
      switch (drainStatusOpaque()) {
        case IDLE:
          // nothing pending; drain only if the caller cannot defer
          return !delayable;
        case REQUIRED:
          return true;
        case PROCESSING_TO_IDLE:
        case PROCESSING_TO_REQUIRED:
          // a drain is already in progress
          return false;
        default:
          throw new IllegalStateException("Invalid drain status: " + drainStatus);
      }
    }

    /** Reads the drain status with opaque memory semantics. */
    int drainStatusOpaque() {
      return (int) DRAIN_STATUS.getOpaque(this);
    }

    /** Reads the drain status with acquire memory semantics. */
    int drainStatusAcquire() {
      return (int) DRAIN_STATUS.getAcquire(this);
    }

    /** Writes the drain status with opaque memory semantics. */
    void setDrainStatusOpaque(int drainStatus) {
      DRAIN_STATUS.setOpaque(this, drainStatus);
    }

    /** Writes the drain status with release memory semantics. */
    void setDrainStatusRelease(int drainStatus) {
      DRAIN_STATUS.setRelease(this, drainStatus);
    }

    /** Atomically transitions the drain status from {@code expect} to {@code update}. */
    boolean casDrainStatus(int expect, int update) {
      return DRAIN_STATUS.compareAndSet(this, expect, update);
    }

    /**
     * Resolves a {@link VarHandle} for the named instance field.
     *
     * @throws ExceptionInInitializerError if the field cannot be found, failing
     *     class initialization fast rather than deferring the error
     */
    static VarHandle findVarHandle(Class<?> recv, String name, Class<?> type) {
      try {
        return MethodHandles.lookup().findVarHandle(recv, name, type);
      } catch (ReflectiveOperationException e) {
        throw new ExceptionInInitializerError(e);
      }
    }
  }
}
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc