• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

ben-manes / caffeine / #3896

pending completion
#3896

push

github-actions

ben-manes
upgrade jamm library (memory meter)

7542 of 7616 relevant lines covered (99.03%)

0.99 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

99.3
/caffeine/src/main/java/com/github/benmanes/caffeine/cache/BoundedLocalCache.java
1
/*
2
 * Copyright 2014 Ben Manes. All Rights Reserved.
3
 *
4
 * Licensed under the Apache License, Version 2.0 (the "License");
5
 * you may not use this file except in compliance with the License.
6
 * You may obtain a copy of the License at
7
 *
8
 *     http://www.apache.org/licenses/LICENSE-2.0
9
 *
10
 * Unless required by applicable law or agreed to in writing, software
11
 * distributed under the License is distributed on an "AS IS" BASIS,
12
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
 * See the License for the specific language governing permissions and
14
 * limitations under the License.
15
 */
16
package com.github.benmanes.caffeine.cache;
17

18
import static com.github.benmanes.caffeine.cache.Async.ASYNC_EXPIRY;
19
import static com.github.benmanes.caffeine.cache.Caffeine.calculateHashMapCapacity;
20
import static com.github.benmanes.caffeine.cache.Caffeine.ceilingPowerOfTwo;
21
import static com.github.benmanes.caffeine.cache.Caffeine.requireArgument;
22
import static com.github.benmanes.caffeine.cache.Caffeine.saturatedToNanos;
23
import static com.github.benmanes.caffeine.cache.LocalLoadingCache.newBulkMappingFunction;
24
import static com.github.benmanes.caffeine.cache.LocalLoadingCache.newMappingFunction;
25
import static com.github.benmanes.caffeine.cache.Node.PROBATION;
26
import static com.github.benmanes.caffeine.cache.Node.PROTECTED;
27
import static com.github.benmanes.caffeine.cache.Node.WINDOW;
28
import static java.util.Locale.US;
29
import static java.util.Objects.requireNonNull;
30
import static java.util.Spliterator.DISTINCT;
31
import static java.util.Spliterator.IMMUTABLE;
32
import static java.util.Spliterator.NONNULL;
33
import static java.util.Spliterator.ORDERED;
34
import static java.util.function.Function.identity;
35

36
import java.io.InvalidObjectException;
37
import java.io.ObjectInputStream;
38
import java.io.Serializable;
39
import java.lang.System.Logger;
40
import java.lang.System.Logger.Level;
41
import java.lang.invoke.MethodHandles;
42
import java.lang.invoke.VarHandle;
43
import java.lang.ref.Reference;
44
import java.lang.ref.ReferenceQueue;
45
import java.lang.ref.WeakReference;
46
import java.time.Duration;
47
import java.util.AbstractCollection;
48
import java.util.AbstractSet;
49
import java.util.ArrayDeque;
50
import java.util.Collection;
51
import java.util.Collections;
52
import java.util.Comparator;
53
import java.util.Deque;
54
import java.util.HashMap;
55
import java.util.IdentityHashMap;
56
import java.util.Iterator;
57
import java.util.LinkedHashMap;
58
import java.util.Map;
59
import java.util.NoSuchElementException;
60
import java.util.Objects;
61
import java.util.Optional;
62
import java.util.OptionalInt;
63
import java.util.OptionalLong;
64
import java.util.Set;
65
import java.util.Spliterator;
66
import java.util.Spliterators;
67
import java.util.concurrent.CancellationException;
68
import java.util.concurrent.CompletableFuture;
69
import java.util.concurrent.ConcurrentHashMap;
70
import java.util.concurrent.ConcurrentMap;
71
import java.util.concurrent.Executor;
72
import java.util.concurrent.ForkJoinPool;
73
import java.util.concurrent.ForkJoinTask;
74
import java.util.concurrent.ThreadLocalRandom;
75
import java.util.concurrent.TimeUnit;
76
import java.util.concurrent.TimeoutException;
77
import java.util.concurrent.locks.ReentrantLock;
78
import java.util.function.BiConsumer;
79
import java.util.function.BiFunction;
80
import java.util.function.Consumer;
81
import java.util.function.Function;
82
import java.util.function.Predicate;
83
import java.util.stream.Stream;
84
import java.util.stream.StreamSupport;
85

86
import org.checkerframework.checker.nullness.qual.Nullable;
87

88
import com.github.benmanes.caffeine.cache.Async.AsyncExpiry;
89
import com.github.benmanes.caffeine.cache.LinkedDeque.PeekingIterator;
90
import com.github.benmanes.caffeine.cache.Policy.CacheEntry;
91
import com.github.benmanes.caffeine.cache.References.InternalReference;
92
import com.github.benmanes.caffeine.cache.stats.StatsCounter;
93
import com.google.errorprone.annotations.CanIgnoreReturnValue;
94
import com.google.errorprone.annotations.concurrent.GuardedBy;
95

96
/**
97
 * An in-memory cache implementation that supports full concurrency of retrievals, a high expected
98
 * concurrency for updates, and multiple ways to bound the cache.
99
 * <p>
100
 * This class is abstract and code generated subclasses provide the complete implementation for a
101
 * particular configuration. This is to ensure that only the fields and execution paths necessary
102
 * for a given configuration are used.
103
 *
104
 * @author ben.manes@gmail.com (Ben Manes)
105
 * @param <K> the type of keys maintained by this cache
106
 * @param <V> the type of mapped values
107
 */
108
@SuppressWarnings("serial")
109
abstract class BoundedLocalCache<K, V> extends BLCHeader.DrainStatusRef
110
    implements LocalCache<K, V> {
111

112
  /*
113
   * This class performs a best-effort bounding of a ConcurrentHashMap using a page-replacement
114
   * algorithm to determine which entries to evict when the capacity is exceeded.
115
   *
116
   * Concurrency:
117
   * ------------
118
   * The page replacement algorithms are kept eventually consistent with the map. An update to the
119
   * map and recording of reads may not be immediately reflected on the policy's data structures.
120
   * These structures are guarded by a lock and operations are applied in batches to avoid lock
121
   * contention. The penalty of applying the batches is spread across threads so that the amortized
122
   * cost is slightly higher than performing just the ConcurrentHashMap operation [1].
123
   *
124
   * A memento of the reads and writes that were performed on the map are recorded in buffers. These
125
   * buffers are drained at the first opportunity after a write or when a read buffer is full. The
126
   * reads are offered to a buffer that will reject additions if contended on or if it is full. Due
127
   * to the concurrent nature of the read and write operations a strict policy ordering is not
128
   * possible, but may be observably strict when single threaded. The buffers are drained
129
   * asynchronously to minimize the request latency and uses a state machine to determine when to
130
   * schedule this work on an executor.
131
   *
132
   * Due to a lack of a strict ordering guarantee, a task can be executed out-of-order, such as a
133
   * removal followed by its addition. The state of the entry is encoded using the key field to
134
   * avoid additional memory. An entry is "alive" if it is in both the hash table and the page
135
   * replacement policy. It is "retired" if it is not in the hash table and is pending removal from
136
   * the page replacement policy. Finally, an entry transitions to the "dead" state when it is not
137
   * in the hash table nor the page replacement policy. Both the retired and dead states are
138
   * represented by a sentinel key that should not be used for map operations.
139
   *
140
   * Eviction:
141
   * ---------
142
   * Maximum size is implemented using the Window TinyLfu policy [2] due to its high hit rate, O(1)
143
   * time complexity, and small footprint. A new entry starts in the admission window and remains
144
   * there as long as it has high temporal locality (recency). Eventually an entry will slip from
145
   * the window into the main space. If the main space is already full, then a historic frequency
146
   * filter determines whether to evict the newly admitted entry or the victim entry chosen by the
147
   * eviction policy. This process ensures that the entries in the window were very recently used
148
   * and entries in the main space are accessed very frequently and are at least moderately recent.
149
   * The windowing allows the policy to have a high hit rate when entries exhibit a bursty access
150
   * pattern while the filter ensures that popular items are retained. The admission window uses
151
   * LRU and the main space uses Segmented LRU.
152
   *
153
   * The optimal size of the window vs main spaces is workload dependent [3]. A large admission
154
   * window is favored by recency-biased workloads while a small one favors frequency-biased
155
   * workloads. When the window is too small then recent arrivals are prematurely evicted, but when
156
   * too large then they pollute the cache and force the eviction of more popular entries. The
157
   * configuration is dynamically determined using hill climbing to walk the hit rate curve. This is
158
   * done by sampling the hit rate and adjusting the window size in the direction that is improving
159
   * (making positive or negative steps). At each interval the step size is decreased until the
160
   * climber converges at the optimal setting. The process is restarted when the hit rate changes
161
   * over a threshold, indicating that the workload altered and a new setting may be required.
162
   *
163
   * The historic usage is retained in a compact popularity sketch, which uses hashing to
164
   * probabilistically estimate an item's frequency. This exposes a flaw where an adversary could
165
   * use hash flooding [4] to artificially raise the frequency of the main space's victim and cause
166
   * all candidates to be rejected. In the worst case, by exploiting hash collisions an attacker
167
   * could cause the cache to never hit and hold only worthless items, resulting in a
168
   * denial-of-service attack against the underlying resource. This is protected against by
169
   * introducing jitter so that candidates which are at least moderately popular have a small,
170
   * random chance of being admitted. This causes the victim to be evicted, but in a way that
171
   * marginally impacts the hit rate.
172
   *
173
   * Expiration:
174
   * -----------
175
   * Expiration is implemented in O(1) time complexity. The time-to-idle policy uses an access-order
176
   * queue, the time-to-live policy uses a write-order queue, and variable expiration uses a
177
   * hierarchical timer wheel [5]. The queuing policies allow for peeking at the oldest entry to
178
 * see if it has expired and, if it has not, then the younger entries must not have expired
 * either. If a
179
   * maximum size is set then expiration will share the queues so that the per-entry footprint is
180
   * minimized. The timer wheel based policy uses hashing and cascading in a manner that amortizes
181
   * the penalty of sorting to achieve a similar algorithmic cost.
182
   *
183
   * The expiration updates are applied in a best effort fashion. The reordering of variable or
184
   * access-order expiration may be discarded by the read buffer if full or contended on. Similarly,
185
   * the reordering of write expiration may be ignored for an entry if the last update was within a
186
   * short time window in order to avoid overwhelming the write buffer.
187
   *
188
   * [1] BP-Wrapper: A Framework Making Any Replacement Algorithms (Almost) Lock Contention Free
189
   * http://web.cse.ohio-state.edu/hpcs/WWW/HTML/publications/papers/TR-09-1.pdf
190
   * [2] TinyLFU: A Highly Efficient Cache Admission Policy
191
   * https://dl.acm.org/citation.cfm?id=3149371
192
   * [3] Adaptive Software Cache Management
193
   * https://dl.acm.org/citation.cfm?id=3274816
194
   * [4] Denial of Service via Algorithmic Complexity Attack
195
   * https://www.usenix.org/legacy/events/sec03/tech/full_papers/crosby/crosby.pdf
196
   * [5] Hashed and Hierarchical Timing Wheels
197
   * http://www.cs.columbia.edu/~nahum/w6998/papers/ton97-timing-wheels.pdf
198
   */
199

200
  static final Logger logger = System.getLogger(BoundedLocalCache.class.getName());
1✔
201

202
  /** The number of CPUs */
203
  static final int NCPU = Runtime.getRuntime().availableProcessors();
1✔
204
  /** The initial capacity of the write buffer. */
205
  static final int WRITE_BUFFER_MIN = 4;
206
  /** The maximum capacity of the write buffer. */
207
  static final int WRITE_BUFFER_MAX = 128 * ceilingPowerOfTwo(NCPU);
1✔
208
  /** The number of attempts to insert into the write buffer before yielding. */
209
  static final int WRITE_BUFFER_RETRIES = 100;
210
  /** The maximum weighted capacity of the map. */
211
  static final long MAXIMUM_CAPACITY = Long.MAX_VALUE - Integer.MAX_VALUE;
212
  /** The initial percent of the maximum weighted capacity dedicated to the main space. */
213
  static final double PERCENT_MAIN = 0.99d;
214
  /** The percent of the maximum weighted capacity dedicated to the main's protected space. */
215
  static final double PERCENT_MAIN_PROTECTED = 0.80d;
216
  /** The difference in hit rates that restarts the climber. */
217
  static final double HILL_CLIMBER_RESTART_THRESHOLD = 0.05d;
218
  /** The percent of the total size to adapt the window by. */
219
  static final double HILL_CLIMBER_STEP_PERCENT = 0.0625d;
220
  /** The rate to decrease the step size to adapt by. */
221
  static final double HILL_CLIMBER_STEP_DECAY_RATE = 0.98d;
222
  /** The minimum popularity for allowing randomized admission. */
223
  static final int ADMIT_HASHDOS_THRESHOLD = 6;
224
  /** The maximum number of entries that can be transferred between queues. */
225
  static final int QUEUE_TRANSFER_THRESHOLD = 1_000;
226
  /** The maximum time window between entry updates before the expiration must be reordered. */
227
  static final long EXPIRE_WRITE_TOLERANCE = TimeUnit.SECONDS.toNanos(1);
1✔
228
  /** The maximum duration before an entry expires. */
229
  static final long MAXIMUM_EXPIRY = (Long.MAX_VALUE >> 1); // 150 years
230
  /** The duration to wait on the eviction lock before warning of a possible misuse. */
231
  static final long WARN_AFTER_LOCK_WAIT_NANOS = TimeUnit.SECONDS.toNanos(30);
1✔
232
  /** The number of retries before computing to validate the entry's integrity; pow2 modulus. */
233
  static final int MAX_PUT_SPIN_WAIT_ATTEMPTS = 1024 - 1;
234
  /** The handle for the in-flight refresh operations. */
235
  static final VarHandle REFRESHES;
236

237
  final @Nullable RemovalListener<K, V> evictionListener;
238
  final @Nullable AsyncCacheLoader<K, V> cacheLoader;
239

240
  final MpscGrowableArrayQueue<Runnable> writeBuffer;
241
  final ConcurrentHashMap<Object, Node<K, V>> data;
242
  final PerformCleanupTask drainBuffersTask;
243
  final Consumer<Node<K, V>> accessPolicy;
244
  final Buffer<Node<K, V>> readBuffer;
245
  final NodeFactory<K, V> nodeFactory;
246
  final ReentrantLock evictionLock;
247
  final Weigher<K, V> weigher;
248
  final Executor executor;
249

250
  final boolean isWeighted;
251
  final boolean isAsync;
252

253
  @Nullable Set<K> keySet;
254
  @Nullable Collection<V> values;
255
  @Nullable Set<Entry<K, V>> entrySet;
256
  @Nullable volatile ConcurrentMap<Object, CompletableFuture<?>> refreshes;
257

258
  /**
   * Creates an instance based on the builder's configuration.
   *
   * @param builder the configuration that supplies the executor, weigher, listeners, and bounds
   * @param cacheLoader the loader used to compute absent values, or null if not a loading cache
   * @param isAsync whether the cache holds {@code CompletableFuture} values
   */
  @SuppressWarnings("GuardedBy")
  protected BoundedLocalCache(Caffeine<K, V> builder,
      @Nullable AsyncCacheLoader<K, V> cacheLoader, boolean isAsync) {
    this.isAsync = isAsync;
    this.cacheLoader = cacheLoader;
    executor = builder.getExecutor();
    isWeighted = builder.isWeighted();
    evictionLock = new ReentrantLock();
    weigher = builder.getWeigher(isAsync);
    drainBuffersTask = new PerformCleanupTask(this);
    nodeFactory = NodeFactory.newFactory(builder, isAsync);
    evictionListener = builder.getEvictionListener(isAsync);
    data = new ConcurrentHashMap<>(builder.getInitialCapacity());
    // A read buffer is only needed when reads influence a policy (size eviction, reference
    // collection, or access-order expiration); otherwise reads are not recorded at all
    readBuffer = evicts() || collectKeys() || collectValues() || expiresAfterAccess()
        ? new BoundedBuffer<>()
        : Buffer.disabled();
    accessPolicy = (evicts() || expiresAfterAccess()) ? this::onAccess : e -> {};
    writeBuffer = new MpscGrowableArrayQueue<>(WRITE_BUFFER_MIN, WRITE_BUFFER_MAX);

    if (evicts()) {
      setMaximumSize(builder.getMaximum());
    }
  }
282

283
  static {
    // Resolve the VarHandle used for the atomic lazy publication of the "refreshes" map
    try {
      REFRESHES = MethodHandles.lookup()
          .findVarHandle(BoundedLocalCache.class, "refreshes", ConcurrentMap.class);
    } catch (ReflectiveOperationException e) {
      // A lookup failure is unrecoverable and indicates a broken class definition
      throw new ExceptionInInitializerError(e);
    }
  }
291

292
  /**
   * Ensures that the node is alive during the map operation.
   *
   * @param key the key used in the map operation
   * @param node the node resolved for that key
   * @throws IllegalStateException if the node is not alive, which indicates that the key's
   *         equals/hashCode was mutated while the entry resided in the cache
   */
  void requireIsAlive(Object key, Node<?, ?> node) {
    if (!node.isAlive()) {
      throw new IllegalStateException(brokenEqualityMessage(key, node));
    }
  }
298

299
  /**
   * Logs if the node cannot be found in the map but is still alive. A live node that is absent
   * from the map indicates that the key's equals/hashCode was mutated while it resided in the
   * cache, so the error is logged with a stack trace for diagnosis.
   */
  void logIfAlive(Node<?, ?> node) {
    if (node.isAlive()) {
      String message = brokenEqualityMessage(node.getKeyReference(), node);
      // The exception is created solely to capture the caller's stack trace in the log
      logger.log(Level.ERROR, message, new IllegalStateException());
    }
  }
306

307
  /** Returns the formatted broken equality error message for the given key and node. */
  String brokenEqualityMessage(Object key, Node<?, ?> node) {
    return String.format(US, "An invalid state was detected that occurs if the key's equals or "
        + "hashCode was modified while it resided in the cache. This violation of the Map  "
        + "contract can lead to non-deterministic behavior (key: %s, key type: %s, "
        + "node type: %s, cache type: %s).", key, key.getClass().getName(),
        node.getClass().getSimpleName(), getClass().getSimpleName());
  }
315

316
  /* --------------- Shared --------------- */

  /** Returns whether this cache holds {@code CompletableFuture} values. */
  @Override
  public boolean isAsync() {
    return isAsync;
  }

  /** Returns if the node's value is currently being computed asynchronously. */
  final boolean isComputingAsync(Node<?, ?> node) {
    // In async mode the value is a CompletableFuture, which is "computing" until it is ready
    return isAsync && !Async.isReady((CompletableFuture<?>) node.getValue());
  }
327

328
  // Policy ordering deques: overridden by the code generated subclasses whose configuration
  // maintains the corresponding ordering; otherwise these base versions are never reachable.

  /** Returns the access-order deque for the window space; unsupported unless configured. */
  @GuardedBy("evictionLock")
  protected AccessOrderDeque<Node<K, V>> accessOrderWindowDeque() {
    throw new UnsupportedOperationException();
  }

  /** Returns the access-order deque for the main probation space; unsupported unless configured. */
  @GuardedBy("evictionLock")
  protected AccessOrderDeque<Node<K, V>> accessOrderProbationDeque() {
    throw new UnsupportedOperationException();
  }

  /** Returns the access-order deque for the main protected space; unsupported unless configured. */
  @GuardedBy("evictionLock")
  protected AccessOrderDeque<Node<K, V>> accessOrderProtectedDeque() {
    throw new UnsupportedOperationException();
  }

  /** Returns the write-order deque; unsupported unless configured. */
  @GuardedBy("evictionLock")
  protected WriteOrderDeque<Node<K, V>> writeOrderDeque() {
    throw new UnsupportedOperationException();
  }
347

348
  /** Returns the executor used for asynchronous work (maintenance, notifications). */
  @Override
  public final Executor executor() {
    return executor;
  }

  /**
   * Returns the map of in-flight refresh operations, lazily creating it on first use. The map is
   * published with a compare-and-set so concurrent callers agree on a single instance.
   */
  @Override
  @SuppressWarnings("NullAway")
  public ConcurrentMap<Object, CompletableFuture<?>> refreshes() {
    var pending = refreshes;
    if (pending == null) {
      pending = new ConcurrentHashMap<>();
      // If the CAS loses a race then adopt the winner's map instead of the local candidate
      if (!REFRESHES.compareAndSet(this, null, pending)) {
        pending = refreshes;
      }
    }
    return pending;
  }

  /** Invalidate the in-flight refresh. */
  void discardRefresh(Object keyReference) {
    var pending = refreshes;
    // The containsKey pre-check skips the remove's write path in the common absent case;
    // any race with a concurrent insert is benign since removal is best-effort
    if ((pending != null) && pending.containsKey(keyReference)) {
      pending.remove(keyReference);
    }
  }
373

374
  /** Returns the key reference used to look up this key in the backing map. */
  @Override
  public Object referenceKey(K key) {
    return nodeFactory.newLookupKey(key);
  }

  /**
   * Returns whether the entry is present but no longer usable: either its value was collected
   * (null) or the entry has expired, so it is awaiting removal by maintenance.
   */
  @Override
  public boolean isPendingEviction(K key) {
    Node<K, V> node = data.get(nodeFactory.newLookupKey(key));
    return (node != null)
        && ((node.getValue() == null) || hasExpired(node, expirationTicker().read()));
  }
385

386
  /* --------------- Stats Support --------------- */

  // Overridden by the code generated subclasses when statistics recording is enabled

  @Override
  public boolean isRecordingStats() {
    return false;
  }

  @Override
  public StatsCounter statsCounter() {
    return StatsCounter.disabledStatsCounter();
  }

  @Override
  public Ticker statsTicker() {
    return Ticker.disabledTicker();
  }
402

403
  /* --------------- Removal Listener Support --------------- */

  // Overridden by the code generated subclasses when a removal listener is configured

  @SuppressWarnings("NullAway")
  protected RemovalListener<K, V> removalListener() {
    return null;
  }

  protected boolean hasRemovalListener() {
    return false;
  }

  /**
   * Notifies the removal listener asynchronously on the configured executor. If the executor
   * rejects the task then the notification is delivered synchronously so that it is not lost.
   */
  @Override
  public void notifyRemoval(@Nullable K key, @Nullable V value, RemovalCause cause) {
    if (!hasRemovalListener()) {
      return;
    }
    Runnable task = () -> {
      try {
        removalListener().onRemoval(key, value, cause);
      } catch (Throwable t) {
        // A misbehaving listener must not disrupt the cache's operation
        logger.log(Level.WARNING, "Exception thrown by removal listener", t);
      }
    };
    try {
      executor.execute(task);
    } catch (Throwable t) {
      logger.log(Level.ERROR, "Exception thrown when submitting removal listener", t);
      // Fall back to the caller's thread so the notification is still delivered
      task.run();
    }
  }
433

434
  /* --------------- Eviction Listener Support --------------- */

  /**
   * Notifies the eviction listener synchronously on the calling thread; a listener failure is
   * logged rather than propagated so it cannot disrupt the cache's operation.
   */
  void notifyEviction(@Nullable K key, @Nullable V value, RemovalCause cause) {
    if (evictionListener == null) {
      return;
    }
    try {
      evictionListener.onRemoval(key, value, cause);
    } catch (Throwable t) {
      logger.log(Level.WARNING, "Exception thrown by eviction listener", t);
    }
  }
446

447
  /* --------------- Reference Support --------------- */

  // Overridden by the code generated subclasses when weak/soft references are configured

  /** Returns if the keys are weak reference garbage collected. */
  protected boolean collectKeys() {
    return false;
  }

  /** Returns if the values are weak or soft reference garbage collected. */
  protected boolean collectValues() {
    return false;
  }

  /** Returns the queue of collected key references, or null when keys are strongly held. */
  @SuppressWarnings("NullAway")
  protected ReferenceQueue<K> keyReferenceQueue() {
    return null;
  }

  /** Returns the queue of collected value references, or null when values are strongly held. */
  @SuppressWarnings("NullAway")
  protected ReferenceQueue<V> valueReferenceQueue() {
    return null;
  }
468

469
  /* --------------- Expiration Support --------------- */

  // The accessors below are overridden by the code generated subclasses whose configuration
  // enables the corresponding expiration policy; the base versions mean "not configured".

  /** Returns the {@link Pacer} used to schedule the maintenance task. */
  protected @Nullable Pacer pacer() {
    return null;
  }

  /** Returns if the cache expires entries after a variable time threshold. */
  protected boolean expiresVariable() {
    return false;
  }

  /** Returns if the cache expires entries after an access time threshold. */
  protected boolean expiresAfterAccess() {
    return false;
  }

  /** Returns how long after the last access to an entry the map will retain that entry. */
  protected long expiresAfterAccessNanos() {
    throw new UnsupportedOperationException();
  }

  /** Sets how long after the last access to an entry the map will retain that entry. */
  protected void setExpiresAfterAccessNanos(long expireAfterAccessNanos) {
    throw new UnsupportedOperationException();
  }

  /** Returns if the cache expires entries after a write time threshold. */
  protected boolean expiresAfterWrite() {
    return false;
  }

  /** Returns how long after the last write to an entry the map will retain that entry. */
  protected long expiresAfterWriteNanos() {
    throw new UnsupportedOperationException();
  }

  /** Sets how long after the last write to an entry the map will retain that entry. */
  protected void setExpiresAfterWriteNanos(long expireAfterWriteNanos) {
    throw new UnsupportedOperationException();
  }

  /** Returns if the cache refreshes entries after a write time threshold. */
  protected boolean refreshAfterWrite() {
    return false;
  }

  /** Returns how long after the last write an entry becomes a candidate for refresh. */
  protected long refreshAfterWriteNanos() {
    throw new UnsupportedOperationException();
  }

  /** Sets how long after the last write an entry becomes a candidate for refresh. */
  protected void setRefreshAfterWriteNanos(long refreshAfterWriteNanos) {
    throw new UnsupportedOperationException();
  }

  /** Returns the custom {@link Expiry}, or null when variable expiration is not configured. */
  @Override
  @SuppressWarnings("NullAway")
  public Expiry<K, V> expiry() {
    return null;
  }

  /** Returns the {@link Ticker} used by this cache for expiration. */
  public Ticker expirationTicker() {
    return Ticker.disabledTicker();
  }

  /** Returns the timer wheel for variable expiration; unsupported unless configured. */
  protected TimerWheel<K, V> timerWheel() {
    throw new UnsupportedOperationException();
  }
537

538
  /* --------------- Eviction Support --------------- */

  // Overridden by the code generated subclasses when a maximum size or weight is configured;
  // the base versions indicate that eviction is not enabled.

  /** Returns if the cache evicts entries due to a maximum size or weight threshold. */
  protected boolean evicts() {
    return false;
  }

  /** Returns if entries may be assigned different weights. */
  protected boolean isWeighted() {
    return (weigher != Weigher.singletonWeigher());
  }

  /** Returns the popularity sketch used for admission decisions; unsupported unless evicting. */
  protected FrequencySketch<K> frequencySketch() {
    throw new UnsupportedOperationException();
  }

  /** Returns if an access to an entry can skip notifying the eviction policy. */
  protected boolean fastpath() {
    return false;
  }

  /** Returns the maximum weighted size. */
  protected long maximum() {
    throw new UnsupportedOperationException();
  }

  /** Returns the maximum weighted size of the window space. */
  protected long windowMaximum() {
    throw new UnsupportedOperationException();
  }

  /** Returns the maximum weighted size of the main's protected space. */
  protected long mainProtectedMaximum() {
    throw new UnsupportedOperationException();
  }

  /** Sets the maximum weighted size. */
  @GuardedBy("evictionLock")
  protected void setMaximum(long maximum) {
    throw new UnsupportedOperationException();
  }

  /** Sets the maximum weighted size of the window space. */
  @GuardedBy("evictionLock")
  protected void setWindowMaximum(long maximum) {
    throw new UnsupportedOperationException();
  }

  /** Sets the maximum weighted size of the main's protected space. */
  @GuardedBy("evictionLock")
  protected void setMainProtectedMaximum(long maximum) {
    throw new UnsupportedOperationException();
  }

  /** Returns the combined weight of the values in the cache (may be negative). */
  protected long weightedSize() {
    throw new UnsupportedOperationException();
  }

  /** Returns the uncorrected combined weight of the values in the window space. */
  protected long windowWeightedSize() {
    throw new UnsupportedOperationException();
  }

  /** Returns the uncorrected combined weight of the values in the main's protected space. */
  protected long mainProtectedWeightedSize() {
    throw new UnsupportedOperationException();
  }

  /** Sets the combined weight of the values in the cache. */
  @GuardedBy("evictionLock")
  protected void setWeightedSize(long weightedSize) {
    throw new UnsupportedOperationException();
  }

  /** Sets the combined weight of the values in the window space. */
  @GuardedBy("evictionLock")
  protected void setWindowWeightedSize(long weightedSize) {
    throw new UnsupportedOperationException();
  }

  /** Sets the combined weight of the values in the main's protected space. */
  @GuardedBy("evictionLock")
  protected void setMainProtectedWeightedSize(long weightedSize) {
    throw new UnsupportedOperationException();
  }

  // Hill-climber state (adaptive window sizing); see the "Eviction" section of the class
  // comment for how the hit/miss samples and step size drive the adaptation.

  /** Returns the number of hits in the current adaptation sample. */
  protected int hitsInSample() {
    throw new UnsupportedOperationException();
  }

  /** Returns the number of misses in the current adaptation sample. */
  protected int missesInSample() {
    throw new UnsupportedOperationException();
  }

  /** Returns the size of the current adaptation sample. */
  protected int sampleCount() {
    throw new UnsupportedOperationException();
  }

  /** Returns the amount to adapt the window size by. */
  protected double stepSize() {
    throw new UnsupportedOperationException();
  }

  /** Returns the hit rate observed in the previous adaptation sample. */
  protected double previousSampleHitRate() {
    throw new UnsupportedOperationException();
  }

  /** Returns the pending adjustment to the window size. */
  protected long adjustment() {
    throw new UnsupportedOperationException();
  }

  /** Sets the number of hits in the current adaptation sample. */
  @GuardedBy("evictionLock")
  protected void setHitsInSample(int hitCount) {
    throw new UnsupportedOperationException();
  }

  /** Sets the number of misses in the current adaptation sample. */
  @GuardedBy("evictionLock")
  protected void setMissesInSample(int missCount) {
    throw new UnsupportedOperationException();
  }

  /** Sets the size of the current adaptation sample. */
  @GuardedBy("evictionLock")
  protected void setSampleCount(int sampleCount) {
    throw new UnsupportedOperationException();
  }

  /** Sets the amount to adapt the window size by. */
  @GuardedBy("evictionLock")
  protected void setStepSize(double stepSize) {
    throw new UnsupportedOperationException();
  }

  /** Sets the hit rate observed in the previous adaptation sample. */
  @GuardedBy("evictionLock")
  protected void setPreviousSampleHitRate(double hitRate) {
    throw new UnsupportedOperationException();
  }

  /** Sets the pending adjustment to the window size. */
  @GuardedBy("evictionLock")
  protected void setAdjustment(long amount) {
    throw new UnsupportedOperationException();
  }
672

673
  /**
   * Sets the maximum weighted size of the cache. The caller may need to perform a maintenance cycle
   * to eagerly evict entries until the cache shrinks to the appropriate size.
   *
   * @param maximum the new maximum weighted size; clamped to {@code MAXIMUM_CAPACITY}
   */
  @GuardedBy("evictionLock")
  void setMaximumSize(long maximum) {
    requireArgument(maximum >= 0, "maximum must not be negative");
    if (maximum == maximum()) {
      return;
    }

    long max = Math.min(maximum, MAXIMUM_CAPACITY);
    // Partition the capacity: the admission window gets (1 - PERCENT_MAIN) of the total, and
    // the protected region gets PERCENT_MAIN_PROTECTED of the remaining main space
    long window = max - (long) (PERCENT_MAIN * max);
    long mainProtected = (long) (PERCENT_MAIN_PROTECTED * (max - window));

    setMaximum(max);
    setWindowMaximum(window);
    setMainProtectedMaximum(mainProtected);

    // Reset the hill climber's sample so adaptation restarts from the new configuration
    setHitsInSample(0);
    setMissesInSample(0);
    setStepSize(-HILL_CLIMBER_STEP_PERCENT * max);

    if ((frequencySketch() != null) && !isWeighted() && (weightedSize() >= (max >>> 1))) {
      // Lazily initialize when close to the maximum size
      frequencySketch().ensureCapacity(max);
    }
  }
701

702
  /** Evicts entries if the cache exceeds the maximum. */
  @GuardedBy("evictionLock")
  void evictEntries() {
    if (!evicts()) {
      return;
    }
    // Demote overflow from the window first; the first demoted node seeds the candidate scan
    // used when evicting from the main space
    var candidate = evictFromWindow();
    evictFromMain(candidate);
  }
711

712
  /**
   * Evicts entries from the window space into the main space while the window size exceeds a
   * maximum. Demoted entries are moved to the MRU position of the probation queue.
   *
   * @return the first candidate promoted into the probation space
   */
  @GuardedBy("evictionLock")
  @Nullable Node<K, V> evictFromWindow() {
    Node<K, V> first = null;
    Node<K, V> node = accessOrderWindowDeque().peekFirst();
    while (windowWeightedSize() > windowMaximum()) {
      // The pending operations will adjust the size to reflect the correct weight
      if (node == null) {
        break;
      }

      // Capture the successor before unlinking the node from the window deque
      Node<K, V> next = node.getNextInAccessOrder();
      // Zero-weight entries are skipped since they do not contribute to the window's size
      if (node.getPolicyWeight() != 0) {
        node.makeMainProbation();
        accessOrderWindowDeque().remove(node);
        accessOrderProbationDeque().offerLast(node);
        if (first == null) {
          first = node;
        }

        setWindowWeightedSize(windowWeightedSize() - node.getPolicyWeight());
      }
      node = next;
    }

    return first;
  }
744

745
  /**
   * Evicts entries from the main space if the cache exceeds the maximum capacity. The main space
   * determines whether admitting an entry (coming from the window space) is preferable to retaining
   * the eviction policy's victim. This decision is made using a frequency filter so that the
   * least frequently used entry is removed.
   * <p>
   * The window space's candidates were previously promoted to the probation space at its MRU
   * position and the eviction policy's victim starts at the LRU position. The candidates are
   * evaluated in promotion order while an eviction is required, and if exhausted then additional
   * entries are retrieved from the window space. Likewise, if the victim selection exhausts the
   * probation space then additional entries are retrieved from the protected space. The queues are
   * consumed in LRU order and the evicted entry is the one with a lower relative frequency, where
   * the preference is to retain the main space's victims versus the window space's candidates on a
   * tie.
   *
   * @param candidate the first candidate promoted into the probation space
   */
  @GuardedBy("evictionLock")
  void evictFromMain(@Nullable Node<K, V> candidate) {
    int victimQueue = PROBATION;
    int candidateQueue = PROBATION;
    Node<K, V> victim = accessOrderProbationDeque().peekFirst();
    while (weightedSize() > maximum()) {
      // Search the admission window for additional candidates
      if ((candidate == null) && (candidateQueue == PROBATION)) {
        candidate = accessOrderWindowDeque().peekFirst();
        candidateQueue = WINDOW;
      }

      // Try evicting from the protected and window queues
      if ((candidate == null) && (victim == null)) {
        if (victimQueue == PROBATION) {
          victim = accessOrderProtectedDeque().peekFirst();
          victimQueue = PROTECTED;
          continue;
        } else if (victimQueue == PROTECTED) {
          victim = accessOrderWindowDeque().peekFirst();
          victimQueue = WINDOW;
          continue;
        }

        // The pending operations will adjust the size to reflect the correct weight
        break;
      }

      // Skip over entries with zero weight
      if ((victim != null) && (victim.getPolicyWeight() == 0)) {
        victim = victim.getNextInAccessOrder();
        continue;
      } else if ((candidate != null) && (candidate.getPolicyWeight() == 0)) {
        candidate = candidate.getNextInAccessOrder();
        continue;
      }

      // Evict immediately if only one of the entries is present
      if (victim == null) {
        // Advance the candidate cursor before evicting, as eviction unlinks the node
        @SuppressWarnings("NullAway")
        Node<K, V> previous = candidate.getNextInAccessOrder();
        Node<K, V> evict = candidate;
        candidate = previous;
        evictEntry(evict, RemovalCause.SIZE, 0L);
        continue;
      } else if (candidate == null) {
        Node<K, V> evict = victim;
        victim = victim.getNextInAccessOrder();
        evictEntry(evict, RemovalCause.SIZE, 0L);
        continue;
      }

      // Evict immediately if both selected the same entry
      if (candidate == victim) {
        victim = victim.getNextInAccessOrder();
        evictEntry(candidate, RemovalCause.SIZE, 0L);
        candidate = null;
        continue;
      }

      // Evict immediately if an entry was collected (its weak/soft key reference was cleared)
      K victimKey = victim.getKey();
      K candidateKey = candidate.getKey();
      if (victimKey == null) {
        Node<K, V> evict = victim;
        victim = victim.getNextInAccessOrder();
        evictEntry(evict, RemovalCause.COLLECTED, 0L);
        continue;
      } else if (candidateKey == null) {
        Node<K, V> evict = candidate;
        candidate = candidate.getNextInAccessOrder();
        evictEntry(evict, RemovalCause.COLLECTED, 0L);
        continue;
      }

      // Evict immediately if an entry was removed
      if (!victim.isAlive()) {
        Node<K, V> evict = victim;
        victim = victim.getNextInAccessOrder();
        evictEntry(evict, RemovalCause.SIZE, 0L);
        continue;
      } else if (!candidate.isAlive()) {
        Node<K, V> evict = candidate;
        candidate = candidate.getNextInAccessOrder();
        evictEntry(evict, RemovalCause.SIZE, 0L);
        continue;
      }

      // Evict immediately if the candidate's weight exceeds the maximum
      if (candidate.getPolicyWeight() > maximum()) {
        Node<K, V> evict = candidate;
        candidate = candidate.getNextInAccessOrder();
        evictEntry(evict, RemovalCause.SIZE, 0L);
        continue;
      }

      // Evict the entry with the lowest frequency; admit() prefers the victim on a tie
      if (admit(candidateKey, victimKey)) {
        Node<K, V> evict = victim;
        victim = victim.getNextInAccessOrder();
        evictEntry(evict, RemovalCause.SIZE, 0L);
        candidate = candidate.getNextInAccessOrder();
      } else {
        Node<K, V> evict = candidate;
        candidate = candidate.getNextInAccessOrder();
        evictEntry(evict, RemovalCause.SIZE, 0L);
      }
    }
  }
1✔
871

872
  /**
873
   * Determines if the candidate should be accepted into the main space, as determined by its
874
   * frequency relative to the victim. A small amount of randomness is used to protect against hash
875
   * collision attacks, where the victim's frequency is artificially raised so that no new entries
876
   * are admitted.
877
   *
878
   * @param candidateKey the key for the entry being proposed for long term retention
879
   * @param victimKey the key for the entry chosen by the eviction policy for replacement
880
   * @return if the candidate should be admitted and the victim ejected
881
   */
882
  @GuardedBy("evictionLock")
883
  boolean admit(K candidateKey, K victimKey) {
884
    int victimFreq = frequencySketch().frequency(victimKey);
1✔
885
    int candidateFreq = frequencySketch().frequency(candidateKey);
1✔
886
    if (candidateFreq > victimFreq) {
1✔
887
      return true;
1✔
888
    } else if (candidateFreq >= ADMIT_HASHDOS_THRESHOLD) {
1✔
889
      // The maximum frequency is 15 and halved to 7 after a reset to age the history. An attack
890
      // exploits that a hot candidate is rejected in favor of a hot victim. The threshold of a warm
891
      // candidate reduces the number of random acceptances to minimize the impact on the hit rate.
892
      int random = ThreadLocalRandom.current().nextInt();
1✔
893
      return ((random & 127) == 0);
1✔
894
    }
895
    return false;
1✔
896
  }
897

898
  /** Expires entries that have expired by access, write, or variable. */
899
  @GuardedBy("evictionLock")
900
  void expireEntries() {
901
    long now = expirationTicker().read();
1✔
902
    expireAfterAccessEntries(now);
1✔
903
    expireAfterWriteEntries(now);
1✔
904
    expireVariableEntries(now);
1✔
905

906
    Pacer pacer = pacer();
1✔
907
    if (pacer != null) {
1✔
908
      long delay = getExpirationDelay(now);
1✔
909
      if (delay == Long.MAX_VALUE) {
1✔
910
        pacer.cancel();
1✔
911
      } else {
912
        pacer.schedule(executor, drainBuffersTask, now, delay);
1✔
913
      }
914
    }
915
  }
1✔
916

917
  /** Expires entries in the access-order queue. */
918
  @GuardedBy("evictionLock")
919
  void expireAfterAccessEntries(long now) {
920
    if (!expiresAfterAccess()) {
1✔
921
      return;
1✔
922
    }
923

924
    expireAfterAccessEntries(accessOrderWindowDeque(), now);
1✔
925
    if (evicts()) {
1✔
926
      expireAfterAccessEntries(accessOrderProbationDeque(), now);
1✔
927
      expireAfterAccessEntries(accessOrderProtectedDeque(), now);
1✔
928
    }
929
  }
1✔
930

931
  /** Expires entries in an access-order queue. */
932
  @GuardedBy("evictionLock")
933
  void expireAfterAccessEntries(AccessOrderDeque<Node<K, V>> accessOrderDeque, long now) {
934
    long duration = expiresAfterAccessNanos();
1✔
935
    for (;;) {
936
      Node<K, V> node = accessOrderDeque.peekFirst();
1✔
937
      if ((node == null) || ((now - node.getAccessTime()) < duration)
1✔
938
          || !evictEntry(node, RemovalCause.EXPIRED, now)) {
1✔
939
        return;
1✔
940
      }
941
    }
1✔
942
  }
943

944
  /** Expires entries on the write-order queue. */
945
  @GuardedBy("evictionLock")
946
  void expireAfterWriteEntries(long now) {
947
    if (!expiresAfterWrite()) {
1✔
948
      return;
1✔
949
    }
950
    long duration = expiresAfterWriteNanos();
1✔
951
    for (;;) {
952
      Node<K, V> node = writeOrderDeque().peekFirst();
1✔
953
      if ((node == null) || ((now - node.getWriteTime()) < duration)
1✔
954
          || !evictEntry(node, RemovalCause.EXPIRED, now)) {
1✔
955
        break;
1✔
956
      }
957
    }
1✔
958
  }
1✔
959

960
  /** Expires entries in the timer wheel. */
961
  @GuardedBy("evictionLock")
962
  void expireVariableEntries(long now) {
963
    if (expiresVariable()) {
1✔
964
      timerWheel().advance(this, now);
1✔
965
    }
966
  }
1✔
967

968
  /** Returns the duration until the next item expires, or {@link Long#MAX_VALUE} if none. */
  @GuardedBy("evictionLock")
  long getExpirationDelay(long now) {
    long delay = Long.MAX_VALUE;
    if (expiresAfterAccess()) {
      // The head of each access-order deque is its least recently used entry
      Node<K, V> node = accessOrderWindowDeque().peekFirst();
      if (node != null) {
        delay = Math.min(delay, expiresAfterAccessNanos() - (now - node.getAccessTime()));
      }
      if (evicts()) {
        node = accessOrderProbationDeque().peekFirst();
        if (node != null) {
          delay = Math.min(delay, expiresAfterAccessNanos() - (now - node.getAccessTime()));
        }
        node = accessOrderProtectedDeque().peekFirst();
        if (node != null) {
          delay = Math.min(delay, expiresAfterAccessNanos() - (now - node.getAccessTime()));
        }
      }
    }
    if (expiresAfterWrite()) {
      // The head of the write-order deque is the least recently written entry
      Node<K, V> node = writeOrderDeque().peekFirst();
      if (node != null) {
        delay = Math.min(delay, expiresAfterWriteNanos() - (now - node.getWriteTime()));
      }
    }
    if (expiresVariable()) {
      delay = Math.min(delay, timerWheel().getExpirationDelay());
    }
    return delay;
  }
999

1000
  /** Returns if the entry has expired. */
  @SuppressWarnings("ShortCircuitBoolean")
  boolean hasExpired(Node<K, V> node, long now) {
    if (isComputingAsync(node)) {
      // An in-flight async computation is never considered expired
      return false;
    }
    // Non-short-circuit "|" is intentional (see the suppressed ShortCircuitBoolean warning),
    // presumably to evaluate all three policies without extra branches — confirm before changing
    return (expiresAfterAccess() && (now - node.getAccessTime() >= expiresAfterAccessNanos()))
        | (expiresAfterWrite() && (now - node.getWriteTime() >= expiresAfterWriteNanos()))
        | (expiresVariable() && (now - node.getVariableTime() >= 0));
  }
1010

1011
  /**
   * Attempts to evict the entry based on the given removal cause. A removal may be ignored if the
   * entry was updated and is no longer eligible for eviction.
   *
   * @param node the entry to evict
   * @param cause the reason to evict
   * @param now the current time, used only if expiring
   * @return if the entry was evicted
   */
  @GuardedBy("evictionLock")
  @SuppressWarnings({"GuardedByChecker", "NullAway", "PMD.CollapsibleIfStatements"})
  boolean evictEntry(Node<K, V> node, RemovalCause cause, long now) {
    K key = node.getKey();
    // Single-element arrays carry results out of the computeIfPresent lambda
    @SuppressWarnings("unchecked")
    V[] value = (V[]) new Object[1];
    boolean[] removed = new boolean[1];
    boolean[] resurrect = new boolean[1];
    Object keyReference = node.getKeyReference();
    RemovalCause[] actualCause = new RemovalCause[1];

    // Atomically validate eligibility and unmap; returning n keeps the mapping (resurrection)
    data.computeIfPresent(keyReference, (k, n) -> {
      if (n != node) {
        // The mapping was replaced by a different node; leave it untouched
        return n;
      }
      synchronized (n) {
        value[0] = n.getValue();

        if ((key == null) || (value[0] == null)) {
          // A cleared weak/soft reference supersedes the requested cause
          actualCause[0] = RemovalCause.COLLECTED;
        } else if (cause == RemovalCause.COLLECTED) {
          // The entry is no longer collected, so it is not eligible under this cause
          resurrect[0] = true;
          return n;
        } else {
          actualCause[0] = cause;
        }

        if (actualCause[0] == RemovalCause.EXPIRED) {
          // Re-check expiration under the lock; a concurrent update may have refreshed the entry
          boolean expired = false;
          if (expiresAfterAccess()) {
            expired |= ((now - n.getAccessTime()) >= expiresAfterAccessNanos());
          }
          if (expiresAfterWrite()) {
            expired |= ((now - n.getWriteTime()) >= expiresAfterWriteNanos());
          }
          if (expiresVariable()) {
            expired |= (n.getVariableTime() <= now);
          }
          if (!expired) {
            resurrect[0] = true;
            return n;
          }
        } else if (actualCause[0] == RemovalCause.SIZE) {
          int weight = node.getWeight();
          if (weight == 0) {
            // Zero-weight entries do not contribute to the size and are not evicted for it
            resurrect[0] = true;
            return n;
          }
        }

        notifyEviction(key, value[0], actualCause[0]);
        discardRefresh(keyReference);
        removed[0] = true;
        node.retire();
      }
      return null;
    });

    // The entry is no longer eligible for eviction
    if (resurrect[0]) {
      return false;
    }

    // If the eviction fails due to a concurrent removal of the victim, that removal may cancel out
    // the addition that triggered this eviction. The victim is eagerly unlinked and the size
    // decremented before the removal task so that if an eviction is still required then a new
    // victim will be chosen for removal.
    if (node.inWindow() && (evicts() || expiresAfterAccess())) {
      accessOrderWindowDeque().remove(node);
    } else if (evicts()) {
      if (node.inMainProbation()) {
        accessOrderProbationDeque().remove(node);
      } else {
        accessOrderProtectedDeque().remove(node);
      }
    }
    if (expiresAfterWrite()) {
      writeOrderDeque().remove(node);
    } else if (expiresVariable()) {
      timerWheel().deschedule(node);
    }

    synchronized (node) {
      logIfAlive(node);
      makeDead(node);
    }

    if (removed[0]) {
      statsCounter().recordEviction(node.getWeight(), actualCause[0]);
      notifyRemoval(key, value[0], actualCause[0]);
    }

    return true;
  }
1114

1115
  /** Adapts the eviction policy to towards the optimal recency / frequency configuration. */
1116
  @GuardedBy("evictionLock")
1117
  void climb() {
1118
    if (!evicts()) {
1✔
1119
      return;
1✔
1120
    }
1121

1122
    determineAdjustment();
1✔
1123
    demoteFromMainProtected();
1✔
1124
    long amount = adjustment();
1✔
1125
    if (amount == 0) {
1✔
1126
      return;
1✔
1127
    } else if (amount > 0) {
1✔
1128
      increaseWindow();
1✔
1129
    } else {
1130
      decreaseWindow();
1✔
1131
    }
1132
  }
1✔
1133

1134
  /** Calculates the amount to adapt the window by and sets {@link #adjustment()} accordingly. */
  @GuardedBy("evictionLock")
  void determineAdjustment() {
    if (frequencySketch().isNotInitialized()) {
      // No sampling history exists yet, so reset the climber's state
      setPreviousSampleHitRate(0.0);
      setMissesInSample(0);
      setHitsInSample(0);
      return;
    }

    int requestCount = hitsInSample() + missesInSample();
    if (requestCount < frequencySketch().sampleSize) {
      // Wait until a full sample period has elapsed before adapting
      return;
    }

    // Hill climbing: keep stepping in the same direction while the hit rate improves, otherwise
    // reverse; restart with a large step on a big swing, else decay the step size
    double hitRate = (double) hitsInSample() / requestCount;
    double hitRateChange = hitRate - previousSampleHitRate();
    double amount = (hitRateChange >= 0) ? stepSize() : -stepSize();
    double nextStepSize = (Math.abs(hitRateChange) >= HILL_CLIMBER_RESTART_THRESHOLD)
        ? HILL_CLIMBER_STEP_PERCENT * maximum() * (amount >= 0 ? 1 : -1)
        : HILL_CLIMBER_STEP_DECAY_RATE * amount;
    setPreviousSampleHitRate(hitRate);
    setAdjustment((long) amount);
    setStepSize(nextStepSize);
    setMissesInSample(0);
    setHitsInSample(0);
  }
1✔
1161

1162
  /**
   * Increases the size of the admission window by shrinking the portion allocated to the main
   * space. As the main space is partitioned into probation and protected regions (80% / 20%), for
   * simplicity only the protected is reduced. If the regions exceed their maximums, this may cause
   * protected items to be demoted to the probation region and probation items to be demoted to the
   * admission window.
   */
  @GuardedBy("evictionLock")
  void increaseWindow() {
    if (mainProtectedMaximum() == 0) {
      // There is no protected capacity to trade away
      return;
    }

    // Tentatively transfer the quota from the protected region to the window
    long quota = Math.min(adjustment(), mainProtectedMaximum());
    setMainProtectedMaximum(mainProtectedMaximum() - quota);
    setWindowMaximum(windowMaximum() + quota);
    demoteFromMainProtected();

    // Move entries into the window until the quota is consumed, bounded to limit the work per run
    for (int i = 0; i < QUEUE_TRANSFER_THRESHOLD; i++) {
      Node<K, V> candidate = accessOrderProbationDeque().peekFirst();
      boolean probation = true;
      if ((candidate == null) || (quota < candidate.getPolicyWeight())) {
        // Fall back to the protected region when probation is empty or its head is too heavy
        candidate = accessOrderProtectedDeque().peekFirst();
        probation = false;
      }
      if (candidate == null) {
        break;
      }

      int weight = candidate.getPolicyWeight();
      if (quota < weight) {
        break;
      }

      quota -= weight;
      if (probation) {
        accessOrderProbationDeque().remove(candidate);
      } else {
        setMainProtectedWeightedSize(mainProtectedWeightedSize() - weight);
        accessOrderProtectedDeque().remove(candidate);
      }
      setWindowWeightedSize(windowWeightedSize() + weight);
      accessOrderWindowDeque().offerLast(candidate);
      candidate.makeWindow();
    }

    // Return any unconsumed quota and record the adjustment actually applied
    setMainProtectedMaximum(mainProtectedMaximum() + quota);
    setWindowMaximum(windowMaximum() - quota);
    setAdjustment(quota);
  }
1✔
1212

1213
  /** Decreases the size of the admission window and increases the main's protected region. */
  @GuardedBy("evictionLock")
  void decreaseWindow() {
    if (windowMaximum() <= 1) {
      // Always retain at least a one-entry admission window
      return;
    }

    // Tentatively transfer the quota from the window to the protected region
    long quota = Math.min(-adjustment(), Math.max(0, windowMaximum() - 1));
    setMainProtectedMaximum(mainProtectedMaximum() + quota);
    setWindowMaximum(windowMaximum() - quota);

    // Demote window entries until the quota is consumed, bounded to limit the work per run
    for (int i = 0; i < QUEUE_TRANSFER_THRESHOLD; i++) {
      Node<K, V> candidate = accessOrderWindowDeque().peekFirst();
      if (candidate == null) {
        break;
      }

      int weight = candidate.getPolicyWeight();
      if (quota < weight) {
        break;
      }

      quota -= weight;
      setWindowWeightedSize(windowWeightedSize() - weight);
      accessOrderWindowDeque().remove(candidate);
      accessOrderProbationDeque().offerLast(candidate);
      candidate.makeMainProbation();
    }

    // Return any unconsumed quota and record the adjustment actually applied
    setMainProtectedMaximum(mainProtectedMaximum() - quota);
    setWindowMaximum(windowMaximum() + quota);
    setAdjustment(-quota);
  }
1✔
1246

1247
  /** Transfers the nodes from the protected to the probation region if it exceeds the maximum. */
  @GuardedBy("evictionLock")
  void demoteFromMainProtected() {
    long mainProtectedMaximum = mainProtectedMaximum();
    long mainProtectedWeightedSize = mainProtectedWeightedSize();
    if (mainProtectedWeightedSize <= mainProtectedMaximum) {
      return;
    }

    // Demote the LRU protected entries, bounded to limit the work per maintenance run
    for (int i = 0; i < QUEUE_TRANSFER_THRESHOLD; i++) {
      if (mainProtectedWeightedSize <= mainProtectedMaximum) {
        break;
      }

      Node<K, V> demoted = accessOrderProtectedDeque().poll();
      if (demoted == null) {
        break;
      }
      demoted.makeMainProbation();
      accessOrderProbationDeque().offerLast(demoted);
      mainProtectedWeightedSize -= demoted.getPolicyWeight();
    }
    // Publish the locally accumulated size once, after the loop
    setMainProtectedWeightedSize(mainProtectedWeightedSize);
  }
1✔
1271

1272
  /**
1273
   * Performs the post-processing work required after a read.
1274
   *
1275
   * @param node the entry in the page replacement policy
1276
   * @param now the current time, in nanoseconds
1277
   * @param recordHit if the hit count should be incremented
1278
   * @return the refreshed value if immediately loaded, else null
1279
   */
1280
  @Nullable V afterRead(Node<K, V> node, long now, boolean recordHit) {
1281
    if (recordHit) {
1✔
1282
      statsCounter().recordHits(1);
1✔
1283
    }
1284

1285
    boolean delayable = skipReadBuffer() || (readBuffer.offer(node) != Buffer.FULL);
1✔
1286
    if (shouldDrainBuffers(delayable)) {
1✔
1287
      scheduleDrainBuffers();
1✔
1288
    }
1289
    return refreshIfNeeded(node, now);
1✔
1290
  }
1291

1292
  /** Returns if the cache should bypass the read buffer. */
  boolean skipReadBuffer() {
    // Bypass only while on the fast path and the sketch has not been lazily initialized
    // (initialization happens when the cache nears its maximum size; see setMaximumSize)
    return fastpath() && frequencySketch().isNotInitialized();
  }
1296

1297
  /**
   * Asynchronously refreshes the entry if eligible.
   *
   * @param node the entry in the cache to refresh
   * @param now the current time, in nanoseconds
   * @return the refreshed value if immediately loaded, else null
   */
  @SuppressWarnings("FutureReturnValueIgnored")
  @Nullable V refreshIfNeeded(Node<K, V> node, long now) {
    if (!refreshAfterWrite()) {
      return null;
    }

    K key;
    V oldValue;
    long writeTime = node.getWriteTime();
    // The write time's low bit marks a refresh in flight; setWriteTime always stores it cleared
    long refreshWriteTime = writeTime | 1L;
    Object keyReference = node.getKeyReference();
    ConcurrentMap<Object, CompletableFuture<?>> refreshes;
    // Eligible only if stale, fully present, not already flagged or registered, alive, and the
    // flag CAS wins (the CAS excludes concurrent refreshers racing on the same entry)
    if (((now - writeTime) > refreshAfterWriteNanos()) && (keyReference != null)
        && ((key = node.getKey()) != null) && ((oldValue = node.getValue()) != null)
        && ((writeTime & 1L) == 0L) && !(refreshes = refreshes()).containsKey(keyReference)
        && node.isAlive() && node.casWriteTime(writeTime, refreshWriteTime)) {
      long[] startTime = new long[1];
      @SuppressWarnings({"rawtypes", "unchecked"})
      CompletableFuture<? extends V>[] refreshFuture = new CompletableFuture[1];
      try {
        refreshes.computeIfAbsent(keyReference, k -> {
          try {
            startTime[0] = statsTicker().read();
            if (isAsync) {
              @SuppressWarnings("unchecked")
              CompletableFuture<V> future = (CompletableFuture<V>) oldValue;
              if (Async.isReady(future)) {
                @SuppressWarnings("NullAway")
                var refresh = cacheLoader.asyncReload(key, future.join(), executor);
                refreshFuture[0] = requireNonNull(refresh, "Null future");
              } else {
                // no-op if load is pending
                return future;
              }
            } else {
              @SuppressWarnings("NullAway")
              var refresh = cacheLoader.asyncReload(key, oldValue, executor);
              refreshFuture[0] = requireNonNull(refresh, "Null future");
            }
            return refreshFuture[0];
          } catch (InterruptedException e) {
            // Preserve the interrupt status for the caller
            Thread.currentThread().interrupt();
            logger.log(Level.WARNING, "Exception thrown when submitting refresh task", e);
            return null;
          } catch (Throwable e) {
            logger.log(Level.WARNING, "Exception thrown when submitting refresh task", e);
            return null;
          }
        });
      } finally {
        // Restore the unflagged write time regardless of whether submission succeeded
        node.casWriteTime(refreshWriteTime, writeTime);
      }

      if (refreshFuture[0] == null) {
        return null;
      }

      var refreshed = refreshFuture[0].handle((newValue, error) -> {
        long loadTime = statsTicker().read() - startTime[0];
        if (error != null) {
          if (!(error instanceof CancellationException) && !(error instanceof TimeoutException)) {
            logger.log(Level.WARNING, "Exception thrown during refresh", error);
          }
          refreshes.remove(keyReference, refreshFuture[0]);
          statsCounter().recordLoadFailure(loadTime);
          return null;
        }

        // In async mode the cache stores the future itself, not the unwrapped value
        @SuppressWarnings("unchecked")
        V value = (isAsync && (newValue != null)) ? (V) refreshFuture[0] : newValue;

        RemovalCause[] cause = new RemovalCause[1];
        V result = compute(key, (k, currentValue) -> {
          if (currentValue == null) {
            // If the entry is absent then discard the refresh and maybe notifying the listener
            if (value != null) {
              cause[0] = RemovalCause.EXPLICIT;
            }
            return null;
          } else if (currentValue == value) {
            // If the reloaded value is the same instance then no-op
            return currentValue;
          } else if (isAsync &&
              (newValue == Async.getIfReady((CompletableFuture<?>) currentValue))) {
            // If the completed futures hold the same value instance then no-op
            return currentValue;
          } else if ((currentValue == oldValue) && (node.getWriteTime() == writeTime)) {
            // If the entry was not modified while in-flight (no ABA) then replace
            return value;
          }
          // Otherwise, a write invalidated the refresh so discard it and notify the listener
          cause[0] = RemovalCause.REPLACED;
          return currentValue;
        }, expiry(), /* recordLoad */ false, /* recordLoadFailure */ true);

        if (cause[0] != null) {
          notifyRemoval(key, value, cause[0]);
        }
        if (newValue == null) {
          statsCounter().recordLoadFailure(loadTime);
        } else {
          statsCounter().recordLoadSuccess(loadTime);
        }

        refreshes.remove(keyReference, refreshFuture[0]);
        return result;
      });
      // Unwrap immediately when the reload completed synchronously
      return Async.getIfReady(refreshed);
    }

    return null;
  }
1416

1417
  /**
1418
   * Returns the expiration time for the entry after being created.
1419
   *
1420
   * @param key the key of the entry that was created
1421
   * @param value the value of the entry that was created
1422
   * @param expiry the calculator for the expiration time
1423
   * @param now the current time, in nanoseconds
1424
   * @return the expiration time
1425
   */
1426
  long expireAfterCreate(@Nullable K key, @Nullable V value,
1427
      Expiry<? super K, ? super V> expiry, long now) {
1428
    if (expiresVariable() && (key != null) && (value != null)) {
1✔
1429
      long duration = expiry.expireAfterCreate(key, value, now);
1✔
1430
      return isAsync ? (now + duration) : (now + Math.min(duration, MAXIMUM_EXPIRY));
1✔
1431
    }
1432
    return 0L;
1✔
1433
  }
1434

1435
  /**
1436
   * Returns the expiration time for the entry after being updated.
1437
   *
1438
   * @param node the entry in the page replacement policy
1439
   * @param key the key of the entry that was updated
1440
   * @param value the value of the entry that was updated
1441
   * @param expiry the calculator for the expiration time
1442
   * @param now the current time, in nanoseconds
1443
   * @return the expiration time
1444
   */
1445
  long expireAfterUpdate(Node<K, V> node, @Nullable K key,
1446
      @Nullable V value, Expiry<? super K, ? super V> expiry, long now) {
1447
    if (expiresVariable() && (key != null) && (value != null)) {
1✔
1448
      long currentDuration = Math.max(1, node.getVariableTime() - now);
1✔
1449
      long duration = expiry.expireAfterUpdate(key, value, now, currentDuration);
1✔
1450
      return isAsync ? (now + duration) : (now + Math.min(duration, MAXIMUM_EXPIRY));
1✔
1451
    }
1452
    return 0L;
1✔
1453
  }
1454

1455
  /**
1456
   * Returns the access time for the entry after a read.
1457
   *
1458
   * @param node the entry in the page replacement policy
1459
   * @param key the key of the entry that was read
1460
   * @param value the value of the entry that was read
1461
   * @param expiry the calculator for the expiration time
1462
   * @param now the current time, in nanoseconds
1463
   * @return the expiration time
1464
   */
1465
  long expireAfterRead(Node<K, V> node, @Nullable K key,
1466
      @Nullable V value, Expiry<K, V> expiry, long now) {
1467
    if (expiresVariable() && (key != null) && (value != null)) {
1✔
1468
      long currentDuration = Math.max(1, node.getVariableTime() - now);
1✔
1469
      long duration = expiry.expireAfterRead(key, value, now, currentDuration);
1✔
1470
      return isAsync ? (now + duration) : (now + Math.min(duration, MAXIMUM_EXPIRY));
1✔
1471
    }
1472
    return 0L;
1✔
1473
  }
1474

1475
  /**
   * Attempts to update the access time for the entry after a read.
   *
   * @param node the entry in the page replacement policy
   * @param key the key of the entry that was read
   * @param value the value of the entry that was read
   * @param expiry the calculator for the expiration time
   * @param now the current time, in nanoseconds
   */
  void tryExpireAfterRead(Node<K, V> node, @Nullable K key,
      @Nullable V value, Expiry<K, V> expiry, long now) {
    // Skip when variable expiration is unused or the entry's referent was garbage collected
    if (!expiresVariable() || (key == null) || (value == null)) {
      return;
    }

    long variableTime = node.getVariableTime();
    long currentDuration = Math.max(1, variableTime - now);
    if (isAsync && (currentDuration > MAXIMUM_EXPIRY)) {
      // expireAfterCreate has not yet set the duration after completion
      return;
    }

    long duration = expiry.expireAfterRead(key, value, now, currentDuration);
    if (duration != currentDuration) {
      long expirationTime = isAsync ? (now + duration) : (now + Math.min(duration, MAXIMUM_EXPIRY));
      // CAS so a concurrent update is not clobbered; losing the race is benign ("attempts to")
      node.casVariableTime(variableTime, expirationTime);
    }
  }
1✔
1503

1504
  void setVariableTime(Node<K, V> node, long expirationTime) {
1505
    if (expiresVariable()) {
1✔
1506
      node.setVariableTime(expirationTime);
1✔
1507
    }
1508
  }
1✔
1509

1510
  /** Records the entry's write timestamp when write expiration or refresh is configured. */
  void setWriteTime(Node<K, V> node, long now) {
    if (expiresAfterWrite() || refreshAfterWrite()) {
      // The low-order bit is cleared; NOTE(review): the bit appears to be reserved as a flag
      // elsewhere (refresh bookkeeping) — confirm against the Node/refresh implementation
      node.setWriteTime(now & ~1L);
    }
  }
1✔
1515

1516
  void setAccessTime(Node<K, V> node, long now) {
1517
    if (expiresAfterAccess()) {
1✔
1518
      node.setAccessTime(now);
1✔
1519
    }
1520
  }
1✔
1521

1522
  /**
   * Performs the post-processing work required after a write.
   *
   * @param task the pending operation to be applied
   */
  void afterWrite(Runnable task) {
    // Publish the task onto the write buffer and trigger the asynchronous drain, spinning
    // briefly when the buffer is full to give the drain a chance to make room
    for (int i = 0; i < WRITE_BUFFER_RETRIES; i++) {
      if (writeBuffer.offer(task)) {
        scheduleAfterWrite();
        return;
      }
      scheduleDrainBuffers();
      Thread.onSpinWait();
    }

    // In scenarios where the writing threads cannot make progress then they attempt to provide
    // assistance by performing the eviction work directly. This can resolve cases where the
    // maintenance task is scheduled but not running. That might occur due to all of the executor's
    // threads being busy (perhaps writing into this cache), the write rate greatly exceeds the
    // consuming rate, priority inversion, or if the executor silently discarded the maintenance
    // task. Unfortunately this cannot resolve when the eviction is blocked waiting on a long-
    // running computation due to an eviction listener, the victim is being computed on by a writer,
    // or the victim residing in the same hash bin as a computing entry. In those cases a warning is
    // logged to encourage the application to decouple these computations from the map operations.
    lock();
    try {
      maintenance(task);
    } catch (RuntimeException e) {
      logger.log(Level.ERROR, "Exception thrown when performing the maintenance task", e);
    } finally {
      evictionLock.unlock();
    }
    rescheduleCleanUpIfIncomplete();
  }
1✔
1556

1557
  /**
   * Acquires the eviction lock. If the wait exceeds the warning threshold then a diagnostic is
   * logged once and the acquisition falls back to an uninterruptible lock. An interrupt received
   * while waiting is re-asserted on the thread before returning.
   */
  void lock() {
    long remainingNanos = WARN_AFTER_LOCK_WAIT_NANOS;
    long end = System.nanoTime() + remainingNanos;
    boolean interrupted = false;
    try {
      for (;;) {
        try {
          if (evictionLock.tryLock(remainingNanos, TimeUnit.NANOSECONDS)) {
            return;
          }
          logger.log(Level.WARNING, "The cache is experiencing excessive wait times for acquiring "
              + "the eviction lock. This may indicate that a long-running computation has halted "
              + "eviction when trying to remove the victim entry. Consider using AsyncCache to "
              + "decouple the computation from the map operation.", new TimeoutException());
          evictionLock.lock();
          return;
        } catch (InterruptedException e) {
          // Recompute the remaining wait and retry; the interrupt is restored in the finally block
          remainingNanos = end - System.nanoTime();
          interrupted = true;
        }
      }
    } finally {
      if (interrupted) {
        Thread.currentThread().interrupt();
      }
    }
  }
1585

1586
  /**
   * Conditionally schedules the asynchronous maintenance task after a write operation. If the
   * task status was IDLE or REQUIRED then the maintenance task is scheduled immediately. If it
   * is already processing then it is set to transition to REQUIRED upon completion so that a new
   * execution is triggered by the next operation.
   */
  void scheduleAfterWrite() {
    int drainStatus = drainStatusOpaque();
    for (;;) {
      switch (drainStatus) {
        case IDLE:
          casDrainStatus(IDLE, REQUIRED);
          scheduleDrainBuffers();
          return;
        case REQUIRED:
          scheduleDrainBuffers();
          return;
        case PROCESSING_TO_IDLE:
          // Flag that more work arrived during the drain; on a CAS failure reread and retry
          if (casDrainStatus(PROCESSING_TO_IDLE, PROCESSING_TO_REQUIRED)) {
            return;
          }
          drainStatus = drainStatusAcquire();
          continue;
        case PROCESSING_TO_REQUIRED:
          // A follow-up drain is already pending
          return;
        default:
          throw new IllegalStateException("Invalid drain status: " + drainStatus);
      }
    }
  }
1616

1617
  /**
   * Attempts to schedule an asynchronous task to apply the pending operations to the page
   * replacement policy. If the executor rejects the task then it is run directly.
   */
  void scheduleDrainBuffers() {
    if (drainStatusOpaque() >= PROCESSING_TO_IDLE) {
      // A drain is already in flight
      return;
    }
    if (evictionLock.tryLock()) {
      try {
        // Recheck under the lock to avoid a duplicate submission
        int drainStatus = drainStatusOpaque();
        if (drainStatus >= PROCESSING_TO_IDLE) {
          return;
        }
        setDrainStatusRelease(PROCESSING_TO_IDLE);
        executor.execute(drainBuffersTask);
      } catch (Throwable t) {
        // e.g. a RejectedExecutionException; fall back to draining in-line on this thread
        logger.log(Level.WARNING, "Exception thrown when submitting maintenance task", t);
        maintenance(/* ignored */ null);
      } finally {
        evictionLock.unlock();
      }
    }
  }
1✔
1641

1642
  @Override
  public void cleanUp() {
    try {
      performCleanUp(/* ignored */ null);
    } catch (RuntimeException e) {
      // Surfaced via logging rather than thrown to the caller
      logger.log(Level.ERROR, "Exception thrown when performing the maintenance task", e);
    }
  }
1✔
1650

1651
  /**
   * Performs the maintenance work, blocking until the lock is acquired.
   *
   * @param task an additional pending task to run, or {@code null} if not present
   */
  void performCleanUp(@Nullable Runnable task) {
    evictionLock.lock();
    try {
      maintenance(task);
    } finally {
      evictionLock.unlock();
    }
    // Outside the lock: if work remains (amortized cutoff or racing writes) arrange a follow-up
    rescheduleCleanUpIfIncomplete();
  }
1✔
1665

1666
  /**
   * If there remains pending operations that were not handled by the prior clean up then try to
   * schedule an asynchronous maintenance task. This may occur due to a concurrent write after the
   * maintenance work had started or if the amortized threshold of work per clean up was reached.
   */
  void rescheduleCleanUpIfIncomplete() {
    if (drainStatusOpaque() != REQUIRED) {
      return;
    }

    // An immediate scheduling cannot be performed on a custom executor because it may use a
    // caller-runs policy. This could cause the caller's penalty to exceed the amortized threshold,
    // e.g. repeated concurrent writes could result in a retry loop.
    if (executor == ForkJoinPool.commonPool()) {
      scheduleDrainBuffers();
      return;
    }

    // If a scheduler was configured then the maintenance can be deferred onto the custom executor
    // and run in the near future. Otherwise, it will be handled due to other cache activity.
    var pacer = pacer();
    if ((pacer != null) && !pacer.isScheduled() && evictionLock.tryLock()) {
      try {
        // Recheck under the lock that work is still required and nothing is already scheduled
        if ((drainStatusOpaque() == REQUIRED) && !pacer.isScheduled()) {
          pacer.schedule(executor, drainBuffersTask, expirationTicker().read(), Pacer.TOLERANCE);
        }
      } finally {
        evictionLock.unlock();
      }
    }
  }
1✔
1697

1698
  /**
   * Performs the pending maintenance work and sets the state flags during processing to avoid
   * excess scheduling attempts. The read buffer, write buffer, and reference queues are drained,
   * followed by expiration, and size-based eviction.
   *
   * @param task an additional pending task to run, or {@code null} if not present
   */
  @GuardedBy("evictionLock")
  void maintenance(@Nullable Runnable task) {
    setDrainStatusRelease(PROCESSING_TO_IDLE);

    try {
      drainReadBuffer();

      drainWriteBuffer();
      if (task != null) {
        task.run();
      }

      drainKeyReferences();
      drainValueReferences();

      expireEntries();
      evictEntries();

      climb();
    } finally {
      // Return to IDLE unless new work arrived during processing (PROCESSING_TO_REQUIRED) or the
      // CAS lost a race; in either case flag that another drain is REQUIRED
      if ((drainStatusOpaque() != PROCESSING_TO_IDLE)
          || !casDrainStatus(PROCESSING_TO_IDLE, IDLE)) {
        setDrainStatusOpaque(REQUIRED);
      }
    }
  }
1✔
1731

1732
  /** Drains the weak key references queue, evicting entries whose keys were reclaimed. */
  @GuardedBy("evictionLock")
  void drainKeyReferences() {
    if (!collectKeys()) {
      return;
    }
    Reference<? extends K> keyRef;
    while ((keyRef = keyReferenceQueue().poll()) != null) {
      // The reference object itself is the map key, so it can be used directly for the lookup
      Node<K, V> node = data.get(keyRef);
      if (node != null) {
        evictEntry(node, RemovalCause.COLLECTED, 0L);
      }
    }
  }
1✔
1746

1747
  /** Drains the weak / soft value references queue, evicting entries whose values were reclaimed. */
  @GuardedBy("evictionLock")
  void drainValueReferences() {
    if (!collectValues()) {
      return;
    }
    Reference<? extends V> valueRef;
    while ((valueRef = valueReferenceQueue().poll()) != null) {
      @SuppressWarnings("unchecked")
      InternalReference<V> ref = (InternalReference<V>) valueRef;
      Node<K, V> node = data.get(ref.getKeyReference());
      // Only evict if the node still holds this exact reference; a live entry whose value was
      // concurrently replaced must not be removed
      if ((node != null) && (valueRef == node.getValueReference())) {
        evictEntry(node, RemovalCause.COLLECTED, 0L);
      }
    }
  }
1✔
1763

1764
  /** Drains the read buffer, replaying the recorded accesses on the eviction policy. */
  @GuardedBy("evictionLock")
  void drainReadBuffer() {
    if (!skipReadBuffer()) {
      readBuffer.drainTo(accessPolicy);
    }
  }
1✔
1771

1772
  /** Updates the node's location in the page replacement policy. */
  @GuardedBy("evictionLock")
  void onAccess(Node<K, V> node) {
    if (evicts()) {
      K key = node.getKey();
      if (key == null) {
        // The key was garbage collected; the reference draining will evict the entry
        return;
      }
      frequencySketch().increment(key);
      if (node.inWindow()) {
        reorder(accessOrderWindowDeque(), node);
      } else if (node.inMainProbation()) {
        reorderProbation(node);
      } else {
        reorder(accessOrderProtectedDeque(), node);
      }
      // Sample statistic consumed during adaptation (see climb() in the maintenance cycle)
      setHitsInSample(hitsInSample() + 1);
    } else if (expiresAfterAccess()) {
      reorder(accessOrderWindowDeque(), node);
    }
    if (expiresVariable()) {
      timerWheel().reschedule(node);
    }
  }
1✔
1796

1797
  /** Promote the node from probation to protected on an access. */
  @GuardedBy("evictionLock")
  void reorderProbation(Node<K, V> node) {
    if (!accessOrderProbationDeque().contains(node)) {
      // Ignore stale accesses for an entry that is no longer present
      return;
    } else if (node.getPolicyWeight() > mainProtectedMaximum()) {
      // Too heavy to ever fit in the protected space; just refresh its position in probation
      reorder(accessOrderProbationDeque(), node);
      return;
    }

    // If the protected space exceeds its maximum, the LRU items are demoted to the probation space.
    // This is deferred to the adaption phase at the end of the maintenance cycle.
    setMainProtectedWeightedSize(mainProtectedWeightedSize() + node.getPolicyWeight());
    accessOrderProbationDeque().remove(node);
    accessOrderProtectedDeque().offerLast(node);
    node.makeMainProtected();
  }
1✔
1815

1816
  /** Updates the node's location in the policy's deque, moving it to the MRU position. */
  static <K, V> void reorder(LinkedDeque<Node<K, V>> deque, Node<K, V> node) {
    // An entry may be scheduled for reordering despite having been removed. This can occur when the
    // entry was concurrently read while a writer was removing it. If the entry is no longer linked
    // then it does not need to be processed.
    if (deque.contains(node)) {
      deque.moveToBack(node);
    }
  }
1✔
1825

1826
  /** Drains the write buffer, running up to an amortized maximum number of pending tasks. */
  @GuardedBy("evictionLock")
  void drainWriteBuffer() {
    for (int i = 0; i <= WRITE_BUFFER_MAX; i++) {
      Runnable task = writeBuffer.poll();
      if (task == null) {
        return;
      }
      task.run();
    }
    // The amortized threshold was reached with tasks still queued; request a follow-up drain
    setDrainStatusOpaque(PROCESSING_TO_REQUIRED);
  }
1✔
1838

1839
  /**
   * Atomically transitions the node to the <tt>dead</tt> state and decrements the
   * <tt>weightedSize</tt>.
   *
   * @param node the entry in the page replacement policy
   */
  @GuardedBy("evictionLock")
  void makeDead(Node<K, V> node) {
    synchronized (node) {
      if (node.isDead()) {
        // Already transitioned; the weights were adjusted by the prior caller
        return;
      }
      if (evicts()) {
        // The node's policy weight may be out of sync due to a pending update waiting to be
        // processed. At this point the node's weight is finalized, so the weight can be safely
        // taken from the node's perspective and the sizes will be adjusted correctly.
        if (node.inWindow()) {
          setWindowWeightedSize(windowWeightedSize() - node.getWeight());
        } else if (node.inMainProtected()) {
          setMainProtectedWeightedSize(mainProtectedWeightedSize() - node.getWeight());
        }
        setWeightedSize(weightedSize() - node.getWeight());
      }
      node.die();
    }
  }
1✔
1865

1866
  /** Adds the node to the page replacement policy. */
  final class AddTask implements Runnable {
    final Node<K, V> node;
    final int weight;

    AddTask(Node<K, V> node, int weight) {
      this.weight = weight;
      this.node = node;
    }

    @Override
    @GuardedBy("evictionLock")
    @SuppressWarnings("FutureReturnValueIgnored")
    public void run() {
      if (evicts()) {
        // Account for the new entry's weight in the overall and window regions
        setWeightedSize(weightedSize() + weight);
        setWindowWeightedSize(windowWeightedSize() + weight);
        node.setPolicyWeight(node.getPolicyWeight() + weight);

        long maximum = maximum();
        if (weightedSize() >= (maximum >>> 1)) {
          if (weightedSize() > MAXIMUM_CAPACITY) {
            evictEntries();
          } else {
            // Lazily initialize when close to the maximum
            long capacity = isWeighted() ? data.mappingCount() : maximum;
            frequencySketch().ensureCapacity(capacity);
          }
        }

        K key = node.getKey();
        if (key != null) {
          frequencySketch().increment(key);
        }

        setMissesInSample(missesInSample() + 1);
      }

      // ignore out-of-order write operations
      boolean isAlive;
      synchronized (node) {
        isAlive = node.isAlive();
      }
      if (isAlive) {
        if (expiresAfterWrite()) {
          writeOrderDeque().offerLast(node);
        }
        if (expiresVariable()) {
          timerWheel().schedule(node);
        }
        if (evicts()) {
          if (weight > maximum()) {
            // Can never fit, so evict immediately
            evictEntry(node, RemovalCause.SIZE, expirationTicker().read());
          } else if (weight > windowMaximum()) {
            // Oversized for the window; placed at the front (LRU side) of the window deque
            accessOrderWindowDeque().offerFirst(node);
          } else {
            accessOrderWindowDeque().offerLast(node);
          }
        } else if (expiresAfterAccess()) {
          accessOrderWindowDeque().offerLast(node);
        }
      }

      // Ensure that in-flight async computation cannot expire (reset on a completion callback)
      if (isComputingAsync(node)) {
        synchronized (node) {
          if (!Async.isReady((CompletableFuture<?>) node.getValue())) {
            long expirationTime = expirationTicker().read() + ASYNC_EXPIRY;
            setVariableTime(node, expirationTime);
            setAccessTime(node, expirationTime);
            setWriteTime(node, expirationTime);
          }
        }
      }
    }
  }
1942

1943
  /** Removes a node from the page replacement policy. */
  final class RemovalTask implements Runnable {
    final Node<K, V> node;

    RemovalTask(Node<K, V> node) {
      this.node = node;
    }

    @Override
    @GuardedBy("evictionLock")
    public void run() {
      // add may not have been processed yet
      if (node.inWindow() && (evicts() || expiresAfterAccess())) {
        accessOrderWindowDeque().remove(node);
      } else if (evicts()) {
        if (node.inMainProbation()) {
          accessOrderProbationDeque().remove(node);
        } else {
          accessOrderProtectedDeque().remove(node);
        }
      }
      if (expiresAfterWrite()) {
        writeOrderDeque().remove(node);
      } else if (expiresVariable()) {
        timerWheel().deschedule(node);
      }
      // Finalize the state transition and release the entry's weight
      makeDead(node);
    }
  }
1972

1973
  /** Updates the weighted size after an entry's value (and therefore weight) was replaced. */
  final class UpdateTask implements Runnable {
    final int weightDifference;
    final Node<K, V> node;

    // NOTE(review): `public` is redundant here and inconsistent with AddTask / RemovalTask's
    // package-private constructors; left as-is to avoid a visibility change
    public UpdateTask(Node<K, V> node, int weightDifference) {
      this.weightDifference = weightDifference;
      this.node = node;
    }

    @Override
    @GuardedBy("evictionLock")
    public void run() {
      if (expiresAfterWrite()) {
        reorder(writeOrderDeque(), node);
      } else if (expiresVariable()) {
        timerWheel().reschedule(node);
      }
      if (evicts()) {
        int oldWeightedSize = node.getPolicyWeight();
        node.setPolicyWeight(oldWeightedSize + weightDifference);
        if (node.inWindow()) {
          setWindowWeightedSize(windowWeightedSize() + weightDifference);
          if (node.getPolicyWeight() > maximum()) {
            // The updated weight can never fit in the cache
            evictEntry(node, RemovalCause.SIZE, expirationTicker().read());
          } else if (node.getPolicyWeight() <= windowMaximum()) {
            onAccess(node);
          } else if (accessOrderWindowDeque().contains(node)) {
            // Oversized for the window; move to the front (LRU side) of the window deque
            accessOrderWindowDeque().moveToFront(node);
          }
        } else if (node.inMainProbation()) {
          if (node.getPolicyWeight() <= maximum()) {
            onAccess(node);
          } else {
            evictEntry(node, RemovalCause.SIZE, expirationTicker().read());
          }
        } else if (node.inMainProtected()) {
          setMainProtectedWeightedSize(mainProtectedWeightedSize() + weightDifference);
          if (node.getPolicyWeight() <= maximum()) {
            onAccess(node);
          } else {
            evictEntry(node, RemovalCause.SIZE, expirationTicker().read());
          }
        }

        setWeightedSize(weightedSize() + weightDifference);
        if (weightedSize() > MAXIMUM_CAPACITY) {
          evictEntries();
        }
      } else if (expiresAfterAccess()) {
        onAccess(node);
      }
    }
  }
2027

2028
  /* --------------- Concurrent Map Support --------------- */
2029

2030
  @Override
  public boolean isEmpty() {
    // Delegates to the backing map; pending policy work does not affect the answer
    return data.isEmpty();
  }
2034

2035
  @Override
  public int size() {
    // Truncated to int per the Map contract; estimatedSize() exposes the long mappingCount
    return data.size();
  }
2039

2040
  @Override
  public long estimatedSize() {
    // mappingCount() returns a long estimate, avoiding size()'s int truncation
    return data.mappingCount();
  }
2044

2045
  @Override
  public void clear() {
    Deque<Node<K, V>> entries;
    evictionLock.lock();
    try {
      // Discard all pending reads
      readBuffer.drainTo(e -> {});

      // Apply all pending writes
      Runnable task;
      while ((task = writeBuffer.poll()) != null) {
        task.run();
      }

      // Cancel the scheduled cleanup
      Pacer pacer = pacer();
      if (pacer != null) {
        pacer.cancel();
      }

      // Discard all entries, falling back to one-by-one to avoid excessive lock hold times
      long now = expirationTicker().read();
      int threshold = (WRITE_BUFFER_MAX / 2);
      entries = new ArrayDeque<>(data.values());
      while (!entries.isEmpty() && (writeBuffer.size() < threshold)) {
        removeNode(entries.poll(), now);
      }
    } finally {
      evictionLock.unlock();
    }

    // Remove any stragglers if released early to more aggressively flush incoming writes
    boolean cleanUp = false;
    for (var node : entries) {
      var key = node.getKey();
      if (key == null) {
        // The key was garbage collected, so a maintenance pass must evict the dead entry
        cleanUp = true;
      } else {
        remove(key);
      }
    }
    if (collectKeys() && cleanUp) {
      cleanUp();
    }
  }
1✔
2090

2091
  /** Removes the given node from the map and the policy, notifying listeners as appropriate. */
  @GuardedBy("evictionLock")
  @SuppressWarnings("GuardedByChecker")
  void removeNode(Node<K, V> node, long now) {
    K key = node.getKey();
    @SuppressWarnings("unchecked")
    V[] value = (V[]) new Object[1];
    RemovalCause[] cause = new RemovalCause[1];
    Object keyReference = node.getKeyReference();

    data.computeIfPresent(keyReference, (k, n) -> {
      if (n != node) {
        // A different mapping now occupies the key; leave it untouched
        return n;
      }
      synchronized (n) {
        value[0] = n.getValue();

        // Classify the removal for the listener notifications
        if ((key == null) || (value[0] == null)) {
          cause[0] = RemovalCause.COLLECTED;
        } else if (hasExpired(n, now)) {
          cause[0] = RemovalCause.EXPIRED;
        } else {
          cause[0] = RemovalCause.EXPLICIT;
        }

        if (cause[0].wasEvicted()) {
          notifyEviction(key, value[0], cause[0]);
        }

        discardRefresh(node.getKeyReference());
        node.retire();
        return null;
      }
    });

    // Unlink the node from whichever policy structures it participates in
    if (node.inWindow() && (evicts() || expiresAfterAccess())) {
      accessOrderWindowDeque().remove(node);
    } else if (evicts()) {
      if (node.inMainProbation()) {
        accessOrderProbationDeque().remove(node);
      } else {
        accessOrderProtectedDeque().remove(node);
      }
    }
    if (expiresAfterWrite()) {
      writeOrderDeque().remove(node);
    } else if (expiresVariable()) {
      timerWheel().deschedule(node);
    }

    synchronized (node) {
      logIfAlive(node);
      makeDead(node);
    }

    // cause is only set when this node was the current mapping and was removed above
    if (cause[0] != null) {
      notifyRemoval(key, value[0], cause[0]);
    }
  }
1✔
2149

2150
  @Override
  public boolean containsKey(Object key) {
    Node<K, V> node = data.get(nodeFactory.newLookupKey(key));
    // Present only if the value was not collected and the entry has not expired
    return (node != null) && (node.getValue() != null)
        && !hasExpired(node, expirationTicker().read());
  }
2156

2157
  @Override
2158
  @SuppressWarnings("SuspiciousMethodCalls")
2159
  public boolean containsValue(Object value) {
2160
    requireNonNull(value);
1✔
2161

2162
    long now = expirationTicker().read();
1✔
2163
    for (Node<K, V> node : data.values()) {
1✔
2164
      if (node.containsValue(value) && !hasExpired(node, now) && (node.getKey() != null)) {
1✔
2165
        return true;
1✔
2166
      }
2167
    }
1✔
2168
    return false;
1✔
2169
  }
2170

2171
  @Override
  public @Nullable V get(Object key) {
    // The Map view does not record hit/miss statistics
    return getIfPresent(key, /* recordStats */ false);
  }
2175

2176
  @Override
  public @Nullable V getIfPresent(Object key, boolean recordStats) {
    Node<K, V> node = data.get(nodeFactory.newLookupKey(key));
    if (node == null) {
      if (recordStats) {
        statsCounter().recordMisses(1);
      }
      // Piggyback on the miss to trigger any pending maintenance
      if (drainStatusOpaque() == REQUIRED) {
        scheduleDrainBuffers();
      }
      return null;
    }

    V value = node.getValue();
    long now = expirationTicker().read();
    if (hasExpired(node, now) || (collectValues() && (value == null))) {
      if (recordStats) {
        statsCounter().recordMisses(1);
      }
      // The expired or collected entry must be evicted by the maintenance task
      scheduleDrainBuffers();
      return null;
    }

    if (!isComputingAsync(node)) {
      @SuppressWarnings("unchecked")
      K castedKey = (K) key;
      setAccessTime(node, now);
      tryExpireAfterRead(node, castedKey, value, expiry(), now);
    }
    // afterRead may return a refreshed value to expose in place of the stale one
    V refreshed = afterRead(node, now, recordStats);
    return (refreshed == null) ? value : refreshed;
  }
2208

2209
  @Override
2210
  public @Nullable V getIfPresentQuietly(K key) {
2211
    V value;
2212
    Node<K, V> node = data.get(nodeFactory.newLookupKey(key));
1✔
2213
    if ((node == null) || ((value = node.getValue()) == null)
1✔
2214
        || hasExpired(node, expirationTicker().read())) {
1✔
2215
      return null;
1✔
2216
    }
2217
    return value;
1✔
2218
  }
2219

2220
  /**
   * Returns the key associated with the mapping in this cache, or {@code null} if there is none.
   *
   * @param key the key whose canonical instance is to be returned
   * @return the key used by the mapping, or {@code null} if this cache does not contain a mapping
   *         for the key
   * @throws NullPointerException if the specified key is null
   */
  public @Nullable K getKey(K key) {
    Node<K, V> node = data.get(nodeFactory.newLookupKey(key));
    if (node == null) {
      // Piggyback on the miss to trigger any pending maintenance
      if (drainStatusOpaque() == REQUIRED) {
        scheduleDrainBuffers();
      }
      return null;
    }
    // NOTE(review): now=0 — presumably skips time-based bookkeeping while still replaying the
    // access on the policy; confirm against afterRead
    afterRead(node, /* now */ 0L, /* recordStats */ false);
    return node.getKey();
  }
2239

2240
  @Override
  public Map<K, V> getAllPresent(Iterable<? extends K> keys) {
    // Pre-populate with null placeholders to deduplicate keys and preserve iteration order
    var result = new LinkedHashMap<K, V>(calculateHashMapCapacity(keys));
    for (K key : keys) {
      result.put(key, null);
    }

    int uniqueKeys = result.size();
    long now = expirationTicker().read();
    for (var iter = result.entrySet().iterator(); iter.hasNext();) {
      V value;
      var entry = iter.next();
      Node<K, V> node = data.get(nodeFactory.newLookupKey(entry.getKey()));
      if ((node == null) || ((value = node.getValue()) == null) || hasExpired(node, now)) {
        // Absent, collected, or expired: drop the placeholder so it counts as a miss
        iter.remove();
      } else {
        if (!isComputingAsync(node)) {
          tryExpireAfterRead(node, entry.getKey(), value, expiry(), now);
          setAccessTime(node, now);
        }
        V refreshed = afterRead(node, now, /* recordHit */ false);
        entry.setValue((refreshed == null) ? value : refreshed);
      }
    }
    // Record the statistics in bulk rather than per key
    statsCounter().recordHits(result.size());
    statsCounter().recordMisses(uniqueKeys - result.size());

    return Collections.unmodifiableMap(result);
  }
2269

2270
  @Override
2271
  public void putAll(Map<? extends K, ? extends V> map) {
2272
    map.forEach(this::put);
1✔
2273
  }
1✔
2274

2275
  @Override
  public @Nullable V put(K key, V value) {
    // Unconditional write using the cache's configured expiration calculator
    return put(key, value, expiry(), /* onlyIfAbsent */ false);
  }
2279

2280
  @Override
  public @Nullable V putIfAbsent(K key, V value) {
    // Insert only when no live mapping exists; returns the existing value otherwise
    return put(key, value, expiry(), /* onlyIfAbsent */ true);
  }
2284

2285
  /**
2286
   * Adds a node to the policy and the data store. If an existing node is found, then its value is
2287
   * updated if allowed.
2288
   *
2289
   * @param key key with which the specified value is to be associated
2290
   * @param value value to be associated with the specified key
2291
   * @param expiry the calculator for the write expiration time
2292
   * @param onlyIfAbsent a write is performed only if the key is not already associated with a value
2293
   * @return the prior value in or null if no mapping was found
2294
   */
2295
  @Nullable V put(K key, V value, Expiry<K, V> expiry, boolean onlyIfAbsent) {
    requireNonNull(key);
    requireNonNull(value);

    Node<K, V> node = null;
    long now = expirationTicker().read();
    int newWeight = weigher.weigh(key, value);
    Object lookupKey = nodeFactory.newLookupKey(key);
    // Retry loop: a race with a concurrent removal or insertion restarts the attempt
    for (int attempts = 1; ; attempts++) {
      Node<K, V> prior = data.get(lookupKey);
      if (prior == null) {
        // Lazily create the new entry once and reuse it across retry iterations
        if (node == null) {
          node = nodeFactory.newNode(key, keyReferenceQueue(),
              value, valueReferenceQueue(), newWeight, now);
          setVariableTime(node, expireAfterCreate(key, value, expiry, now));
        }
        prior = data.putIfAbsent(node.getKeyReference(), node);
        if (prior == null) {
          // Inserted a brand new mapping; record the write and return "no previous value"
          afterWrite(new AddTask(node, newWeight));
          return null;
        } else if (onlyIfAbsent) {
          // An optimistic fast path to avoid unnecessary locking
          V currentValue = prior.getValue();
          if ((currentValue != null) && !hasExpired(prior, now)) {
            if (!isComputingAsync(prior)) {
              tryExpireAfterRead(prior, key, currentValue, expiry(), now);
              setAccessTime(prior, now);
            }
            afterRead(prior, now, /* recordHit */ false);
            return currentValue;
          }
        }
      } else if (onlyIfAbsent) {
        // An optimistic fast path to avoid unnecessary locking
        V currentValue = prior.getValue();
        if ((currentValue != null) && !hasExpired(prior, now)) {
          if (!isComputingAsync(prior)) {
            tryExpireAfterRead(prior, key, currentValue, expiry(), now);
            setAccessTime(prior, now);
          }
          afterRead(prior, now, /* recordHit */ false);
          return currentValue;
        }
      }

      // A read may race with the entry's removal, so that after the entry is acquired it may no
      // longer be usable. A retry will reread from the map and either find an absent mapping, a
      // new entry, or a stale entry.
      if (!prior.isAlive()) {
        // A reread of the stale entry may occur if the state transition occurred but the map
        // removal was delayed by a context switch, so that this thread spin waits until resolved.
        if ((attempts & MAX_PUT_SPIN_WAIT_ATTEMPTS) != 0) {
          Thread.onSpinWait();
          continue;
        }

        // If the spin wait attempts are exhausted then fallback to a map computation in order to
        // deschedule this thread until the entry's removal completes. If the key was modified
        // while in the map so that its equals or hashCode changed then the contents may be
        // corrupted, where the cache holds an evicted (dead) entry that could not be removed.
        // That is a violation of the Map contract, so we check that the mapping is in the "alive"
        // state while in the computation.
        data.computeIfPresent(lookupKey, (k, n) -> {
          requireIsAlive(key, n);
          return n;
        });
        continue;
      }

      V oldValue;
      long varTime;
      int oldWeight;
      boolean expired = false;
      boolean mayUpdate = true;
      boolean exceedsTolerance = false;
      // Lock the entry to update it in place; recheck liveness under the lock
      synchronized (prior) {
        if (!prior.isAlive()) {
          continue;
        }
        oldValue = prior.getValue();
        oldWeight = prior.getWeight();
        if (oldValue == null) {
          // The value was reference collected; treat this write as a fresh creation
          varTime = expireAfterCreate(key, value, expiry, now);
          notifyEviction(key, null, RemovalCause.COLLECTED);
        } else if (hasExpired(prior, now)) {
          // The entry lapsed; evict it and treat this write as a fresh creation
          expired = true;
          varTime = expireAfterCreate(key, value, expiry, now);
          notifyEviction(key, oldValue, RemovalCause.EXPIRED);
        } else if (onlyIfAbsent) {
          // A usable mapping exists, so only its read-based expiration is refreshed
          mayUpdate = false;
          varTime = expireAfterRead(prior, key, value, expiry, now);
        } else {
          varTime = expireAfterUpdate(prior, key, value, expiry, now);
        }

        if (mayUpdate) {
          // Determine whether the expiration change is large enough to require a policy update
          exceedsTolerance =
              (expiresAfterWrite() && (now - prior.getWriteTime()) > EXPIRE_WRITE_TOLERANCE)
              || (expiresVariable()
                  && Math.abs(varTime - prior.getVariableTime()) > EXPIRE_WRITE_TOLERANCE);

          prior.setValue(value, valueReferenceQueue());
          prior.setWeight(newWeight);
          setWriteTime(prior, now);

          discardRefresh(prior.getKeyReference());
        }

        setVariableTime(prior, varTime);
        setAccessTime(prior, now);
      }

      // Publish removal/replacement notifications outside of the entry lock
      if (expired) {
        notifyRemoval(key, oldValue, RemovalCause.EXPIRED);
      } else if (oldValue == null) {
        notifyRemoval(key, /* oldValue */ null, RemovalCause.COLLECTED);
      } else if (mayUpdate) {
        notifyOnReplace(key, oldValue, value);
      }

      int weightedDifference = mayUpdate ? (newWeight - oldWeight) : 0;
      if ((oldValue == null) || (weightedDifference != 0) || expired) {
        afterWrite(new UpdateTask(prior, weightedDifference));
      } else if (!onlyIfAbsent && exceedsTolerance) {
        afterWrite(new UpdateTask(prior, weightedDifference));
      } else {
        // Within tolerance, so the cheaper read-path bookkeeping suffices
        if (mayUpdate) {
          setWriteTime(prior, now);
        }
        afterRead(prior, now, /* recordHit */ false);
      }

      return expired ? null : oldValue;
    }
  }
2430

2431
  @Override
  public @Nullable V remove(Object key) {
    @SuppressWarnings("unchecked")
    K castKey = (K) key;
    // Single-element arrays act as mutable out-parameters for the compute lambda
    @SuppressWarnings({"rawtypes", "unchecked"})
    Node<K, V>[] node = new Node[1];
    @SuppressWarnings("unchecked")
    V[] oldValue = (V[]) new Object[1];
    RemovalCause[] cause = new RemovalCause[1];
    Object lookupKey = nodeFactory.newLookupKey(key);

    // Atomically unlink the entry from the map; the cause records why it was removed
    data.computeIfPresent(lookupKey, (k, n) -> {
      synchronized (n) {
        requireIsAlive(key, n);
        oldValue[0] = n.getValue();
        if (oldValue[0] == null) {
          cause[0] = RemovalCause.COLLECTED;
        } else if (hasExpired(n, expirationTicker().read())) {
          cause[0] = RemovalCause.EXPIRED;
        } else {
          cause[0] = RemovalCause.EXPLICIT;
        }
        if (cause[0].wasEvicted()) {
          notifyEviction(castKey, oldValue[0], cause[0]);
        }
        discardRefresh(lookupKey);
        node[0] = n;
        n.retire();
      }
      return null;
    });

    // Only set if the compute removed a mapping
    if (cause[0] != null) {
      afterWrite(new RemovalTask(node[0]));
      notifyRemoval(castKey, oldValue[0], cause[0]);
    }
    // Per Map.remove, only an explicit removal of a live mapping returns the prior value
    return (cause[0] == RemovalCause.EXPLICIT) ? oldValue[0] : null;
  }
2469

2470
  @Override
  public boolean remove(Object key, Object value) {
    requireNonNull(key);
    if (value == null) {
      // A null value can never match, so no mapping could be removed
      return false;
    }

    // Single-element arrays act as mutable out-parameters for the compute lambda
    @SuppressWarnings({"rawtypes", "unchecked"})
    Node<K, V>[] removed = new Node[1];
    @SuppressWarnings("unchecked")
    K[] oldKey = (K[]) new Object[1];
    @SuppressWarnings("unchecked")
    V[] oldValue = (V[]) new Object[1];
    RemovalCause[] cause = new RemovalCause[1];
    Object lookupKey = nodeFactory.newLookupKey(key);

    data.computeIfPresent(lookupKey, (kR, node) -> {
      synchronized (node) {
        requireIsAlive(key, node);
        oldKey[0] = node.getKey();
        oldValue[0] = node.getValue();
        if ((oldKey[0] == null) || (oldValue[0] == null)) {
          cause[0] = RemovalCause.COLLECTED;
        } else if (hasExpired(node, expirationTicker().read())) {
          cause[0] = RemovalCause.EXPIRED;
        } else if (node.containsValue(value)) {
          cause[0] = RemovalCause.EXPLICIT;
        } else {
          // Live entry whose value does not match; keep the mapping
          return node;
        }
        if (cause[0].wasEvicted()) {
          notifyEviction(oldKey[0], oldValue[0], cause[0]);
        }
        discardRefresh(lookupKey);
        removed[0] = node;
        node.retire();
        return null;
      }
    });

    if (removed[0] == null) {
      return false;
    }
    afterWrite(new RemovalTask(removed[0]));
    notifyRemoval(oldKey[0], oldValue[0], cause[0]);

    // True only if a live, matching mapping was explicitly removed (not collected/expired)
    return (cause[0] == RemovalCause.EXPLICIT);
  }
2518

2519
  @Override
  public @Nullable V replace(K key, V value) {
    requireNonNull(key);
    requireNonNull(value);

    // Single-element arrays act as mutable out-parameters for the compute lambda
    int[] oldWeight = new int[1];
    @SuppressWarnings("unchecked")
    K[] nodeKey = (K[]) new Object[1];
    @SuppressWarnings("unchecked")
    V[] oldValue = (V[]) new Object[1];
    long[] now = new long[1];
    int weight = weigher.weigh(key, value);
    Node<K, V> node = data.computeIfPresent(nodeFactory.newLookupKey(key), (k, n) -> {
      synchronized (n) {
        requireIsAlive(key, n);
        nodeKey[0] = n.getKey();
        oldValue[0] = n.getValue();
        oldWeight[0] = n.getWeight();
        if ((nodeKey[0] == null) || (oldValue[0] == null)
            || hasExpired(n, now[0] = expirationTicker().read())) {
          // Collected or expired entries are not replaceable; signal "absent" to the caller
          oldValue[0] = null;
          return n;
        }

        long varTime = expireAfterUpdate(n, key, value, expiry(), now[0]);
        n.setValue(value, valueReferenceQueue());
        n.setWeight(weight);

        setVariableTime(n, varTime);
        setAccessTime(n, now[0]);
        setWriteTime(n, now[0]);
        discardRefresh(k);
        return n;
      }
    });

    if (oldValue[0] == null) {
      return null;
    }

    // A weight change or write-expiration requires the write path; otherwise the read path is used
    int weightedDifference = (weight - oldWeight[0]);
    if (expiresAfterWrite() || (weightedDifference != 0)) {
      afterWrite(new UpdateTask(node, weightedDifference));
    } else {
      afterRead(node, now[0], /* recordHit */ false);
    }

    notifyOnReplace(nodeKey[0], oldValue[0], value);
    return oldValue[0];
  }
2569

2570
  @Override
  public boolean replace(K key, V oldValue, V newValue) {
    // Delegates to the extended overload, discarding any in-flight refresh by default
    return replace(key, oldValue, newValue, /* shouldDiscardRefresh */ true);
  }
2574

2575
  @Override
  public boolean replace(K key, V oldValue, V newValue, boolean shouldDiscardRefresh) {
    requireNonNull(key);
    requireNonNull(oldValue);
    requireNonNull(newValue);

    int weight = weigher.weigh(key, newValue);
    // Single-element arrays act as mutable out-parameters for the compute lambda
    boolean[] replaced = new boolean[1];
    @SuppressWarnings("unchecked")
    K[] nodeKey = (K[]) new Object[1];
    @SuppressWarnings("unchecked")
    V[] prevValue = (V[]) new Object[1];
    int[] oldWeight = new int[1];
    long[] now = new long[1];
    Node<K, V> node = data.computeIfPresent(nodeFactory.newLookupKey(key), (k, n) -> {
      synchronized (n) {
        requireIsAlive(key, n);
        nodeKey[0] = n.getKey();
        prevValue[0] = n.getValue();
        oldWeight[0] = n.getWeight();
        if ((nodeKey[0] == null) || (prevValue[0] == null) || !n.containsValue(oldValue)
            || hasExpired(n, now[0] = expirationTicker().read())) {
          // Collected, expired, or non-matching value: leave the mapping untouched
          return n;
        }

        long varTime = expireAfterUpdate(n, key, newValue, expiry(), now[0]);
        n.setValue(newValue, valueReferenceQueue());
        n.setWeight(weight);

        setVariableTime(n, varTime);
        setAccessTime(n, now[0]);
        setWriteTime(n, now[0]);
        replaced[0] = true;

        if (shouldDiscardRefresh) {
          discardRefresh(k);
        }
      }
      return n;
    });

    if (!replaced[0]) {
      return false;
    }

    // A weight change or write-expiration requires the write path; otherwise the read path is used
    int weightedDifference = (weight - oldWeight[0]);
    if (expiresAfterWrite() || (weightedDifference != 0)) {
      afterWrite(new UpdateTask(node, weightedDifference));
    } else {
      afterRead(node, now[0], /* recordHit */ false);
    }

    notifyOnReplace(nodeKey[0], prevValue[0], newValue);
    return true;
  }
2630

2631
  @Override
2632
  public void replaceAll(BiFunction<? super K, ? super V, ? extends V> function) {
2633
    requireNonNull(function);
1✔
2634

2635
    BiFunction<K, V, V> remappingFunction = (key, oldValue) ->
1✔
2636
        requireNonNull(function.apply(key, oldValue));
1✔
2637
    for (K key : keySet()) {
1✔
2638
      long[] now = { expirationTicker().read() };
1✔
2639
      Object lookupKey = nodeFactory.newLookupKey(key);
1✔
2640
      remap(key, lookupKey, remappingFunction, expiry(), now, /* computeIfAbsent */ false);
1✔
2641
    }
1✔
2642
  }
1✔
2643

2644
  @Override
  public @Nullable V computeIfAbsent(K key, Function<? super K, ? extends V> mappingFunction,
      boolean recordStats, boolean recordLoad) {
    requireNonNull(key);
    requireNonNull(mappingFunction);
    long now = expirationTicker().read();

    // An optimistic fast path to avoid unnecessary locking
    Node<K, V> node = data.get(nodeFactory.newLookupKey(key));
    if (node != null) {
      V value = node.getValue();
      if ((value != null) && !hasExpired(node, now)) {
        if (!isComputingAsync(node)) {
          tryExpireAfterRead(node, key, value, expiry(), now);
          setAccessTime(node, now);
        }
        // A refresh triggered by this read may supply a newer value to return
        var refreshed = afterRead(node, now, /* recordHit */ recordStats);
        return (refreshed == null) ? value : refreshed;
      }
    }
    if (recordStats) {
      mappingFunction = statsAware(mappingFunction, recordLoad);
    }
    // Fall back to the locking slow path with a strongly-held key reference
    Object keyRef = nodeFactory.newReferenceKey(key, keyReferenceQueue());
    return doComputeIfAbsent(key, keyRef, mappingFunction, new long[] { now }, recordStats);
  }
2670

2671
  /** Returns the current value from a computeIfAbsent invocation. */
  @Nullable V doComputeIfAbsent(K key, Object keyRef,
      Function<? super K, ? extends V> mappingFunction, long[/* 1 */] now, boolean recordStats) {
    // Single-element arrays act as mutable out-parameters for the compute lambda
    @SuppressWarnings("unchecked")
    V[] oldValue = (V[]) new Object[1];
    @SuppressWarnings("unchecked")
    V[] newValue = (V[]) new Object[1];
    @SuppressWarnings("unchecked")
    K[] nodeKey = (K[]) new Object[1];
    @SuppressWarnings({"rawtypes", "unchecked"})
    Node<K, V>[] removed = new Node[1];

    int[] weight = new int[2]; // old, new
    RemovalCause[] cause = new RemovalCause[1];
    Node<K, V> node = data.compute(keyRef, (k, n) -> {
      if (n == null) {
        // Absent: run the loader and, if it produced a value, insert a new entry
        newValue[0] = mappingFunction.apply(key);
        if (newValue[0] == null) {
          return null;
        }
        now[0] = expirationTicker().read();
        weight[1] = weigher.weigh(key, newValue[0]);
        n = nodeFactory.newNode(key, keyReferenceQueue(),
            newValue[0], valueReferenceQueue(), weight[1], now[0]);
        setVariableTime(n, expireAfterCreate(key, newValue[0], expiry(), now[0]));
        return n;
      }

      synchronized (n) {
        requireIsAlive(key, n);
        nodeKey[0] = n.getKey();
        weight[0] = n.getWeight();
        oldValue[0] = n.getValue();
        if ((nodeKey[0] == null) || (oldValue[0] == null)) {
          cause[0] = RemovalCause.COLLECTED;
        } else if (hasExpired(n, now[0])) {
          cause[0] = RemovalCause.EXPIRED;
        } else {
          // Live entry: computeIfAbsent leaves an existing value untouched
          return n;
        }

        if (cause[0].wasEvicted()) {
          notifyEviction(nodeKey[0], oldValue[0], cause[0]);
        }
        // The stale entry is replaced in place, as if loading a fresh mapping
        newValue[0] = mappingFunction.apply(key);
        if (newValue[0] == null) {
          removed[0] = n;
          n.retire();
          return null;
        }
        now[0] = expirationTicker().read();
        weight[1] = weigher.weigh(key, newValue[0]);
        long varTime = expireAfterCreate(key, newValue[0], expiry(), now[0]);

        n.setValue(newValue[0], valueReferenceQueue());
        n.setWeight(weight[1]);

        setVariableTime(n, varTime);
        setAccessTime(n, now[0]);
        setWriteTime(n, now[0]);
        discardRefresh(k);
        return n;
      }
    });

    // Publish any eviction outside of the map computation
    if (cause[0] != null) {
      if (cause[0].wasEvicted()) {
        statsCounter().recordEviction(weight[0], cause[0]);
      }
      notifyRemoval(nodeKey[0], oldValue[0], cause[0]);
    }
    if (node == null) {
      if (removed[0] != null) {
        afterWrite(new RemovalTask(removed[0]));
      }
      return null;
    }
    if (newValue[0] == null) {
      // The existing live value was kept; perform read-side bookkeeping only
      if (!isComputingAsync(node)) {
        tryExpireAfterRead(node, key, oldValue[0], expiry(), now[0]);
        setAccessTime(node, now[0]);
      }

      afterRead(node, now[0], /* recordHit */ recordStats);
      return oldValue[0];
    }
    if ((oldValue[0] == null) && (cause[0] == null)) {
      afterWrite(new AddTask(node, weight[1]));
    } else {
      int weightedDifference = (weight[1] - weight[0]);
      afterWrite(new UpdateTask(node, weightedDifference));
    }

    return newValue[0];
  }
2766

2767
  @Override
  public @Nullable V computeIfPresent(K key,
      BiFunction<? super K, ? super V, ? extends V> remappingFunction) {
    requireNonNull(key);
    requireNonNull(remappingFunction);

    // An optimistic fast path to avoid unnecessary locking
    Object lookupKey = nodeFactory.newLookupKey(key);
    @Nullable Node<K, V> node = data.get(lookupKey);
    long now;
    if (node == null) {
      return null;
    } else if ((node.getValue() == null) || hasExpired(node, (now = expirationTicker().read()))) {
      // Stale mapping found; trigger maintenance so it is cleaned up, and report absent
      scheduleDrainBuffers();
      return null;
    }

    BiFunction<? super K, ? super V, ? extends V> statsAwareRemappingFunction =
        statsAware(remappingFunction, /* recordLoad */ true, /* recordLoadFailure */ true);
    // Passes a lookup-only key so remap cannot insert an absent mapping
    return remap(key, lookupKey, statsAwareRemappingFunction,
        expiry(), new long[] { now }, /* computeIfAbsent */ false);
  }
2789

2790
  @Override
  @SuppressWarnings("NullAway")
  public @Nullable V compute(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction,
      @Nullable Expiry<? super K, ? super V> expiry, boolean recordLoad,
      boolean recordLoadFailure) {
    requireNonNull(key);
    requireNonNull(remappingFunction);

    long[] now = { expirationTicker().read() };
    // A strongly-held key reference, since an absent mapping may be inserted
    Object keyRef = nodeFactory.newReferenceKey(key, keyReferenceQueue());
    BiFunction<? super K, ? super V, ? extends V> statsAwareRemappingFunction =
        statsAware(remappingFunction, recordLoad, recordLoadFailure);
    return remap(key, keyRef, statsAwareRemappingFunction,
        expiry, now, /* computeIfAbsent */ true);
  }
2805

2806
  @Override
  public @Nullable V merge(K key, V value,
      BiFunction<? super V, ? super V, ? extends V> remappingFunction) {
    requireNonNull(key);
    requireNonNull(value);
    requireNonNull(remappingFunction);

    long[] now = { expirationTicker().read() };
    // A strongly-held key reference, since an absent mapping may be inserted
    Object keyRef = nodeFactory.newReferenceKey(key, keyReferenceQueue());
    // Absent mapping takes the given value; a present mapping is combined by the remapper
    BiFunction<? super K, ? super V, ? extends V> mergeFunction = (k, oldValue) ->
        (oldValue == null) ? value : statsAware(remappingFunction).apply(oldValue, value);
    return remap(key, keyRef, mergeFunction, expiry(), now, /* computeIfAbsent */ true);
  }
2819

2820
  /**
   * Attempts to compute a mapping for the specified key and its current mapped value (or
   * {@code null} if there is no current mapping).
   * <p>
   * An entry that has expired or been reference collected is evicted and the computation continues
   * as if the entry had not been present. This method does not pre-screen and does not wrap the
   * remappingFunction to be statistics aware.
   *
   * @param key key with which the specified value is to be associated
   * @param keyRef the key to associate with or a lookup only key if not <tt>computeIfAbsent</tt>
   * @param remappingFunction the function to compute a value
   * @param expiry the calculator for the expiration time
   * @param now the current time, according to the ticker
   * @param computeIfAbsent if an absent entry can be computed
   * @return the new value associated with the specified key, or null if none
   */
  @SuppressWarnings("PMD.EmptyControlStatement")
  @Nullable V remap(K key, Object keyRef,
      BiFunction<? super K, ? super V, ? extends V> remappingFunction,
      Expiry<? super K, ? super V> expiry, long[/* 1 */] now, boolean computeIfAbsent) {
    // Single-element arrays act as mutable out-parameters for the compute lambda
    @SuppressWarnings("unchecked")
    K[] nodeKey = (K[]) new Object[1];
    @SuppressWarnings("unchecked")
    V[] oldValue = (V[]) new Object[1];
    @SuppressWarnings("unchecked")
    V[] newValue = (V[]) new Object[1];
    @SuppressWarnings({"rawtypes", "unchecked"})
    Node<K, V>[] removed = new Node[1];

    int[] weight = new int[2]; // old, new
    RemovalCause[] cause = new RemovalCause[1];

    Node<K, V> node = data.compute(keyRef, (kr, n) -> {
      if (n == null) {
        // Absent mapping
        if (!computeIfAbsent) {
          return null;
        }
        newValue[0] = remappingFunction.apply(key, null);
        if (newValue[0] == null) {
          return null;
        }
        now[0] = expirationTicker().read();
        weight[1] = weigher.weigh(key, newValue[0]);
        long varTime = expireAfterCreate(key, newValue[0], expiry, now[0]);
        n = nodeFactory.newNode(keyRef, newValue[0], valueReferenceQueue(), weight[1], now[0]);
        setVariableTime(n, varTime);
        setAccessTime(n, now[0]);
        setWriteTime(n, now[0]);
        discardRefresh(key);
        return n;
      }

      synchronized (n) {
        requireIsAlive(key, n);
        nodeKey[0] = n.getKey();
        oldValue[0] = n.getValue();
        // A collected or expired entry is evicted and treated as an absent mapping
        if ((nodeKey[0] == null) || (oldValue[0] == null)) {
          cause[0] = RemovalCause.COLLECTED;
        } else if (hasExpired(n, expirationTicker().read())) {
          cause[0] = RemovalCause.EXPIRED;
        }
        if (cause[0] != null) {
          notifyEviction(nodeKey[0], oldValue[0], cause[0]);
          if (!computeIfAbsent) {
            removed[0] = n;
            n.retire();
            return null;
          }
        }

        // The stale entry's value is hidden from the remapper (passed as null)
        newValue[0] = remappingFunction.apply(nodeKey[0],
            (cause[0] == null) ? oldValue[0] : null);
        if (newValue[0] == null) {
          // The remapper removed the mapping
          if (cause[0] == null) {
            cause[0] = RemovalCause.EXPLICIT;
            discardRefresh(kr);
          }
          removed[0] = n;
          n.retire();
          return null;
        }

        long varTime;
        weight[0] = n.getWeight();
        weight[1] = weigher.weigh(key, newValue[0]);
        now[0] = expirationTicker().read();
        if (cause[0] == null) {
          if (newValue[0] != oldValue[0]) {
            cause[0] = RemovalCause.REPLACED;
          }
          varTime = expireAfterUpdate(n, key, newValue[0], expiry, now[0]);
        } else {
          // The stale value was evicted, so the new value expires as a fresh creation
          varTime = expireAfterCreate(key, newValue[0], expiry, now[0]);
        }

        n.setValue(newValue[0], valueReferenceQueue());
        n.setWeight(weight[1]);

        setVariableTime(n, varTime);
        setAccessTime(n, now[0]);
        setWriteTime(n, now[0]);
        discardRefresh(kr);
        return n;
      }
    });

    // Publish notifications outside of the map computation
    if (cause[0] != null) {
      if (cause[0] == RemovalCause.REPLACED) {
        notifyOnReplace(key, oldValue[0], newValue[0]);
      } else {
        if (cause[0].wasEvicted()) {
          statsCounter().recordEviction(weight[0], cause[0]);
        }
        notifyRemoval(nodeKey[0], oldValue[0], cause[0]);
      }
    }

    if (removed[0] != null) {
      afterWrite(new RemovalTask(removed[0]));
    } else if (node == null) {
      // absent and not computable
    } else if ((oldValue[0] == null) && (cause[0] == null)) {
      afterWrite(new AddTask(node, weight[1]));
    } else {
      int weightedDifference = weight[1] - weight[0];
      if (expiresAfterWrite() || (weightedDifference != 0)) {
        afterWrite(new UpdateTask(node, weightedDifference));
      } else {
        afterRead(node, now[0], /* recordHit */ false);
        if ((cause[0] != null) && cause[0].wasEvicted()) {
          scheduleDrainBuffers();
        }
      }
    }

    return newValue[0];
  }
2957

2958
  @Override
2959
  public void forEach(BiConsumer<? super K, ? super V> action) {
2960
    requireNonNull(action);
1✔
2961

2962
    for (var iterator = new EntryIterator<>(this); iterator.hasNext();) {
1✔
2963
      action.accept(iterator.key, iterator.value);
1✔
2964
      iterator.advance();
1✔
2965
    }
2966
  }
1✔
2967

2968
  @Override
2969
  public Set<K> keySet() {
2970
    final Set<K> ks = keySet;
1✔
2971
    return (ks == null) ? (keySet = new KeySetView<>(this)) : ks;
1✔
2972
  }
2973

2974
  @Override
2975
  public Collection<V> values() {
2976
    final Collection<V> vs = values;
1✔
2977
    return (vs == null) ? (values = new ValuesView<>(this)) : vs;
1✔
2978
  }
2979

2980
  @Override
2981
  public Set<Entry<K, V>> entrySet() {
2982
    final Set<Entry<K, V>> es = entrySet;
1✔
2983
    return (es == null) ? (entrySet = new EntrySetView<>(this)) : es;
1✔
2984
  }
2985

2986
  /**
   * Object equality requires reflexive, symmetric, transitive, and consistency properties. Of
   * these, symmetry and consistency require further clarification for how they are upheld.
   * <p>
   * The <i>consistency</i> property between invocations requires that the results are the same if
   * there are no modifications to the information used. Therefore, usages should expect that this
   * operation may return misleading results if either the maps or the data held by them is modified
   * during the execution of this method. This characteristic allows for comparing the map sizes and
   * assuming stable mappings, as done by {@link java.util.AbstractMap}-based maps.
   * <p>
   * The <i>symmetric</i> property requires that the result is the same for all implementations of
   * {@link Map#equals(Object)}. That contract is defined in terms of the stable mappings provided
   * by {@link #entrySet()}, meaning that the {@link #size()} optimization forces that the count is
   * consistent with the mappings when used for an equality check.
   * <p>
   * The cache's {@link #size()} method may include entries that have expired or have been reference
   * collected, but have not yet been removed from the backing map. An iteration over the map may
   * trigger the removal of these dead entries when skipped over during traversal. To ensure
   * consistency and symmetry, usages should call {@link #cleanUp()} before this method while no
   * other concurrent operations are being performed on this cache. This is not done implicitly by
   * {@link #size()} as many usages assume it to be instantaneous and lock-free.
   */
  @Override
  public boolean equals(Object o) {
    if (o == this) {
      return true;
    } else if (!(o instanceof Map)) {
      return false;
    }

    // Size mismatch short-circuits; see the consistency discussion above
    var map = (Map<?, ?>) o;
    if (size() != map.size()) {
      return false;
    }

    long now = expirationTicker().read();
    for (var node : data.values()) {
      K key = node.getKey();
      V value = node.getValue();
      if ((key == null) || (value == null)
          || !node.isAlive() || hasExpired(node, now)) {
        // A dead entry makes the size unreliable; trigger maintenance and report inequality
        scheduleDrainBuffers();
        return false;
      } else {
        var val = map.get(key);
        // Identity check first to skip a potentially expensive equals call
        if ((val == null) || ((val != value) && !val.equals(value))) {
          return false;
        }
      }
    }
    return true;
  }
3038

3039
  @Override
3040
  @SuppressWarnings("NullAway")
3041
  public int hashCode() {
3042
    int hash = 0;
1✔
3043
    long now = expirationTicker().read();
1✔
3044
    for (var node : data.values()) {
1✔
3045
      K key = node.getKey();
1✔
3046
      V value = node.getValue();
1✔
3047
      if ((key == null) || (value == null)
1✔
3048
          || !node.isAlive() || hasExpired(node, now)) {
1✔
3049
        scheduleDrainBuffers();
1✔
3050
      } else {
3051
        hash += key.hashCode() ^ value.hashCode();
1✔
3052
      }
3053
    }
1✔
3054
    return hash;
1✔
3055
  }
3056

3057
  @Override
3058
  public String toString() {
3059
    var result = new StringBuilder().append('{');
1✔
3060
    long now = expirationTicker().read();
1✔
3061
    for (var node : data.values()) {
1✔
3062
      K key = node.getKey();
1✔
3063
      V value = node.getValue();
1✔
3064
      if ((key == null) || (value == null)
1✔
3065
          || !node.isAlive() || hasExpired(node, now)) {
1✔
3066
        scheduleDrainBuffers();
1✔
3067
      } else {
3068
        if (result.length() != 1) {
1✔
3069
          result.append(',').append(' ');
1✔
3070
        }
3071
        result.append((key == this) ? "(this Map)" : key);
1✔
3072
        result.append('=');
1✔
3073
        result.append((value == this) ? "(this Map)" : value);
1✔
3074
      }
3075
    }
1✔
3076
    return result.append('}').toString();
1✔
3077
  }
3078

3079
  /**
   * Returns the computed result from the ordered traversal of the cache entries.
   *
   * @param hottest the coldest or hottest iteration order
   * @param transformer a function that unwraps the value
   * @param mappingFunction the mapping function to compute a value
   * @return the computed value
   */
  @SuppressWarnings("GuardedByChecker")
  <T> T evictionOrder(boolean hottest, Function<V, V> transformer,
      Function<Stream<CacheEntry<K, V>>, T> mappingFunction) {
    // Orders entries by their estimated frequency; an unset (collected) key counts as zero
    Comparator<Node<K, V>> comparator = Comparator.comparingInt(node -> {
      K key = node.getKey();
      return (key == null) ? 0 : frequencySketch().frequency(key);
    });
    Iterable<Node<K, V>> iterable;
    if (hottest) {
      // Hottest first: protected entries, then probation/window merged by descending frequency
      iterable = () -> {
        var secondary = PeekingIterator.comparing(
            accessOrderProbationDeque().descendingIterator(),
            accessOrderWindowDeque().descendingIterator(), comparator);
        return PeekingIterator.concat(
            accessOrderProtectedDeque().descendingIterator(), secondary);
      };
    } else {
      // Coldest first: window/probation merged by ascending frequency, then protected entries
      iterable = () -> {
        var primary = PeekingIterator.comparing(
            accessOrderWindowDeque().iterator(), accessOrderProbationDeque().iterator(),
            comparator.reversed());
        return PeekingIterator.concat(primary, accessOrderProtectedDeque().iterator());
      };
    }
    return snapshot(iterable, transformer, mappingFunction);
  }
3113

3114
  /**
   * Returns the computed result from the ordered traversal of the cache entries.
   *
   * @param oldest the youngest or oldest iteration order
   * @param transformer a function that unwraps the value
   * @param mappingFunction the mapping function to compute a value
   * @return the computed value
   */
  @SuppressWarnings("GuardedByChecker")
  <T> T expireAfterAccessOrder(boolean oldest, Function<V, V> transformer,
      Function<Stream<CacheEntry<K, V>>, T> mappingFunction) {
    Iterable<Node<K, V>> iterable;
    if (evicts()) {
      // With size eviction the entries are spread across the window, probation, and
      // protected deques, so perform a three-way merge ordered by access time
      iterable = () -> {
        Comparator<Node<K, V>> comparator = Comparator.comparingLong(Node::getAccessTime);
        PeekingIterator<Node<K, V>> first, second, third;
        if (oldest) {
          first = accessOrderWindowDeque().iterator();
          second = accessOrderProbationDeque().iterator();
          third = accessOrderProtectedDeque().iterator();
        } else {
          // Youngest first: traverse each deque backwards and merge by the reversed order
          comparator = comparator.reversed();
          first = accessOrderWindowDeque().descendingIterator();
          second = accessOrderProbationDeque().descendingIterator();
          third = accessOrderProtectedDeque().descendingIterator();
        }
        return PeekingIterator.comparing(
            PeekingIterator.comparing(first, second, comparator), third, comparator);
      };
    } else {
      // Without size eviction all entries reside in the window deque, already in access order
      iterable = oldest
          ? accessOrderWindowDeque()
          : accessOrderWindowDeque()::descendingIterator;
    }
    return snapshot(iterable, transformer, mappingFunction);
  }
3150

3151
  /**
   * Returns the computed result from the ordered traversal of the cache entries.
   *
   * @param iterable the supplier of the entries in the cache
   * @param transformer a function that unwraps the value
   * @param mappingFunction the mapping function to compute a value
   * @return the computed value
   */
  <T> T snapshot(Iterable<Node<K, V>> iterable, Function<V, V> transformer,
      Function<Stream<CacheEntry<K, V>>, T> mappingFunction) {
    requireNonNull(mappingFunction);
    requireNonNull(transformer);
    requireNonNull(iterable);

    // Traverse under the eviction lock so the policy deques are not concurrently mutated
    evictionLock.lock();
    try {
      // Flush the pending policy work so the traversal reflects the latest ordering
      maintenance(/* ignored */ null);

      // Obtain the iterator as late as possible for modification count checking
      try (var stream = StreamSupport.stream(Spliterators.spliteratorUnknownSize(
           iterable.iterator(), DISTINCT | ORDERED | NONNULL | IMMUTABLE), /* parallel */ false)) {
        // Dead, collected, or expired nodes map to null and are filtered out
        return mappingFunction.apply(stream
            .map(node -> nodeToCacheEntry(node, transformer))
            .filter(Objects::nonNull));
      }
    } finally {
      evictionLock.unlock();
      // The maintenance run may have been cut short; schedule a follow-up if needed
      rescheduleCleanUpIfIncomplete();
    }
  }
3181

3182
  /** Returns an entry for the given node if it can be used externally, else null. */
  @Nullable CacheEntry<K, V> nodeToCacheEntry(Node<K, V> node, Function<V, V> transformer) {
    V value = transformer.apply(node.getValue());
    K key = node.getKey();
    long now;
    // Exclude entries whose key or value was reclaimed, that are retired/dead, or expired
    if ((key == null) || (value == null) || !node.isAlive()
        || hasExpired(node, (now = expirationTicker().read()))) {
      return null;
    }

    // Derive the expiration offset from the enabled fixed policies
    long expiresAfter = Long.MAX_VALUE;
    if (expiresAfterAccess()) {
      expiresAfter = Math.min(expiresAfter, now - node.getAccessTime() + expiresAfterAccessNanos());
    }
    if (expiresAfterWrite()) {
      // NOTE(review): the low bit of both timestamps is masked off before differencing;
      // presumably it is reserved as a flag bit on write times — confirm against Node
      expiresAfter = Math.min(expiresAfter,
          (now & ~1L) - (node.getWriteTime() & ~1L) + expiresAfterWriteNanos());
    }
    if (expiresVariable()) {
      // Variable expiration supersedes the fixed policies rather than combining with them
      expiresAfter = node.getVariableTime() - now;
    }

    // When refresh is disabled (now + Long.MAX_VALUE) may overflow; assumed to act as a
    // far-future sentinel — confirm against SnapshotEntry.forEntry
    long refreshableAt = refreshAfterWrite()
        ? node.getWriteTime() + refreshAfterWriteNanos()
        : now + Long.MAX_VALUE;
    int weight = node.getPolicyWeight();
    return SnapshotEntry.forEntry(key, value, now, weight, now + expiresAfter, refreshableAt);
  }
3210

3211
  /** A function that produces an unmodifiable map up to the limit in stream order. */
3212
  static final class SizeLimiter<K, V> implements Function<Stream<CacheEntry<K, V>>, Map<K, V>> {
3213
    private final int expectedSize;
3214
    private final long limit;
3215

3216
    SizeLimiter(int expectedSize, long limit) {
1✔
3217
      requireArgument(limit >= 0);
1✔
3218
      this.expectedSize = expectedSize;
1✔
3219
      this.limit = limit;
1✔
3220
    }
1✔
3221

3222
    @Override
3223
    public Map<K, V> apply(Stream<CacheEntry<K, V>> stream) {
3224
      var map = new LinkedHashMap<K, V>(calculateHashMapCapacity(expectedSize));
1✔
3225
      stream.limit(limit).forEach(entry -> map.put(entry.getKey(), entry.getValue()));
1✔
3226
      return Collections.unmodifiableMap(map);
1✔
3227
    }
3228
  }
3229

3230
  /** A function that produces an unmodifiable map up to the weighted limit in stream order. */
3231
  static final class WeightLimiter<K, V> implements Function<Stream<CacheEntry<K, V>>, Map<K, V>> {
3232
    private final long weightLimit;
3233

3234
    private long weightedSize;
3235

3236
    WeightLimiter(long weightLimit) {
1✔
3237
      requireArgument(weightLimit >= 0);
1✔
3238
      this.weightLimit = weightLimit;
1✔
3239
    }
1✔
3240

3241
    @Override
3242
    public Map<K, V> apply(Stream<CacheEntry<K, V>> stream) {
3243
      var map = new LinkedHashMap<K, V>();
1✔
3244
      stream.takeWhile(entry -> {
1✔
3245
        weightedSize = Math.addExact(weightedSize, entry.weight());
1✔
3246
        return (weightedSize <= weightLimit);
1✔
3247
      }).forEach(entry -> map.put(entry.getKey(), entry.getValue()));
1✔
3248
      return Collections.unmodifiableMap(map);
1✔
3249
    }
3250
  }
3251

3252
  /** An adapter to safely externalize the keys. */
3253
  static final class KeySetView<K, V> extends AbstractSet<K> {
3254
    final BoundedLocalCache<K, V> cache;
3255

3256
    KeySetView(BoundedLocalCache<K, V> cache) {
1✔
3257
      this.cache = requireNonNull(cache);
1✔
3258
    }
1✔
3259

3260
    @Override
3261
    public int size() {
3262
      return cache.size();
1✔
3263
    }
3264

3265
    @Override
3266
    public void clear() {
3267
      cache.clear();
1✔
3268
    }
1✔
3269

3270
    @Override
3271
    @SuppressWarnings("SuspiciousMethodCalls")
3272
    public boolean contains(Object o) {
3273
      return cache.containsKey(o);
1✔
3274
    }
3275

3276
    @Override
3277
    public boolean removeAll(Collection<?> collection) {
3278
      requireNonNull(collection);
1✔
3279
      boolean modified = false;
1✔
3280
      if ((collection instanceof Set<?>) && (collection.size() > size())) {
1✔
3281
        for (K key : this) {
1✔
3282
          if (collection.contains(key)) {
1✔
3283
            modified |= remove(key);
1✔
3284
          }
3285
        }
1✔
3286
      } else {
3287
        for (var item : collection) {
1✔
3288
          modified |= (item != null) && remove(item);
1✔
3289
        }
1✔
3290
      }
3291
      return modified;
1✔
3292
    }
3293

3294
    @Override
3295
    public boolean remove(Object o) {
3296
      return (cache.remove(o) != null);
1✔
3297
    }
3298

3299
    @Override
3300
    public boolean removeIf(Predicate<? super K> filter) {
3301
      requireNonNull(filter);
1✔
3302
      boolean modified = false;
1✔
3303
      for (K key : this) {
1✔
3304
        if (filter.test(key) && remove(key)) {
1✔
3305
          modified = true;
1✔
3306
        }
3307
      }
1✔
3308
      return modified;
1✔
3309
    }
3310

3311
    @Override
3312
    public boolean retainAll(Collection<?> collection) {
3313
      requireNonNull(collection);
1✔
3314
      boolean modified = false;
1✔
3315
      for (K key : this) {
1✔
3316
        if (!collection.contains(key) && remove(key)) {
1✔
3317
          modified = true;
1✔
3318
        }
3319
      }
1✔
3320
      return modified;
1✔
3321
    }
3322

3323
    @Override
3324
    public Iterator<K> iterator() {
3325
      return new KeyIterator<>(cache);
1✔
3326
    }
3327

3328
    @Override
3329
    public Spliterator<K> spliterator() {
3330
      return new KeySpliterator<>(cache);
1✔
3331
    }
3332
  }
3333

3334
  /** An adapter to safely externalize the key iterator. */
  static final class KeyIterator<K, V> implements Iterator<K> {
    // Delegates to the entry iterator, which skips dead, collected, and expired nodes
    final EntryIterator<K, V> iterator;

    KeyIterator(BoundedLocalCache<K, V> cache) {
      this.iterator = new EntryIterator<>(cache);
    }

    @Override
    public boolean hasNext() {
      return iterator.hasNext();
    }

    @Override
    public K next() {
      return iterator.nextKey();
    }

    @Override
    public void remove() {
      iterator.remove();
    }
  }
3357

3358
  /** An adapter to safely externalize the key spliterator. */
3359
  static final class KeySpliterator<K, V> implements Spliterator<K> {
3360
    final Spliterator<Node<K, V>> spliterator;
3361
    final BoundedLocalCache<K, V> cache;
3362

3363
    KeySpliterator(BoundedLocalCache<K, V> cache) {
3364
      this(cache, cache.data.values().spliterator());
1✔
3365
    }
1✔
3366

3367
    KeySpliterator(BoundedLocalCache<K, V> cache, Spliterator<Node<K, V>> spliterator) {
1✔
3368
      this.spliterator = requireNonNull(spliterator);
1✔
3369
      this.cache = requireNonNull(cache);
1✔
3370
    }
1✔
3371

3372
    @Override
3373
    public void forEachRemaining(Consumer<? super K> action) {
3374
      requireNonNull(action);
1✔
3375
      Consumer<Node<K, V>> consumer = node -> {
1✔
3376
        K key = node.getKey();
1✔
3377
        V value = node.getValue();
1✔
3378
        long now = cache.expirationTicker().read();
1✔
3379
        if ((key != null) && (value != null) && node.isAlive() && !cache.hasExpired(node, now)) {
1✔
3380
          action.accept(key);
1✔
3381
        }
3382
      };
1✔
3383
      spliterator.forEachRemaining(consumer);
1✔
3384
    }
1✔
3385

3386
    @Override
3387
    public boolean tryAdvance(Consumer<? super K> action) {
3388
      requireNonNull(action);
1✔
3389
      boolean[] advanced = { false };
1✔
3390
      Consumer<Node<K, V>> consumer = node -> {
1✔
3391
        K key = node.getKey();
1✔
3392
        V value = node.getValue();
1✔
3393
        long now = cache.expirationTicker().read();
1✔
3394
        if ((key != null) && (value != null) && node.isAlive() && !cache.hasExpired(node, now)) {
1✔
3395
          action.accept(key);
1✔
3396
          advanced[0] = true;
1✔
3397
        }
3398
      };
1✔
3399
      while (spliterator.tryAdvance(consumer)) {
1✔
3400
        if (advanced[0]) {
1✔
3401
          return true;
1✔
3402
        }
3403
      }
3404
      return false;
1✔
3405
    }
3406

3407
    @Override
3408
    public @Nullable Spliterator<K> trySplit() {
3409
      Spliterator<Node<K, V>> split = spliterator.trySplit();
1✔
3410
      return (split == null) ? null : new KeySpliterator<>(cache, split);
1✔
3411
    }
3412

3413
    @Override
3414
    public long estimateSize() {
3415
      return spliterator.estimateSize();
1✔
3416
    }
3417

3418
    @Override
3419
    public int characteristics() {
3420
      return DISTINCT | CONCURRENT | NONNULL;
1✔
3421
    }
3422
  }
3423

3424
  /** An adapter to safely externalize the values. */
  static final class ValuesView<K, V> extends AbstractCollection<V> {
    final BoundedLocalCache<K, V> cache;

    ValuesView(BoundedLocalCache<K, V> cache) {
      this.cache = requireNonNull(cache);
    }

    @Override
    public int size() {
      return cache.size();
    }

    @Override
    public void clear() {
      cache.clear();
    }

    @Override
    @SuppressWarnings("SuspiciousMethodCalls")
    public boolean contains(Object o) {
      return cache.containsValue(o);
    }

    // The bulk-removal methods below drive EntryIterator manually: hasNext() populates the
    // iterator's key/value fields with the next live entry, and advance() clears them so the
    // following hasNext() computes a new position. Removal uses the atomic (key, value)
    // overload so a concurrently replaced mapping is not removed by mistake.

    @Override
    @SuppressWarnings("NullAway")
    public boolean removeAll(Collection<?> collection) {
      requireNonNull(collection);
      boolean modified = false;
      for (var iterator = new EntryIterator<>(cache); iterator.hasNext();) {
        if (collection.contains(iterator.value) && cache.remove(iterator.key, iterator.value)) {
          modified = true;
        }
        iterator.advance();
      }
      return modified;
    }

    @Override
    @SuppressWarnings("NullAway")
    public boolean remove(Object o) {
      if (o == null) {
        return false;
      }
      for (var iterator = new EntryIterator<>(cache); iterator.hasNext();) {
        if (o.equals(iterator.value) && cache.remove(iterator.key, iterator.value)) {
          return true;
        }
        iterator.advance();
      }
      return false;
    }

    @Override
    @SuppressWarnings("NullAway")
    public boolean removeIf(Predicate<? super V> filter) {
      requireNonNull(filter);
      boolean modified = false;
      for (var iterator = new EntryIterator<>(cache); iterator.hasNext();) {
        if (filter.test(iterator.value)) {
          modified |= cache.remove(iterator.key, iterator.value);
        }
        iterator.advance();
      }
      return modified;
    }

    @Override
    @SuppressWarnings("NullAway")
    public boolean retainAll(Collection<?> collection) {
      requireNonNull(collection);
      boolean modified = false;
      for (var iterator = new EntryIterator<>(cache); iterator.hasNext();) {
        if (!collection.contains(iterator.value) && cache.remove(iterator.key, iterator.value)) {
          modified = true;
        }
        iterator.advance();
      }
      return modified;
    }

    @Override
    public Iterator<V> iterator() {
      return new ValueIterator<>(cache);
    }

    @Override
    public Spliterator<V> spliterator() {
      return new ValueSpliterator<>(cache);
    }
  }
3515

3516
  /** An adapter to safely externalize the value iterator. */
  static final class ValueIterator<K, V> implements Iterator<V> {
    // Delegates to the entry iterator, which skips dead, collected, and expired nodes
    final EntryIterator<K, V> iterator;

    ValueIterator(BoundedLocalCache<K, V> cache) {
      this.iterator = new EntryIterator<>(cache);
    }

    @Override
    public boolean hasNext() {
      return iterator.hasNext();
    }

    @Override
    public V next() {
      return iterator.nextValue();
    }

    @Override
    public void remove() {
      iterator.remove();
    }
  }
3539

3540
  /** An adapter to safely externalize the value spliterator. */
  static final class ValueSpliterator<K, V> implements Spliterator<V> {
    final Spliterator<Node<K, V>> spliterator;
    final BoundedLocalCache<K, V> cache;

    ValueSpliterator(BoundedLocalCache<K, V> cache) {
      this(cache, cache.data.values().spliterator());
    }

    ValueSpliterator(BoundedLocalCache<K, V> cache, Spliterator<Node<K, V>> spliterator) {
      this.spliterator = requireNonNull(spliterator);
      this.cache = requireNonNull(cache);
    }

    @Override
    public void forEachRemaining(Consumer<? super V> action) {
      requireNonNull(action);
      Consumer<Node<K, V>> consumer = node -> {
        K key = node.getKey();
        V value = node.getValue();
        long now = cache.expirationTicker().read();
        // Skip entries that were collected, retired, or expired but not yet swept
        if ((key != null) && (value != null) && node.isAlive() && !cache.hasExpired(node, now)) {
          action.accept(value);
        }
      };
      spliterator.forEachRemaining(consumer);
    }

    @Override
    public boolean tryAdvance(Consumer<? super V> action) {
      requireNonNull(action);
      boolean[] advanced = { false };
      // NOTE(review): unlike forEachRemaining above and the key/entry spliterators, the
      // timestamp is read once before the loop rather than per node, and the isAlive /
      // hasExpired checks are ordered differently — confirm the divergence is intentional
      long now = cache.expirationTicker().read();
      Consumer<Node<K, V>> consumer = node -> {
        K key = node.getKey();
        V value = node.getValue();
        if ((key != null) && (value != null) && !cache.hasExpired(node, now) && node.isAlive()) {
          action.accept(value);
          advanced[0] = true;
        }
      };
      // Keep advancing past unusable nodes until a value is emitted or the source is drained
      while (spliterator.tryAdvance(consumer)) {
        if (advanced[0]) {
          return true;
        }
      }
      return false;
    }

    @Override
    public @Nullable Spliterator<V> trySplit() {
      Spliterator<Node<K, V>> split = spliterator.trySplit();
      return (split == null) ? null : new ValueSpliterator<>(cache, split);
    }

    @Override
    public long estimateSize() {
      return spliterator.estimateSize();
    }

    @Override
    public int characteristics() {
      // Values may repeat, so DISTINCT is intentionally absent (unlike the key/entry variants)
      return CONCURRENT | NONNULL;
    }
  }
3605

3606
  /** An adapter to safely externalize the entries. */
3607
  static final class EntrySetView<K, V> extends AbstractSet<Entry<K, V>> {
3608
    final BoundedLocalCache<K, V> cache;
3609

3610
    EntrySetView(BoundedLocalCache<K, V> cache) {
1✔
3611
      this.cache = requireNonNull(cache);
1✔
3612
    }
1✔
3613

3614
    @Override
3615
    public int size() {
3616
      return cache.size();
1✔
3617
    }
3618

3619
    @Override
3620
    public void clear() {
3621
      cache.clear();
1✔
3622
    }
1✔
3623

3624
    @Override
3625
    public boolean contains(Object o) {
3626
      if (!(o instanceof Entry<?, ?>)) {
1✔
3627
        return false;
1✔
3628
      }
3629
      var entry = (Entry<?, ?>) o;
1✔
3630
      var key = entry.getKey();
1✔
3631
      var value = entry.getValue();
1✔
3632
      if ((key == null) || (value == null)) {
1✔
3633
        return false;
1✔
3634
      }
3635
      Node<K, V> node = cache.data.get(cache.nodeFactory.newLookupKey(key));
1✔
3636
      return (node != null) && node.containsValue(value);
1✔
3637
    }
3638

3639
    @Override
3640
    public boolean removeAll(Collection<?> collection) {
3641
      requireNonNull(collection);
1✔
3642
      boolean modified = false;
1✔
3643
      if ((collection instanceof Set<?>) && (collection.size() > size())) {
1✔
3644
        for (var entry : this) {
1✔
3645
          if (collection.contains(entry)) {
1✔
3646
            modified |= remove(entry);
1✔
3647
          }
3648
        }
1✔
3649
      } else {
3650
        for (var item : collection) {
1✔
3651
          modified |= (item != null) && remove(item);
1✔
3652
        }
1✔
3653
      }
3654
      return modified;
1✔
3655
    }
3656

3657
    @Override
3658
    @SuppressWarnings("SuspiciousMethodCalls")
3659
    public boolean remove(Object o) {
3660
      if (!(o instanceof Entry<?, ?>)) {
1✔
3661
        return false;
1✔
3662
      }
3663
      var entry = (Entry<?, ?>) o;
1✔
3664
      var key = entry.getKey();
1✔
3665
      return (key != null) && cache.remove(key, entry.getValue());
1✔
3666
    }
3667

3668
    @Override
3669
    public boolean removeIf(Predicate<? super Entry<K, V>> filter) {
3670
      requireNonNull(filter);
1✔
3671
      boolean modified = false;
1✔
3672
      for (Entry<K, V> entry : this) {
1✔
3673
        if (filter.test(entry)) {
1✔
3674
          modified |= cache.remove(entry.getKey(), entry.getValue());
1✔
3675
        }
3676
      }
1✔
3677
      return modified;
1✔
3678
    }
3679

3680
    @Override
3681
    public boolean retainAll(Collection<?> collection) {
3682
      requireNonNull(collection);
1✔
3683
      boolean modified = false;
1✔
3684
      for (var entry : this) {
1✔
3685
        if (!collection.contains(entry) && remove(entry)) {
1✔
3686
          modified = true;
1✔
3687
        }
3688
      }
1✔
3689
      return modified;
1✔
3690
    }
3691

3692
    @Override
3693
    public Iterator<Entry<K, V>> iterator() {
3694
      return new EntryIterator<>(cache);
1✔
3695
    }
3696

3697
    @Override
3698
    public Spliterator<Entry<K, V>> spliterator() {
3699
      return new EntrySpliterator<>(cache);
1✔
3700
    }
3701
  }
3702

3703
  /** An adapter to safely externalize the entry iterator. */
  static final class EntryIterator<K, V> implements Iterator<Entry<K, V>> {
    final BoundedLocalCache<K, V> cache;
    final Iterator<Node<K, V>> iterator;

    // Cached position, populated by hasNext() and cleared by advance()
    @Nullable K key;
    @Nullable V value;
    // Key of the most recently returned element, consumed by remove()
    @Nullable K removalKey;
    @Nullable Node<K, V> next;

    EntryIterator(BoundedLocalCache<K, V> cache) {
      this.iterator = cache.data.values().iterator();
      this.cache = cache;
    }

    /**
     * Returns whether another usable entry exists; as a side effect caches the next node and
     * its key/value so that next()/nextKey()/nextValue() can return them.
     */
    @Override
    public boolean hasNext() {
      if (next != null) {
        // A position was already computed and not yet consumed
        return true;
      }

      long now = cache.expirationTicker().read();
      while (iterator.hasNext()) {
        next = iterator.next();
        value = next.getValue();
        key = next.getKey();

        // An entry with a collected key/value or that has expired should be evicted; a
        // retired/dead node is merely skipped since its removal is already in progress
        boolean evictable = (key == null) || (value == null) || cache.hasExpired(next, now);
        if (evictable || !next.isAlive()) {
          if (evictable) {
            // Hint the cache to sweep out the evictable entries that were observed
            cache.scheduleDrainBuffers();
          }
          advance();
          continue;
        }
        return true;
      }
      return false;
    }

    /** Invalidates the current position so that the iterator may compute the next position. */
    void advance() {
      value = null;
      next = null;
      key = null;
    }

    @SuppressWarnings("NullAway")
    K nextKey() {
      if (!hasNext()) {
        throw new NoSuchElementException();
      }
      removalKey = key;
      advance();
      return removalKey;
    }

    @SuppressWarnings("NullAway")
    V nextValue() {
      if (!hasNext()) {
        throw new NoSuchElementException();
      }
      removalKey = key;
      V val = value;
      advance();
      return val;
    }

    @Override
    public Entry<K, V> next() {
      if (!hasNext()) {
        throw new NoSuchElementException();
      }
      @SuppressWarnings("NullAway")
      var entry = new WriteThroughEntry<>(cache, key, value);
      removalKey = key;
      advance();
      return entry;
    }

    @Override
    public void remove() {
      // Only legal after a successful next*() call and before the following one
      if (removalKey == null) {
        throw new IllegalStateException();
      }
      cache.remove(removalKey);
      removalKey = null;
    }
  }
3792

3793
  /** An adapter to safely externalize the entry spliterator. */
3794
  static final class EntrySpliterator<K, V> implements Spliterator<Entry<K, V>> {
3795
    final Spliterator<Node<K, V>> spliterator;
3796
    final BoundedLocalCache<K, V> cache;
3797

3798
    EntrySpliterator(BoundedLocalCache<K, V> cache) {
3799
      this(cache, cache.data.values().spliterator());
1✔
3800
    }
1✔
3801

3802
    EntrySpliterator(BoundedLocalCache<K, V> cache, Spliterator<Node<K, V>> spliterator) {
1✔
3803
      this.spliterator = requireNonNull(spliterator);
1✔
3804
      this.cache = requireNonNull(cache);
1✔
3805
    }
1✔
3806

3807
    @Override
3808
    public void forEachRemaining(Consumer<? super Entry<K, V>> action) {
3809
      requireNonNull(action);
1✔
3810
      Consumer<Node<K, V>> consumer = node -> {
1✔
3811
        K key = node.getKey();
1✔
3812
        V value = node.getValue();
1✔
3813
        long now = cache.expirationTicker().read();
1✔
3814
        if ((key != null) && (value != null) && node.isAlive() && !cache.hasExpired(node, now)) {
1✔
3815
          action.accept(new WriteThroughEntry<>(cache, key, value));
1✔
3816
        }
3817
      };
1✔
3818
      spliterator.forEachRemaining(consumer);
1✔
3819
    }
1✔
3820

3821
    @Override
3822
    public boolean tryAdvance(Consumer<? super Entry<K, V>> action) {
3823
      requireNonNull(action);
1✔
3824
      boolean[] advanced = { false };
1✔
3825
      Consumer<Node<K, V>> consumer = node -> {
1✔
3826
        K key = node.getKey();
1✔
3827
        V value = node.getValue();
1✔
3828
        long now = cache.expirationTicker().read();
1✔
3829
        if ((key != null) && (value != null) && node.isAlive() && !cache.hasExpired(node, now)) {
1✔
3830
          action.accept(new WriteThroughEntry<>(cache, key, value));
1✔
3831
          advanced[0] = true;
1✔
3832
        }
3833
      };
1✔
3834
      while (spliterator.tryAdvance(consumer)) {
1✔
3835
        if (advanced[0]) {
1✔
3836
          return true;
1✔
3837
        }
3838
      }
3839
      return false;
1✔
3840
    }
3841

3842
    @Override
3843
    public @Nullable Spliterator<Entry<K, V>> trySplit() {
3844
      Spliterator<Node<K, V>> split = spliterator.trySplit();
1✔
3845
      return (split == null) ? null : new EntrySpliterator<>(cache, split);
1✔
3846
    }
3847

3848
    @Override
3849
    public long estimateSize() {
3850
      return spliterator.estimateSize();
1✔
3851
    }
3852

3853
    @Override
3854
    public int characteristics() {
3855
      return DISTINCT | CONCURRENT | NONNULL;
1✔
3856
    }
3857
  }
3858

3859
  /** A reusable task that performs the maintenance work; used to avoid wrapping by ForkJoinPool. */
  static final class PerformCleanupTask extends ForkJoinTask<Void> implements Runnable {
    private static final long serialVersionUID = 1L;

    // Weak so that this reusable, possibly queued task does not prevent the cache from
    // being garbage collected
    final WeakReference<BoundedLocalCache<?, ?>> reference;

    PerformCleanupTask(BoundedLocalCache<?, ?> cache) {
      reference = new WeakReference<BoundedLocalCache<?,?>>(cache);
    }

    @Override
    public boolean exec() {
      try {
        run();
      } catch (Throwable t) {
        // Swallow after logging so the task instance can be reused by later submissions
        logger.log(Level.ERROR, "Exception thrown when performing the maintenance task", t);
      }

      // Indicates that the task has not completed to allow subsequent submissions to execute
      return false;
    }

    @Override
    public void run() {
      BoundedLocalCache<?, ?> cache = reference.get();
      if (cache != null) {
        cache.performCleanUp(/* ignored */ null);
      }
    }

    /**
     * This method cannot be ignored due to being final, so a hostile user supplied Executor could
     * forcibly complete the task and halt future executions. There are easier ways to intentionally
     * harm a system, so this is assumed to not happen in practice.
     */
    // public final void quietlyComplete() {}

    // The overrides below are intentional no-ops: the task carries no result and must not be
    // externally completed or cancelled, else future maintenance submissions would be skipped
    @Override public Void getRawResult() { return null; }
    @Override public void setRawResult(Void v) {}
    @Override public void complete(Void value) {}
    @Override public void completeExceptionally(Throwable ex) {}
    @Override public boolean cancel(boolean mayInterruptIfRunning) { return false; }
  }
3902

3903
  /** Creates a serialization proxy based on the common configuration shared by all cache types. */
  static <K, V> SerializationProxy<K, V> makeSerializationProxy(BoundedLocalCache<?, ?> cache) {
    SerializationProxy<K, V> proxy = new SerializationProxy<>();
    // Reference strength and instrumentation settings
    proxy.weakKeys = cache.collectKeys();
    proxy.weakValues = cache.nodeFactory.weakValues();
    proxy.softValues = cache.nodeFactory.softValues();
    proxy.isRecordingStats = cache.isRecordingStats();
    proxy.evictionListener = cache.evictionListener;
    proxy.removalListener = cache.removalListener();
    proxy.ticker = cache.expirationTicker();
    // Only capture the expiration/refresh settings that are enabled on this cache
    if (cache.expiresAfterAccess()) {
      proxy.expiresAfterAccessNanos = cache.expiresAfterAccessNanos();
    }
    if (cache.expiresAfterWrite()) {
      proxy.expiresAfterWriteNanos = cache.expiresAfterWriteNanos();
    }
    if (cache.expiresVariable()) {
      proxy.expiry = cache.expiry();
    }
    if (cache.refreshAfterWrite()) {
      proxy.refreshAfterWriteNanos = cache.refreshAfterWriteNanos();
    }
    if (cache.evicts()) {
      // The maximum is stored as a weight when a custom weigher is used, else as a size
      if (cache.isWeighted) {
        proxy.weigher = cache.weigher;
        proxy.maximumWeight = cache.maximum();
      } else {
        proxy.maximumSize = cache.maximum();
      }
    }
    proxy.cacheLoader = cache.cacheLoader;
    proxy.async = cache.isAsync;
    return proxy;
  }
3937

3938
  /* --------------- Manual Cache --------------- */
3939

3940
  static class BoundedLocalManualCache<K, V> implements LocalManualCache<K, V>, Serializable {
    private static final long serialVersionUID = 1;

    final BoundedLocalCache<K, V> cache;

    // Lazily created policy view; see policy()
    @Nullable Policy<K, V> policy;

    BoundedLocalManualCache(Caffeine<K, V> builder) {
      this(builder, null);
    }

    BoundedLocalManualCache(Caffeine<K, V> builder, @Nullable CacheLoader<? super K, V> loader) {
      cache = LocalCacheFactory.newBoundedLocalCache(builder, loader, /* async */ false);
    }

    @Override
    public BoundedLocalCache<K, V> cache() {
      return cache;
    }

    @Override
    public Policy<K, V> policy() {
      // Unsynchronized lazy initialization; a race can create duplicate views, which is
      // presumably benign since the view is stateless apart from its own caching — confirm
      var p = policy;
      return (p == null) ? (policy = new BoundedPolicy<>(cache, identity(), cache.isWeighted)) : p;
    }

    // Direct deserialization is rejected; instances round-trip through SerializationProxy
    @SuppressWarnings("UnusedVariable")
    private void readObject(ObjectInputStream stream) throws InvalidObjectException {
      throw new InvalidObjectException("Proxy required");
    }

    private Object writeReplace() {
      return makeSerializationProxy(cache);
    }
  }
3975

3976
  static final class BoundedPolicy<K, V> implements Policy<K, V> {
3977
    final BoundedLocalCache<K, V> cache;
    // Unwraps stored values before they are exposed to callers
    final Function<V, V> transformer;
    final boolean isWeighted;

    // Lazily created policy sub-views; initialization appears unsynchronized, so a benign
    // duplicate may be created under contention — confirm
    @Nullable Optional<Eviction<K, V>> eviction;
    @Nullable Optional<FixedRefresh<K, V>> refreshes;
    @Nullable Optional<FixedExpiration<K, V>> afterWrite;
    @Nullable Optional<FixedExpiration<K, V>> afterAccess;
    @Nullable Optional<VarExpiration<K, V>> variable;
3986

3987
    BoundedPolicy(BoundedLocalCache<K, V> cache, Function<V, V> transformer, boolean isWeighted) {
1✔
3988
      this.transformer = transformer;
1✔
3989
      this.isWeighted = isWeighted;
1✔
3990
      this.cache = cache;
1✔
3991
    }
1✔
3992

3993
    @Override public boolean isRecordingStats() {
3994
      return cache.isRecordingStats();
1✔
3995
    }
3996
    @Override public @Nullable V getIfPresentQuietly(K key) {
3997
      return transformer.apply(cache.getIfPresentQuietly(key));
1✔
3998
    }
3999
    @Override public @Nullable CacheEntry<K, V> getEntryIfPresentQuietly(K key) {
4000
      Node<K, V> node = cache.data.get(cache.nodeFactory.newLookupKey(key));
1✔
4001
      return (node == null) ? null : cache.nodeToCacheEntry(node, transformer);
1✔
4002
    }
4003
    @Override public Map<K, CompletableFuture<V>> refreshes() {
4004
      var refreshes = cache.refreshes;
1✔
4005
      if ((refreshes == null) || refreshes.isEmpty()) {
1✔
4006
        @SuppressWarnings("ImmutableMapOf")
4007
        Map<K, CompletableFuture<V>> emptyMap = Collections.unmodifiableMap(Collections.emptyMap());
1✔
4008
        return emptyMap;
1✔
4009
      } else if (cache.collectKeys()) {
1✔
4010
        var inFlight = new IdentityHashMap<K, CompletableFuture<V>>(refreshes.size());
1✔
4011
        for (var entry : refreshes.entrySet()) {
1✔
4012
          @SuppressWarnings("unchecked")
4013
          var key = ((InternalReference<K>) entry.getKey()).get();
1✔
4014
          @SuppressWarnings("unchecked")
4015
          var future = (CompletableFuture<V>) entry.getValue();
1✔
4016
          if (key != null) {
1✔
4017
            inFlight.put(key, future);
1✔
4018
          }
4019
        }
1✔
4020
        return Collections.unmodifiableMap(inFlight);
1✔
4021
      }
4022
      @SuppressWarnings("unchecked")
4023
      var castedRefreshes = (Map<K, CompletableFuture<V>>) (Object) refreshes;
1✔
4024
      return Collections.unmodifiableMap(new HashMap<>(castedRefreshes));
1✔
4025
    }
4026
    @Override public Optional<Eviction<K, V>> eviction() {
4027
      return cache.evicts()
1✔
4028
          ? (eviction == null) ? (eviction = Optional.of(new BoundedEviction())) : eviction
1✔
4029
          : Optional.empty();
1✔
4030
    }
4031
    @Override public Optional<FixedExpiration<K, V>> expireAfterAccess() {
4032
      if (!cache.expiresAfterAccess()) {
1✔
4033
        return Optional.empty();
1✔
4034
      }
4035
      return (afterAccess == null)
1✔
4036
          ? (afterAccess = Optional.of(new BoundedExpireAfterAccess()))
1✔
4037
          : afterAccess;
1✔
4038
    }
4039
    @Override public Optional<FixedExpiration<K, V>> expireAfterWrite() {
4040
      if (!cache.expiresAfterWrite()) {
1✔
4041
        return Optional.empty();
1✔
4042
      }
4043
      return (afterWrite == null)
1✔
4044
          ? (afterWrite = Optional.of(new BoundedExpireAfterWrite()))
1✔
4045
          : afterWrite;
1✔
4046
    }
4047
    @Override public Optional<VarExpiration<K, V>> expireVariably() {
4048
      if (!cache.expiresVariable()) {
1✔
4049
        return Optional.empty();
1✔
4050
      }
4051
      return (variable == null)
1✔
4052
          ? (variable = Optional.of(new BoundedVarExpiration()))
1✔
4053
          : variable;
1✔
4054
    }
4055
    @Override public Optional<FixedRefresh<K, V>> refreshAfterWrite() {
4056
      if (!cache.refreshAfterWrite()) {
1✔
4057
        return Optional.empty();
1✔
4058
      }
4059
      return (refreshes == null)
1✔
4060
          ? (refreshes = Optional.of(new BoundedRefreshAfterWrite()))
1✔
4061
          : refreshes;
1✔
4062
    }
4063

4064
    final class BoundedEviction implements Eviction<K, V> {
1✔
4065
      @Override public boolean isWeighted() {
4066
        return isWeighted;
1✔
4067
      }
4068
      @Override public OptionalInt weightOf(K key) {
4069
        requireNonNull(key);
1✔
4070
        if (!isWeighted) {
1✔
4071
          return OptionalInt.empty();
1✔
4072
        }
4073
        Node<K, V> node = cache.data.get(cache.nodeFactory.newLookupKey(key));
1✔
4074
        if ((node == null) || cache.hasExpired(node, cache.expirationTicker().read())) {
1✔
4075
          return OptionalInt.empty();
1✔
4076
        }
4077
        synchronized (node) {
1✔
4078
          return OptionalInt.of(node.getWeight());
1✔
4079
        }
4080
      }
4081
      @Override public OptionalLong weightedSize() {
4082
        if (cache.evicts() && isWeighted()) {
1✔
4083
          cache.evictionLock.lock();
1✔
4084
          try {
4085
            if (cache.drainStatusOpaque() == REQUIRED) {
1✔
4086
              cache.maintenance(/* ignored */ null);
1✔
4087
            }
4088
            return OptionalLong.of(Math.max(0, cache.weightedSize()));
1✔
4089
          } finally {
4090
            cache.evictionLock.unlock();
1✔
4091
            cache.rescheduleCleanUpIfIncomplete();
1✔
4092
          }
4093
        }
4094
        return OptionalLong.empty();
1✔
4095
      }
4096
      @Override public long getMaximum() {
4097
        cache.evictionLock.lock();
1✔
4098
        try {
4099
          if (cache.drainStatusOpaque() == REQUIRED) {
1✔
4100
            cache.maintenance(/* ignored */ null);
1✔
4101
          }
4102
          return cache.maximum();
1✔
4103
        } finally {
4104
          cache.evictionLock.unlock();
1✔
4105
          cache.rescheduleCleanUpIfIncomplete();
1✔
4106
        }
4107
      }
4108
      @Override public void setMaximum(long maximum) {
4109
        cache.evictionLock.lock();
1✔
4110
        try {
4111
          cache.setMaximumSize(maximum);
1✔
4112
          cache.maintenance(/* ignored */ null);
1✔
4113
        } finally {
4114
          cache.evictionLock.unlock();
1✔
4115
          cache.rescheduleCleanUpIfIncomplete();
1✔
4116
        }
4117
      }
1✔
4118
      @Override public Map<K, V> coldest(int limit) {
4119
        int expectedSize = Math.min(limit, cache.size());
1✔
4120
        var limiter = new SizeLimiter<K, V>(expectedSize, limit);
1✔
4121
        return cache.evictionOrder(/* hottest */ false, transformer, limiter);
1✔
4122
      }
4123
      @Override public Map<K, V> coldestWeighted(long weightLimit) {
4124
        var limiter = isWeighted()
1✔
4125
            ? new WeightLimiter<K, V>(weightLimit)
1✔
4126
            : new SizeLimiter<K, V>((int) Math.min(weightLimit, cache.size()), weightLimit);
1✔
4127
        return cache.evictionOrder(/* hottest */ false, transformer, limiter);
1✔
4128
      }
4129
      @Override
4130
      public <T> T coldest(Function<Stream<CacheEntry<K, V>>, T> mappingFunction) {
4131
        requireNonNull(mappingFunction);
1✔
4132
        return cache.evictionOrder(/* hottest */ false, transformer, mappingFunction);
1✔
4133
      }
4134
      @Override public Map<K, V> hottest(int limit) {
4135
        int expectedSize = Math.min(limit, cache.size());
1✔
4136
        var limiter = new SizeLimiter<K, V>(expectedSize, limit);
1✔
4137
        return cache.evictionOrder(/* hottest */ true, transformer, limiter);
1✔
4138
      }
4139
      @Override public Map<K, V> hottestWeighted(long weightLimit) {
4140
        var limiter = isWeighted()
1✔
4141
            ? new WeightLimiter<K, V>(weightLimit)
1✔
4142
            : new SizeLimiter<K, V>((int) Math.min(weightLimit, cache.size()), weightLimit);
1✔
4143
        return cache.evictionOrder(/* hottest */ true, transformer, limiter);
1✔
4144
      }
4145
      @Override
4146
      public <T> T hottest(Function<Stream<CacheEntry<K, V>>, T> mappingFunction) {
4147
        requireNonNull(mappingFunction);
1✔
4148
        return cache.evictionOrder(/* hottest */ true, transformer, mappingFunction);
1✔
4149
      }
4150
    }
4151

4152
    @SuppressWarnings("PreferJavaTimeOverload")
4153
    final class BoundedExpireAfterAccess implements FixedExpiration<K, V> {
1✔
4154
      @Override public OptionalLong ageOf(K key, TimeUnit unit) {
4155
        requireNonNull(key);
1✔
4156
        requireNonNull(unit);
1✔
4157
        Object lookupKey = cache.nodeFactory.newLookupKey(key);
1✔
4158
        Node<K, V> node = cache.data.get(lookupKey);
1✔
4159
        if (node == null) {
1✔
4160
          return OptionalLong.empty();
1✔
4161
        }
4162
        long now = cache.expirationTicker().read();
1✔
4163
        return cache.hasExpired(node, now)
1✔
4164
            ? OptionalLong.empty()
1✔
4165
            : OptionalLong.of(unit.convert(now - node.getAccessTime(), TimeUnit.NANOSECONDS));
1✔
4166
      }
4167
      @Override public long getExpiresAfter(TimeUnit unit) {
4168
        return unit.convert(cache.expiresAfterAccessNanos(), TimeUnit.NANOSECONDS);
1✔
4169
      }
4170
      @Override public void setExpiresAfter(long duration, TimeUnit unit) {
4171
        requireArgument(duration >= 0);
1✔
4172
        cache.setExpiresAfterAccessNanos(unit.toNanos(duration));
1✔
4173
        cache.scheduleAfterWrite();
1✔
4174
      }
1✔
4175
      @Override public Map<K, V> oldest(int limit) {
4176
        return oldest(new SizeLimiter<>(Math.min(limit, cache.size()), limit));
1✔
4177
      }
4178
      @Override public <T> T oldest(Function<Stream<CacheEntry<K, V>>, T> mappingFunction) {
4179
        return cache.expireAfterAccessOrder(/* oldest */ true, transformer, mappingFunction);
1✔
4180
      }
4181
      @Override public Map<K, V> youngest(int limit) {
4182
        return youngest(new SizeLimiter<>(Math.min(limit, cache.size()), limit));
1✔
4183
      }
4184
      @Override public <T> T youngest(Function<Stream<CacheEntry<K, V>>, T> mappingFunction) {
4185
        return cache.expireAfterAccessOrder(/* oldest */ false, transformer, mappingFunction);
1✔
4186
      }
4187
    }
4188

4189
    @SuppressWarnings("PreferJavaTimeOverload")
4190
    final class BoundedExpireAfterWrite implements FixedExpiration<K, V> {
1✔
4191
      @Override public OptionalLong ageOf(K key, TimeUnit unit) {
4192
        requireNonNull(key);
1✔
4193
        requireNonNull(unit);
1✔
4194
        Object lookupKey = cache.nodeFactory.newLookupKey(key);
1✔
4195
        Node<K, V> node = cache.data.get(lookupKey);
1✔
4196
        if (node == null) {
1✔
4197
          return OptionalLong.empty();
1✔
4198
        }
4199
        long now = cache.expirationTicker().read();
1✔
4200
        return cache.hasExpired(node, now)
1✔
4201
            ? OptionalLong.empty()
1✔
4202
            : OptionalLong.of(unit.convert(now - node.getWriteTime(), TimeUnit.NANOSECONDS));
1✔
4203
      }
4204
      @Override public long getExpiresAfter(TimeUnit unit) {
4205
        return unit.convert(cache.expiresAfterWriteNanos(), TimeUnit.NANOSECONDS);
1✔
4206
      }
4207
      @Override public void setExpiresAfter(long duration, TimeUnit unit) {
4208
        requireArgument(duration >= 0);
1✔
4209
        cache.setExpiresAfterWriteNanos(unit.toNanos(duration));
1✔
4210
        cache.scheduleAfterWrite();
1✔
4211
      }
1✔
4212
      @Override public Map<K, V> oldest(int limit) {
4213
        return oldest(new SizeLimiter<>(Math.min(limit, cache.size()), limit));
1✔
4214
      }
4215
      @SuppressWarnings("GuardedByChecker")
4216
      @Override public <T> T oldest(Function<Stream<CacheEntry<K, V>>, T> mappingFunction) {
4217
        return cache.snapshot(cache.writeOrderDeque(), transformer, mappingFunction);
1✔
4218
      }
4219
      @Override public Map<K, V> youngest(int limit) {
4220
        return youngest(new SizeLimiter<>(Math.min(limit, cache.size()), limit));
1✔
4221
      }
4222
      @SuppressWarnings("GuardedByChecker")
4223
      @Override public <T> T youngest(Function<Stream<CacheEntry<K, V>>, T> mappingFunction) {
4224
        return cache.snapshot(cache.writeOrderDeque()::descendingIterator,
1✔
4225
            transformer, mappingFunction);
4226
      }
4227
    }
4228

4229
    @SuppressWarnings("PreferJavaTimeOverload")
4230
    final class BoundedVarExpiration implements VarExpiration<K, V> {
1✔
4231
      @Override public OptionalLong getExpiresAfter(K key, TimeUnit unit) {
4232
        requireNonNull(key);
1✔
4233
        requireNonNull(unit);
1✔
4234
        Object lookupKey = cache.nodeFactory.newLookupKey(key);
1✔
4235
        Node<K, V> node = cache.data.get(lookupKey);
1✔
4236
        if (node == null) {
1✔
4237
          return OptionalLong.empty();
1✔
4238
        }
4239
        long now = cache.expirationTicker().read();
1✔
4240
        return cache.hasExpired(node, now)
1✔
4241
            ? OptionalLong.empty()
1✔
4242
            : OptionalLong.of(unit.convert(node.getVariableTime() - now, TimeUnit.NANOSECONDS));
1✔
4243
      }
4244
      @Override public void setExpiresAfter(K key, long duration, TimeUnit unit) {
4245
        requireNonNull(key);
1✔
4246
        requireNonNull(unit);
1✔
4247
        requireArgument(duration >= 0);
1✔
4248
        Object lookupKey = cache.nodeFactory.newLookupKey(key);
1✔
4249
        Node<K, V> node = cache.data.get(lookupKey);
1✔
4250
        if (node != null) {
1✔
4251
          long now;
4252
          long durationNanos = TimeUnit.NANOSECONDS.convert(duration, unit);
1✔
4253
          synchronized (node) {
1✔
4254
            now = cache.expirationTicker().read();
1✔
4255
            if (cache.hasExpired(node, now)) {
1✔
4256
              return;
1✔
4257
            }
4258
            node.setVariableTime(now + Math.min(durationNanos, MAXIMUM_EXPIRY));
1✔
4259
          }
1✔
4260
          cache.afterRead(node, now, /* recordHit */ false);
1✔
4261
        }
4262
      }
1✔
4263
      @Override public @Nullable V put(K key, V value, long duration, TimeUnit unit) {
4264
        requireNonNull(unit);
1✔
4265
        requireNonNull(value);
1✔
4266
        requireArgument(duration >= 0);
1✔
4267
        return cache.isAsync
1✔
4268
            ? putAsync(key, value, duration, unit)
1✔
4269
            : putSync(key, value, duration, unit, /* onlyIfAbsent */ false);
1✔
4270
      }
4271
      @Override public @Nullable V putIfAbsent(K key, V value, long duration, TimeUnit unit) {
4272
        requireNonNull(unit);
1✔
4273
        requireNonNull(value);
1✔
4274
        requireArgument(duration >= 0);
1✔
4275
        return cache.isAsync
1✔
4276
            ? putIfAbsentAsync(key, value, duration, unit)
1✔
4277
            : putSync(key, value, duration, unit, /* onlyIfAbsent */ true);
1✔
4278
      }
4279
      @Nullable V putSync(K key, V value, long duration, TimeUnit unit, boolean onlyIfAbsent) {
4280
        var expiry = new FixedExpireAfterWrite<K, V>(duration, unit);
1✔
4281
        return cache.put(key, value, expiry, onlyIfAbsent);
1✔
4282
      }
4283
      @SuppressWarnings("unchecked")
4284
      @Nullable V putIfAbsentAsync(K key, V value, long duration, TimeUnit unit) {
4285
        // Keep in sync with LocalAsyncCache.AsMapView#putIfAbsent(key, value)
4286
        var expiry = (Expiry<K, V>) new AsyncExpiry<>(new FixedExpireAfterWrite<>(duration, unit));
1✔
4287
        V asyncValue = (V) CompletableFuture.completedFuture(value);
1✔
4288

4289
        for (;;) {
4290
          var priorFuture = (CompletableFuture<V>) cache.getIfPresent(key, /* recordStats */ false);
1✔
4291
          if (priorFuture != null) {
1✔
4292
            if (!priorFuture.isDone()) {
1✔
4293
              Async.getWhenSuccessful(priorFuture);
×
4294
              continue;
×
4295
            }
4296

4297
            V prior = Async.getWhenSuccessful(priorFuture);
1✔
4298
            if (prior != null) {
1✔
4299
              return prior;
1✔
4300
            }
4301
          }
4302

4303
          boolean[] added = { false };
1✔
4304
          var computed = (CompletableFuture<V>) cache.compute(key, (k, oldValue) -> {
1✔
4305
            var oldValueFuture = (CompletableFuture<V>) oldValue;
1✔
4306
            added[0] = (oldValueFuture == null)
1✔
4307
                || (oldValueFuture.isDone() && (Async.getIfReady(oldValueFuture) == null));
1✔
4308
            return added[0] ? asyncValue : oldValue;
1✔
4309
          }, expiry, /* recordLoad */ false, /* recordLoadFailure */ false);
4310

4311
          if (added[0]) {
1✔
4312
            return null;
1✔
4313
          } else {
4314
            V prior = Async.getWhenSuccessful(computed);
×
4315
            if (prior != null) {
×
4316
              return prior;
×
4317
            }
4318
          }
4319
        }
×
4320
      }
4321
      @SuppressWarnings("unchecked")
4322
      @Nullable V putAsync(K key, V value, long duration, TimeUnit unit) {
4323
        var expiry = (Expiry<K, V>) new AsyncExpiry<>(new FixedExpireAfterWrite<>(duration, unit));
1✔
4324
        V asyncValue = (V) CompletableFuture.completedFuture(value);
1✔
4325

4326
        var oldValueFuture = (CompletableFuture<V>) cache.put(
1✔
4327
            key, asyncValue, expiry, /* onlyIfAbsent */ false);
4328
        return Async.getWhenSuccessful(oldValueFuture);
1✔
4329
      }
4330
      @SuppressWarnings("NullAway")
4331
      @Override public V compute(K key,
4332
          BiFunction<? super K, ? super V, ? extends V> remappingFunction,
4333
          Duration duration) {
4334
        requireNonNull(key);
1✔
4335
        requireNonNull(duration);
1✔
4336
        requireNonNull(remappingFunction);
1✔
4337
        requireArgument(!duration.isNegative(), "duration cannot be negative: %s", duration);
1✔
4338
        var expiry = new FixedExpireAfterWrite<K, V>(
1✔
4339
            saturatedToNanos(duration), TimeUnit.NANOSECONDS);
1✔
4340

4341
        return cache.isAsync
1✔
4342
            ? computeAsync(key, remappingFunction, expiry)
1✔
4343
            : cache.compute(key, remappingFunction, expiry,
1✔
4344
                /* recordLoad */ true, /* recordLoadFailure */ true);
4345
      }
4346
      @Nullable V computeAsync(K key,
4347
          BiFunction<? super K, ? super V, ? extends V> remappingFunction,
4348
          Expiry<? super K, ? super V> expiry) {
4349
        // Keep in sync with LocalAsyncCache.AsMapView#compute(key, remappingFunction)
4350
        @SuppressWarnings("unchecked")
4351
        var delegate = (LocalCache<K, CompletableFuture<V>>) cache;
1✔
4352

4353
        @SuppressWarnings({"rawtypes", "unchecked"})
4354
        V[] newValue = (V[]) new Object[1];
1✔
4355
        for (;;) {
4356
          Async.getWhenSuccessful(delegate.getIfPresentQuietly(key));
1✔
4357

4358
          CompletableFuture<V> valueFuture = delegate.compute(key, (k, oldValueFuture) -> {
1✔
4359
            if ((oldValueFuture != null) && !oldValueFuture.isDone()) {
1✔
4360
              return oldValueFuture;
×
4361
            }
4362

4363
            V oldValue = Async.getIfReady(oldValueFuture);
1✔
4364
            BiFunction<? super K, ? super V, ? extends V> function = delegate.statsAware(
1✔
4365
                remappingFunction, /* recordLoad */ true, /* recordLoadFailure */ true);
4366
            newValue[0] = function.apply(key, oldValue);
1✔
4367
            return (newValue[0] == null) ? null : CompletableFuture.completedFuture(newValue[0]);
1✔
4368
          }, new AsyncExpiry<>(expiry), /* recordLoad */ false, /* recordLoadFailure */ false);
4369

4370
          if (newValue[0] != null) {
1✔
4371
            return newValue[0];
1✔
4372
          } else if (valueFuture == null) {
1✔
4373
            return null;
1✔
4374
          }
4375
        }
×
4376
      }
4377
      @Override public Map<K, V> oldest(int limit) {
4378
        return oldest(new SizeLimiter<>(Math.min(limit, cache.size()), limit));
1✔
4379
      }
4380
      @Override public <T> T oldest(Function<Stream<CacheEntry<K, V>>, T> mappingFunction) {
4381
        return cache.snapshot(cache.timerWheel(), transformer, mappingFunction);
1✔
4382
      }
4383
      @Override public Map<K, V> youngest(int limit) {
4384
        return youngest(new SizeLimiter<>(Math.min(limit, cache.size()), limit));
1✔
4385
      }
4386
      @Override public <T> T youngest(Function<Stream<CacheEntry<K, V>>, T> mappingFunction) {
4387
        return cache.snapshot(cache.timerWheel()::descendingIterator, transformer, mappingFunction);
1✔
4388
      }
4389
    }
4390

4391
    static final class FixedExpireAfterWrite<K, V> implements Expiry<K, V> {
4392
      final long duration;
4393
      final TimeUnit unit;
4394

4395
      FixedExpireAfterWrite(long duration, TimeUnit unit) {
1✔
4396
        this.duration = duration;
1✔
4397
        this.unit = unit;
1✔
4398
      }
1✔
4399
      @Override public long expireAfterCreate(K key, V value, long currentTime) {
4400
        return unit.toNanos(duration);
1✔
4401
      }
4402
      @Override public long expireAfterUpdate(
4403
          K key, V value, long currentTime, long currentDuration) {
4404
        return unit.toNanos(duration);
1✔
4405
      }
4406
      @CanIgnoreReturnValue
4407
      @Override public long expireAfterRead(
4408
          K key, V value, long currentTime, long currentDuration) {
4409
        return currentDuration;
1✔
4410
      }
4411
    }
4412

4413
    @SuppressWarnings("PreferJavaTimeOverload")
4414
    final class BoundedRefreshAfterWrite implements FixedRefresh<K, V> {
1✔
4415
      @Override public OptionalLong ageOf(K key, TimeUnit unit) {
4416
        requireNonNull(key);
1✔
4417
        requireNonNull(unit);
1✔
4418
        Object lookupKey = cache.nodeFactory.newLookupKey(key);
1✔
4419
        Node<K, V> node = cache.data.get(lookupKey);
1✔
4420
        if (node == null) {
1✔
4421
          return OptionalLong.empty();
1✔
4422
        }
4423
        long now = cache.expirationTicker().read();
1✔
4424
        return cache.hasExpired(node, now)
1✔
4425
            ? OptionalLong.empty()
1✔
4426
            : OptionalLong.of(unit.convert(now - node.getWriteTime(), TimeUnit.NANOSECONDS));
1✔
4427
      }
4428
      @Override public long getRefreshesAfter(TimeUnit unit) {
4429
        return unit.convert(cache.refreshAfterWriteNanos(), TimeUnit.NANOSECONDS);
1✔
4430
      }
4431
      @Override public void setRefreshesAfter(long duration, TimeUnit unit) {
4432
        requireArgument(duration >= 0);
1✔
4433
        cache.setRefreshAfterWriteNanos(unit.toNanos(duration));
1✔
4434
        cache.scheduleAfterWrite();
1✔
4435
      }
1✔
4436
    }
4437
  }
4438

4439
  /* --------------- Loading Cache --------------- */
4440

4441
  /** A bounded cache with a loader, exposing it through function adapters. */
  static final class BoundedLocalLoadingCache<K, V>
      extends BoundedLocalManualCache<K, V> implements LocalLoadingCache<K, V> {
    private static final long serialVersionUID = 1;

    // Adapters that invoke the loader for a single key or a bulk request.
    final Function<K, V> mappingFunction;
    @Nullable final Function<Set<? extends K>, Map<K, V>> bulkMappingFunction;

    BoundedLocalLoadingCache(Caffeine<K, V> builder, CacheLoader<? super K, V> loader) {
      super(builder, loader);
      requireNonNull(loader);
      mappingFunction = newMappingFunction(loader);
      // Null when the loader does not implement bulk loading (loadAll).
      bulkMappingFunction = newBulkMappingFunction(loader);
    }

    @Override
    @SuppressWarnings("NullAway")
    public AsyncCacheLoader<? super K, V> cacheLoader() {
      return cache.cacheLoader;
    }

    @Override
    public Function<K, V> mappingFunction() {
      return mappingFunction;
    }

    @Override
    public @Nullable Function<Set<? extends K>, Map<K, V>> bulkMappingFunction() {
      return bulkMappingFunction;
    }

    @SuppressWarnings("UnusedVariable")
    private void readObject(ObjectInputStream stream) throws InvalidObjectException {
      // Deserialization must go through the serialization proxy instead.
      throw new InvalidObjectException("Proxy required");
    }

    private Object writeReplace() {
      return makeSerializationProxy(cache);
    }
  }
4480

4481
  /* --------------- Async Cache --------------- */
4482

4483
  static final class BoundedLocalAsyncCache<K, V> implements LocalAsyncCache<K, V>, Serializable {
4484
    private static final long serialVersionUID = 1;
4485

4486
    final BoundedLocalCache<K, CompletableFuture<V>> cache;
4487
    final boolean isWeighted;
4488

4489
    @Nullable ConcurrentMap<K, CompletableFuture<V>> mapView;
4490
    @Nullable CacheView<K, V> cacheView;
4491
    @Nullable Policy<K, V> policy;
4492

4493
    @SuppressWarnings("unchecked")
4494
    BoundedLocalAsyncCache(Caffeine<K, V> builder) {
1✔
4495
      cache = (BoundedLocalCache<K, CompletableFuture<V>>) LocalCacheFactory
1✔
4496
          .newBoundedLocalCache(builder, /* loader */ null, /* async */ true);
1✔
4497
      isWeighted = builder.isWeighted();
1✔
4498
    }
1✔
4499

4500
    @Override
4501
    public BoundedLocalCache<K, CompletableFuture<V>> cache() {
4502
      return cache;
1✔
4503
    }
4504

4505
    @Override
4506
    public ConcurrentMap<K, CompletableFuture<V>> asMap() {
4507
      return (mapView == null) ? (mapView = new AsyncAsMapView<>(this)) : mapView;
1✔
4508
    }
4509

4510
    @Override
4511
    public Cache<K, V> synchronous() {
4512
      return (cacheView == null) ? (cacheView = new CacheView<>(this)) : cacheView;
1✔
4513
    }
4514

4515
    @Override
4516
    public Policy<K, V> policy() {
4517
      if (policy == null) {
1✔
4518
        @SuppressWarnings("unchecked")
4519
        BoundedLocalCache<K, V> castCache = (BoundedLocalCache<K, V>) cache;
1✔
4520
        Function<CompletableFuture<V>, V> transformer = Async::getIfReady;
1✔
4521
        @SuppressWarnings("unchecked")
4522
        Function<V, V> castTransformer = (Function<V, V>) transformer;
1✔
4523
        policy = new BoundedPolicy<>(castCache, castTransformer, isWeighted);
1✔
4524
      }
4525
      return policy;
1✔
4526
    }
4527

4528
    @SuppressWarnings("UnusedVariable")
4529
    private void readObject(ObjectInputStream stream) throws InvalidObjectException {
4530
      throw new InvalidObjectException("Proxy required");
1✔
4531
    }
4532

4533
    private Object writeReplace() {
4534
      return makeSerializationProxy(cache);
1✔
4535
    }
4536
  }
4537

4538
  /* --------------- Async Loading Cache --------------- */
4539

4540
  static final class BoundedLocalAsyncLoadingCache<K, V>
4541
      extends LocalAsyncLoadingCache<K, V> implements Serializable {
4542
    private static final long serialVersionUID = 1;
4543

4544
    final BoundedLocalCache<K, CompletableFuture<V>> cache;
4545
    final boolean isWeighted;
4546

4547
    @Nullable ConcurrentMap<K, CompletableFuture<V>> mapView;
4548
    @Nullable Policy<K, V> policy;
4549

4550
    @SuppressWarnings("unchecked")
4551
    BoundedLocalAsyncLoadingCache(Caffeine<K, V> builder, AsyncCacheLoader<? super K, V> loader) {
4552
      super(loader);
1✔
4553
      isWeighted = builder.isWeighted();
1✔
4554
      cache = (BoundedLocalCache<K, CompletableFuture<V>>) LocalCacheFactory
1✔
4555
          .newBoundedLocalCache(builder, loader, /* async */ true);
1✔
4556
    }
1✔
4557

4558
    @Override
4559
    public BoundedLocalCache<K, CompletableFuture<V>> cache() {
4560
      return cache;
1✔
4561
    }
4562

4563
    @Override
4564
    public ConcurrentMap<K, CompletableFuture<V>> asMap() {
4565
      return (mapView == null) ? (mapView = new AsyncAsMapView<>(this)) : mapView;
1✔
4566
    }
4567

4568
    @Override
4569
    public Policy<K, V> policy() {
4570
      if (policy == null) {
1✔
4571
        @SuppressWarnings("unchecked")
4572
        BoundedLocalCache<K, V> castCache = (BoundedLocalCache<K, V>) cache;
1✔
4573
        Function<CompletableFuture<V>, V> transformer = Async::getIfReady;
1✔
4574
        @SuppressWarnings("unchecked")
4575
        Function<V, V> castTransformer = (Function<V, V>) transformer;
1✔
4576
        policy = new BoundedPolicy<>(castCache, castTransformer, isWeighted);
1✔
4577
      }
4578
      return policy;
1✔
4579
    }
4580

4581
    @SuppressWarnings("UnusedVariable")
4582
    private void readObject(ObjectInputStream stream) throws InvalidObjectException {
4583
      throw new InvalidObjectException("Proxy required");
1✔
4584
    }
4585

4586
    private Object writeReplace() {
4587
      return makeSerializationProxy(cache);
1✔
4588
    }
4589
  }
4590
}
4591

4592
/** The namespace for field padding through inheritance. */
4593
/** The namespace for field padding through inheritance. */
final class BLCHeader {

  /** 120 bytes of padding placed before the drain status to isolate its cache line. */
  static class PadDrainStatus {
    byte p000, p001, p002, p003, p004, p005, p006, p007;
    byte p008, p009, p010, p011, p012, p013, p014, p015;
    byte p016, p017, p018, p019, p020, p021, p022, p023;
    byte p024, p025, p026, p027, p028, p029, p030, p031;
    byte p032, p033, p034, p035, p036, p037, p038, p039;
    byte p040, p041, p042, p043, p044, p045, p046, p047;
    byte p048, p049, p050, p051, p052, p053, p054, p055;
    byte p056, p057, p058, p059, p060, p061, p062, p063;
    byte p064, p065, p066, p067, p068, p069, p070, p071;
    byte p072, p073, p074, p075, p076, p077, p078, p079;
    byte p080, p081, p082, p083, p084, p085, p086, p087;
    byte p088, p089, p090, p091, p092, p093, p094, p095;
    byte p096, p097, p098, p099, p100, p101, p102, p103;
    byte p104, p105, p106, p107, p108, p109, p110, p111;
    byte p112, p113, p114, p115, p116, p117, p118, p119;
  }

  /** Enforces a memory layout to avoid false sharing by padding the drain status. */
  abstract static class DrainStatusRef extends PadDrainStatus {
    static final VarHandle DRAIN_STATUS;

    static {
      try {
        var lookup = MethodHandles.lookup();
        DRAIN_STATUS = lookup.findVarHandle(DrainStatusRef.class, "drainStatus", int.class);
      } catch (ReflectiveOperationException e) {
        throw new ExceptionInInitializerError(e);
      }
    }

    /** A drain is not taking place. */
    static final int IDLE = 0;
    /** A drain is required due to a pending write modification. */
    static final int REQUIRED = 1;
    /** A drain is in progress and will transition to idle. */
    static final int PROCESSING_TO_IDLE = 2;
    /** A drain is in progress and will transition to required. */
    static final int PROCESSING_TO_REQUIRED = 3;

    /** The draining status of the buffers. */
    volatile int drainStatus = IDLE;

    /**
     * Returns whether maintenance work is needed.
     *
     * @param delayable if draining the read buffer can be delayed
     */
    boolean shouldDrainBuffers(boolean delayable) {
      int status = drainStatusOpaque();
      if (status == IDLE) {
        // Nothing is pending; only drain when the caller cannot tolerate a delay.
        return !delayable;
      } else if (status == REQUIRED) {
        return true;
      } else if ((status == PROCESSING_TO_IDLE) || (status == PROCESSING_TO_REQUIRED)) {
        // A drain is already in progress.
        return false;
      }
      throw new IllegalStateException("Invalid drain status: " + drainStatus);
    }

    /** Reads the status with opaque (no ordering) semantics. */
    int drainStatusOpaque() {
      return (int) DRAIN_STATUS.getOpaque(this);
    }

    /** Reads the status with acquire semantics. */
    int drainStatusAcquire() {
      return (int) DRAIN_STATUS.getAcquire(this);
    }

    /** Writes the status with opaque (no ordering) semantics. */
    void setDrainStatusOpaque(int drainStatus) {
      DRAIN_STATUS.setOpaque(this, drainStatus);
    }

    /** Writes the status with release semantics. */
    void setDrainStatusRelease(int drainStatus) {
      DRAIN_STATUS.setRelease(this, drainStatus);
    }

    /** Atomically transitions the status from {@code expect} to {@code update}. */
    boolean casDrainStatus(int expect, int update) {
      return DRAIN_STATUS.compareAndSet(this, expect, update);
    }
  }
}
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2025 Coveralls, Inc