nolar / kopf / 21455747283

28 Jan 2026 09:16PM UTC coverage: 91.196% (+0.1%) from 91.066%
Merge 27831f9b3 into cdd29fb50
Pull Request #844: Process events instantly and consistently, stop skipping the events due to "batching"

745 of 789 branches covered (94.42%)

Branch coverage is included in the aggregate percentage.

72 of 73 new or added lines in 5 files covered (98.63%).

1 existing line in 1 file now uncovered.

5708 of 6287 relevant lines covered (90.79%)

9.77 hits per line
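As a quick cross-check of the headline figure, the aggregate percentage can be reproduced by pooling the line and branch counters listed above. A minimal Python sketch (the pooling rule is an assumption based on the note that branch coverage is included in the aggregate):

# Reproducing the headline numbers from the counters above (assumed pooling rule).
lines_covered, lines_total = 5708, 6287
branches_covered, branches_total = 745, 789

line_pct = 100 * lines_covered / lines_total              # -> 90.79%
branch_pct = 100 * branches_covered / branches_total      # -> 94.42%
aggregate_pct = 100 * (lines_covered + branches_covered) / (lines_total + branches_total)

print(f"{line_pct:.2f} / {branch_pct:.2f} / {aggregate_pct:.3f}")   # 90.79 / 94.42 / 91.196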

Source File
/kopf/_core/reactor/processing.py (99.47% covered)
"""
Conversion of low-level events to high-level causes, and handling them.

These functions are invoked from the queueing module `kopf._core.reactor.queueing`,
which is the actual event loop of the operator process.

The conversion of the low-level events to the high-level causes is done by
checking the object's state and comparing it to the preserved last-seen state.

The framework itself makes the necessary changes to the object -- such as the
finalizers attachment, last-seen state updates, and handler status tracking --
thus provoking the low-level watch-events and additional queueing calls.
But these internal changes are filtered out from the cause detection
and therefore do not trigger the user-defined handlers.
"""
import asyncio
from collections.abc import Collection
from typing import NamedTuple

from kopf._cogs.aiokits import aiotime, aiotoggles
from kopf._cogs.configs import configuration
from kopf._cogs.structs import bodies, diffs, ephemera, finalizers, patches, references
from kopf._core.actions import application, execution, lifecycles, loggers, progression, throttlers
from kopf._core.engines import daemons, indexing, posting
from kopf._core.intents import causes, registries
from kopf._core.reactor import inventory, subhandling


async def process_resource_event(
        lifecycle: execution.LifeCycleFn,
        indexers: indexing.OperatorIndexers,
        registry: registries.OperatorRegistry,
        settings: configuration.OperatorSettings,
        memories: inventory.ResourceMemories,
        memobase: ephemera.AnyMemo,
        resource: references.Resource,
        raw_event: bodies.RawEvent,
        event_queue: posting.K8sEventQueue,
        stream_pressure: asyncio.Event | None = None,  # None for tests
        resource_indexed: aiotoggles.Toggle | None = None,  # None for tests & observation
        operator_indexed: aiotoggles.ToggleSet | None = None,  # None for tests & observation
        consistency_time: float | None = None,  # None for tests
) -> str | None:  # patched resource version, if patched
    """
    Handle a single custom object low-level watch-event.

    Convert the low-level events, as provided by the watching/queueing tasks,
    to the high-level causes, and then call the cause-handling logic.
    """

    # Recall what is stored about that object. Share it in little portions with the consumers.
    # And immediately forget it if the object is deleted from the cluster (but keep in memory).
    raw_type, raw_body = raw_event['type'], raw_event['object']
    memory = await memories.recall(raw_body, noticed_by_listing=raw_type is None, memobase=memobase)
    if memory.daemons_memory.live_fresh_body is not None:
        memory.daemons_memory.live_fresh_body._replace_with(raw_body)
    if raw_type == 'DELETED':
        await memories.forget(raw_body)

    # Convert to a heavy mapping-view wrapper only now, when heavy processing begins.
    # Raw-event streaming, queueing, and batching use regular lightweight dicts.
    # Why here? 1. Before it splits into multiple causes & handlers for the same object's body;
    # 2. After it is batched (queueing); 3. While the "raw" parsed JSON is still known;
    # 4. Same as where a patch object of a similar wrapping semantics is created.
    live_fresh_body = memory.daemons_memory.live_fresh_body
    body = live_fresh_body if live_fresh_body is not None else bodies.Body(raw_body)
    patch = patches.Patch()

    # Different loggers for different cases with different verbosity and exposure.
    local_logger = loggers.LocalObjectLogger(body=body, settings=settings)
    terse_logger = loggers.TerseObjectLogger(body=body, settings=settings)
    event_logger = loggers.ObjectLogger(body=body, settings=settings)

    # Throttle the non-handler-related errors. The regular event watching/batching continues
    # to prevent queue overfilling, but the processing is skipped (events are ignored).
    # Choice of place: late enough to have a per-resource memory for a throttler; also, a logger.
    # But early enough to catch environment errors from K8s API, and from most of the complex code.
    async with throttlers.throttled(
        throttler=memory.error_throttler,
        logger=local_logger,
        delays=settings.queueing.error_delays,
        wakeup=stream_pressure,
    ) as should_run:
        if should_run:

            # Each object has its own prefixed logger, to distinguish parallel handling.
            posting.event_queue_loop_var.set(asyncio.get_running_loop())
            posting.event_queue_var.set(event_queue)  # till the end of this object's task.

            # [Pre-]populate the indices. This must be lightweight.
            await indexing.index_resource(
                registry=registry,
                indexers=indexers,
                settings=settings,
                resource=resource,
                raw_event=raw_event,
                body=body,
                memo=memory.memo,
                memory=memory.indexing_memory,
                logger=terse_logger,
            )

            # Wait for all other individual resources and all other resource kinds' lists to finish.
            # If this one has changed while waiting for the global readiness, let it be reprocessed.
            if operator_indexed is not None and resource_indexed is not None:
                await operator_indexed.drop_toggle(resource_indexed)
            if operator_indexed is not None:
                # TODO: reconsider this! should on-event be called before indexing is finished?
                await operator_indexed.wait_for(True)  # other resource kinds & objects.
            if stream_pressure is not None and stream_pressure.is_set():
                # TODO: reconsider this! we should process events even if the newer ones arrived.
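                # NOTE: the coverage report flags the early-exit below as not yet covered by the tests.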
                return None

            # Do the magic -- do the job.
            delays, matched = await process_resource_causes(
                lifecycle=lifecycle,
                indexers=indexers,
                registry=registry,
                settings=settings,
                resource=resource,
                raw_event=raw_event,
                body=body,
                patch=patch,
                memory=memory,
                local_logger=local_logger,
                event_logger=event_logger,
                stream_pressure=stream_pressure,
                consistency_time=consistency_time,
            )

            # Whatever was done, apply the accumulated changes to the object, or sleep-n-touch for delays.
            # But only once, to reduce the number of API calls and the generated irrelevant events.
            # And only if the object is at least supposed to exist (not "GONE"), even if it actually does not.
            if raw_event['type'] != 'DELETED':
                applied, resource_version = await application.apply(
                    settings=settings,
                    resource=resource,
                    body=body,
                    patch=patch,
                    logger=local_logger,
                    delays=delays,
                    stream_pressure=stream_pressure,
                )
                if applied and matched:
                    local_logger.debug("Handling cycle is finished, waiting for new changes.")
                return resource_version
    return None


class _Causes(NamedTuple):
    watching_cause: causes.WatchingCause | None
    spawning_cause: causes.SpawningCause | None
    changing_cause: causes.ChangingCause | None


def _detect_causes(
        indexers: indexing.OperatorIndexers,
        registry: registries.OperatorRegistry,
        settings: configuration.OperatorSettings,
        resource: references.Resource,
        raw_event: bodies.RawEvent,
        body: bodies.Body,
        patch: patches.Patch,
        memory: inventory.ResourceMemory,
        local_logger: loggers.ObjectLogger,
        event_logger: loggers.ObjectLogger,
) -> _Causes:
    """Detect what are we going to do (or to skip) on this processing cycle."""
169

170
    finalizer = settings.persistence.finalizer
    extra_fields = (
        # NB: indexing handlers are useless here, they are handled on their own.
        registry._watching.get_extra_fields(resource=resource) |
        registry._changing.get_extra_fields(resource=resource) |
        registry._spawning.get_extra_fields(resource=resource))
    old = settings.persistence.diffbase_storage.fetch(body=body)
    new = settings.persistence.diffbase_storage.build(body=body, extra_fields=extra_fields)
    old = settings.persistence.progress_storage.clear(essence=old) if old is not None else None
    new = settings.persistence.progress_storage.clear(essence=new) if new is not None else None
    diff = diffs.diff(old, new)

    watching_cause = causes.detect_watching_cause(
        raw_event=raw_event,
        resource=resource,
        indices=indexers.indices,
        logger=local_logger,
        patch=patch,
        body=body,
        memo=memory.memo,
    ) if registry._watching.has_handlers(resource=resource) else None

    spawning_cause = causes.detect_spawning_cause(
        resource=resource,
        indices=indexers.indices,
        logger=event_logger,
        patch=patch,
        body=body,
        memo=memory.memo,
        reset=bool(diff),  # only essential changes reset idling, not every event
    ) if registry._spawning.has_handlers(resource=resource) else None

    changing_cause = causes.detect_changing_cause(
        finalizer=finalizer,
        raw_event=raw_event,
        resource=resource,
        indices=indexers.indices,
        logger=event_logger,
        patch=patch,
        body=body,
        old=old,
        new=new,
        diff=diff,
        memo=memory.memo,
        initial=memory.noticed_by_listing and not memory.fully_handled_once,
    ) if registry._changing.has_handlers(resource=resource) else None

    return _Causes(watching_cause, spawning_cause, changing_cause)


async def process_resource_causes(
        lifecycle: execution.LifeCycleFn,
        indexers: indexing.OperatorIndexers,
        registry: registries.OperatorRegistry,
        settings: configuration.OperatorSettings,
        resource: references.Resource,
        raw_event: bodies.RawEvent,
        body: bodies.Body,
        patch: patches.Patch,
        memory: inventory.ResourceMemory,
        local_logger: loggers.ObjectLogger,
        event_logger: loggers.ObjectLogger,
        stream_pressure: asyncio.Event | None,  # None for tests
        consistency_time: float | None,
) -> tuple[Collection[float], bool]:
    finalizer = settings.persistence.finalizer
    watching_cause, spawning_cause, changing_cause = _detect_causes(
        indexers=indexers,
        registry=registry,
        settings=settings,
        resource=resource,
        raw_event=raw_event,
        body=body,
        patch=patch,
        memory=memory,
        local_logger=local_logger,
        event_logger=event_logger,
    )

    # Invoke all the handlers that should or could be invoked at this processing cycle.
    # The low-level spies go ASAP always. However, the daemons are spawned before the high-level
    # handlers and killed after them: the daemons should live throughout the full object lifecycle.
    if watching_cause is not None:
        await process_watching_cause(
            lifecycle=lifecycles.all_at_once,
            registry=registry,
            settings=settings,
            cause=watching_cause,
        )

    spawning_delays: Collection[float] = []
    if spawning_cause is not None:
        spawning_delays = await process_spawning_cause(
            registry=registry,
            settings=settings,
            memory=memory,
            cause=spawning_cause,
        )

    # TODO: ----- SPLIT it here! ABOVE: consistency-independent handlers.
    # TODO: ----- SPLIT it here! BELOW: consistency-requiring handling.
    # TODO: BUT: finalizers? They do not use any state. But they use the spawning/changing cause.

    # If there are any handlers for this resource kind in general, but not for this specific object
    # due to filters, then be blind to it, store no state, and log nothing about the handling cycle.
    if changing_cause is not None and not registry._changing.prematch(cause=changing_cause):
        changing_cause = None

    # Block the object from deletion if we have anything to do in its end of life:
    # specifically, if there are daemons to kill or mandatory on-deletion handlers to call.
    # The high-level handlers are prevented if this event cycle is dedicated to the finalizer.
    # The low-level handlers (on-event spying & daemon spawning) are still executed asap.
    deletion_is_ongoing = finalizers.is_deletion_ongoing(body=body)
    deletion_is_blocked = finalizers.is_deletion_blocked(body=body, finalizer=finalizer)
    deletion_must_be_blocked = (
        (spawning_cause is not None and
         registry._spawning.requires_finalizer(
             cause=spawning_cause,
             excluded=memory.daemons_memory.forever_stopped,
         ))
        or
        (changing_cause is not None and
         registry._changing.requires_finalizer(
             cause=changing_cause,
         )))

    if deletion_must_be_blocked and not deletion_is_blocked and not deletion_is_ongoing:
        local_logger.debug("Adding the finalizer, thus preventing the actual deletion.")
        finalizers.block_deletion(body=body, patch=patch, finalizer=finalizer)
        changing_cause = None  # prevent further high-level processing this time

    if not deletion_must_be_blocked and deletion_is_blocked:
        local_logger.debug("Removing the finalizer, as there are no handlers requiring it.")
        finalizers.allow_deletion(body=body, patch=patch, finalizer=finalizer)
        changing_cause = None  # prevent further high-level processing this time

    # If the state is inconsistent (yet), wait for new events in the hope that they bring consistency.
    # If the wait exceeds its time and no new consistent events arrive, then fake the consistency.
    # However, if a patch is accumulated by now, skip waiting and apply it instantly (by exiting).
    # In that case, we are guaranteed to be inconsistent, so also skip the state-dependent handlers.
    # Never release the object (i.e., remove the finalizer) in the inconsistent state, always wait.
    consistency_is_required = changing_cause is not None
    consistency_is_achieved = consistency_time is None  # i.e. preexisting consistency
    if consistency_is_required and not consistency_is_achieved and not patch and consistency_time:
        loop = asyncio.get_running_loop()
        unslept = await aiotime.sleep(consistency_time - loop.time(), wakeup=stream_pressure)
        consistency_is_achieved = unslept is None  # "woke up" vs. "timed out"
    if consistency_is_required and not consistency_is_achieved:
        return list(spawning_delays), False  # exit to PATCHing and/or re-iterating over new events.

    # Now, the consistency is either pre-proven (by receiving or not expecting any resource version)
    # or implied (by exceeding the allowed consistency-waiting timeout while getting no new events).
    # So we can go for state-dependent handlers (change detection requires a consistent state).
    changing_delays: Collection[float] = []
    if changing_cause is not None:
        changing_delays = await process_changing_cause(
            lifecycle=lifecycle,
            registry=registry,
            settings=settings,
            memory=memory,
            cause=changing_cause,
        )

    # Release the object if everything is done, and it is marked for deletion.
    # But not when it has already gone.
    if deletion_is_ongoing and deletion_is_blocked and not spawning_delays and not changing_delays:
        local_logger.debug("Removing the finalizer, thus allowing the actual deletion.")
        finalizers.allow_deletion(body=body, patch=patch, finalizer=finalizer)

    delays = list(spawning_delays) + list(changing_delays)
    return (delays, changing_cause is not None)


async def process_watching_cause(
        lifecycle: execution.LifeCycleFn,
        registry: registries.OperatorRegistry,
        settings: configuration.OperatorSettings,
        cause: causes.WatchingCause,
) -> None:
    """
    Handle a received event, log but ignore all errors.

    This is a lightweight version of the cause handling, but for the raw events,
    without any progress persistence. Multi-step calls are also not supported.
    If the handler fails, it fails and is never retried.

    Note: K8s-event posting is skipped for `kopf.on.event` handlers,
    as they should be silent. Still, the messages are logged normally.
    """
    handlers = registry._watching.get_handlers(cause=cause)
    outcomes = await execution.execute_handlers_once(
        lifecycle=lifecycle,
        settings=settings,
        handlers=handlers,
        cause=cause,
        state=progression.State.from_scratch().with_handlers(handlers),
        default_errors=execution.ErrorsMode.IGNORED,
    )

    # Store the results, but not the handlers' progress.
    progression.deliver_results(outcomes=outcomes, patch=cause.patch)


async def process_spawning_cause(
        registry: registries.OperatorRegistry,
        settings: configuration.OperatorSettings,
        memory: inventory.ResourceMemory,
        cause: causes.SpawningCause,
) -> Collection[float]:
    """
    Spawn/kill all the background tasks of a resource.

    The spawning and killing happens in parallel with the resource-changing
    handlers invocation (even if it takes a few cycles). For this, the signal
    to terminate is sent to the daemons immediately, but the actual check
    of their shutdown is performed only when all the on-deletion handlers
    have succeeded (or after they were invoked if they are optional;
    or immediately if there were no on-deletion handlers to invoke at all).

    The resource remains blocked by the finalizers until all the daemons exit
    (except those marked as tolerating being orphaned).
    """

    # Refresh the up-to-date body & essential timestamp for all the daemons/timers.
    if memory.daemons_memory.live_fresh_body is None:
        memory.daemons_memory.live_fresh_body = cause.body
    if cause.reset:
        memory.daemons_memory.idle_reset_time = asyncio.get_running_loop().time()

    if finalizers.is_deletion_ongoing(cause.body):
        stopping_delays = await daemons.stop_daemons(
            settings=settings,
            daemons=memory.daemons_memory.running_daemons,
        )
        return stopping_delays

    else:
        handlers = registry._spawning.get_handlers(
            cause=cause,
            excluded=memory.daemons_memory.forever_stopped,
        )
        spawning_delays = await daemons.spawn_daemons(
            settings=settings,
            daemons=memory.daemons_memory.running_daemons,
            cause=cause,
            memory=memory.daemons_memory,
            handlers=handlers,
        )
        matching_delays = await daemons.match_daemons(
            settings=settings,
            daemons=memory.daemons_memory.running_daemons,
            handlers=handlers,
        )
        return list(spawning_delays) + list(matching_delays)


async def process_changing_cause(
        lifecycle: execution.LifeCycleFn,
        registry: registries.OperatorRegistry,
        settings: configuration.OperatorSettings,
        memory: inventory.ResourceMemory,
        cause: causes.ChangingCause,
) -> Collection[float]:
    """
    Handle a detected cause, as part of the bigger handler routine.
    """
    logger = cause.logger
    patch = cause.patch  # TODO get rid of this alias
    body = cause.body  # TODO get rid of this alias
    delays: Collection[float] = []
    done: bool | None = None
    skip: bool | None = None

    # Regular causes invoke the handlers.
    if cause.reason in causes.HANDLER_REASONS:
        title = causes.TITLES.get(cause.reason.value, repr(cause.reason.value))

        resource_registry = registry._changing
        owned_handlers = resource_registry.get_resource_handlers(resource=cause.resource)
        cause_handlers = resource_registry.get_handlers(cause=cause)
        storage = settings.persistence.progress_storage
        state = progression.State.from_storage(body=cause.body, storage=storage, handlers=owned_handlers)
        state = state.with_purpose(cause.reason).with_handlers(cause_handlers)

        # Report the causes that have been superseded (intercepted, overridden) by the current one.
        # The mix-in causes (i.e., resuming) are re-purposed if their handlers are still selected.
        # By the next cycle, all extras are purged or re-purposed, so the message does not repeat.
        for extra_purpose, counters in state.extras.items():  # usually 0..1 items, rarely 2+.
            extra_title = causes.TITLES.get(extra_purpose, repr(extra_purpose))
            logger.info(f"{extra_title.capitalize()} is superseded by {title.lower()}: "
                        f"{counters.success} succeeded; "
                        f"{counters.failure} failed; "
                        f"{counters.running} left to the moment.")
            state = state.with_purpose(purpose=cause.reason, handlers=cause_handlers)

        # Purge the now-irrelevant handlers if they were not re-purposed (extras are recalculated!).
        # The current cause continues afterwards, and overrides its own pre-purged handler states.
        # TODO: purge only the handlers that fell out of current purpose; but it is not critical
        if state.extras:
            state.purge(body=cause.body, patch=cause.patch,
                        storage=storage, handlers=owned_handlers)

        # Inform on the current cause/event on every processing cycle. Even if there are
        # no handlers -- to show what has happened and why the diff-base is patched.
        logger.debug(f"{title.capitalize()} is in progress: {body!r}")
        if cause.diff and cause.old is not None and cause.new is not None:
            logger.debug(f"{title.capitalize()} diff: {cause.diff!r}")

        if cause_handlers:
            outcomes = await execution.execute_handlers_once(
                lifecycle=lifecycle,
                settings=settings,
                handlers=cause_handlers,
                cause=cause,
                state=state,
                extra_context=subhandling.subhandling_context,
            )
            state = state.with_outcomes(outcomes)
            state.store(body=cause.body, patch=cause.patch, storage=storage)
            progression.deliver_results(outcomes=outcomes, patch=cause.patch)

            if state.done:
                counters = state.counts  # calculate only once
                logger.info(f"{title.capitalize()} is processed: "
                            f"{counters.success} succeeded; "
                            f"{counters.failure} failed.")
                state.purge(body=cause.body, patch=cause.patch,
                            storage=storage, handlers=owned_handlers)

            done = state.done
            delays = state.delays
        else:
            skip = True

    # Regular causes also do some implicit post-handling when all handlers are done.
    if done or skip:
        if cause.new is not None and cause.old != cause.new:
            settings.persistence.diffbase_storage.store(body=body, patch=patch, essence=cause.new)

        # Once all handlers have succeeded at least once for any reason, or if there were none,
        # prevent further resume-handlers (which otherwise happens on each watch-stream re-listing).
        memory.fully_handled_once = True

    # Informational causes just print the log lines.
    if cause.reason == causes.Reason.GONE:
        logger.debug("Deleted, really deleted, and we are notified.")

    if cause.reason == causes.Reason.FREE:
        logger.debug("Deletion, but we are done with it, and we do not care.")

    if cause.reason == causes.Reason.NOOP:
        logger.debug("Something has changed, but we are not interested (the essence is the same).")

    # The delay is then consumed by the main handling routine (in different ways).
    return delays
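For orientation: the consistency wait inside `process_resource_causes` sleeps until either the consistency deadline expires or new stream events arrive (`stream_pressure`), and then distinguishes a timeout from an early wake-up via the unslept remainder. Below is a minimal, self-contained sketch of that wake-up-or-timeout pattern using plain asyncio rather than kopf's `aiotime.sleep` helper; the function and variable names are illustrative only, not kopf APIs.

import asyncio

async def sleep_or_wakeup(delay: float, wakeup: asyncio.Event | None = None) -> float | None:
    """Sleep up to `delay` seconds; return the unslept remainder if woken early, else None."""
    if wakeup is None:
        await asyncio.sleep(max(0.0, delay))
        return None  # the full delay has passed
    loop = asyncio.get_running_loop()
    started = loop.time()
    try:
        await asyncio.wait_for(wakeup.wait(), timeout=max(0.0, delay))
    except asyncio.TimeoutError:
        return None  # timed out: no new events arrived in time
    return max(0.0, delay - (loop.time() - started))  # woken up early by a new event

async def main() -> None:
    pressure = asyncio.Event()
    asyncio.get_running_loop().call_later(0.1, pressure.set)  # a "new event" arrives shortly
    unslept = await sleep_or_wakeup(1.0, pressure)
    print("woken early" if unslept is not None else "timed out", unslept)

asyncio.run(main())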