temporalio / sdk-java / build #175 (pending completion)

Triggered by: push (github-actions / web-flow)
Commit: Worker / Build Id versioning (#1786) - "Implement new worker build id based versioning feature"

236 of 236 new or added lines in 24 files covered (100.0%)
18343 of 23697 relevant lines covered (77.41%)
0.81 hits per line

Source file: /temporal-sdk/src/main/java/io/temporal/internal/worker/WorkflowWorker.java (91.51% covered)
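
For context on the feature exercised by this build, here is a minimal sketch of how an application might opt a worker into build-id based versioning. It is not part of the covered file, and it assumes the public WorkerOptions builder exposes setBuildId(String) and setUseBuildIdForVersioning(boolean), which this change appears to plumb through (via SingleWorkerOptions) to the options.getBuildId() and options.isUsingBuildIdForVersioning() calls seen in WorkflowWorker below. The task queue name and build id are illustrative only.

import io.temporal.client.WorkflowClient;
import io.temporal.serviceclient.WorkflowServiceStubs;
import io.temporal.worker.Worker;
import io.temporal.worker.WorkerFactory;
import io.temporal.worker.WorkerOptions;

public class VersionedWorkerStarter {
  public static void main(String[] args) {
    // Connect to a locally running Temporal service and create a worker factory.
    WorkflowServiceStubs service = WorkflowServiceStubs.newLocalServiceStubs();
    WorkflowClient client = WorkflowClient.newInstance(service);
    WorkerFactory factory = WorkerFactory.newInstance(client);

    // Assumed builder methods introduced by the build-id versioning feature.
    WorkerOptions options =
        WorkerOptions.newBuilder()
            .setBuildId("build-2023-05-01") // hypothetical build id for this worker deployment
            .setUseBuildIdForVersioning(true) // opt into build-id based task routing
            .build();

    Worker worker = factory.newWorker("my-task-queue", options); // hypothetical task queue name
    // worker.registerWorkflowImplementationTypes(MyWorkflowImpl.class);
    factory.start();
  }
}

The covered source file follows.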
/*
 * Copyright (C) 2022 Temporal Technologies, Inc. All Rights Reserved.
 *
 * Copyright (C) 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Modifications copyright (C) 2017 Uber Technologies, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this material except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package io.temporal.internal.worker;

import static io.temporal.serviceclient.MetricsTag.METRICS_TAGS_CALL_OPTIONS_KEY;

import com.google.common.base.Preconditions;
import com.google.common.base.Strings;
import com.google.protobuf.ByteString;
import com.uber.m3.tally.Scope;
import com.uber.m3.tally.Stopwatch;
import com.uber.m3.util.ImmutableMap;
import io.temporal.api.common.v1.WorkflowExecution;
import io.temporal.api.enums.v1.TaskQueueKind;
import io.temporal.api.workflowservice.v1.*;
import io.temporal.internal.logging.LoggerTag;
import io.temporal.internal.retryer.GrpcRetryer;
import io.temporal.serviceclient.MetricsTag;
import io.temporal.serviceclient.RpcRetryOptions;
import io.temporal.serviceclient.WorkflowServiceStubs;
import io.temporal.worker.MetricsType;
import io.temporal.worker.WorkerMetricsTag;
import io.temporal.worker.WorkflowTaskDispatchHandle;
import java.util.Objects;
import java.util.Optional;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.Semaphore;
import java.util.concurrent.TimeUnit;
import javax.annotation.Nonnull;
import javax.annotation.Nullable;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.slf4j.MDC;

final class WorkflowWorker implements SuspendableWorker {
  private static final Logger log = LoggerFactory.getLogger(WorkflowWorker.class);

  private final WorkflowRunLockManager runLocks;

  private final WorkflowServiceStubs service;
  private final String namespace;
  private final String taskQueue;
  private final SingleWorkerOptions options;
  private final WorkflowExecutorCache cache;
  private final WorkflowTaskHandler handler;
  private final String stickyTaskQueueName;
  private final PollerOptions pollerOptions;
  private final Scope workerMetricsScope;
  private final GrpcRetryer grpcRetryer;
  private final EagerActivityDispatcher eagerActivityDispatcher;
  private final int executorSlots;
  private final Semaphore executorSlotsSemaphore;

  private PollTaskExecutor<WorkflowTask> pollTaskExecutor;

  // TODO this ideally should be volatile or final (and NoopWorker should go away)
  //  Currently the implementation looks safe without volatile, but it's brittle.
  @Nonnull private SuspendableWorker poller = new NoopWorker();

  public WorkflowWorker(
      @Nonnull WorkflowServiceStubs service,
      @Nonnull String namespace,
      @Nonnull String taskQueue,
      @Nullable String stickyTaskQueueName,
      @Nonnull SingleWorkerOptions options,
      @Nonnull WorkflowRunLockManager runLocks,
      @Nonnull WorkflowExecutorCache cache,
      @Nonnull WorkflowTaskHandler handler,
      @Nonnull EagerActivityDispatcher eagerActivityDispatcher) {
    this.service = Objects.requireNonNull(service);
    this.namespace = Objects.requireNonNull(namespace);
    this.taskQueue = Objects.requireNonNull(taskQueue);
    this.options = Objects.requireNonNull(options);
    this.stickyTaskQueueName = stickyTaskQueueName;
    this.pollerOptions = getPollerOptions(options);
    this.workerMetricsScope =
        MetricsTag.tagged(options.getMetricsScope(), WorkerMetricsTag.WorkerType.WORKFLOW_WORKER);
    this.runLocks = Objects.requireNonNull(runLocks);
    this.cache = Objects.requireNonNull(cache);
    this.handler = Objects.requireNonNull(handler);
    this.grpcRetryer = new GrpcRetryer(service.getServerCapabilities());
    this.eagerActivityDispatcher = eagerActivityDispatcher;
    this.executorSlots = options.getTaskExecutorThreadPoolSize();
    this.executorSlotsSemaphore = new Semaphore(executorSlots);
  }

  @Override
  public boolean start() {
    if (handler.isAnyTypeSupported()) {
      pollTaskExecutor =
          new PollTaskExecutor<>(
              namespace,
              taskQueue,
              options.getIdentity(),
              new TaskHandlerImpl(handler),
              pollerOptions,
              options.getTaskExecutorThreadPoolSize(),
              workerMetricsScope,
              true);
      StickyQueueBalancer stickyQueueBalancer =
          new StickyQueueBalancer(
              options.getPollerOptions().getPollThreadCount(), stickyTaskQueueName != null);

      poller =
          new Poller<>(
              options.getIdentity(),
              new WorkflowPollTask(
                  service,
                  namespace,
                  taskQueue,
                  stickyTaskQueueName,
                  options.getIdentity(),
                  options.getBuildId(),
                  options.isUsingBuildIdForVersioning(),
                  executorSlotsSemaphore,
                  stickyQueueBalancer,
                  workerMetricsScope,
                  service.getServerCapabilities()),
              pollTaskExecutor,
              pollerOptions,
              workerMetricsScope);
      poller.start();

      workerMetricsScope.counter(MetricsType.WORKER_START_COUNTER).inc(1);

      return true;
    } else {
      return false;
    }
  }

  @Override
  public CompletableFuture<Void> shutdown(ShutdownManager shutdownManager, boolean interruptTasks) {
    String semaphoreName = this + "#executorSlotsSemaphore";
    return poller
        .shutdown(shutdownManager, interruptTasks)
        .thenCompose(
            ignore ->
                !interruptTasks
                    ? shutdownManager.waitForSemaphorePermitsReleaseUntimed(
                        executorSlotsSemaphore, executorSlots, semaphoreName)
                    : CompletableFuture.completedFuture(null))
        .thenCompose(
            ignore ->
                pollTaskExecutor != null
                    ? pollTaskExecutor.shutdown(shutdownManager, interruptTasks)
                    : CompletableFuture.completedFuture(null))
        .exceptionally(
            e -> {
              log.error("Unexpected exception during shutdown", e);
              return null;
            });
  }

  @Override
  public void awaitTermination(long timeout, TimeUnit unit) {
    long timeoutMillis = ShutdownManager.awaitTermination(poller, unit.toMillis(timeout));
    // relies on the fact that the pollTaskExecutor is the last one to be shut down, no need to
    // wait separately for intermediate steps
    ShutdownManager.awaitTermination(pollTaskExecutor, timeoutMillis);
  }

  @Override
  public void suspendPolling() {
    poller.suspendPolling();
  }

  @Override
  public void resumePolling() {
    poller.resumePolling();
  }

  @Override
  public boolean isSuspended() {
    return poller.isSuspended();
  }

  @Override
  public boolean isShutdown() {
    return poller.isShutdown();
  }

  @Override
  public boolean isTerminated() {
    return poller.isTerminated() && (pollTaskExecutor == null || pollTaskExecutor.isTerminated());
  }

  @Override
  public WorkerLifecycleState getLifecycleState() {
    return poller.getLifecycleState();
  }

  private PollerOptions getPollerOptions(SingleWorkerOptions options) {
    PollerOptions pollerOptions = options.getPollerOptions();
    if (pollerOptions.getPollThreadNamePrefix() == null) {
      pollerOptions =
          PollerOptions.newBuilder(pollerOptions)
              .setPollThreadNamePrefix(
                  WorkerThreadsNameHelper.getWorkflowPollerThreadPrefix(namespace, taskQueue))
              .build();
    }
    return pollerOptions;
  }

  @Nullable
  public WorkflowTaskDispatchHandle reserveWorkflowExecutor() {
    // to avoid pollTaskExecutor becoming null inside the lambda, we cache it here
    final PollTaskExecutor<WorkflowTask> executor = pollTaskExecutor;
    return executor != null && !isSuspended() && executorSlotsSemaphore.tryAcquire()
        ? new WorkflowTaskDispatchHandle(
            workflowTask -> {
              String queueName =
                  workflowTask.getResponse().getWorkflowExecutionTaskQueue().getName();
              TaskQueueKind queueKind =
                  workflowTask.getResponse().getWorkflowExecutionTaskQueue().getKind();
              Preconditions.checkArgument(
                  this.taskQueue.equals(queueName)
                      || TaskQueueKind.TASK_QUEUE_KIND_STICKY.equals(queueKind)
                          && this.stickyTaskQueueName.equals(queueName),
                  "Got a WFT for a wrong queue %s, expected %s or %s",
                  queueName,
                  this.taskQueue,
                  this.stickyTaskQueueName);
              try {
                pollTaskExecutor.process(workflowTask);
                return true;
              } catch (RejectedExecutionException e) {
                return false;
              }
            },
            executorSlotsSemaphore)
        : null;
  }

  @Override
  public String toString() {
    return String.format(
        "WorkflowWorker{identity=%s, namespace=%s, taskQueue=%s}",
        options.getIdentity(), namespace, taskQueue);
  }

  private class TaskHandlerImpl implements PollTaskExecutor.TaskHandler<WorkflowTask> {

    final WorkflowTaskHandler handler;

    private TaskHandlerImpl(WorkflowTaskHandler handler) {
      this.handler = handler;
    }

    @Override
    public void handle(WorkflowTask task) throws Exception {
      PollWorkflowTaskQueueResponse workflowTaskResponse = task.getResponse();
      WorkflowExecution workflowExecution = workflowTaskResponse.getWorkflowExecution();
      String runId = workflowExecution.getRunId();
      String workflowType = workflowTaskResponse.getWorkflowType().getName();

      Scope workflowTypeScope =
          workerMetricsScope.tagged(ImmutableMap.of(MetricsTag.WORKFLOW_TYPE, workflowType));

      MDC.put(LoggerTag.WORKFLOW_ID, workflowExecution.getWorkflowId());
      MDC.put(LoggerTag.WORKFLOW_TYPE, workflowType);
      MDC.put(LoggerTag.RUN_ID, runId);

      boolean locked = false;
      if (!Strings.isNullOrEmpty(stickyTaskQueueName)) {
        // Serialize workflow task processing for a particular workflow run.
        // This is used to make sure that query tasks and real workflow tasks
        // are serialized when sticky is on.
        //
        // Acquiring a lock with a timeout to avoid having lots of workflow tasks for the same run
        // id waiting for a lock and consuming threads in case the lock is unavailable.
        //
        // Throws InterruptedException, which is propagated. That is the correct way to handle it
        // here.
        //
        // TODO 1: 5 seconds is chosen as a half of normal workflow task timeout.
        //   This value should be dynamically configured.
        // TODO 2: Does the "consider increasing workflow task timeout" advice in this exception
        //   make any sense?
        //   It MAYBE makes sense only if a previous workflow task timed out, is still in
        //   progress on the worker, and the next workflow task got picked up by the same exact
        //   worker from the general non-sticky task queue.
        //   Even in this case, the advice looks misleading; something else is going on
        //   (like extreme network latency).
        locked = runLocks.tryLock(runId, 5, TimeUnit.SECONDS);

        if (!locked) {
          throw new UnableToAcquireLockException(
              "Workflow lock for the run id hasn't been released by one of previous execution attempts, "
                  + "consider increasing workflow task timeout.");
        }
      }

      Stopwatch swTotal =
          workflowTypeScope.timer(MetricsType.WORKFLOW_TASK_EXECUTION_TOTAL_LATENCY).start();
      try {
        Optional<PollWorkflowTaskQueueResponse> nextWFTResponse = Optional.of(workflowTaskResponse);
        do {
          PollWorkflowTaskQueueResponse currentTask = nextWFTResponse.get();
          nextWFTResponse = Optional.empty();
          WorkflowTaskHandler.Result result = handleTask(currentTask, workflowTypeScope);
          try {
            RespondWorkflowTaskCompletedRequest taskCompleted = result.getTaskCompleted();
            RespondWorkflowTaskFailedRequest taskFailed = result.getTaskFailed();
            RespondQueryTaskCompletedRequest queryCompleted = result.getQueryCompleted();

            if (taskCompleted != null) {
              RespondWorkflowTaskCompletedRequest.Builder requestBuilder =
                  taskCompleted.toBuilder();
              try (EagerActivitySlotsReservation activitySlotsReservation =
                  new EagerActivitySlotsReservation(eagerActivityDispatcher)) {
                activitySlotsReservation.applyToRequest(requestBuilder);
                RespondWorkflowTaskCompletedResponse response =
                    sendTaskCompleted(
                        currentTask.getTaskToken(),
                        requestBuilder,
                        result.getRequestRetryOptions(),
                        workflowTypeScope);
                // If we were processing a speculative WFT, the server may instruct us that the
                // task was dropped by resetting our event ID.
                long resetEventId = response.getResetHistoryEventId();
                if (resetEventId != 0) {
                  result.getEventIdSetHandle().apply(resetEventId);
                }
                nextWFTResponse =
                    response.hasWorkflowTask()
                        ? Optional.of(response.getWorkflowTask())
                        : Optional.empty();
                // TODO we don't have to do this under the runId lock
                activitySlotsReservation.handleResponse(response);
              }
            } else if (taskFailed != null) {
              sendTaskFailed(
                  currentTask.getTaskToken(),
                  taskFailed.toBuilder(),
                  result.getRequestRetryOptions(),
                  workflowTypeScope);
            } else if (queryCompleted != null) {
              sendDirectQueryCompletedResponse(
                  currentTask.getTaskToken(), queryCompleted.toBuilder(), workflowTypeScope);
            }
          } catch (Exception e) {
            logExceptionDuringResultReporting(e, currentTask, result);
            workflowTypeScope.counter(MetricsType.WORKFLOW_TASK_EXECUTION_FAILURE_COUNTER).inc(1);
            // if we failed to report the workflow task completion back to the server,
            // our cached version of the workflow may be more advanced than the server is aware of.
            // We should discard this execution and perform a clean replay based on what the server
            // knows next time.
            cache.invalidate(
                workflowExecution, workflowTypeScope, "Failed result reporting to the server", e);
            throw e;
          }

          // this should be after sendReply, otherwise we may log
          // WORKFLOW_TASK_EXECUTION_FAILURE_COUNTER twice if sendReply throws
          if (result.getTaskFailed() != null) {
            // we don't trigger the counter in case of the legacy query
            // (which never has taskFailed set)
            workflowTypeScope.counter(MetricsType.WORKFLOW_TASK_EXECUTION_FAILURE_COUNTER).inc(1);
          }
          if (nextWFTResponse.isPresent()) {
            workflowTypeScope.counter(MetricsType.WORKFLOW_TASK_HEARTBEAT_COUNTER).inc(1);
          }
        } while (nextWFTResponse.isPresent());
      } finally {
        swTotal.stop();
        MDC.remove(LoggerTag.WORKFLOW_ID);
        MDC.remove(LoggerTag.WORKFLOW_TYPE);
        MDC.remove(LoggerTag.RUN_ID);

        task.getCompletionCallback().apply();

        if (locked) {
          runLocks.unlock(runId);
        }
      }
    }

    @Override
    public Throwable wrapFailure(WorkflowTask task, Throwable failure) {
      WorkflowExecution execution = task.getResponse().getWorkflowExecution();
      return new RuntimeException(
          "Failure processing workflow task. WorkflowId="
              + execution.getWorkflowId()
              + ", RunId="
              + execution.getRunId()
              + ", Attempt="
              + task.getResponse().getAttempt(),
          failure);
    }

    private WorkflowTaskHandler.Result handleTask(
        PollWorkflowTaskQueueResponse task, Scope workflowTypeMetricsScope) throws Exception {
      Stopwatch sw =
          workflowTypeMetricsScope.timer(MetricsType.WORKFLOW_TASK_EXECUTION_LATENCY).start();
      try {
        return handler.handleWorkflowTask(task);
      } catch (Throwable e) {
        // more detailed logging than we can do here is already done inside `handler`
        workflowTypeMetricsScope
            .counter(MetricsType.WORKFLOW_TASK_EXECUTION_FAILURE_COUNTER)
            .inc(1);
        workflowTypeMetricsScope.counter(MetricsType.WORKFLOW_TASK_NO_COMPLETION_COUNTER).inc(1);
        throw e;
      } finally {
        sw.stop();
      }
    }

    private RespondWorkflowTaskCompletedResponse sendTaskCompleted(
        ByteString taskToken,
        RespondWorkflowTaskCompletedRequest.Builder taskCompleted,
        RpcRetryOptions retryOptions,
        Scope workflowTypeMetricsScope) {
      GrpcRetryer.GrpcRetryerOptions grpcRetryOptions =
          new GrpcRetryer.GrpcRetryerOptions(
              RpcRetryOptions.newBuilder().buildWithDefaultsFrom(retryOptions), null);

      taskCompleted
          .setIdentity(options.getIdentity())
          .setNamespace(namespace)
          .setTaskToken(taskToken);
      if (service.getServerCapabilities().get().getBuildIdBasedVersioning()) {
        taskCompleted.setWorkerVersionStamp(options.workerVersionStamp());
      } else {
        taskCompleted.setBinaryChecksum(options.getBuildId());
      }

      return grpcRetryer.retryWithResult(
          () ->
              service
                  .blockingStub()
                  .withOption(METRICS_TAGS_CALL_OPTIONS_KEY, workflowTypeMetricsScope)
                  .respondWorkflowTaskCompleted(taskCompleted.build()),
          grpcRetryOptions);
    }

    private void sendTaskFailed(
        ByteString taskToken,
        RespondWorkflowTaskFailedRequest.Builder taskFailed,
        RpcRetryOptions retryOptions,
        Scope workflowTypeMetricsScope) {
      GrpcRetryer.GrpcRetryerOptions grpcRetryOptions =
          new GrpcRetryer.GrpcRetryerOptions(
              RpcRetryOptions.newBuilder().buildWithDefaultsFrom(retryOptions), null);

      taskFailed.setIdentity(options.getIdentity()).setNamespace(namespace).setTaskToken(taskToken);

      if (service.getServerCapabilities().get().getBuildIdBasedVersioning()) {
        taskFailed.setWorkerVersion(options.workerVersionStamp());
      }

      grpcRetryer.retry(
          () ->
              service
                  .blockingStub()
                  .withOption(METRICS_TAGS_CALL_OPTIONS_KEY, workflowTypeMetricsScope)
                  .respondWorkflowTaskFailed(taskFailed.build()),
          grpcRetryOptions);
    }

    private void sendDirectQueryCompletedResponse(
        ByteString taskToken,
        RespondQueryTaskCompletedRequest.Builder queryCompleted,
        Scope workflowTypeMetricsScope) {
      queryCompleted.setTaskToken(taskToken).setNamespace(namespace);
      // Do not retry query response
      service
          .blockingStub()
          .withOption(METRICS_TAGS_CALL_OPTIONS_KEY, workflowTypeMetricsScope)
          .respondQueryTaskCompleted(queryCompleted.build());
    }

    private void logExceptionDuringResultReporting(
        Exception e, PollWorkflowTaskQueueResponse currentTask, WorkflowTaskHandler.Result result) {
      if (log.isDebugEnabled()) {
        log.debug(
            "Failure during reporting of workflow progress to the server. If seen continuously the workflow might be stuck. WorkflowId={}, RunId={}, startedEventId={}, WFTResult={}",
            currentTask.getWorkflowExecution().getWorkflowId(),
            currentTask.getWorkflowExecution().getRunId(),
            currentTask.getStartedEventId(),
            result,
            e);
      } else {
        log.warn(
            "Failure while reporting workflow progress to the server. If seen continuously the workflow might be stuck. WorkflowId={}, RunId={}, startedEventId={}",
            currentTask.getWorkflowExecution().getWorkflowId(),
            currentTask.getWorkflowExecution().getRunId(),
            currentTask.getStartedEventId(),
            e);
      }
    }
  }
}