• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

taosdata / TDengine / #3633

11 Mar 2025 12:59PM UTC coverage: 0.0% (-60.7%) from 60.719%
#3633

push

travis-ci

web-flow
Merge pull request #30118 from taosdata/wl30

update ci workflow

0 of 280412 branches covered (0.0%)

Branch coverage included in aggregate %.

0 of 275582 relevant lines covered (0.0%)

0.0 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

0.0
/source/libs/stream/src/streamStartTask.c
1
/*
2
 * Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
3
 *
4
 * This program is free software: you can use, redistribute, and/or modify
5
 * it under the terms of the GNU Affero General Public License, version 3
6
 * or later ("AGPL"), as published by the Free Software Foundation.
7
 *
8
 * This program is distributed in the hope that it will be useful, but WITHOUT
9
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10
 * FITNESS FOR A PARTICULAR PURPOSE.
11
 *
12
 * You should have received a copy of the GNU Affero General Public License
13
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
14
 */
15

16
#include "executor.h"
17
#include "streamBackendRocksdb.h"
18
#include "streamInt.h"
19
#include "tmisce.h"
20
#include "tref.h"
21
#include "tsched.h"
22
#include "tstream.h"
23
#include "ttimer.h"
24
#include "wal.h"
25

26
// Timing record for one task's start (check-downstream) procedure; stored as
// the value in the start-info ready/failed hashmaps, keyed by STaskId.
typedef struct STaskInitTs {
  int64_t start;    // start timestamp of the check procedure (ms, from taosGetTimestampMs)
  int64_t end;      // completion timestamp (ms)
  bool    success;  // true when the task reported ready, false when it failed
} STaskInitTs;

// File-local helpers (defined below).
static int32_t prepareBeforeStartTasks(SStreamMeta* pMeta, SArray** pList, int64_t now);
static bool    allCheckDownstreamRsp(SStreamMeta* pMeta, STaskStartInfo* pStartInfo, int32_t numOfTotal);
static void    displayStatusInfo(SStreamMeta* pMeta, SHashObj* pTaskSet, bool succ);
35

36
// restore the checkpoint id by negotiating the latest consensus checkpoint id
37
int32_t streamMetaStartAllTasks(SStreamMeta* pMeta) {
×
38
  int32_t code = TSDB_CODE_SUCCESS;
×
39
  int32_t vgId = pMeta->vgId;
×
40
  int64_t now = taosGetTimestampMs();
×
41
  SArray* pTaskList = NULL;
×
42

43
  int32_t numOfTasks = taosArrayGetSize(pMeta->pTaskList);
×
44
  stInfo("vgId:%d start to consensus checkpointId for all %d task(s), start ts:%" PRId64, vgId, numOfTasks, now);
×
45

46
  if (numOfTasks == 0) {
×
47
    stInfo("vgId:%d no tasks exist, quit from consensus checkpointId", pMeta->vgId);
×
48

49
    streamMetaWLock(pMeta);
×
50
    streamMetaResetStartInfo(&pMeta->startInfo, vgId);
×
51
    streamMetaWUnLock(pMeta);
×
52
    return TSDB_CODE_SUCCESS;
×
53
  }
54

55
  code = prepareBeforeStartTasks(pMeta, &pTaskList, now);
×
56
  if (code != TSDB_CODE_SUCCESS) {
×
57
    return TSDB_CODE_SUCCESS;  // ignore the error and return directly
×
58
  }
59

60
  // broadcast the check downstream tasks msg only for tasks with related fill-history tasks.
61
  numOfTasks = taosArrayGetSize(pTaskList);
×
62

63
  // prepare the fill-history task before starting all stream tasks, to avoid fill-history tasks are started without
64
  // initialization, when the operation of check downstream tasks status is executed far quickly.
65
  for (int32_t i = 0; i < numOfTasks; ++i) {
×
66
    SStreamTaskId* pTaskId = taosArrayGet(pTaskList, i);
×
67
    SStreamTask*   pTask = NULL;
×
68
    code = streamMetaAcquireTask(pMeta, pTaskId->streamId, pTaskId->taskId, &pTask);
×
69
    if ((pTask == NULL) || (code != 0)) {
×
70
      stError("vgId:%d failed to acquire task:0x%x during start task, it may be dropped", pMeta->vgId, pTaskId->taskId);
×
71
      int32_t ret = streamMetaAddFailedTask(pMeta, pTaskId->streamId, pTaskId->taskId);
×
72
      if (ret) {
×
73
        stError("s-task:0x%x add check downstream failed, core:%s", pTaskId->taskId, tstrerror(ret));
×
74
      }
75
      continue;
×
76
    }
77

78
    if ((pTask->pBackend == NULL) && ((pTask->info.fillHistory == 1) || HAS_RELATED_FILLHISTORY_TASK(pTask))) {
×
79
      code = pMeta->expandTaskFn(pTask);
×
80
      if (code != TSDB_CODE_SUCCESS) {
×
81
        stError("s-task:0x%x vgId:%d failed to expand stream backend", pTaskId->taskId, vgId);
×
82
        streamMetaAddFailedTaskSelf(pTask, pTask->execInfo.readyTs);
×
83
      }
84
    }
85

86
    streamMetaReleaseTask(pMeta, pTask);
×
87
  }
88

89
  // Tasks, with related fill-history task or without any checkpoint yet, can be started directly here.
90
  for (int32_t i = 0; i < numOfTasks; ++i) {
×
91
    SStreamTaskId* pTaskId = taosArrayGet(pTaskList, i);
×
92

93
    SStreamTask* pTask = NULL;
×
94
    code = streamMetaAcquireTask(pMeta, pTaskId->streamId, pTaskId->taskId, &pTask);
×
95
    if ((pTask == NULL )|| (code != 0)) {
×
96
      stError("vgId:%d failed to acquire task:0x%x during start tasks", pMeta->vgId, pTaskId->taskId);
×
97
      int32_t ret = streamMetaAddFailedTask(pMeta, pTaskId->streamId, pTaskId->taskId);
×
98
      if (ret) {
×
99
        stError("s-task:0x%x failed add check downstream failed, core:%s", pTaskId->taskId, tstrerror(ret));
×
100
      }
101

102
      continue;
×
103
    }
104

105
    STaskExecStatisInfo* pInfo = &pTask->execInfo;
×
106

107
    // fill-history task can only be launched by related stream tasks.
108
    if (pTask->info.fillHistory == 1) {
×
109
      stDebug("s-task:%s fill-history task wait related stream task start", pTask->id.idStr);
×
110
      streamMetaReleaseTask(pMeta, pTask);
×
111
      continue;
×
112
    }
113

114
    // ready now, start the related fill-history task
115
    if (pTask->status.downstreamReady == 1) {
×
116
      if (HAS_RELATED_FILLHISTORY_TASK(pTask)) {
×
117
        stDebug("s-task:%s downstream ready, no need to check downstream, check only related fill-history task",
×
118
                pTask->id.idStr);
119
        code = streamLaunchFillHistoryTask(pTask);  // todo: how about retry launch fill-history task?
×
120
        if (code) {
×
121
          stError("s-task:%s failed to launch history task, code:%s", pTask->id.idStr, tstrerror(code));
×
122
        }
123
      }
124

125
      code = streamMetaAddTaskLaunchResult(pMeta, pTaskId->streamId, pTaskId->taskId, pInfo->checkTs, pInfo->readyTs,
×
126
                                           true);
127
      streamMetaReleaseTask(pMeta, pTask);
×
128
      continue;
×
129
    }
130

131
    if (HAS_RELATED_FILLHISTORY_TASK(pTask)) {
×
132
      int32_t ret = streamTaskHandleEvent(pTask->status.pSM, TASK_EVENT_INIT);
×
133
      if (ret != TSDB_CODE_SUCCESS) {
×
134
        stError("vgId:%d failed to handle event:%d", pMeta->vgId, TASK_EVENT_INIT);
×
135
        code = ret;
×
136

137
        // do no added into result hashmap if it is failed due to concurrently starting of this stream task.
138
        if (code != TSDB_CODE_STREAM_CONFLICT_EVENT) {
×
139
          streamMetaAddFailedTaskSelf(pTask, pInfo->readyTs);
×
140
        }
141
      }
142

143
      streamMetaReleaseTask(pMeta, pTask);
×
144
      continue;
×
145
    }
146

147
    // negotiate the consensus checkpoint id for current task
148
    code = streamTaskSendNegotiateChkptIdMsg(pTask);
×
149

150
    // this task may has no checkpoint, but others tasks may generate checkpoint already?
151
    streamMetaReleaseTask(pMeta, pTask);
×
152
  }
153

154
  // prepare the fill-history task before starting all stream tasks, to avoid fill-history tasks are started without
155
  // initialization, when the operation of check downstream tasks status is executed far quickly.
156
  stInfo("vgId:%d start all task(s) completed", pMeta->vgId);
×
157
  taosArrayDestroy(pTaskList);
×
158
  return code;
×
159
}
160

161
// Snapshot the task list and reset the start-info bookkeeping, all under the
// meta write lock. On success *pList holds a duplicate of pMeta->pTaskList that
// the caller owns and must free with taosArrayDestroy().
// Returns TSDB_CODE_FAILED when the vnode is closing, terrno when the snapshot
// allocation fails, otherwise the result of streamMetaResetTaskStatus().
int32_t prepareBeforeStartTasks(SStreamMeta* pMeta, SArray** pList, int64_t now) {
  streamMetaWLock(pMeta);

  if (pMeta->closeFlag) {
    streamMetaWUnLock(pMeta);
    stError("vgId:%d vnode is closed, not start check task(s) downstream status", pMeta->vgId);
    return TSDB_CODE_FAILED;
  }

  *pList = taosArrayDup(pMeta->pTaskList, NULL);
  if (*pList == NULL) {
    // fix: release the write lock on this failure path too, otherwise the meta
    // lock is leaked and every later lock attempt deadlocks.
    streamMetaWUnLock(pMeta);
    return terrno;
  }

  taosHashClear(pMeta->startInfo.pReadyTaskSet);
  taosHashClear(pMeta->startInfo.pFailedTaskSet);
  pMeta->startInfo.startTs = now;

  int32_t code = streamMetaResetTaskStatus(pMeta);
  streamMetaWUnLock(pMeta);

  return code;
}
184

185
// Clear all bookkeeping of the current start-all-tasks round so that a new
// round can begin from a clean state.
void streamMetaResetStartInfo(STaskStartInfo* pStartInfo, int32_t vgId) {
  taosHashClear(pStartInfo->pReadyTaskSet);
  taosHashClear(pStartInfo->pFailedTaskSet);

  pStartInfo->tasksWillRestart = 0;
  pStartInfo->readyTs = 0;
  pStartInfo->elapsedTime = 0;
  pStartInfo->startAllTasks = 0;  // sentinel flag back to 0

  stDebug("vgId:%d clear start-all-task info", vgId);
}
196

197
// Record the launch (check-downstream) result of one task into the start-info
// ready/failed hashmaps. When results for all tasks of this vnode have been
// collected, print a summary, reset the start info and invoke the completion
// callback pStartInfo->completeFn.
// Returns 0 when the task no longer exists or no start-all round is active,
// otherwise the hash-put result or the completion callback's result.
int32_t streamMetaAddTaskLaunchResult(SStreamMeta* pMeta, int64_t streamId, int32_t taskId, int64_t startTs,
                                      int64_t endTs, bool ready) {
  STaskStartInfo* pStartInfo = &pMeta->startInfo;
  STaskId         id = {.streamId = streamId, .taskId = taskId};
  int32_t         vgId = pMeta->vgId;
  bool            allRsp = true;
  SStreamTask*    p = NULL;

  // the whole record/complete sequence runs under the meta write lock.
  streamMetaWLock(pMeta);
  int32_t code = streamMetaAcquireTaskUnsafe(pMeta, &id, &p);
  if (code != 0) {  // task does not exist in current vnode, not record the complete info
    stError("vgId:%d s-task:0x%x not exists discard the check downstream info", vgId, taskId);
    streamMetaWUnLock(pMeta);
    return 0;
  }

  // the acquire above only validated existence; the task object is not needed.
  streamMetaReleaseTask(pMeta, p);

  // only record results while a start-all-tasks round is in progress.
  if (pStartInfo->startAllTasks != 1) {
    int64_t el = endTs - startTs;
    stDebug(
        "vgId:%d not in start all task(s) process, not record launch result status, s-task:0x%x launch succ:%d elapsed "
        "time:%" PRId64 "ms",
        vgId, taskId, ready, el);
    streamMetaWUnLock(pMeta);
    return 0;
  }

  // store the timing record into the ready or failed set according to `ready`.
  STaskInitTs initTs = {.start = startTs, .end = endTs, .success = ready};
  SHashObj*   pDst = ready ? pStartInfo->pReadyTaskSet : pStartInfo->pFailedTaskSet;
  code = taosHashPut(pDst, &id, sizeof(id), &initTs, sizeof(STaskInitTs));
  if (code) {
    if (code == TSDB_CODE_DUP_KEY) {
      stError("vgId:%d record start task result failed, s-task:0x%" PRIx64
              " already exist start results in meta start task result hashmap",
              vgId, id.taskId);
    } else {
      stError("vgId:%d failed to record start task:0x%" PRIx64 " results, start all tasks failed", vgId, id.taskId);
    }
    streamMetaWUnLock(pMeta);
    return code;
  }

  int32_t numOfTotal = streamMetaGetNumOfTasks(pMeta);
  int32_t numOfRecv = taosHashGetSize(pStartInfo->pReadyTaskSet) + taosHashGetSize(pStartInfo->pFailedTaskSet);

  allRsp = allCheckDownstreamRsp(pMeta, pStartInfo, numOfTotal);
  if (allRsp) {
    // every task reported: compute elapsed time, dump per-task status, reset
    // the start info, then run the completion callback outside the lock.
    pStartInfo->readyTs = taosGetTimestampMs();
    pStartInfo->elapsedTime = (pStartInfo->startTs != 0) ? pStartInfo->readyTs - pStartInfo->startTs : 0;

    stDebug("vgId:%d all %d task(s) check downstream completed, last completed task:0x%x (succ:%d) startTs:%" PRId64
                ", readyTs:%" PRId64 " total elapsed time:%.2fs",
            vgId, numOfTotal, taskId, ready, pStartInfo->startTs, pStartInfo->readyTs,
            pStartInfo->elapsedTime / 1000.0);

    // print the initialization elapsed time and info
    displayStatusInfo(pMeta, pStartInfo->pReadyTaskSet, true);
    displayStatusInfo(pMeta, pStartInfo->pFailedTaskSet, false);
    streamMetaResetStartInfo(pStartInfo, vgId);
    streamMetaWUnLock(pMeta);

    code = pStartInfo->completeFn(pMeta);
  } else {
    streamMetaWUnLock(pMeta);
    stDebug("vgId:%d recv check downstream results, s-task:0x%x succ:%d, received:%d, total:%d", vgId, taskId, ready,
            numOfRecv, numOfTotal);
  }

  return code;
}
268

269
// check all existed tasks are received rsp
270
bool allCheckDownstreamRsp(SStreamMeta* pMeta, STaskStartInfo* pStartInfo, int32_t numOfTotal) {
×
271
  for (int32_t i = 0; i < numOfTotal; ++i) {
×
272
    SStreamTaskId* pTaskId = taosArrayGet(pMeta->pTaskList, i);
×
273
    if (pTaskId == NULL) {
×
274
      continue;
×
275
    }
276

277
    STaskId idx = {.streamId = pTaskId->streamId, .taskId = pTaskId->taskId};
×
278
    void*   px = taosHashGet(pStartInfo->pReadyTaskSet, &idx, sizeof(idx));
×
279
    if (px == NULL) {
×
280
      px = taosHashGet(pStartInfo->pFailedTaskSet, &idx, sizeof(idx));
×
281
      if (px == NULL) {
×
282
        return false;
×
283
      }
284
    }
285
  }
286

287
  return true;
×
288
}
289

290
// Log the start timing of every task recorded in pTaskSet (a set produced by
// streamMetaAddTaskLaunchResult, keyed by STaskId, valued by STaskInitTs).
void displayStatusInfo(SStreamMeta* pMeta, SHashObj* pTaskSet, bool succ) {
  int32_t vgId = pMeta->vgId;
  size_t  keyLen = 0;
  void*   pIter = NULL;

  stInfo("vgId:%d %d tasks check-downstream completed, %s", vgId, taosHashGetSize(pTaskSet),
         succ ? "success" : "failed");

  while ((pIter = taosHashIterate(pTaskSet, pIter)) != NULL) {
    STaskInitTs* pTs = pIter;
    void*        pKey = taosHashGetKey(pIter, &keyLen);

    SStreamTask* pTask = NULL;
    int32_t      code = streamMetaAcquireTaskUnsafe(pMeta, pKey, &pTask);
    if (code == 0) {
      stInfo("s-task:%s level:%d vgId:%d, init:%" PRId64 ", initEnd:%" PRId64 ", %s", pTask->id.idStr,
             pTask->info.taskLevel, vgId, pTs->start, pTs->end, pTs->success ? "success" : "failed");
      streamMetaReleaseTask(pMeta, pTask);
    } else {
      // the task has been removed from the meta since its result was recorded.
      stInfo("s-task:0x%x is dropped already, %s", (int32_t)((STaskId*)pKey)->taskId, succ ? "success" : "failed");
    }
  }
}
312

313
// Allocate the ready/failed result hashmaps of pStartInfo.
// Returns 0 on success, terrno on allocation failure (with no partial state
// left behind).
int32_t streamMetaInitStartInfo(STaskStartInfo* pStartInfo) {
  _hash_fn_t fp = taosGetDefaultHashFunction(TSDB_DATA_TYPE_VARCHAR);

  pStartInfo->pReadyTaskSet = taosHashInit(64, fp, false, HASH_NO_LOCK);
  if (pStartInfo->pReadyTaskSet == NULL) {
    return terrno;
  }

  pStartInfo->pFailedTaskSet = taosHashInit(4, fp, false, HASH_NO_LOCK);
  if (pStartInfo->pFailedTaskSet == NULL) {
    // fix: do not leak the ready set when the failed set cannot be created.
    taosHashCleanup(pStartInfo->pReadyTaskSet);
    pStartInfo->pReadyTaskSet = NULL;
    return terrno;
  }

  return 0;
}
328

329
// Release the start-info hashmaps and reset every field to its initial state.
void streamMetaClearStartInfo(STaskStartInfo* pStartInfo) {
  taosHashCleanup(pStartInfo->pReadyTaskSet);
  taosHashCleanup(pStartInfo->pFailedTaskSet);

  // fix: clear the pointers after cleanup so a later accidental use fails fast
  // instead of touching freed memory.
  pStartInfo->pReadyTaskSet = NULL;
  pStartInfo->pFailedTaskSet = NULL;

  pStartInfo->readyTs = 0;
  pStartInfo->elapsedTime = 0;
  pStartInfo->startTs = 0;
  pStartInfo->startAllTasks = 0;
  pStartInfo->tasksWillRestart = 0;
  pStartInfo->restartCount = 0;
}
339

340
// Start a single (non fill-history) stream task by checking its downstream
// status. Guards against concurrent starts via the task state machine
// (TASK_STATUS__UNINIT check under pTask->lock) and expands the backend under
// the same lock to avoid racing with task destruction.
// Returns TSDB_CODE_STREAM_TASK_IVLD_STATUS when the task cannot be started in
// its current state, TSDB_CODE_STREAM_INTERNAL_ERROR on an impossible
// downstream-ready state, otherwise the init-event handling result.
int32_t streamMetaStartOneTask(SStreamMeta* pMeta, int64_t streamId, int32_t taskId) {
  int32_t      code = 0;
  int32_t      vgId = pMeta->vgId;
  SStreamTask* pTask = NULL;
  bool         continueExec = true;

  stInfo("vgId:%d start task:0x%x by checking it's downstream status", vgId, taskId);

  code = streamMetaAcquireTask(pMeta, streamId, taskId, &pTask);
  if ((pTask == NULL) || (code != 0)) {
    stError("vgId:%d failed to acquire task:0x%x when starting task", vgId, taskId);
    int32_t ret = streamMetaAddFailedTask(pMeta, streamId, taskId);
    if (ret) {
      stError("s-task:0x%x add check downstream failed, code:%s", taskId, tstrerror(ret));
    }

    return TSDB_CODE_STREAM_TASK_IVLD_STATUS;
  }

  // fill-history task can only be launched by related stream tasks.
  STaskExecStatisInfo* pInfo = &pTask->execInfo;
  if (pTask->info.fillHistory == 1) {
    stError("s-task:0x%x vgId:%d fill-history task, not start here", taskId, vgId);
    streamMetaReleaseTask(pMeta, pTask);
    return TSDB_CODE_SUCCESS;
  }

  // the start all tasks procedure may happen to start the newly deployed stream task, and results in the
  // concurrently start this task by two threads.
  streamMutexLock(&pTask->lock);

  SStreamTaskState status = streamTaskGetStatus(pTask);
  if (status.state != TASK_STATUS__UNINIT) {
    stError("s-task:0x%x vgId:%d status:%s not uninit status, not start stream task", taskId, vgId, status.name);
    continueExec = false;
  } else {
    continueExec = true;
  }
  streamMutexUnlock(&pTask->lock);

  if (!continueExec) {
    streamMetaReleaseTask(pMeta, pTask);
    return TSDB_CODE_STREAM_TASK_IVLD_STATUS;
  }

  if (pTask->status.downstreamReady != 0) {
    stFatal("s-task:0x%x downstream should be not ready, but it ready here, internal error happens", taskId);
    streamMetaReleaseTask(pMeta, pTask);
    return TSDB_CODE_STREAM_INTERNAL_ERROR;
  }

  // avoid initialization and destroy running concurrently.
  streamMutexLock(&pTask->lock);
  if (pTask->pBackend == NULL) {
    code = pMeta->expandTaskFn(pTask);
    streamMutexUnlock(&pTask->lock);

    if (code != TSDB_CODE_SUCCESS) {
      streamMetaAddFailedTaskSelf(pTask, pInfo->readyTs);
    }
  } else {
    streamMutexUnlock(&pTask->lock);
  }

  // concurrently start task may cause the latter started task be failed, and also failed to added into meta result.
  if (code == TSDB_CODE_SUCCESS) {
    code = streamTaskHandleEvent(pTask->status.pSM, TASK_EVENT_INIT);
    if (code != TSDB_CODE_SUCCESS) {
      stError("s-task:%s vgId:%d failed to handle event:init-task, code:%s", pTask->id.idStr, pMeta->vgId,
              tstrerror(code));

      // do no added into result hashmap if it is failed due to concurrently starting of this stream task.
      if (code != TSDB_CODE_STREAM_CONFLICT_EVENT) {
        streamMetaAddFailedTaskSelf(pTask, pInfo->readyTs);
      }
    }
  }

  streamMetaReleaseTask(pMeta, pTask);
  return code;
}
421

422
// Stop every stream task in this vnode. A hb msg is sent to mnode before the
// tasks are closed, and the meta read lock is held for the whole procedure.
int32_t streamMetaStopAllTasks(SStreamMeta* pMeta) {
  streamMetaRLock(pMeta);

  SArray* pTaskList = NULL;
  int32_t numOfAll = taosArrayGetSize(pMeta->pTaskList);
  stDebug("vgId:%d stop all %d stream task(s)", pMeta->vgId, numOfAll);

  if (numOfAll == 0) {
    stDebug("vgId:%d stop all %d task(s) completed, elapsed time:0 Sec.", pMeta->vgId, numOfAll);
    streamMetaRUnLock(pMeta);
    return TSDB_CODE_SUCCESS;
  }

  int64_t startTs = taosGetTimestampMs();

  // send hb msg to mnode before closing all tasks.
  int32_t code = streamMetaSendMsgBeforeCloseTasks(pMeta, &pTaskList);
  if (code != TSDB_CODE_SUCCESS) {
    streamMetaRUnLock(pMeta);
    return code;
  }

  int32_t listSize = taosArrayGetSize(pTaskList);
  for (int32_t i = 0; i < listSize; ++i) {
    SStreamTaskId* pId = taosArrayGet(pTaskList, i);
    SStreamTask*   pTask = NULL;

    code = streamMetaAcquireTaskNoLock(pMeta, pId->streamId, pId->taskId, &pTask);
    if (code != TSDB_CODE_SUCCESS) {
      continue;  // the task may have been dropped concurrently
    }

    int32_t ret = streamTaskStop(pTask);
    if (ret) {
      stError("s-task:0x%x failed to stop task, code:%s", pId->taskId, tstrerror(ret));
    }

    streamMetaReleaseTask(pMeta, pTask);
  }

  taosArrayDestroy(pTaskList);

  double elapsedSec = (taosGetTimestampMs() - startTs) / 1000.0;
  stDebug("vgId:%d stop all %d task(s) completed, elapsed time:%.2f Sec.", pMeta->vgId, numOfAll, elapsedSec);

  streamMetaRUnLock(pMeta);
  return code;
}
470

471
// Decide whether the consensus-checkpointId request should be carried in the
// next hb msg to mnode. Returns 1 when it should, 0 otherwise. Also refreshes
// the status timestamp when the request is (re)sent.
int32_t streamTaskCheckIfReqConsenChkptId(SStreamTask* pTask, int64_t ts) {
  SConsenChkptInfo* pInfo = &pTask->status.consenChkptInfo;
  int32_t           vgId = pTask->pMeta->vgId;

  if (pInfo->status == TASK_CONSEN_CHKPT_REQ) {
    // mark the sending of req consensus checkpoint request.
    pInfo->status = TASK_CONSEN_CHKPT_SEND;
    pInfo->statusTs = ts;
    stDebug("s-task:%s vgId:%d set requiring consensus-chkptId in hbMsg, ts:%" PRId64, pTask->id.idStr,
            vgId, pInfo->statusTs);
    return 1;
  }

  int32_t elapsedSec = (ts - pInfo->statusTs) / 1000;

  // not recv consensus-checkpoint rsp for 60sec, send it again in hb to mnode
  if ((pInfo->status == TASK_CONSEN_CHKPT_SEND) && elapsedSec > 60) {
    pInfo->statusTs = ts;

    stWarn(
        "s-task:%s vgId:%d not recv consensus-chkptId for %ds(more than 60s), set requiring in Hb again, ts:%" PRId64,
        pTask->id.idStr, vgId, elapsedSec, pInfo->statusTs);
    return 1;
  }

  return 0;
}
498

499
// Mark that the consensus-checkpointId rsp (carrying transId) has been received.
void streamTaskSetConsenChkptIdRecv(SStreamTask* pTask, int32_t transId, int64_t ts) {
  SConsenChkptInfo* pChkInfo = &pTask->status.consenChkptInfo;

  pChkInfo->status = TASK_CONSEN_CHKPT_RECV;
  pChkInfo->consenChkptTransId = transId;
  pChkInfo->statusTs = ts;

  stInfo("s-task:%s set recv consen-checkpointId, transId:%d", pTask->id.idStr, transId);
}
507

508
// Flag the task as requiring a consensus-checkpointId negotiation, discarding
// any previously received transId.
void streamTaskSetReqConsenChkptId(SStreamTask* pTask, int64_t ts) {
  SConsenChkptInfo* pChkInfo = &pTask->status.consenChkptInfo;
  int32_t           prevTrans = pChkInfo->consenChkptTransId;

  pChkInfo->status = TASK_CONSEN_CHKPT_REQ;
  pChkInfo->statusTs = ts;
  pChkInfo->consenChkptTransId = 0;

  stDebug("s-task:%s set req consen-checkpointId flag, prev transId:%d, ts:%" PRId64, pTask->id.idStr, prevTrans, ts);
}
518

519
// Record task (streamId, taskId) as failed-to-start in the start-info result
// hashmaps, together with its related fill-history task when one exists.
// Returns TSDB_CODE_STREAM_TASK_NOT_EXIST when the task is no longer in the
// meta, otherwise the result of recording the launch failure.
int32_t streamMetaAddFailedTask(SStreamMeta* pMeta, int64_t streamId, int32_t taskId) {
  int32_t      code = TSDB_CODE_SUCCESS;
  int64_t      now = taosGetTimestampMs();
  int64_t      startTs = 0;
  bool         hasFillhistoryTask = false;
  STaskId      hId = {0};
  STaskId      id = {.streamId = streamId, .taskId = taskId};
  SStreamTask* pTask = NULL;

  stDebug("vgId:%d add start failed task:0x%x", pMeta->vgId, taskId);

  streamMetaRLock(pMeta);

  code = streamMetaAcquireTaskUnsafe(pMeta, &id, &pTask);
  if (code == 0) {
    // copy out what is needed, then drop the task ref and the meta lock before
    // calling streamMetaAddTaskLaunchResult (which takes the write lock itself).
    startTs = pTask->taskCheckInfo.startTs;
    hasFillhistoryTask = HAS_RELATED_FILLHISTORY_TASK(pTask);
    hId = pTask->hTaskInfo.id;
    streamMetaReleaseTask(pMeta, pTask);

    streamMetaRUnLock(pMeta);

    // add the failed task info, along with the related fill-history task info into tasks list.
    code = streamMetaAddTaskLaunchResult(pMeta, streamId, taskId, startTs, now, false);
    if (hasFillhistoryTask) {
      code = streamMetaAddTaskLaunchResult(pMeta, hId.streamId, hId.taskId, startTs, now, false);
    }
  } else {
    streamMetaRUnLock(pMeta);

    stError("failed to locate the stream task:0x%" PRIx64 "-0x%x (vgId:%d), it may have been destroyed or stopped",
            streamId, taskId, pMeta->vgId);
    code = TSDB_CODE_STREAM_TASK_NOT_EXIST;
  }

  return code;
}
556

557
// Record pTask itself (and its related fill-history task, if any) as
// failed-to-start, using the task's own check start timestamp.
void streamMetaAddFailedTaskSelf(SStreamTask* pTask, int64_t failedTs) {
  // fix: checkTs is a millisecond timestamp passed as int64_t to
  // streamMetaAddTaskLaunchResult elsewhere in this file; keeping it in an
  // int32_t truncated the value and corrupted the recorded start time.
  int64_t startTs = pTask->execInfo.checkTs;
  int32_t code = streamMetaAddTaskLaunchResult(pTask->pMeta, pTask->id.streamId, pTask->id.taskId, startTs, failedTs, false);
  if (code) {
    stError("s-task:%s failed to add self task failed to start, code:%s", pTask->id.idStr, tstrerror(code));
  }

  // automatically set the related fill-history task to be failed.
  if (HAS_RELATED_FILLHISTORY_TASK(pTask)) {
    STaskId* pId = &pTask->hTaskInfo.id;
    code = streamMetaAddTaskLaunchResult(pTask->pMeta, pId->streamId, pId->taskId, startTs, failedTs, false);
    if (code) {
      stError("s-task:0x%" PRIx64 " failed to add self task failed to start, code:%s", pId->taskId, tstrerror(code));
    }
  }
}
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc