taosdata / TDengine / #4142

20 May 2025 07:22AM UTC coverage: 62.238% (-0.9%) from 63.096%
Build #4142 · push · travis-ci · web-flow
docs(datain): add topic meta options docs in tmq (#31147)

155113 of 318088 branches covered (48.76%)
Branch coverage included in aggregate %.
240242 of 317147 relevant lines covered (75.75%)
13602566.83 hits per line
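
As noted above, branch coverage is included in the aggregate figure; the reported numbers are consistent with that: (240242 + 155113) covered lines and branches out of (317147 + 318088) = 395355 / 635235 ≈ 62.238%.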

Source File

/source/libs/stream/src/streamStartTask.c (65.26%)
/*
 * Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
 *
 * This program is free software: you can use, redistribute, and/or modify
 * it under the terms of the GNU Affero General Public License, version 3
 * or later ("AGPL"), as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.
 *
 * You should have received a copy of the GNU Affero General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "executor.h"
#include "streamBackendRocksdb.h"
#include "streamInt.h"
#include "tmisce.h"
#include "tref.h"
#include "tsched.h"
#include "tstream.h"
#include "ttimer.h"
#include "wal.h"

typedef struct STaskInitTs {
  int64_t start;
  int64_t end;
  bool    success;
} STaskInitTs;

static int32_t prepareBeforeStartTasks(SStreamMeta* pMeta, SArray** pList, int64_t now);
static bool    allCheckDownstreamRsp(SStreamMeta* pMeta, STaskStartInfo* pStartInfo, int32_t numOfTotal);
static void    displayStatusInfo(SStreamMeta* pMeta, SHashObj* pTaskSet, bool succ);

// restore the checkpoint id by negotiating the latest consensus checkpoint id
int32_t streamMetaStartAllTasks(SStreamMeta* pMeta) {
  int32_t code = TSDB_CODE_SUCCESS;
  int32_t vgId = pMeta->vgId;
  int64_t now = taosGetTimestampMs();
  SArray* pTaskList = NULL;
  int32_t numOfConsensusChkptIdTasks = 0;
  int32_t numOfTasks = 0;

  numOfTasks = taosArrayGetSize(pMeta->pTaskList);
  if (numOfTasks == 0) {
    stInfo("vgId:%d no tasks exist, quit from consensus checkpointId", pMeta->vgId);
    streamMetaResetStartInfo(&pMeta->startInfo, vgId);
    return TSDB_CODE_SUCCESS;
  }

  stInfo("vgId:%d start to consensus checkpointId for all %d task(s), start ts:%" PRId64, vgId, numOfTasks, now);

  code = prepareBeforeStartTasks(pMeta, &pTaskList, now);
  if (code != TSDB_CODE_SUCCESS) {
    return TSDB_CODE_SUCCESS;  // ignore the error and return directly
  }

  // broadcast the check downstream tasks msg only for tasks with related fill-history tasks.
  numOfTasks = taosArrayGetSize(pTaskList);

  // prepare the fill-history tasks before starting all stream tasks, to avoid fill-history tasks being started
  // without initialization when the check-downstream operation completes too quickly.
  for (int32_t i = 0; i < numOfTasks; ++i) {
    SStreamTaskId* pTaskId = taosArrayGet(pTaskList, i);
    SStreamTask*   pTask = NULL;

    code = streamMetaAcquireTaskNoLock(pMeta, pTaskId->streamId, pTaskId->taskId, &pTask);
    if ((pTask == NULL) || (code != 0)) {
      stError("vgId:%d failed to acquire task:0x%x during start task, it may be dropped", pMeta->vgId, pTaskId->taskId);
      int32_t ret = streamMetaAddFailedTask(pMeta, pTaskId->streamId, pTaskId->taskId, false);
      if (ret) {
        stError("s-task:0x%x add check downstream failed, code:%s", pTaskId->taskId, tstrerror(ret));
      }
      continue;
    }

    if ((pTask->pBackend == NULL) && ((pTask->info.fillHistory == 1) || HAS_RELATED_FILLHISTORY_TASK(pTask))) {
      code = pMeta->expandTaskFn(pTask);
      if (code != TSDB_CODE_SUCCESS) {
        stError("s-task:0x%x vgId:%d failed to expand stream backend", pTaskId->taskId, vgId);
        streamMetaAddFailedTaskSelf(pTask, pTask->execInfo.readyTs, false);
      }
    }

    streamMetaReleaseTask(pMeta, pTask);
  }

  // Tasks with a related fill-history task, or without any checkpoint yet, can be started directly here.
  for (int32_t i = 0; i < numOfTasks; ++i) {
    SStreamTaskId* pTaskId = taosArrayGet(pTaskList, i);

    SStreamTask* pTask = NULL;
    code = streamMetaAcquireTaskNoLock(pMeta, pTaskId->streamId, pTaskId->taskId, &pTask);
    if ((pTask == NULL) || (code != 0)) {
      stError("vgId:%d failed to acquire task:0x%x during start tasks", pMeta->vgId, pTaskId->taskId);
      int32_t ret = streamMetaAddFailedTask(pMeta, pTaskId->streamId, pTaskId->taskId, false);
      if (ret) {
        stError("s-task:0x%x add check downstream failed, code:%s", pTaskId->taskId, tstrerror(ret));
      }

      continue;
    }

    STaskExecStatisInfo* pInfo = &pTask->execInfo;

    // fill-history tasks can only be launched by their related stream tasks.
    if (pTask->info.fillHistory == 1) {
      stDebug("s-task:%s fill-history task wait related stream task start", pTask->id.idStr);
      streamMetaReleaseTask(pMeta, pTask);
      continue;
    }

    // ready now, start the related fill-history task
    if (pTask->status.downstreamReady == 1) {
      if (HAS_RELATED_FILLHISTORY_TASK(pTask)) {
        stDebug("s-task:%s downstream ready, no need to check downstream, check only related fill-history task",
                pTask->id.idStr);
        code = streamLaunchFillHistoryTask(pTask, false);  // todo: how about retry launch fill-history task?
        if (code) {
          stError("s-task:%s failed to launch history task, code:%s", pTask->id.idStr, tstrerror(code));
        }
      }

      code = streamMetaAddTaskLaunchResultNoLock(pMeta, pTaskId->streamId, pTaskId->taskId, pInfo->checkTs,
                                                 pInfo->readyTs, true);
      streamMetaReleaseTask(pMeta, pTask);
      continue;
    }

    if (HAS_RELATED_FILLHISTORY_TASK(pTask)) {
      int32_t ret = streamTaskHandleEvent(pTask->status.pSM, TASK_EVENT_INIT);
      if (ret != TSDB_CODE_SUCCESS) {
        stError("vgId:%d failed to handle event:%d", pMeta->vgId, TASK_EVENT_INIT);
        code = ret;

        // do not add it into the result hashmap if it failed due to a concurrent start of this stream task.
        if (code != TSDB_CODE_STREAM_CONFLICT_EVENT) {
          streamMetaAddFailedTaskSelf(pTask, pInfo->readyTs, false);
        }
      }

      streamMetaReleaseTask(pMeta, pTask);
      continue;
    }

    // negotiate the consensus checkpoint id for the current task
    code = streamTaskSendNegotiateChkptIdMsg(pTask);
    if (code == 0) {
      numOfConsensusChkptIdTasks += 1;
    }

    // this task may have no checkpoint, but other tasks may have generated checkpoints already?
    streamMetaReleaseTask(pMeta, pTask);
  }

  if (numOfConsensusChkptIdTasks > 0) {
    pMeta->startInfo.curStage = START_MARK_REQ_CHKPID;
    SStartTaskStageInfo info = {.stage = pMeta->startInfo.curStage, .ts = now};

    void*   p = taosArrayPush(pMeta->startInfo.pStagesList, &info);
    int32_t num = (int32_t)taosArrayGetSize(pMeta->startInfo.pStagesList);

    if (p != NULL) {
      stDebug("vgId:%d %d task(s) 0 stage -> mark_req stage, reqTs:%" PRId64 " numOfStageHist:%d", pMeta->vgId,
              numOfConsensusChkptIdTasks, info.ts, num);
    } else {
      stError("vgId:%d %d task(s) 0 stage -> mark_req stage, reqTs:%" PRId64
              " numOfStageHist:%d, FAILED, out of memory",
              pMeta->vgId, numOfConsensusChkptIdTasks, info.ts, num);
    }
  }

  stInfo("vgId:%d start all task(s) completed", pMeta->vgId);
  taosArrayDestroy(pTaskList);
  return code;
}

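/* Editorial sketch (not part of the original file): both loops above, and the stop loop in
 * streamMetaStopAllTasks() further down, share the same acquire/use/release shape over a
 * copied task list, so a task that is dropped concurrently is simply skipped rather than
 * dereferenced. The helper name below is hypothetical and only illustrates that pattern. */
static void visitTasksSketch(SStreamMeta* pMeta, SArray* pTaskList) {
  int32_t numOfTasks = taosArrayGetSize(pTaskList);
  for (int32_t i = 0; i < numOfTasks; ++i) {
    SStreamTaskId* pTaskId = taosArrayGet(pTaskList, i);
    SStreamTask*   pTask = NULL;

    int32_t code = streamMetaAcquireTaskNoLock(pMeta, pTaskId->streamId, pTaskId->taskId, &pTask);
    if ((pTask == NULL) || (code != 0)) {
      continue;  // the task was dropped while the copied list was being processed
    }

    // ... per-task work goes here ...

    streamMetaReleaseTask(pMeta, pTask);
  }
}
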
int32_t prepareBeforeStartTasks(SStreamMeta* pMeta, SArray** pList, int64_t now) {
  STaskStartInfo* pInfo = &pMeta->startInfo;
  if (pMeta->closeFlag) {
    stError("vgId:%d vnode is closed, not start check task(s) downstream status", pMeta->vgId);
    return TSDB_CODE_FAILED;
  }

  *pList = taosArrayDup(pMeta->pTaskList, NULL);
  if (*pList == NULL) {
    stError("vgId:%d failed to dup tasklist, before restart tasks, code:%s", pMeta->vgId, tstrerror(terrno));
    return terrno;
  }

  taosHashClear(pInfo->pReadyTaskSet);
  taosHashClear(pInfo->pFailedTaskSet);
  taosArrayClear(pInfo->pStagesList);
  taosArrayClear(pInfo->pRecvChkptIdTasks);

  pInfo->partialTasksStarted = false;
  pInfo->curStage = 0;
  pInfo->startTs = now;

  int32_t code = streamMetaResetTaskStatus(pMeta);
  return code;
}

void streamMetaResetStartInfo(STaskStartInfo* pStartInfo, int32_t vgId) {
  taosHashClear(pStartInfo->pReadyTaskSet);
  taosHashClear(pStartInfo->pFailedTaskSet);
  taosArrayClear(pStartInfo->pStagesList);
  taosArrayClear(pStartInfo->pRecvChkptIdTasks);

  pStartInfo->tasksWillRestart = 0;
  pStartInfo->readyTs = 0;
  pStartInfo->elapsedTime = 0;
  pStartInfo->curStage = 0;
  pStartInfo->partialTasksStarted = false;

  // reset the sentinel flag value to be 0
  pStartInfo->startAllTasks = 0;
  stDebug("vgId:%d clear start-all-task info", vgId);
}

static void streamMetaLogLaunchTasksInfo(SStreamMeta* pMeta, int32_t numOfTotal, int32_t taskId, bool ready) {
  STaskStartInfo* pStartInfo = &pMeta->startInfo;

  pStartInfo->readyTs = taosGetTimestampMs();
  pStartInfo->elapsedTime = (pStartInfo->startTs != 0) ? pStartInfo->readyTs - pStartInfo->startTs : 0;

  for (int32_t i = 0; i < taosArrayGetSize(pStartInfo->pStagesList); ++i) {
    SStartTaskStageInfo* pStageInfo = taosArrayGet(pStartInfo->pStagesList, i);
    stDebug("vgId:%d start task procedure, stage:%d, ts:%" PRId64, pMeta->vgId, pStageInfo->stage, pStageInfo->ts);
  }

  stDebug("vgId:%d all %d task(s) check downstream completed, last completed task:0x%x (succ:%d) startTs:%" PRId64
          ", readyTs:%" PRId64 " total elapsed time:%.2fs",
          pMeta->vgId, numOfTotal, taskId, ready, pStartInfo->startTs, pStartInfo->readyTs,
          pStartInfo->elapsedTime / 1000.0);

  // print the initialization elapsed time and info
  displayStatusInfo(pMeta, pStartInfo->pReadyTaskSet, true);
  displayStatusInfo(pMeta, pStartInfo->pFailedTaskSet, false);
}

int32_t streamMetaAddTaskLaunchResultNoLock(SStreamMeta* pMeta, int64_t streamId, int32_t taskId, int64_t startTs,
                                            int64_t endTs, bool ready) {
  STaskStartInfo* pStartInfo = &pMeta->startInfo;
  STaskId         id = {.streamId = streamId, .taskId = taskId};
  int32_t         vgId = pMeta->vgId;
  bool            allRsp = true;
  SStreamTask*    p = NULL;

  int32_t code = streamMetaAcquireTaskUnsafe(pMeta, &id, &p);
  if (code != 0) {  // the task does not exist in the current vnode; do not record the completion info
    stError("vgId:%d s-task:0x%x not exists discard the check downstream info", vgId, taskId);
    return 0;
  }

  streamMetaReleaseTask(pMeta, p);

  if (pStartInfo->startAllTasks != 1) {
    int64_t el = endTs - startTs;
    stDebug(
        "vgId:%d not in start all task(s) process, not record launch result status, s-task:0x%x launch succ:%d elapsed "
        "time:%" PRId64 "ms",
        vgId, taskId, ready, el);
    return 0;
  }

  STaskInitTs initTs = {.start = startTs, .end = endTs, .success = ready};
  SHashObj*   pDst = ready ? pStartInfo->pReadyTaskSet : pStartInfo->pFailedTaskSet;
  code = taosHashPut(pDst, &id, sizeof(id), &initTs, sizeof(STaskInitTs));
  if (code) {
    if (code == TSDB_CODE_DUP_KEY) {
      stError("vgId:%d record start task result failed, s-task:0x%" PRIx64
              " already exist start results in meta start task result hashmap",
              vgId, id.taskId);
      code = 0;
    } else {
      stError("vgId:%d failed to record start task:0x%" PRIx64 " results, start all tasks failed, code:%s", vgId,
              id.taskId, tstrerror(code));
    }
  }

  int32_t numOfTotal = streamMetaGetNumOfTasks(pMeta);
  int32_t numOfSucc = taosHashGetSize(pStartInfo->pReadyTaskSet);
  int32_t numOfRecv = numOfSucc + taosHashGetSize(pStartInfo->pFailedTaskSet);

  if (pStartInfo->partialTasksStarted) {
    int32_t newTotal = taosArrayGetSize(pStartInfo->pRecvChkptIdTasks);
    stDebug(
        "vgId:%d start all tasks procedure is interrupted by transId:%d, wait for partial tasks rsp. recv check "
        "downstream results, s-task:0x%x succ:%d, received:%d results, waited for tasks:%d, total tasks:%d",
        vgId, pMeta->updateInfo.activeTransId, taskId, ready, numOfRecv, newTotal, numOfTotal);

    allRsp = allCheckDownstreamRspPartial(pStartInfo, newTotal, pMeta->vgId);
  } else {
    allRsp = allCheckDownstreamRsp(pMeta, pStartInfo, numOfTotal);
  }

  if (allRsp) {
    streamMetaLogLaunchTasksInfo(pMeta, numOfTotal, taskId, ready);
    streamMetaResetStartInfo(pStartInfo, vgId);

    code = pStartInfo->completeFn(pMeta);
  } else {
    stDebug("vgId:%d recv check downstream results, s-task:0x%x succ:%d, received:%d results, total:%d", vgId, taskId,
            ready, numOfRecv, numOfTotal);
  }

  return code;
}

int32_t streamMetaAddTaskLaunchResult(SStreamMeta* pMeta, int64_t streamId, int32_t taskId, int64_t startTs,
                                      int64_t endTs, bool ready) {
  int32_t code = 0;

  streamMetaWLock(pMeta);
  code = streamMetaAddTaskLaunchResultNoLock(pMeta, streamId, taskId, startTs, endTs, ready);
  streamMetaWUnLock(pMeta);

  return code;
}

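/* Editorial note (not part of the original file): streamMetaAddTaskLaunchResult() is the
 * locking wrapper around the *NoLock variant above. Callers that already hold the meta
 * write lock (the loops in streamMetaStartAllTasks() use the *NoLock helpers throughout)
 * are presumably expected to call the *NoLock form directly, roughly:
 *
 *   streamMetaWLock(pMeta);
 *   code = streamMetaAddTaskLaunchResultNoLock(pMeta, streamId, taskId, startTs, endTs, ready);
 *   // ... other work that needs the meta lock ...
 *   streamMetaWUnLock(pMeta);
 */
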
// check whether all existing tasks have received their rsp
bool allCheckDownstreamRsp(SStreamMeta* pMeta, STaskStartInfo* pStartInfo, int32_t numOfTotal) {
  for (int32_t i = 0; i < numOfTotal; ++i) {
    SStreamTaskId* pTaskId = taosArrayGet(pMeta->pTaskList, i);
    if (pTaskId == NULL) {
      continue;
    }

    STaskId idx = {.streamId = pTaskId->streamId, .taskId = pTaskId->taskId};
    void*   px = taosHashGet(pStartInfo->pReadyTaskSet, &idx, sizeof(idx));
    if (px == NULL) {
      px = taosHashGet(pStartInfo->pFailedTaskSet, &idx, sizeof(idx));
      if (px == NULL) {
        stDebug("vgId:%d s-task:0x%x start result not rsp yet", pMeta->vgId, (int32_t)idx.taskId);
        return false;
      }
    }
  }

  return true;
}

bool allCheckDownstreamRspPartial(STaskStartInfo* pStartInfo, int32_t num, int32_t vgId) {
  for (int32_t i = 0; i < num; ++i) {
    STaskId* pTaskId = taosArrayGet(pStartInfo->pRecvChkptIdTasks, i);
    if (pTaskId == NULL) {
      continue;
    }

    void* px = taosHashGet(pStartInfo->pReadyTaskSet, pTaskId, sizeof(STaskId));
    if (px == NULL) {
      px = taosHashGet(pStartInfo->pFailedTaskSet, pTaskId, sizeof(STaskId));
      if (px == NULL) {
        stDebug("vgId:%d s-task:0x%x start result not rsp yet", vgId, (int32_t)pTaskId->taskId);
        return false;
      }
    }
  }

  return true;
}

void displayStatusInfo(SStreamMeta* pMeta, SHashObj* pTaskSet, bool succ) {
  int32_t vgId = pMeta->vgId;
  void*   pIter = NULL;
  size_t  keyLen = 0;

  stInfo("vgId:%d %d tasks complete check-downstream, %s", vgId, taosHashGetSize(pTaskSet),
         succ ? "success" : "failed");

  while ((pIter = taosHashIterate(pTaskSet, pIter)) != NULL) {
    STaskInitTs* pInfo = pIter;
    void*        key = taosHashGetKey(pIter, &keyLen);
    SStreamTask* pTask = NULL;
    int32_t      code = streamMetaAcquireTaskUnsafe(pMeta, key, &pTask);
    if (code == 0) {
      stInfo("s-task:%s level:%d vgId:%d, init:%" PRId64 ", initEnd:%" PRId64 ", %s", pTask->id.idStr,
             pTask->info.taskLevel, vgId, pInfo->start, pInfo->end, pInfo->success ? "success" : "failed");
      streamMetaReleaseTask(pMeta, pTask);
    } else {
      stInfo("s-task:0x%x is dropped already, %s", (int32_t)((STaskId*)key)->taskId, succ ? "success" : "failed");
    }
  }
}

int32_t streamMetaInitStartInfo(STaskStartInfo* pStartInfo) {
  _hash_fn_t fp = taosGetDefaultHashFunction(TSDB_DATA_TYPE_VARCHAR);

  pStartInfo->pReadyTaskSet = taosHashInit(64, fp, false, HASH_NO_LOCK);
  if (pStartInfo->pReadyTaskSet == NULL) {
    return terrno;
  }

  pStartInfo->pFailedTaskSet = taosHashInit(4, fp, false, HASH_NO_LOCK);
  if (pStartInfo->pFailedTaskSet == NULL) {
    return terrno;
  }

  pStartInfo->pStagesList = taosArrayInit(4, sizeof(SStartTaskStageInfo));
  if (pStartInfo->pStagesList == NULL) {
    return terrno;
  }

  pStartInfo->pRecvChkptIdTasks = taosArrayInit(4, sizeof(STaskId));
  if (pStartInfo->pRecvChkptIdTasks == NULL) {
    return terrno;
  }

  pStartInfo->partialTasksStarted = false;
  return 0;
}

void streamMetaClearStartInfo(STaskStartInfo* pStartInfo) {
  streamMetaClearStartInfoPartial(pStartInfo);

  pStartInfo->startAllTasks = 0;
  pStartInfo->tasksWillRestart = 0;
  pStartInfo->restartCount = 0;
}

void streamMetaClearStartInfoPartial(STaskStartInfo* pStartInfo) {
  taosHashCleanup(pStartInfo->pReadyTaskSet);
  taosHashCleanup(pStartInfo->pFailedTaskSet);
  taosArrayDestroy(pStartInfo->pStagesList);
  taosArrayDestroy(pStartInfo->pRecvChkptIdTasks);

  pStartInfo->pReadyTaskSet = NULL;
  pStartInfo->pFailedTaskSet = NULL;
  pStartInfo->pStagesList = NULL;
  pStartInfo->pRecvChkptIdTasks = NULL;

  pStartInfo->readyTs = 0;
  pStartInfo->elapsedTime = 0;
  pStartInfo->startTs = 0;
}

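/* Editorial sketch (not part of the original file): a hypothetical setup/teardown pairing for
 * the start-info containers defined above; the real call sites live elsewhere in the stream
 * module, and error handling at those sites is assumed. */
static int32_t startInfoLifecycleSketch(SStreamMeta* pMeta) {
  // allocate the ready/failed hash sets and the stage/recv-chkptId lists
  int32_t code = streamMetaInitStartInfo(&pMeta->startInfo);
  if (code != 0) {
    return code;  // terrno from the failed allocation
  }

  // ... the vgroup runs start/stop cycles; streamMetaResetStartInfo() clears the
  // containers between rounds without freeing them ...

  // free the containers when the meta object itself is being torn down
  streamMetaClearStartInfo(&pMeta->startInfo);
  return 0;
}
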
int32_t streamMetaStartOneTask(SStreamMeta* pMeta, int64_t streamId, int32_t taskId) {
  int32_t      code = 0;
  int32_t      vgId = pMeta->vgId;
  SStreamTask* pTask = NULL;
  bool         continueExec = true;

  stInfo("vgId:%d start task:0x%x by checking its downstream status", vgId, taskId);

  code = streamMetaAcquireTask(pMeta, streamId, taskId, &pTask);
  if ((pTask == NULL) || (code != 0)) {
    stError("vgId:%d failed to acquire task:0x%x when starting task", vgId, taskId);
    int32_t ret = streamMetaAddFailedTask(pMeta, streamId, taskId, true);
    if (ret) {
      stError("s-task:0x%x add check downstream failed, code:%s", taskId, tstrerror(ret));
    }

    return TSDB_CODE_STREAM_TASK_IVLD_STATUS;
  }

  // fill-history tasks can only be launched by their related stream tasks.
  STaskExecStatisInfo* pInfo = &pTask->execInfo;
  if (pTask->info.fillHistory == 1) {
    stError("s-task:0x%x vgId:%d fill-history task, not start here", taskId, vgId);
    streamMetaReleaseTask(pMeta, pTask);
    return TSDB_CODE_SUCCESS;
  }

  // the start-all-tasks procedure may happen to start a newly deployed stream task, resulting in this task
  // being started concurrently by two threads.
  streamMutexLock(&pTask->lock);

  SStreamTaskState status = streamTaskGetStatus(pTask);
  if (status.state != TASK_STATUS__UNINIT) {
    stError("s-task:0x%x vgId:%d status:%s not uninit status, not start stream task", taskId, vgId, status.name);
    continueExec = false;
  } else {
    continueExec = true;
  }
  streamMutexUnlock(&pTask->lock);

  if (!continueExec) {
    streamMetaReleaseTask(pMeta, pTask);
    return TSDB_CODE_STREAM_TASK_IVLD_STATUS;
  }

  if (pTask->status.downstreamReady != 0) {
    stFatal("s-task:0x%x downstream should be not ready, but it is ready here, internal error happens", taskId);
    streamMetaReleaseTask(pMeta, pTask);
    return TSDB_CODE_STREAM_INTERNAL_ERROR;
  }

  streamMetaWLock(pMeta);

  // avoid initialization and destroy running concurrently.
  streamMutexLock(&pTask->lock);
  if (pTask->pBackend == NULL) {
    code = pMeta->expandTaskFn(pTask);
    streamMutexUnlock(&pTask->lock);

    if (code != TSDB_CODE_SUCCESS) {
      streamMetaAddFailedTaskSelf(pTask, pInfo->readyTs, false);
    }
  } else {
    streamMutexUnlock(&pTask->lock);
  }

  // a concurrent start may cause the later-started task to fail, and also fail to be added into the meta result.
  if (code == TSDB_CODE_SUCCESS) {
    code = streamTaskHandleEvent(pTask->status.pSM, TASK_EVENT_INIT);
    if (code != TSDB_CODE_SUCCESS) {
      stError("s-task:%s vgId:%d failed to handle event:init-task, code:%s", pTask->id.idStr, pMeta->vgId,
              tstrerror(code));

      // do not add it into the result hashmap if it failed due to a concurrent start of this stream task.
      if (code != TSDB_CODE_STREAM_CONFLICT_EVENT) {
        streamMetaAddFailedTaskSelf(pTask, pInfo->readyTs, false);
      }
    }
  }

  streamMetaWUnLock(pMeta);
  streamMetaReleaseTask(pMeta, pTask);

  return code;
}

int32_t streamMetaStopAllTasks(SStreamMeta* pMeta) {
  streamMetaWLock(pMeta);

  SArray* pTaskList = NULL;
  int32_t num = taosArrayGetSize(pMeta->pTaskList);
  stDebug("vgId:%d stop all %d stream task(s)", pMeta->vgId, num);

  if (num == 0) {
    stDebug("vgId:%d stop all %d task(s) completed, elapsed time:0 Sec.", pMeta->vgId, num);
    streamMetaWUnLock(pMeta);
    return TSDB_CODE_SUCCESS;
  }

  int64_t st = taosGetTimestampMs();

  // send hb msg to mnode before closing all tasks.
  int32_t code = streamMetaSendMsgBeforeCloseTasks(pMeta, &pTaskList);
  if (code != TSDB_CODE_SUCCESS) {
    streamMetaWUnLock(pMeta);
    return code;
  }

  int32_t numOfTasks = taosArrayGetSize(pTaskList);
  for (int32_t i = 0; i < numOfTasks; ++i) {
    SStreamTaskId* pTaskId = taosArrayGet(pTaskList, i);
    SStreamTask*   pTask = NULL;

    code = streamMetaAcquireTaskNoLock(pMeta, pTaskId->streamId, pTaskId->taskId, &pTask);
    if (code != TSDB_CODE_SUCCESS) {
      continue;
    }

    int32_t ret = streamTaskStop(pTask);
    if (ret) {
      stError("s-task:0x%x failed to stop task, code:%s", pTaskId->taskId, tstrerror(ret));
    }

    streamMetaReleaseTask(pMeta, pTask);
  }

  taosArrayDestroy(pTaskList);

  double el = (taosGetTimestampMs() - st) / 1000.0;
  stDebug("vgId:%d stop all %d task(s) completed, elapsed time:%.2f Sec.", pMeta->vgId, num, el);

  streamMetaWUnLock(pMeta);
  return code;
}

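/* Editorial summary (not part of the original file) of the consensus-checkpoint status values
 * as the three functions below use them; the enum itself is presumably defined in the stream
 * headers:
 *   TASK_CONSEN_CHKPT_REQ  - streamTaskSetReqConsenChkptId() flags that the task needs to
 *                            request a consensus checkpoint id.
 *   TASK_CONSEN_CHKPT_SEND - streamTaskCheckIfReqConsenChkptId() promotes REQ to SEND once the
 *                            vgroup reaches the START_MARK_REQ_CHKPID stage, i.e. the request
 *                            is carried in the next hb msg.
 *   TASK_CONSEN_CHKPT_RECV - streamTaskSetConsenChkptIdRecv() records the transId of the
 *                            received consensus checkpoint decision.
 */
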
int32_t streamTaskCheckIfReqConsenChkptId(SStreamTask* pTask, int64_t ts) {
  SConsenChkptInfo* pConChkptInfo = &pTask->status.consenChkptInfo;
  int32_t           vgId = pTask->pMeta->vgId;

  if (pTask->pMeta->startInfo.curStage == START_MARK_REQ_CHKPID) {
    if (pConChkptInfo->status == TASK_CONSEN_CHKPT_REQ) {
      // mark the sending of req consensus checkpoint request.
      pConChkptInfo->status = TASK_CONSEN_CHKPT_SEND;
      stDebug("s-task:%s vgId:%d set requiring consensus-chkptId in hbMsg, ts:%" PRId64, pTask->id.idStr, vgId,
              pConChkptInfo->statusTs);
      return 1;
    } else if (pConChkptInfo->status == 0) {
      stDebug("vgId:%d s-task:%s not need to set the req checkpointId, current stage:%d", vgId, pTask->id.idStr,
              pConChkptInfo->status);
    } else {
      stWarn("vgId:%d, s-task:%s restart procedure expired, start stage:%d", vgId, pTask->id.idStr,
             pConChkptInfo->status);
    }
  }

  return 0;
}

void streamTaskSetConsenChkptIdRecv(SStreamTask* pTask, int32_t transId, int64_t ts) {
  SConsenChkptInfo* pInfo = &pTask->status.consenChkptInfo;
  pInfo->consenChkptTransId = transId;
  pInfo->status = TASK_CONSEN_CHKPT_RECV;
  pInfo->statusTs = ts;

  stInfo("s-task:%s set recv consen-checkpointId, transId:%d", pTask->id.idStr, transId);
}

void streamTaskSetReqConsenChkptId(SStreamTask* pTask, int64_t ts) {
  SConsenChkptInfo* pInfo = &pTask->status.consenChkptInfo;
  int32_t           prevTrans = pInfo->consenChkptTransId;

  pInfo->status = TASK_CONSEN_CHKPT_REQ;
  pInfo->statusTs = ts;
  pInfo->consenChkptTransId = 0;

  stDebug("s-task:%s set req consen-checkpointId flag, prev transId:%d, ts:%" PRId64 ", task created ts:%" PRId64,
          pTask->id.idStr, prevTrans, ts, pTask->execInfo.created);
}

int32_t streamMetaAddFailedTask(SStreamMeta* pMeta, int64_t streamId, int32_t taskId, bool lock) {
  int32_t      code = TSDB_CODE_SUCCESS;
  int64_t      now = taosGetTimestampMs();
  int64_t      startTs = 0;
  bool         hasFillhistoryTask = false;
  STaskId      hId = {0};
  STaskId      id = {.streamId = streamId, .taskId = taskId};
  SStreamTask* pTask = NULL;

  stDebug("vgId:%d add start failed task:0x%x", pMeta->vgId, taskId);

  if (lock) {
    streamMetaRLock(pMeta);
  }

  code = streamMetaAcquireTaskUnsafe(pMeta, &id, &pTask);
  if (code == 0) {
    startTs = pTask->taskCheckInfo.startTs;
    hasFillhistoryTask = HAS_RELATED_FILLHISTORY_TASK(pTask);
    hId = pTask->hTaskInfo.id;
    streamMetaReleaseTask(pMeta, pTask);

    if (lock) {
      streamMetaRUnLock(pMeta);
    }

    // add the failed task info, along with the related fill-history task info, into the task list.
    if (lock) {
      code = streamMetaAddTaskLaunchResult(pMeta, streamId, taskId, startTs, now, false);
      if (hasFillhistoryTask) {
        code = streamMetaAddTaskLaunchResult(pMeta, hId.streamId, hId.taskId, startTs, now, false);
      }
    } else {
      code = streamMetaAddTaskLaunchResultNoLock(pMeta, streamId, taskId, startTs, now, false);
      if (hasFillhistoryTask) {
        code = streamMetaAddTaskLaunchResultNoLock(pMeta, hId.streamId, hId.taskId, startTs, now, false);
      }
    }
  } else {
    if (lock) {
      streamMetaRUnLock(pMeta);
    }

    stError("failed to locate the stream task:0x%" PRIx64 "-0x%x (vgId:%d), it may have been destroyed or stopped",
            streamId, taskId, pMeta->vgId);
    code = TSDB_CODE_STREAM_TASK_NOT_EXIST;
  }

  return code;
}

void streamMetaAddFailedTaskSelf(SStreamTask* pTask, int64_t failedTs, bool lock) {
  int32_t startTs = pTask->execInfo.checkTs;
  int32_t code = 0;

  if (lock) {
    code = streamMetaAddTaskLaunchResult(pTask->pMeta, pTask->id.streamId, pTask->id.taskId, startTs, failedTs, false);
  } else {
    code = streamMetaAddTaskLaunchResultNoLock(pTask->pMeta, pTask->id.streamId, pTask->id.taskId, startTs, failedTs,
                                               false);
  }

  if (code) {
    stError("s-task:%s failed to add self task failed to start, code:%s", pTask->id.idStr, tstrerror(code));
  }

  // automatically set the related fill-history task to be failed.
  if (HAS_RELATED_FILLHISTORY_TASK(pTask)) {
    STaskId* pId = &pTask->hTaskInfo.id;

    if (lock) {
      code = streamMetaAddTaskLaunchResult(pTask->pMeta, pId->streamId, pId->taskId, startTs, failedTs, false);
    } else {
      code = streamMetaAddTaskLaunchResultNoLock(pTask->pMeta, pId->streamId, pId->taskId, startTs, failedTs, false);
    }

    if (code) {
      stError("s-task:0x%" PRIx64 " failed to add self task failed to start, code:%s", pId->taskId, tstrerror(code));
    }
  }
}