• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

taosdata / TDengine / #3768

28 Mar 2025 10:15AM UTC coverage: 33.726% (-0.3%) from 33.993%
#3768

push

travis-ci

happyguoxy
test:alter lcov result

144891 of 592084 branches covered (24.47%)

Branch coverage included in aggregate %.

218795 of 486283 relevant lines covered (44.99%)

765715.29 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

53.78
/source/libs/stream/src/streamCheckStatus.c
1
/*
2
 * Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
3
 *
4
 * This program is free software: you can use, redistribute, and/or modify
5
 * it under the terms of the GNU Affero General Public License, version 3
6
 * or later ("AGPL"), as published by the Free Software Foundation.
7
 *
8
 * This program is distributed in the hope that it will be useful, but WITHOUT
9
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10
 * FITNESS FOR A PARTICULAR PURPOSE.
11
 *
12
 * You should have received a copy of the GNU Affero General Public License
13
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
14
 */
15

16
#include "cos.h"
17
#include "rsync.h"
18
#include "streamBackendRocksdb.h"
19
#include "streamInt.h"
20

21
// Max time (ms) to wait for a check-rsp from a downstream task before it is
// treated as timed out.  Parenthesized so the macro expands safely inside any
// surrounding expression (e.g. division or comparison chains).
#define CHECK_NOT_RSP_DURATION (60 * 1000)  // 60 sec
22

23
static void    processDownstreamReadyRsp(SStreamTask* pTask, bool lock);
24
static void    rspMonitorFn(void* param, void* tmrId);
25
static void    streamTaskInitTaskCheckInfo(STaskCheckInfo* pInfo, STaskOutputInfo* pOutputInfo, int64_t startTs);
26
static int32_t streamTaskStartCheckDownstream(STaskCheckInfo* pInfo, const char* id);
27
static void    streamTaskCompleteCheckRsp(STaskCheckInfo* pInfo, bool lock, const char* id);
28
static void    streamTaskAddReqInfo(STaskCheckInfo* pInfo, int64_t reqId, int32_t taskId, int32_t vgId, const char* id);
29
static int32_t doSendCheckMsg(SStreamTask* pTask, SDownstreamStatusInfo* p);
30
static void    handleTimeoutDownstreamTasks(SStreamTask* pTask, SArray* pTimeoutList);
31
static void    handleNotReadyDownstreamTask(SStreamTask* pTask, SArray* pNotReadyList);
32
static int32_t streamTaskUpdateCheckInfo(STaskCheckInfo* pInfo, int32_t taskId, int32_t status, int64_t rspTs,
33
                                         int64_t reqId, int32_t* pNotReady, const char* id);
34
static void setCheckDownstreamReqInfo(SStreamTaskCheckReq* pReq, int64_t reqId, int32_t dstTaskId, int32_t dstNodeId);
35
static void getCheckRspStatus(STaskCheckInfo* pInfo, int64_t el, int32_t* numOfReady, int32_t* numOfFault,
36
                              int32_t* numOfNotRsp, SArray* pTimeoutList, SArray* pNotReadyList, const char* id);
37
static int32_t addDownstreamFailedStatusResultAsync(SMsgCb* pMsgCb, int32_t vgId, int64_t streamId, int32_t taskId);
38
static void    findCheckRspStatus(STaskCheckInfo* pInfo, int32_t taskId, SDownstreamStatusInfo** pStatusInfo);
39

40
// Handle a check request arriving FROM an upstream task: compare the sender's
// stage with the stage recorded locally for that upstream, and report whether
// this (downstream) task is ready to receive data from it.
//
// Returns:
//   TSDB_CODE_STREAM_TASK_NOT_EXIST  - upstream task is unknown to this task
//   0                                - sender's stage is uninitialized (-1)
//   TASK_UPSTREAM_NEW_STAGE          - recorded stage differs from sender's
//   TASK_DOWNSTREAM_NOT_READY        - stages match but this task is not ready
//   TASK_DOWNSTREAM_READY            - stages match and this task is ready
// *oldStage is set to the previously recorded stage whenever the upstream
// entry exists (i.e. in every case except TSDB_CODE_STREAM_TASK_NOT_EXIST).
int32_t streamTaskCheckStatus(SStreamTask* pTask, int32_t upstreamTaskId, int32_t vgId, int64_t stage,
                              int64_t* oldStage) {
  SStreamUpstreamEpInfo* pInfo = NULL;
  streamTaskGetUpstreamTaskEpInfo(pTask, upstreamTaskId, &pInfo);
  if (pInfo == NULL) {
    return TSDB_CODE_STREAM_TASK_NOT_EXIST;
  }

  *oldStage = pInfo->stage;
  const char* id = pTask->id.idStr;

  // stage == -1 means the upstream has not initialized its stage yet; answer
  // "not ready" (0) and let it retry later.
  if (stage == -1) {
    stDebug("s-task:%s receive check msg from upstream task:0x%x(vgId:%d), invalid stageId:%" PRId64 ", not ready", id,
            upstreamTaskId, vgId, stage);
    return 0;
  }

  // First check msg seen from this upstream: latch its stage value.
  if (pInfo->stage == -1) {
    pInfo->stage = stage;
    stDebug("s-task:%s receive check msg from upstream task:0x%x(vgId:%d) first time, init stage value:%" PRId64, id,
            upstreamTaskId, vgId, stage);
  }

  // A larger incoming stage indicates the upstream vnode restarted or its
  // leadership changed; flag the ongoing checkpoint as failed so mnode learns.
  if (pInfo->stage < stage) {
    stError("s-task:%s receive check msg from upstream task:0x%x(vgId:%d), new stage received:%" PRId64
            ", prev:%" PRId64,
            id, upstreamTaskId, vgId, stage, pInfo->stage);
    // record the checkpoint failure id and sent to mnode
    streamTaskSetCheckpointFailed(pTask);
  }

  if (pInfo->stage != stage) {
    return TASK_UPSTREAM_NEW_STAGE;
  } else if (pTask->status.downstreamReady != 1) {
    // This task has not itself finished checking ITS downstream yet.
    stDebug("s-task:%s vgId:%d leader:%d, downstream not ready", id, vgId, (pTask->pMeta->role == NODE_ROLE_LEADER));
    return TASK_DOWNSTREAM_NOT_READY;
  } else {
    return TASK_DOWNSTREAM_READY;
  }
}
79

80
// check status
81
// Send check-downstream requests to every downstream task, according to the
// task's output (dispatch) type, and start the check-rsp monitor timer.
// For a sink task (no downstream) the task is marked ready immediately.
//
// Fix: the shuffle branch previously overwrote `code` on every loop iteration
// without logging, so a send failure for one vgroup could be silently masked
// by a later success; it now logs per-send failures like the vtable branch.
void streamTaskSendCheckMsg(SStreamTask* pTask) {
  SDataRange*  pRange = &pTask->dataRange;
  STimeWindow* pWindow = &pRange->window;
  const char*  idstr = pTask->id.idStr;
  int32_t      code = 0;

  // Common part of every check request; the per-downstream target fields are
  // filled in by setCheckDownstreamReqInfo() below.
  SStreamTaskCheckReq req = {
      .streamId = pTask->id.streamId,
      .upstreamTaskId = pTask->id.taskId,
      .upstreamNodeId = pTask->info.nodeId,
      .childId = pTask->info.selfChildId,
      .stage = pTask->pMeta->stage,
  };

  if (pTask->outputInfo.type == TASK_OUTPUT__FIXED_DISPATCH) {
    // Single downstream task.
    streamTaskStartMonitorCheckRsp(pTask);

    STaskDispatcherFixed* pDispatch = &pTask->outputInfo.fixedDispatcher;

    setCheckDownstreamReqInfo(&req, tGenIdPI64(), pDispatch->taskId, pDispatch->nodeId);
    streamTaskAddReqInfo(&pTask->taskCheckInfo, req.reqId, pDispatch->taskId, pDispatch->nodeId, idstr);

    stDebug("s-task:%s (vgId:%d) stage:%" PRId64 " check single downstream task:0x%x(vgId:%d) ver:%" PRId64 "-%" PRId64
            " window:%" PRId64 "-%" PRId64 " QID:0x%" PRIx64,
            idstr, pTask->info.nodeId, req.stage, req.downstreamTaskId, req.downstreamNodeId, pRange->range.minVer,
            pRange->range.maxVer, pWindow->skey, pWindow->ekey, req.reqId);

    code = streamSendCheckMsg(pTask, &req, pTask->outputInfo.fixedDispatcher.nodeId,
                              &pTask->outputInfo.fixedDispatcher.epSet);

  } else if (pTask->outputInfo.type == TASK_OUTPUT__SHUFFLE_DISPATCH) {
    // One downstream task per vgroup.
    streamTaskStartMonitorCheckRsp(pTask);

    SArray* vgInfo = pTask->outputInfo.shuffleDispatcher.dbInfo.pVgroupInfos;

    int32_t numOfVgs = taosArrayGetSize(vgInfo);
    stDebug("s-task:%s check %d downstream tasks, ver:%" PRId64 "-%" PRId64 " window:%" PRId64 "-%" PRId64, idstr,
            numOfVgs, pRange->range.minVer, pRange->range.maxVer, pWindow->skey, pWindow->ekey);

    for (int32_t i = 0; i < numOfVgs; i++) {
      SVgroupInfo* pVgInfo = taosArrayGet(vgInfo, i);
      if (pVgInfo == NULL) {
        continue;
      }

      setCheckDownstreamReqInfo(&req, tGenIdPI64(), pVgInfo->taskId, pVgInfo->vgId);
      streamTaskAddReqInfo(&pTask->taskCheckInfo, req.reqId, pVgInfo->taskId, pVgInfo->vgId, idstr);

      stDebug("s-task:%s (vgId:%d) stage:%" PRId64
              " check downstream task:0x%x (vgId:%d) (shuffle), idx:%d, QID:0x%" PRIx64,
              idstr, pTask->info.nodeId, req.stage, req.downstreamTaskId, req.downstreamNodeId, i, req.reqId);
      code = streamSendCheckMsg(pTask, &req, pVgInfo->vgId, &pVgInfo->epSet);
      if (code != TSDB_CODE_SUCCESS) {
        // log here: a later successful iteration overwrites `code`, so this
        // failure would otherwise go unreported
        stError("s-task:%s failed to send check msg to downstream task:0x%x (vgId:%d), code:%s", idstr,
                req.downstreamTaskId, req.downstreamNodeId, tstrerror(code));
      }
    }
  } else if (pTask->outputInfo.type == TASK_OUTPUT__VTABLE_MAP) {
    // One downstream task per vtable-map entry.
    streamTaskStartMonitorCheckRsp(pTask);

    SArray* pTaskInfos = pTask->outputInfo.vtableMapDispatcher.taskInfos;
    int32_t numTasks = taosArrayGetSize(pTaskInfos);
    stDebug("s-task:%s check %d vtable downstream tasks, ver:%" PRId64 "-%" PRId64 " window:%" PRId64 "-%" PRId64,
            idstr, numTasks, pRange->range.minVer, pRange->range.maxVer, pWindow->skey, pWindow->ekey);

    for (int32_t i = 0; i < numTasks; ++i) {
      STaskDispatcherFixed* pAddr = taosArrayGet(pTaskInfos, i);
      if (pAddr == NULL) {
        continue;
      }

      setCheckDownstreamReqInfo(&req, tGenIdPI64(), pAddr->taskId, pAddr->nodeId);
      streamTaskAddReqInfo(&pTask->taskCheckInfo, req.reqId, pAddr->taskId, pAddr->nodeId, idstr);

      stDebug("s-task:%s (vgId:%d) stage:%" PRId64 " check vtable downstream task:0x%x (vgId:%d), QID:0x%" PRIx64,
              idstr, pTask->info.nodeId, req.stage, req.downstreamTaskId, req.downstreamNodeId, req.reqId);
      code = streamSendCheckMsg(pTask, &req, pAddr->nodeId, &pAddr->epSet);
      if (code != TSDB_CODE_SUCCESS) {
        stError("s-task:%s failed to send check msg to vtable downstream task:0x%x (vgId:%d), code:%s", idstr,
                req.downstreamTaskId, req.downstreamNodeId, tstrerror(code));
      }
    }
  } else {  // for sink task, set it ready directly.
    stDebug("s-task:%s (vgId:%d) set downstream ready, since no downstream", idstr, pTask->info.nodeId);
    streamTaskStopMonitorCheckRsp(&pTask->taskCheckInfo, idstr);
    processDownstreamReadyRsp(pTask, false);
  }

  if (code) {
    stError("s-task:%s failed to send check msg to downstream, code:%s", idstr, tstrerror(code));
  }
}
28✔
172

173
// Build the response for an incoming check request.  Only a leader vnode
// answers with a real status; a follower replies TASK_DOWNSTREAM_NOT_LEADER,
// and an unknown (not-yet-built) task replies TASK_DOWNSTREAM_NOT_READY.
void streamTaskProcessCheckMsg(SStreamMeta* pMeta, SStreamTaskCheckReq* pReq, SStreamTaskCheckRsp* pRsp) {
  int32_t downstreamId = pReq->downstreamTaskId;

  // Echo the identifying fields of the request back in the response.
  *pRsp = (SStreamTaskCheckRsp){
      .reqId = pReq->reqId,
      .streamId = pReq->streamId,
      .childId = pReq->childId,
      .downstreamNodeId = pReq->downstreamNodeId,
      .downstreamTaskId = pReq->downstreamTaskId,
      .upstreamNodeId = pReq->upstreamNodeId,
      .upstreamTaskId = pReq->upstreamTaskId,
  };

  // only the leader node handle the check request
  if (pMeta->role == NODE_ROLE_FOLLOWER) {
    stError(
        "s-task:0x%x invalid check msg from upstream:0x%x(vgId:%d), vgId:%d is follower, not handle check status msg",
        downstreamId, pReq->upstreamTaskId, pReq->upstreamNodeId, pMeta->vgId);
    pRsp->status = TASK_DOWNSTREAM_NOT_LEADER;
    return;
  }

  SStreamTask* pTask = NULL;
  int32_t      ret = streamMetaAcquireTask(pMeta, pReq->streamId, downstreamId, &pTask);
  if (pTask == NULL) {
    // Task object not created yet on this vnode; ask the upstream to retry.
    pRsp->status = TASK_DOWNSTREAM_NOT_READY;
    stDebug("tq recv task check(taskId:0x%" PRIx64 "-0x%x not built yet) req(QID:0x%" PRIx64
            ") from task:0x%x (vgId:%d), rsp check_status %d",
            pReq->streamId, downstreamId, pRsp->reqId, pRsp->upstreamTaskId, pRsp->upstreamNodeId, pRsp->status);
    return;
  }

  pRsp->status =
      streamTaskCheckStatus(pTask, pReq->upstreamTaskId, pReq->upstreamNodeId, pReq->stage, &pRsp->oldStage);

  SStreamTaskState curState = streamTaskGetStatus(pTask);
  stDebug("s-task:%s status:%s, stage:%" PRId64 " recv task check req(QID:0x%" PRIx64
          ") task:0x%x (vgId:%d), check_status:%d",
          pTask->id.idStr, curState.name, pRsp->oldStage, pRsp->reqId, pRsp->upstreamTaskId, pRsp->upstreamNodeId,
          pRsp->status);
  streamMetaReleaseTask(pMeta, pTask);
}
27✔
213

214
// Handle a check response arriving from a DOWNSTREAM task.  Updates the
// per-downstream status table; when the last downstream becomes ready the
// task transitions to ready and the rsp monitor is stopped.  On stage
// mismatch or not-leader responses, the offending node is queued for an
// epset update and this task is reported as failed.
// Always returns success/0 except for a stop request or a mismatched
// upstream id (TSDB_CODE_INVALID_MSG).
int32_t streamTaskProcessCheckRsp(SStreamTask* pTask, const SStreamTaskCheckRsp* pRsp) {
  int64_t         now = taosGetTimestampMs();
  const char*     id = pTask->id.idStr;
  STaskCheckInfo* pInfo = &pTask->taskCheckInfo;
  int32_t         total = streamTaskGetNumOfDownstream(pTask);
  int32_t         left = -1;

  if (streamTaskShouldStop(pTask)) {
    stDebug("s-task:%s should stop, do not do check downstream again", id);
    return TSDB_CODE_SUCCESS;
  }

  // The rsp must be addressed to this task; otherwise drop it.
  if (pTask->id.taskId != pRsp->upstreamTaskId) {
    stError("s-task:%s invalid check downstream rsp, upstream task:0x%x discard", id, pRsp->upstreamTaskId);
    return TSDB_CODE_INVALID_MSG;
  }

  if (pRsp->status == TASK_DOWNSTREAM_READY) {
    // Record the ready status; `left` receives the remaining not-ready count.
    int32_t code = streamTaskUpdateCheckInfo(pInfo, pRsp->downstreamTaskId, pRsp->status, now, pRsp->reqId, &left, id);
    if (code != TSDB_CODE_SUCCESS) {
      // stale/unknown rsp already logged inside; swallow the error
      return TSDB_CODE_SUCCESS;
    }

    if (left == 0) {
      processDownstreamReadyRsp(pTask, true);  // all downstream tasks are ready, set the complete check downstream flag
      streamTaskStopMonitorCheckRsp(pInfo, id);
    } else {
      stDebug("s-task:%s (vgId:%d) recv check rsp from task:0x%x (vgId:%d) status:%d, total:%d not ready:%d", id,
              pRsp->upstreamNodeId, pRsp->downstreamTaskId, pRsp->downstreamNodeId, pRsp->status, total, left);
    }
  } else {  // not ready, wait for 100ms and retry
    int32_t code = streamTaskUpdateCheckInfo(pInfo, pRsp->downstreamTaskId, pRsp->status, now, pRsp->reqId, &left, id);
    if (code != TSDB_CODE_SUCCESS) {
      return TSDB_CODE_SUCCESS;  // return success in any cases.
    }

    if (pRsp->status == TASK_UPSTREAM_NEW_STAGE || pRsp->status == TASK_DOWNSTREAM_NOT_LEADER) {
      // Fatal for this check round: the node must update its epset first.
      if (pRsp->status == TASK_UPSTREAM_NEW_STAGE) {
        // Our own node restarted / changed leader (downstream saw a newer stage).
        stError("s-task:%s vgId:%d self vnode-transfer/leader-change/restart detected, old stage:%" PRId64
                ", current stage:%" PRId64 ", not check wait for downstream task nodeUpdate, and all tasks restart",
                id, pRsp->upstreamNodeId, pRsp->oldStage, pTask->pMeta->stage);
        code = streamTaskAddIntoNodeUpdateList(pTask, pRsp->upstreamNodeId);
      } else {
        // The downstream node is no longer the leader for its vgroup.
        stError(
            "s-task:%s downstream taskId:0x%x (vgId:%d) not leader, self dispatch epset needs to be updated, not check "
            "downstream again, nodeUpdate needed",
            id, pRsp->downstreamTaskId, pRsp->downstreamNodeId);
        code = streamTaskAddIntoNodeUpdateList(pTask, pRsp->downstreamNodeId);
      }

      streamMetaAddFailedTaskSelf(pTask, now, true);
    } else {  // TASK_DOWNSTREAM_NOT_READY, rsp-check monitor will retry in 300 ms
      stDebug("s-task:%s (vgId:%d) recv check rsp from task:0x%x (vgId:%d) status:%d, total:%d not ready:%d", id,
              pRsp->upstreamNodeId, pRsp->downstreamTaskId, pRsp->downstreamNodeId, pRsp->status, total, left);
    }
  }

  return 0;
}
273

274
int32_t streamTaskSendCheckRsp(const SStreamMeta* pMeta, int32_t vgId, SStreamTaskCheckRsp* pRsp,
27✔
275
                               SRpcHandleInfo* pRpcInfo, int32_t taskId) {
276
  SEncoder encoder;
277
  int32_t  code = 0;
27✔
278
  int32_t  len;
279

280
  tEncodeSize(tEncodeStreamTaskCheckRsp, pRsp, len, code);
27!
281
  if (code < 0) {
27!
282
    stError("vgId:%d failed to encode task check rsp, s-task:0x%x", pMeta->vgId, taskId);
×
283
    return TSDB_CODE_INVALID_MSG;
×
284
  }
285

286
  void* buf = rpcMallocCont(sizeof(SMsgHead) + len);
27✔
287
  if (buf == NULL) {
27!
288
    stError("s-task:0x%x vgId:%d failed prepare msg, %s at line:%d code:%s", taskId, pMeta->vgId, __func__, __LINE__,
×
289
            tstrerror(code));
290
    return terrno;
×
291
  }
292

293
  ((SMsgHead*)buf)->vgId = htonl(vgId);
27✔
294

295
  void* abuf = POINTER_SHIFT(buf, sizeof(SMsgHead));
27✔
296
  tEncoderInit(&encoder, (uint8_t*)abuf, len);
27✔
297
  code = tEncodeStreamTaskCheckRsp(&encoder, pRsp);
27✔
298
  tEncoderClear(&encoder);
27✔
299

300
  SRpcMsg rspMsg = {.code = 0, .pCont = buf, .contLen = sizeof(SMsgHead) + len, .info = *pRpcInfo};
27✔
301
  tmsgSendRsp(&rspMsg);
27✔
302

303
  code = TMIN(code, 0);
27✔
304
  return code;
27✔
305
}
306

307
// Start (or restart) the periodic monitor that tracks check responses from
// downstream tasks.  Initializes the per-downstream status table and arms the
// rspMonitorFn timer.  No-op when the task is being dropped or a check round
// is already in progress.  All state changes happen under checkInfoLock.
void streamTaskStartMonitorCheckRsp(SStreamTask* pTask) {
  int32_t         vgId = pTask->pMeta->vgId;
  STaskCheckInfo* pInfo = &pTask->taskCheckInfo;

  streamMutexLock(&pInfo->checkInfoLock);

  // drop procedure already started, not start check downstream now
  ETaskStatus s = streamTaskGetStatus(pTask).state;
  if (s == TASK_STATUS__DROPPING) {
    stDebug("s-task:%s task not in uninit status, status:%s not start monitor check-rsp", pTask->id.idStr,
            streamTaskGetStatusStr(s));
    streamMutexUnlock(&pInfo->checkInfoLock);
    return;
  }

  // Sets the in-check flag; fails if a check round is already running.
  int32_t code = streamTaskStartCheckDownstream(pInfo, pTask->id.idStr);
  if (code != TSDB_CODE_SUCCESS) {
    streamMutexUnlock(&pInfo->checkInfoLock);
    return;
  }

  // Reset counters and the status list for this round.
  streamTaskInitTaskCheckInfo(pInfo, &pTask->outputInfo, taosGetTimestampMs());

  // The timer callback owns the ref id and releases it when it quits.
  int64_t* pTaskRefId = NULL;
  code = streamTaskAllocRefId(pTask, &pTaskRefId);
  if (code == 0) {
    streamTmrStart(rspMonitorFn, CHECK_RSP_CHECK_INTERVAL, pTaskRefId, streamTimer, &pInfo->checkRspTmr, vgId,
                   "check-status-monitor");
  }

  streamMutexUnlock(&pInfo->checkInfoLock);
}
339

340
// Ask the check-rsp monitor to stop: raise the stop flag under the
// check-info lock; the rsp-monitor timer callback observes the flag on its
// next tick and quits.
void streamTaskStopMonitorCheckRsp(STaskCheckInfo* pInfo, const char* id) {
  streamMutexLock(&pInfo->checkInfoLock);
  pInfo->stopCheckProcess = 1;
  streamMutexUnlock(&pInfo->checkInfoLock);

  stDebug("s-task:%s set stop check-rsp monitor flag", id);
}
33✔
347

348
// Release resources owned by the check info: the per-downstream status list
// and, if one was started, the check-rsp monitor timer.
void streamTaskCleanupCheckInfo(STaskCheckInfo* pInfo) {
  taosArrayDestroy(pInfo->pList);
  pInfo->pList = NULL;

  if (pInfo->checkRspTmr == NULL) {
    return;  // monitor timer was never started
  }

  streamTmrStop(pInfo->checkRspTmr);
  pInfo->checkRspTmr = NULL;
}
115✔
357

358
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
359
// All downstream tasks reported ready: fire the INIT (or INIT_SCANHIST for a
// fill-history task) event on the state machine, record the launch result in
// the meta, handle the special HALT status, and launch the related
// fill-history task if one exists.  `lock` selects whether the meta-level
// helpers take the meta lock themselves (true) or assume the caller holds it
// (false, used from the sink path inside streamTaskSendCheckMsg).
void processDownstreamReadyRsp(SStreamTask* pTask, bool lock) {
  EStreamTaskEvent event = (pTask->info.fillHistory != STREAM_HISTORY_TASK) ? TASK_EVENT_INIT : TASK_EVENT_INIT_SCANHIST;
  int32_t          code = streamTaskOnHandleEventSuccess(pTask->status.pSM, event, NULL, NULL);
  if (code) {
    stError("s-task:%s failed to set event succ, code:%s", pTask->id.idStr, tstrerror(code));
  }

  // Persist check-start / ready timestamps for this task's launch record.
  int64_t checkTs = pTask->execInfo.checkTs;
  int64_t readyTs = pTask->execInfo.readyTs;
  if (lock) {
    code = streamMetaAddTaskLaunchResult(pTask->pMeta, pTask->id.streamId, pTask->id.taskId, checkTs, readyTs, true);
  } else {
    code = streamMetaAddTaskLaunchResultNoLock(pTask->pMeta, pTask->id.streamId, pTask->id.taskId, checkTs, readyTs, true);
  }

  if (code) {
    stError("s-task:%s failed to record the downstream task status, code:%s", pTask->id.idStr, tstrerror(code));
  }

  if (pTask->status.taskStatus == TASK_STATUS__HALT) {
    // HALT is only expected for a task with a related fill-history task.
    if (!HAS_RELATED_FILLHISTORY_TASK(pTask) || (pTask->info.fillHistory != 0)) {
      stError("s-task:%s status:halt fillhistory:%d not handle the ready rsp", pTask->id.idStr,
              pTask->info.fillHistory);
    }

    // halt itself for count window stream task until the related fill history task completed.
    stDebug("s-task:%s level:%d initial status is %s from mnode, set it to be halt", pTask->id.idStr,
            pTask->info.taskLevel, streamTaskGetStatusStr(pTask->status.taskStatus));
    code = streamTaskHandleEvent(pTask->status.pSM, TASK_EVENT_HALT);
    if (code != 0) {  // todo: handle error
      stError("s-task:%s failed to handle halt event, code:%s", pTask->id.idStr, tstrerror(code));
    }
  }

  // start the related fill-history task, when current task is ready
  // not invoke in success callback due to the deadlock.
  // todo: let's retry
  if (HAS_RELATED_FILLHISTORY_TASK(pTask)) {
    stDebug("s-task:%s try to launch related task", pTask->id.idStr);
    code = streamLaunchFillHistoryTask(pTask, lock);
    if (code) {
      stError("s-task:%s failed to launch related task, code:%s", pTask->id.idStr, tstrerror(code));
    }
  }
}
28✔
404

405
// Queue `nodeId` for an epset update, skipping duplicates.  The list is
// consumed elsewhere when the task refreshes its dispatch epsets.
// Returns 0, or terrno when the push into the list fails.
int32_t streamTaskAddIntoNodeUpdateList(SStreamTask* pTask, int32_t nodeId) {
  int32_t vgId = pTask->pMeta->vgId;
  int32_t ret = 0;

  streamMutexLock(&pTask->lock);

  SArray* pList = pTask->outputInfo.pNodeEpsetUpdateList;
  int32_t count = taosArrayGetSize(pList);

  // Scan for an existing entry with the same node id.
  bool found = false;
  for (int32_t idx = 0; (idx < count) && !found; ++idx) {
    SDownstreamTaskEpset* pEntry = taosArrayGet(pList, idx);
    if ((pEntry != NULL) && (pEntry->nodeId == nodeId)) {
      found = true;
    }
  }

  if (!found) {
    SDownstreamTaskEpset entry = {.nodeId = nodeId};

    if (taosArrayPush(pList, &entry) == NULL) {
      ret = terrno;
      stError("s-task:%s vgId:%d failed to update epset, code:%s", pTask->id.idStr, vgId, tstrerror(ret));
    } else {
      stInfo("s-task:%s vgId:%d downstream nodeId:%d needs to be updated, total needs updated:%d", pTask->id.idStr,
             vgId, entry.nodeId, (count + 1));
    }
  }

  streamMutexUnlock(&pTask->lock);
  return ret;
}
441

442
// Prepare the check info for a new check-downstream round: clear stale status
// entries, set the expected number of responses according to the dispatch
// type, and stamp the round's start timestamps.
void streamTaskInitTaskCheckInfo(STaskCheckInfo* pInfo, STaskOutputInfo* pOutputInfo, int64_t startTs) {
  taosArrayClear(pInfo->pList);

  // Expected check-rsp count depends on how many downstream tasks exist.
  if (pOutputInfo->type == TASK_OUTPUT__FIXED_DISPATCH) {
    pInfo->notReadyTasks = 1;  // exactly one downstream task
  } else if (pOutputInfo->type == TASK_OUTPUT__SHUFFLE_DISPATCH) {
    pInfo->notReadyTasks = taosArrayGetSize(pOutputInfo->shuffleDispatcher.dbInfo.pVgroupInfos);
  } else if (pOutputInfo->type == TASK_OUTPUT__VTABLE_MAP) {
    pInfo->notReadyTasks = taosArrayGetSize(pOutputInfo->vtableMapDispatcher.taskInfos);
  }
  // sink tasks have no downstream; notReadyTasks is left untouched

  pInfo->startTs = startTs;
  pInfo->timeoutStartTs = startTs;
  pInfo->stopCheckProcess = 0;
}
15✔
457

458
// Look up the status entry for `taskId` in pInfo->pList; *pStatusInfo is set
// to the entry, or NULL when not found (or when pStatusInfo itself is NULL).
// Caller must hold checkInfoLock.
//
// Fix: stop scanning once the entry is found — task ids are unique in pList
// (streamTaskAddReqInfo refuses duplicates), so the remainder of the loop
// was wasted work.
void findCheckRspStatus(STaskCheckInfo* pInfo, int32_t taskId, SDownstreamStatusInfo** pStatusInfo) {
  if (pStatusInfo == NULL) {
    return;
  }

  *pStatusInfo = NULL;
  for (int32_t j = 0; j < taosArrayGetSize(pInfo->pList); ++j) {
    SDownstreamStatusInfo* p = taosArrayGet(pInfo->pList, j);
    if (p == NULL) {
      continue;
    }

    if (p->taskId == taskId) {
      *pStatusInfo = p;
      break;  // ids are unique; no need to scan further
    }
  }
}
475

476
// Record a check response for one downstream task.  Validates that the rsp
// matches the outstanding request id, decrements the not-ready counter on a
// first-time READY transition, and reports the remaining not-ready count via
// *pNotReady.  Returns TSDB_CODE_FAILED for an expired or unknown rsp.
// All updates happen under checkInfoLock.
int32_t streamTaskUpdateCheckInfo(STaskCheckInfo* pInfo, int32_t taskId, int32_t status, int64_t rspTs, int64_t reqId,
                                  int32_t* pNotReady, const char* id) {
  SDownstreamStatusInfo* p = NULL;

  streamMutexLock(&pInfo->checkInfoLock);
  findCheckRspStatus(pInfo, taskId, &p);
  if (p != NULL) {
    // A mismatched reqId means the rsp belongs to a previous (re-sent) check
    // request; drop it.
    if (reqId != p->reqId) {
      stError("s-task:%sQID:0x%" PRIx64 " expected:0x%" PRIx64
              " expired check-rsp recv from downstream task:0x%x, discarded",
              id, reqId, p->reqId, taskId);
      streamMutexUnlock(&pInfo->checkInfoLock);
      return TSDB_CODE_FAILED;
    }

    // subtract one not-ready-task, since it is ready now
    if ((p->status != TASK_DOWNSTREAM_READY) && (status == TASK_DOWNSTREAM_READY)) {
      *pNotReady = atomic_sub_fetch_32(&pInfo->notReadyTasks, 1);
    } else {
      *pNotReady = pInfo->notReadyTasks;
    }

    p->status = status;
    p->rspTs = rspTs;

    streamMutexUnlock(&pInfo->checkInfoLock);
    return TSDB_CODE_SUCCESS;
  }

  // No matching entry: the rsp refers to a downstream we never queried.
  streamMutexUnlock(&pInfo->checkInfoLock);
  stError("s-task:%s unexpected check rsp msg, invalid downstream task:0x%x, QID:0x%" PRIx64 " discarded", id, taskId,
          reqId);
  return TSDB_CODE_FAILED;
}
510

511
// Try to enter the check-downstream procedure by raising the in-check flag.
// Fails (TSDB_CODE_FAILED) when a round is already running, in which case the
// stop flag is cleared so the running round keeps going.
// Caller must hold checkInfoLock.
int32_t streamTaskStartCheckDownstream(STaskCheckInfo* pInfo, const char* id) {
  if (pInfo->inCheckProcess != 0) {
    stError("s-task:%s already in check procedure, checkTs:%" PRId64 ", start monitor check rsp failed", id,
            pInfo->startTs);
    pInfo->stopCheckProcess = 0;  // disable auto stop of check process
    return TSDB_CODE_FAILED;
  }

  pInfo->inCheckProcess = 1;
  stDebug("s-task:%s set the in check-rsp flag", id);
  return TSDB_CODE_SUCCESS;
}
524

525
// Finish the current check-downstream round: clear the in-check flag, reset
// all counters/timestamps and drop the per-downstream status entries.
// `lock` selects whether this function takes checkInfoLock itself or assumes
// the caller already holds it.
void streamTaskCompleteCheckRsp(STaskCheckInfo* pInfo, bool lock, const char* id) {
  if (lock) {
    streamMutexLock(&pInfo->checkInfoLock);
  }

  if (!pInfo->inCheckProcess) {
    stDebug("s-task:%s already not in check-rsp procedure", id);
  } else {
    int64_t elapsed = (pInfo->startTs != 0) ? (taosGetTimestampMs() - pInfo->startTs) : 0;
    stDebug("s-task:%s clear the in check-rsp flag, set the check-rsp done, elapsed time:%" PRId64 " ms", id, elapsed);

    // Reset every per-round field back to its idle value.
    pInfo->startTs = 0;
    pInfo->timeoutStartTs = 0;
    pInfo->notReadyTasks = 0;
    pInfo->inCheckProcess = 0;
    pInfo->stopCheckProcess = 0;

    pInfo->notReadyRetryCount = 0;
    pInfo->timeoutRetryCount = 0;

    taosArrayClear(pInfo->pList);
  }

  if (lock) {
    streamMutexUnlock(&pInfo->checkInfoLock);
  }
}
15✔
552

553
// todo: retry until success
554
// Register an outstanding check request for downstream `taskId` so the
// matching rsp can later be validated against `reqId`.  Skips the insert if
// an entry for the task already exists.
//
// Fix: a failed taosArrayPush was silently ignored (bare "todo: retry");
// it is now logged so a lost entry is at least visible.  todo: retry.
void streamTaskAddReqInfo(STaskCheckInfo* pInfo, int64_t reqId, int32_t taskId, int32_t vgId, const char* id) {
  SDownstreamStatusInfo info = {.taskId = taskId, .status = -1, .vgId = vgId, .reqId = reqId, .rspTs = 0};
  streamMutexLock(&pInfo->checkInfoLock);

  SDownstreamStatusInfo* p = NULL;
  findCheckRspStatus(pInfo, taskId, &p);
  if (p != NULL) {
    stDebug("s-task:%s check info to task:0x%x already sent", id, taskId);
    streamMutexUnlock(&pInfo->checkInfoLock);
    return;
  }

  void* px = taosArrayPush(pInfo->pList, &info);
  if (px == NULL) {
    // without this entry, the rsp for reqId will be rejected as unknown
    stError("s-task:%s failed to add check req info for task:0x%x, code:%s", id, taskId, tstrerror(terrno));
  }

  streamMutexUnlock(&pInfo->checkInfoLock);
}
573

574
// Re-send a check request to the single downstream task described by `p`
// (used by the rsp monitor for not-ready / timed-out downstreams).  A fresh
// reqId is generated and stored in `p` so the old outstanding rsp is
// invalidated.  For shuffle/vtable outputs the matching dispatch entry is
// located by task id.  Returns the send result code.
int32_t doSendCheckMsg(SStreamTask* pTask, SDownstreamStatusInfo* p) {
  const char* id = pTask->id.idStr;
  int32_t     code = 0;

  SStreamTaskCheckReq req = {
      .streamId = pTask->id.streamId,
      .upstreamTaskId = pTask->id.taskId,
      .upstreamNodeId = pTask->info.nodeId,
      .childId = pTask->info.selfChildId,
      .stage = pTask->pMeta->stage,
  };

  // update the reqId for the new check msg
  p->reqId = tGenIdPI64();

  STaskOutputInfo* pOutputInfo = &pTask->outputInfo;
  if (pOutputInfo->type == TASK_OUTPUT__FIXED_DISPATCH) {
    // Single fixed downstream: target is the fixed dispatcher itself.
    STaskDispatcherFixed* pDispatch = &pOutputInfo->fixedDispatcher;
    setCheckDownstreamReqInfo(&req, p->reqId, pDispatch->taskId, pDispatch->nodeId);

    stDebug("s-task:%s (vgId:%d) stage:%" PRId64 " re-send check downstream task:0x%x(vgId:%d) QID:0x%" PRIx64, id,
            pTask->info.nodeId, req.stage, req.downstreamTaskId, req.downstreamNodeId, req.reqId);

    code = streamSendCheckMsg(pTask, &req, pOutputInfo->fixedDispatcher.nodeId, &pOutputInfo->fixedDispatcher.epSet);
  } else if (pOutputInfo->type == TASK_OUTPUT__SHUFFLE_DISPATCH) {
    // Find the vgroup whose downstream task matches p->taskId.
    SArray* vgInfo = pOutputInfo->shuffleDispatcher.dbInfo.pVgroupInfos;
    int32_t numOfVgs = taosArrayGetSize(vgInfo);

    for (int32_t i = 0; i < numOfVgs; i++) {
      SVgroupInfo* pVgInfo = taosArrayGet(vgInfo, i);
      if (pVgInfo == NULL) {
        continue;
      }

      if (p->taskId == pVgInfo->taskId) {
        setCheckDownstreamReqInfo(&req, p->reqId, pVgInfo->taskId, pVgInfo->vgId);

        stDebug("s-task:%s (vgId:%d) stage:%" PRId64
                " re-send check downstream task:0x%x(vgId:%d) (shuffle), idx:%d QID:0x%" PRIx64,
                id, pTask->info.nodeId, req.stage, req.downstreamTaskId, req.downstreamNodeId, i, p->reqId);
        code = streamSendCheckMsg(pTask, &req, pVgInfo->vgId, &pVgInfo->epSet);
        break;
      }
    }
  } else if (pOutputInfo->type == TASK_OUTPUT__VTABLE_MAP) {
    // Find the vtable-map entry whose downstream task matches p->taskId.
    SArray* pTaskInfos = pTask->outputInfo.vtableMapDispatcher.taskInfos;
    int32_t numTasks = taosArrayGetSize(pTaskInfos);

    for (int32_t i = 0; i < numTasks; ++i) {
      STaskDispatcherFixed* pAddr = taosArrayGet(pTaskInfos, i);
      if (pAddr == NULL) {
        continue;
      }

      if (p->taskId == pAddr->taskId) {
        setCheckDownstreamReqInfo(&req, p->reqId, pAddr->taskId, pAddr->nodeId);

        stDebug("s-task:%s (vgId:%d) stage:%" PRId64
                " re-send check vtable downstream task:0x%x(vgId:%d), QID:0x%" PRIx64,
                id, pTask->info.nodeId, req.stage, req.downstreamTaskId, req.downstreamNodeId, p->reqId);
        code = streamSendCheckMsg(pTask, &req, pAddr->nodeId, &pAddr->epSet);
        break;
      }
    }
  }

  if (code) {
    stError("s-task:%s failed to send check msg to downstream, code:%s", pTask->id.idStr, tstrerror(code));
  }
  return code;
}
645

646
// Classify every downstream status entry for the rsp monitor: count ready,
// faulted (new-stage / not-leader) and still-silent downstreams, and collect
// the ids of timed-out (no rsp within CHECK_NOT_RSP_DURATION) and explicitly
// not-ready downstreams into the caller-provided lists.  `el` is the time
// elapsed since the check round started.  Caller must hold checkInfoLock.
void getCheckRspStatus(STaskCheckInfo* pInfo, int64_t el, int32_t* numOfReady, int32_t* numOfFault,
                       int32_t* numOfNotRsp, SArray* pTimeoutList, SArray* pNotReadyList, const char* id) {
  for (int32_t i = 0; i < taosArrayGetSize(pInfo->pList); ++i) {
    SDownstreamStatusInfo* p = taosArrayGet(pInfo->pList, i);
    if (p == NULL) {
      continue;
    }

    if (p->status == TASK_DOWNSTREAM_READY) {
      (*numOfReady) += 1;
    } else if (p->status == TASK_UPSTREAM_NEW_STAGE || p->status == TASK_DOWNSTREAM_NOT_LEADER) {
      stDebug("s-task:%s recv status:NEW_STAGE/NOT_LEADER from downstream, task:0x%x, quit from check downstream", id,
              p->taskId);
      (*numOfFault) += 1;
    } else {                                 // TASK_DOWNSTREAM_NOT_READY
      if (p->rspTs == 0) {                   // not response yet
        if (el >= CHECK_NOT_RSP_DURATION) {  // no rsp within CHECK_NOT_RSP_DURATION (60 sec): treat as timed out
          void* px = taosArrayPush(pTimeoutList, &p->taskId);
          if (px == NULL) {
            stError("s-task:%s failed to record time out task:0x%x", id, p->taskId);
          }
        } else {                // el < CHECK_NOT_RSP_DURATION
          (*numOfNotRsp) += 1;  // do nothing and continue waiting for their rsp
        }
      } else {
        // responded, but explicitly not ready: schedule a re-check
        void* px = taosArrayPush(pNotReadyList, &p->taskId);
        if (px == NULL) {
          stError("s-task:%s failed to record not ready task:0x%x", id, p->taskId);
        }
      }
    }
  }
}
5✔
679

680
// Stamp the per-target fields (request id plus downstream task/node pair)
// onto an otherwise prepared check request.
void setCheckDownstreamReqInfo(SStreamTaskCheckReq* pReq, int64_t reqId, int32_t dstTaskId, int32_t dstNodeId) {
  pReq->downstreamTaskId = dstTaskId;
  pReq->downstreamNodeId = dstNodeId;
  pReq->reqId = reqId;
}
27✔
685

686
// Handle downstream tasks that did not respond to the check msg in time:
// re-send the check msg to each of them, and after more than 10 consecutive
// timeout rounds (i.e. timeout for more than 600 sec in total) put the
// related vgroups into the node-update list so their epset can be refreshed.
// NOTE: failures of doSendCheckMsg/streamTaskAddIntoNodeUpdateList were
// previously ignored; they are now logged so errors are not silently dropped.
void handleTimeoutDownstreamTasks(SStreamTask* pTask, SArray* pTimeoutList) {
  STaskCheckInfo* pInfo = &pTask->taskCheckInfo;
  const char*     id = pTask->id.idStr;
  int32_t         vgId = pTask->pMeta->vgId;
  int32_t         numOfTimeout = taosArrayGetSize(pTimeoutList);
  int32_t         code = 0;

  pInfo->timeoutStartTs = taosGetTimestampMs();
  for (int32_t i = 0; i < numOfTimeout; ++i) {
    int32_t* px = taosArrayGet(pTimeoutList, i);
    if (px == NULL) {
      continue;
    }

    int32_t                taskId = *px;
    SDownstreamStatusInfo* p = NULL;
    findCheckRspStatus(pInfo, taskId, &p);

    if (p != NULL) {
      // a timed-out entry must still be in the initial state (no status, no rspTs)
      if (p->status != -1 || p->rspTs != 0) {
        stError("s-task:%s invalid rsp record entry, index:%d, status:%d, rspTs:%" PRId64, id, i, p->status, p->rspTs);
        continue;
      }

      code = doSendCheckMsg(pTask, p);
      if (code) {  // log and keep processing the remaining timed-out tasks
        stError("s-task:%s failed to send check msg to downstream task:0x%x, code:%s", id, p->taskId, tstrerror(code));
      }
    }
  }

  pInfo->timeoutRetryCount += 1;

  // timeout more than 600 sec, add into node update list
  if (pInfo->timeoutRetryCount > 10) {
    pInfo->timeoutRetryCount = 0;

    for (int32_t i = 0; i < numOfTimeout; ++i) {
      int32_t* pTaskId = taosArrayGet(pTimeoutList, i);
      if (pTaskId == NULL) {
        continue;
      }

      SDownstreamStatusInfo* p = NULL;
      findCheckRspStatus(pInfo, *pTaskId, &p);
      if (p != NULL) {
        code = streamTaskAddIntoNodeUpdateList(pTask, p->vgId);
        if (code) {  // log and keep processing the remaining timed-out tasks
          stError("s-task:%s failed to add vgId:%d into nodeUpdate list, code:%s", id, p->vgId, tstrerror(code));
        }
        stDebug("s-task:%s vgId:%d downstream task:0x%x (vgId:%d) timeout more than 600sec, add into nodeUpdate list",
                id, vgId, p->taskId, p->vgId);
      }
    }

    stDebug("s-task:%s vgId:%d %d downstream task(s) all add into nodeUpate list", id, vgId, numOfTimeout);
  } else {
    stDebug("s-task:%s vgId:%d %d downstream task(s) timeout, send check msg again, retry:%d start time:%" PRId64, id,
            vgId, numOfTimeout, pInfo->timeoutRetryCount, pInfo->timeoutStartTs);
  }
}
740

741
// Handle downstream tasks that responded but reported NOT_READY: reset the
// rsp record of each one (rspTs=0, status=-1) and send the check msg again.
// The notReadyRetryCount is used for diagnostics only.
// NOTE: the return code of doSendCheckMsg was previously assigned to an
// unused local and silently dropped; it is now logged on failure.
void handleNotReadyDownstreamTask(SStreamTask* pTask, SArray* pNotReadyList) {
  STaskCheckInfo* pInfo = &pTask->taskCheckInfo;
  const char*     id = pTask->id.idStr;
  int32_t         vgId = pTask->pMeta->vgId;
  int32_t         numOfNotReady = taosArrayGetSize(pNotReadyList);

  // reset the info, and send the check msg to failure downstream again
  for (int32_t i = 0; i < numOfNotReady; ++i) {
    int32_t* pTaskId = taosArrayGet(pNotReadyList, i);
    if (pTaskId == NULL) {
      continue;
    }

    SDownstreamStatusInfo* p = NULL;
    findCheckRspStatus(pInfo, *pTaskId, &p);
    if (p != NULL) {
      p->rspTs = 0;
      p->status = -1;

      int32_t code = doSendCheckMsg(pTask, p);
      if (code) {  // log and keep processing the remaining not-ready tasks
        stError("s-task:%s failed to send check msg to downstream task:0x%x, code:%s", id, p->taskId, tstrerror(code));
      }
    }
  }

  pInfo->notReadyRetryCount += 1;
  stDebug("s-task:%s vgId:%d %d downstream task(s) not ready, send check msg again, retry:%d start time:%" PRId64, id,
          vgId, numOfNotReady, pInfo->notReadyRetryCount, pInfo->startTs);
}
767

768
// the action of add status may incur the restart procedure, which should NEVER be executed in the timer thread.
769
// The restart of all tasks requires that all tasks should not have active timer for now. Therefore, the execution
770
// of restart in timer thread will result in a deadlock.
771
int32_t addDownstreamFailedStatusResultAsync(SMsgCb* pMsgCb, int32_t vgId, int64_t streamId, int32_t taskId) {
×
772
  return streamTaskSchedTask(pMsgCb, vgId, streamId, taskId, STREAM_EXEC_T_ADD_FAILED_TASK, false);
×
773
}
774

775
// Common exit path for rspMonitorFn: drop the task ref, free the two temp
// status lists (NULL-safe), and release the heap-allocated ref-id param.
static void doCleanup(SStreamTask* pTask, SArray* pNotReadyList, SArray* pTimeoutList, void* param) {
  streamMetaReleaseTask(pTask->pMeta, pTask);

  taosArrayDestroy(pTimeoutList);
  taosArrayDestroy(pNotReadyList);

  streamTaskFreeRefId(param);
}
782

783
// this function is executed in timer thread
//
// Periodic monitor of the check-downstream responses. Each round it:
//   1) acquires the task via its ref id (param owns a heap int64_t ref id),
//   2) bails out if the task reached STOP/DROPPING/READY or all downstream
//      tasks are ready,
//   3) otherwise classifies the per-downstream rsp records, re-sends check
//      msgs to not-ready/timed-out tasks, and re-arms itself via
//      streamTmrStart.
// Every exit path must release the task ref, the temp lists and the ref-id
// param through doCleanup (the re-arm path passes NULL so the param stays
// owned by the next timer invocation).
void rspMonitorFn(void* param, void* tmrId) {
  int32_t         numOfReady = 0;
  int32_t         numOfFault = 0;
  int32_t         numOfNotRsp = 0;
  int32_t         numOfNotReady = 0;
  int32_t         numOfTimeout = 0;
  int64_t         taskRefId = *(int64_t*)param;
  int64_t         now = taosGetTimestampMs();
  SArray*         pNotReadyList = NULL;
  SArray*         pTimeoutList = NULL;
  SStreamMeta*    pMeta = NULL;
  STaskCheckInfo* pInfo = NULL;
  int32_t         vgId = -1;
  int64_t         timeoutDuration = 0;
  const char*     id = NULL;
  int32_t         total = 0;

  // the ref may already be gone if the task was dropped; then only free the param
  SStreamTask* pTask = taosAcquireRef(streamTaskRefPool, taskRefId);
  if (pTask == NULL) {
    stError("invalid task rid:%" PRId64 " failed to acquired stream-task at %s", taskRefId, __func__);
    streamTaskFreeRefId(param);
    return;
  }

  pMeta = pTask->pMeta;
  pInfo = &pTask->taskCheckInfo;
  vgId = pTask->pMeta->vgId;
  timeoutDuration = now - pInfo->timeoutStartTs;  // elapsed since the last (re)send of check msgs
  id = pTask->id.idStr;
  total = (int32_t) taosArrayGetSize(pInfo->pList);

  stDebug("s-task:%s start to do check-downstream-rsp check in tmr", id);

  // read the task state under the task lock; checkInfoLock is taken separately below
  streamMutexLock(&pTask->lock);
  SStreamTaskState state = streamTaskGetStatus(pTask);
  streamMutexUnlock(&pTask->lock);

  if (state.state == TASK_STATUS__STOP) {
    stDebug("s-task:%s status:%s vgId:%d quit from monitor check-rsp tmr", id, state.name, vgId);
    streamTaskCompleteCheckRsp(pInfo, true, id);

    // not record the failure of the current task if try to close current vnode
    // otherwise, the put of message operation may incur invalid read of message queue.
    if (!pMeta->closeFlag) {
      int32_t code = addDownstreamFailedStatusResultAsync(pTask->pMsgCb, vgId, pTask->id.streamId, pTask->id.taskId);
      if (code) {
        stError("s-task:%s failed to create async record start failed task, code:%s", id, tstrerror(code));
      }
    }

    doCleanup(pTask, pNotReadyList, pTimeoutList, param);
    return;
  }

  // DROPPING: task is going away; READY: downstream check already completed elsewhere
  if (state.state == TASK_STATUS__DROPPING || state.state == TASK_STATUS__READY) {
    stDebug("s-task:%s status:%s vgId:%d quit from monitor check-rsp tmr", id, state.name, vgId);

    streamTaskCompleteCheckRsp(pInfo, true, id);
    doCleanup(pTask, pNotReadyList, pTimeoutList, param);
    return;
  }

  // from here on the check info is inspected/updated under checkInfoLock
  streamMutexLock(&pInfo->checkInfoLock);
  if (pInfo->notReadyTasks == 0) {
    stDebug("s-task:%s status:%s vgId:%d all downstream ready, quit from monitor rsp tmr", id, state.name, vgId);

    streamTaskCompleteCheckRsp(pInfo, false, id);  // false: lock already held
    streamMutexUnlock(&pInfo->checkInfoLock);
    doCleanup(pTask, pNotReadyList, pTimeoutList, param);
    return;
  }

  pNotReadyList = taosArrayInit(4, sizeof(int64_t));
  pTimeoutList = taosArrayInit(4, sizeof(int64_t));

  if (state.state == TASK_STATUS__UNINIT) {
    getCheckRspStatus(pInfo, timeoutDuration, &numOfReady, &numOfFault, &numOfNotRsp, pTimeoutList, pNotReadyList, id);

    numOfNotReady = (int32_t)taosArrayGetSize(pNotReadyList);
    numOfTimeout = (int32_t)taosArrayGetSize(pTimeoutList);

    // fault tasks detected, not try anymore
    bool jumpOut = false;
    // sanity check: the five buckets must partition the whole downstream list
    if ((numOfReady + numOfFault + numOfNotReady + numOfTimeout + numOfNotRsp) != total) {
      stError(
          "s-task:%s vgId:%d internal error in handling the check downstream procedure, rsp number is inconsistent, "
          "stop rspMonitor tmr, total:%d, notRsp:%d, notReady:%d, fault:%d, timeout:%d, ready:%d",
          id, vgId, total, numOfNotRsp, numOfNotReady, numOfFault, numOfTimeout, numOfReady);
      jumpOut = true;
    }

    if (numOfFault > 0) {
      stDebug(
          "s-task:%s status:%s vgId:%d all rsp. quit from monitor rsp tmr, since vnode-transfer/leader-change/restart "
          "detected, total:%d, notRsp:%d, notReady:%d, fault:%d, timeout:%d, ready:%d",
          id, state.name, vgId, total, numOfNotRsp, numOfNotReady, numOfFault, numOfTimeout, numOfReady);
      jumpOut = true;
    }

    if (jumpOut) {
      streamTaskCompleteCheckRsp(pInfo, false, id);
      streamMutexUnlock(&pInfo->checkInfoLock);
      doCleanup(pTask, pNotReadyList, pTimeoutList, param);
      return;
    }
  } else {  // unexpected status
    stError("s-task:%s unexpected task status:%s during waiting for check rsp", id, state.name);
  }

  // checking of downstream tasks has been stopped by other threads
  if (pInfo->stopCheckProcess == 1) {
    stDebug(
        "s-task:%s status:%s vgId:%d stopped by other threads to check downstream process, total:%d, notRsp:%d, "
        "notReady:%d, fault:%d, timeout:%d, ready:%d",
        id, state.name, vgId, total, numOfNotRsp, numOfNotReady, numOfFault, numOfTimeout, numOfReady);

    streamTaskCompleteCheckRsp(pInfo, false, id);
    streamMutexUnlock(&pInfo->checkInfoLock);

    // record the current task as start-failed, handled asynchronously (see comment above
    // addDownstreamFailedStatusResultAsync: must not run in this timer thread)
    int32_t code = addDownstreamFailedStatusResultAsync(pTask->pMsgCb, vgId, pTask->id.streamId, pTask->id.taskId);
    if (code) {
      stError("s-task:%s failed to create async record start failed task, code:%s", id, tstrerror(code));
    }

    doCleanup(pTask, pNotReadyList, pTimeoutList, param);
    return;
  }

  if (numOfNotReady > 0) {  // check to make sure not in recheck timer
    handleNotReadyDownstreamTask(pTask, pNotReadyList);
  }

  if (numOfTimeout > 0) {
    handleTimeoutDownstreamTasks(pTask, pTimeoutList);
  }

  // keep waiting: re-arm this timer; param (the ref id) is handed to the next invocation,
  // hence doCleanup below is called with NULL so it is not freed here
  streamTmrStart(rspMonitorFn, CHECK_RSP_CHECK_INTERVAL, param, streamTimer, &pInfo->checkRspTmr, vgId,
                 "check-status-monitor");
  streamMutexUnlock(&pInfo->checkInfoLock);

  stDebug(
      "s-task:%s vgId:%d continue checking rsp in 300ms, total:%d, notRsp:%d, notReady:%d, fault:%d, timeout:%d, "
      "ready:%d",
      id, vgId, total, numOfNotRsp, numOfNotReady, numOfFault, numOfTimeout, numOfReady);
  doCleanup(pTask, pNotReadyList, pTimeoutList, NULL);
}
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc