• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

taosdata / TDengine / #3825

01 Apr 2025 11:58AM UTC coverage: 34.067% (+0.003%) from 34.064%
#3825

push

travis-ci

happyguoxy
test:alter gcda dir

148492 of 599532 branches covered (24.77%)

Branch coverage included in aggregate %.

222504 of 489471 relevant lines covered (45.46%)

762290.2 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

55.84
/source/libs/stream/src/streamCheckStatus.c
1
/*
2
 * Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
3
 *
4
 * This program is free software: you can use, redistribute, and/or modify
5
 * it under the terms of the GNU Affero General Public License, version 3
6
 * or later ("AGPL"), as published by the Free Software Foundation.
7
 *
8
 * This program is distributed in the hope that it will be useful, but WITHOUT
9
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10
 * FITNESS FOR A PARTICULAR PURPOSE.
11
 *
12
 * You should have received a copy of the GNU Affero General Public License
13
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
14
 */
15

16
#include "cos.h"
17
#include "rsync.h"
18
#include "streamBackendRocksdb.h"
19
#include "streamInt.h"
20

21
// Duration (ms) after which a downstream task that has not responded to a
// check request is treated as timed out. Parenthesized so the macro expands
// safely inside any expression (CERT PRE01-C).
#define CHECK_NOT_RSP_DURATION (60 * 1000)  // 60 sec
22

23
static void    processDownstreamReadyRsp(SStreamTask* pTask, bool lock);
24
static void    rspMonitorFn(void* param, void* tmrId);
25
static void    streamTaskInitTaskCheckInfo(STaskCheckInfo* pInfo, STaskOutputInfo* pOutputInfo, int64_t startTs);
26
static int32_t streamTaskStartCheckDownstream(STaskCheckInfo* pInfo, const char* id);
27
static void    streamTaskCompleteCheckRsp(STaskCheckInfo* pInfo, bool lock, const char* id);
28
static void    streamTaskAddReqInfo(STaskCheckInfo* pInfo, int64_t reqId, int32_t taskId, int32_t vgId, const char* id);
29
static int32_t doSendCheckMsg(SStreamTask* pTask, SDownstreamStatusInfo* p);
30
static void    handleTimeoutDownstreamTasks(SStreamTask* pTask, SArray* pTimeoutList);
31
static void    handleNotReadyDownstreamTask(SStreamTask* pTask, SArray* pNotReadyList);
32
static int32_t streamTaskUpdateCheckInfo(STaskCheckInfo* pInfo, int32_t taskId, int32_t status, int64_t rspTs,
33
                                         int64_t reqId, int32_t* pNotReady, const char* id);
34
static void setCheckDownstreamReqInfo(SStreamTaskCheckReq* pReq, int64_t reqId, int32_t dstTaskId, int32_t dstNodeId);
35
static void getCheckRspStatus(STaskCheckInfo* pInfo, int64_t el, int32_t* numOfReady, int32_t* numOfFault,
36
                              int32_t* numOfNotRsp, SArray* pTimeoutList, SArray* pNotReadyList, const char* id);
37
static int32_t addDownstreamFailedStatusResultAsync(SMsgCb* pMsgCb, int32_t vgId, int64_t streamId, int32_t taskId);
38
static void    findCheckRspStatus(STaskCheckInfo* pInfo, int32_t taskId, SDownstreamStatusInfo** pStatusInfo);
39

40
// Handle a check request from an upstream task: validate the upstream's stage
// value against the locally recorded one and report this task's readiness.
// Returns TASK_DOWNSTREAM_READY / TASK_DOWNSTREAM_NOT_READY /
// TASK_UPSTREAM_NEW_STAGE, TSDB_CODE_STREAM_TASK_NOT_EXIST when the upstream
// ep-info is unknown, or 0 for an invalid (-1) incoming stage.
// *oldStage receives the stage recorded before any update.
int32_t streamTaskCheckStatus(SStreamTask* pTask, int32_t upstreamTaskId, int32_t vgId, int64_t stage,
                              int64_t* oldStage) {
  SStreamUpstreamEpInfo* pInfo = NULL;
  streamTaskGetUpstreamTaskEpInfo(pTask, upstreamTaskId, &pInfo);
  if (pInfo == NULL) {
    return TSDB_CODE_STREAM_TASK_NOT_EXIST;
  }

  // report the stage as it was before this request may update it
  *oldStage = pInfo->stage;
  const char* id = pTask->id.idStr;
  if (stage == -1) {
    stDebug("s-task:%s receive check msg from upstream task:0x%x(vgId:%d), invalid stageId:%" PRId64 ", not ready", id,
            upstreamTaskId, vgId, stage);
    return 0;
  }

  // first check msg from this upstream: adopt its stage value
  if (pInfo->stage == -1) {
    pInfo->stage = stage;
    stDebug("s-task:%s receive check msg from upstream task:0x%x(vgId:%d) first time, init stage value:%" PRId64, id,
            upstreamTaskId, vgId, stage);
  }

  // a larger stage means the upstream vnode restarted / transferred leadership
  if (pInfo->stage < stage) {
    stError("s-task:%s receive check msg from upstream task:0x%x(vgId:%d), new stage received:%" PRId64
            ", prev:%" PRId64,
            id, upstreamTaskId, vgId, stage, pInfo->stage);
    // record the checkpoint failure id and sent to mnode
    streamTaskSetCheckpointFailed(pTask);
  }

  // NOTE(review): the recorded stage is NOT updated above on mismatch, so any
  // difference (larger or smaller) reports TASK_UPSTREAM_NEW_STAGE here.
  if (pInfo->stage != stage) {
    return TASK_UPSTREAM_NEW_STAGE;
  } else if (pTask->status.downstreamReady != 1) {
    stDebug("s-task:%s vgId:%d leader:%d, downstream not ready", id, vgId, (pTask->pMeta->role == NODE_ROLE_LEADER));
    return TASK_DOWNSTREAM_NOT_READY;
  } else {
    return TASK_DOWNSTREAM_READY;
  }
}
79

80
// check status
81
// Send check requests to every downstream task (fixed, shuffle, or vtable-map
// dispatch) and start the check-rsp monitor timer; a sink task with no
// downstream is marked ready immediately.
void streamTaskSendCheckMsg(SStreamTask* pTask) {
  SDataRange*  pRange = &pTask->dataRange;
  STimeWindow* pWindow = &pRange->window;
  const char*  idstr = pTask->id.idStr;
  int32_t      code = 0;

  // common part of the request; per-target fields are filled by
  // setCheckDownstreamReqInfo() just before each send
  SStreamTaskCheckReq req = {
      .streamId = pTask->id.streamId,
      .upstreamTaskId = pTask->id.taskId,
      .upstreamNodeId = pTask->info.nodeId,
      .childId = pTask->info.selfChildId,
      .stage = pTask->pMeta->stage,
  };

  // serialize streamProcessScanHistoryFinishRsp
  if (pTask->outputInfo.type == TASK_OUTPUT__FIXED_DISPATCH) {
    // single downstream target
    streamTaskStartMonitorCheckRsp(pTask);

    STaskDispatcherFixed* pDispatch = &pTask->outputInfo.fixedDispatcher;

    setCheckDownstreamReqInfo(&req, tGenIdPI64(), pDispatch->taskId, pDispatch->nodeId);
    streamTaskAddReqInfo(&pTask->taskCheckInfo, req.reqId, pDispatch->taskId, pDispatch->nodeId, idstr);

    stDebug("s-task:%s (vgId:%d) stage:%" PRId64 " check single downstream task:0x%x(vgId:%d) ver:%" PRId64 "-%" PRId64
            " window:%" PRId64 "-%" PRId64 " QID:0x%" PRIx64,
            idstr, pTask->info.nodeId, req.stage, req.downstreamTaskId, req.downstreamNodeId, pRange->range.minVer,
            pRange->range.maxVer, pWindow->skey, pWindow->ekey, req.reqId);

    code = streamSendCheckMsg(pTask, &req, pTask->outputInfo.fixedDispatcher.nodeId,
                              &pTask->outputInfo.fixedDispatcher.epSet);

  } else if (pTask->outputInfo.type == TASK_OUTPUT__SHUFFLE_DISPATCH) {
    // one check per downstream vgroup; only the last send's code is kept
    streamTaskStartMonitorCheckRsp(pTask);

    SArray* vgInfo = pTask->outputInfo.shuffleDispatcher.dbInfo.pVgroupInfos;

    int32_t numOfVgs = taosArrayGetSize(vgInfo);
    stDebug("s-task:%s check %d downstream tasks, ver:%" PRId64 "-%" PRId64 " window:%" PRId64 "-%" PRId64, idstr,
            numOfVgs, pRange->range.minVer, pRange->range.maxVer, pWindow->skey, pWindow->ekey);

    for (int32_t i = 0; i < numOfVgs; i++) {
      SVgroupInfo* pVgInfo = taosArrayGet(vgInfo, i);
      if (pVgInfo == NULL) {
        continue;
      }

      setCheckDownstreamReqInfo(&req, tGenIdPI64(), pVgInfo->taskId, pVgInfo->vgId);
      streamTaskAddReqInfo(&pTask->taskCheckInfo, req.reqId, pVgInfo->taskId, pVgInfo->vgId, idstr);

      stDebug("s-task:%s (vgId:%d) stage:%" PRId64
              " check downstream task:0x%x (vgId:%d) (shuffle), idx:%d, QID:0x%" PRIx64,
              idstr, pTask->info.nodeId, req.stage, req.downstreamTaskId, req.downstreamNodeId, i, req.reqId);
      code = streamSendCheckMsg(pTask, &req, pVgInfo->vgId, &pVgInfo->epSet);
    }
  } else if (pTask->outputInfo.type == TASK_OUTPUT__VTABLE_MAP) {
    // one check per mapped vtable downstream task
    streamTaskStartMonitorCheckRsp(pTask);

    SArray* pTaskInfos = pTask->outputInfo.vtableMapDispatcher.taskInfos;
    int32_t numTasks = taosArrayGetSize(pTaskInfos);
    stDebug("s-task:%s check %d vtable downstream tasks, ver:%" PRId64 "-%" PRId64 " window:%" PRId64 "-%" PRId64,
            idstr, numTasks, pRange->range.minVer, pRange->range.maxVer, pWindow->skey, pWindow->ekey);

    for (int32_t i = 0; i < numTasks; ++i) {
      STaskDispatcherFixed* pAddr = taosArrayGet(pTaskInfos, i);
      if (pAddr == NULL) {
        continue;
      }

      setCheckDownstreamReqInfo(&req, tGenIdPI64(), pAddr->taskId, pAddr->nodeId);
      streamTaskAddReqInfo(&pTask->taskCheckInfo, req.reqId, pAddr->taskId, pAddr->nodeId, idstr);

      stDebug("s-task:%s (vgId:%d) stage:%" PRId64 " check vtable downstream task:0x%x (vgId:%d), QID:0x%" PRIx64,
              idstr, pTask->info.nodeId, req.stage, req.downstreamTaskId, req.downstreamNodeId, req.reqId);
      code = streamSendCheckMsg(pTask, &req, pAddr->nodeId, &pAddr->epSet);
      if (code != TSDB_CODE_SUCCESS) {
        stError("s-task:%s failed to send check msg to vtable downstream task:0x%x (vgId:%d), code:%s", idstr,
                req.downstreamTaskId, req.downstreamNodeId, tstrerror(code));
      }
    }
  } else {  // for sink task, set it ready directly.
//    streamTaskSetConsenChkptIdRecv(pTask, 0, taosGetTimestampMs());
//
    stDebug("s-task:%s (vgId:%d) set downstream ready, since no downstream", idstr, pTask->info.nodeId);
    streamTaskStopMonitorCheckRsp(&pTask->taskCheckInfo, idstr);
    processDownstreamReadyRsp(pTask, false);
  }

  if (code) {
    stError("s-task:%s failed to send check msg to downstream, code:%s", idstr, tstrerror(code));
  }
}
172

173
// Build the check response for an incoming check request. Only a leader node
// answers with the real task status; a follower replies
// TASK_DOWNSTREAM_NOT_LEADER, and an unknown task replies
// TASK_DOWNSTREAM_NOT_READY. *pRsp is fully (re)initialized here.
void streamTaskProcessCheckMsg(SStreamMeta* pMeta, SStreamTaskCheckReq* pReq, SStreamTaskCheckRsp* pRsp) {
  int32_t taskId = pReq->downstreamTaskId;

  // echo identifying fields back so the upstream can correlate the rsp
  *pRsp = (SStreamTaskCheckRsp){
      .reqId = pReq->reqId,
      .streamId = pReq->streamId,
      .childId = pReq->childId,
      .downstreamNodeId = pReq->downstreamNodeId,
      .downstreamTaskId = pReq->downstreamTaskId,
      .upstreamNodeId = pReq->upstreamNodeId,
      .upstreamTaskId = pReq->upstreamTaskId,
  };

  // only the leader node handle the check request
  if (pMeta->role == NODE_ROLE_FOLLOWER) {
    stError(
        "s-task:0x%x invalid check msg from upstream:0x%x(vgId:%d), vgId:%d is follower, not handle check status msg",
        taskId, pReq->upstreamTaskId, pReq->upstreamNodeId, pMeta->vgId);
    pRsp->status = TASK_DOWNSTREAM_NOT_LEADER;
  } else {
    SStreamTask* pTask = NULL;
    // NOTE(review): acquire failure code is not inspected; pTask==NULL is used
    // as the failure signal instead
    int32_t      code = streamMetaAcquireTask(pMeta, pReq->streamId, taskId, &pTask);
    if (pTask != NULL) {
      pRsp->status =
          streamTaskCheckStatus(pTask, pReq->upstreamTaskId, pReq->upstreamNodeId, pReq->stage, &pRsp->oldStage);

      SStreamTaskState pState = streamTaskGetStatus(pTask);
      stDebug("s-task:%s status:%s, stage:%" PRId64 " recv task check req(QID:0x%" PRIx64
              ") task:0x%x (vgId:%d), check_status:%d",
              pTask->id.idStr, pState.name, pRsp->oldStage, pRsp->reqId, pRsp->upstreamTaskId, pRsp->upstreamNodeId,
              pRsp->status);
      streamMetaReleaseTask(pMeta, pTask);
    } else {
      // task not created on this node yet; upstream will retry
      pRsp->status = TASK_DOWNSTREAM_NOT_READY;
      stDebug("tq recv task check(taskId:0x%" PRIx64 "-0x%x not built yet) req(QID:0x%" PRIx64
              ") from task:0x%x (vgId:%d), rsp check_status %d",
              pReq->streamId, taskId, pRsp->reqId, pRsp->upstreamTaskId, pRsp->upstreamNodeId, pRsp->status);
    }
  }
}
213

214
// Process a check response from a downstream task. READY responses decrement
// the not-ready counter and, once it reaches zero, complete the downstream
// check. NEW_STAGE / NOT_LEADER responses queue a node-epset update and mark
// the task failed; NOT_READY responses are left for the rsp monitor to retry.
// Always returns success except for a mismatched upstream task id.
int32_t streamTaskProcessCheckRsp(SStreamTask* pTask, const SStreamTaskCheckRsp* pRsp) {
  int64_t         now = taosGetTimestampMs();
  const char*     id = pTask->id.idStr;
  STaskCheckInfo* pInfo = &pTask->taskCheckInfo;
  int32_t         total = streamTaskGetNumOfDownstream(pTask);
  int32_t         left = -1;

  if (streamTaskShouldStop(pTask)) {
    stDebug("s-task:%s should stop, do not do check downstream again", id);
    return TSDB_CODE_SUCCESS;
  }

  // the rsp must be addressed to this task
  if (pTask->id.taskId != pRsp->upstreamTaskId) {
    stError("s-task:%s invalid check downstream rsp, upstream task:0x%x discard", id, pRsp->upstreamTaskId);
    return TSDB_CODE_INVALID_MSG;
  }

  if (pRsp->status == TASK_DOWNSTREAM_READY) {
    int32_t code = streamTaskUpdateCheckInfo(pInfo, pRsp->downstreamTaskId, pRsp->status, now, pRsp->reqId, &left, id);
    if (code != TSDB_CODE_SUCCESS) {
      // stale/unknown rsp already logged inside; treat as handled
      return TSDB_CODE_SUCCESS;
    }

    if (left == 0) {
      processDownstreamReadyRsp(pTask, true);  // all downstream tasks are ready, set the complete check downstream flag
      streamTaskStopMonitorCheckRsp(pInfo, id);
    } else {
      stDebug("s-task:%s (vgId:%d) recv check rsp from task:0x%x (vgId:%d) status:%d, total:%d not ready:%d", id,
              pRsp->upstreamNodeId, pRsp->downstreamTaskId, pRsp->downstreamNodeId, pRsp->status, total, left);
    }
  } else {  // not ready, wait for 100ms and retry
    int32_t code = streamTaskUpdateCheckInfo(pInfo, pRsp->downstreamTaskId, pRsp->status, now, pRsp->reqId, &left, id);
    if (code != TSDB_CODE_SUCCESS) {
      return TSDB_CODE_SUCCESS;  // return success in any cases.
    }

    if (pRsp->status == TASK_UPSTREAM_NEW_STAGE || pRsp->status == TASK_DOWNSTREAM_NOT_LEADER) {
      // either the node restarted (stage changed) or we dispatched to a
      // follower: record the node for epset update and stop checking
      if (pRsp->status == TASK_UPSTREAM_NEW_STAGE) {
        stError("s-task:%s vgId:%d self vnode-transfer/leader-change/restart detected, old stage:%" PRId64
                ", current stage:%" PRId64 ", not check wait for downstream task nodeUpdate, and all tasks restart",
                id, pRsp->upstreamNodeId, pRsp->oldStage, pTask->pMeta->stage);
        code = streamTaskAddIntoNodeUpdateList(pTask, pRsp->upstreamNodeId);
      } else {
        stError(
            "s-task:%s downstream taskId:0x%x (vgId:%d) not leader, self dispatch epset needs to be updated, not check "
            "downstream again, nodeUpdate needed",
            id, pRsp->downstreamTaskId, pRsp->downstreamNodeId);
        code = streamTaskAddIntoNodeUpdateList(pTask, pRsp->downstreamNodeId);
      }

      streamMetaAddFailedTaskSelf(pTask, now, true);
    } else {  // TASK_DOWNSTREAM_NOT_READY, rsp-check monitor will retry in 300 ms
      stDebug("s-task:%s (vgId:%d) recv check rsp from task:0x%x (vgId:%d) status:%d, total:%d not ready:%d", id,
              pRsp->upstreamNodeId, pRsp->downstreamTaskId, pRsp->downstreamNodeId, pRsp->status, total, left);
    }
  }

  return 0;
}
273

274
// Encode *pRsp into an RPC message (SMsgHead + payload) and send it back over
// the handle in pRpcInfo. Returns 0 on success, TSDB_CODE_INVALID_MSG on
// size-estimation failure, or terrno when the rpc buffer allocation fails.
int32_t streamTaskSendCheckRsp(const SStreamMeta* pMeta, int32_t vgId, SStreamTaskCheckRsp* pRsp,
                               SRpcHandleInfo* pRpcInfo, int32_t taskId) {
  SEncoder encoder;
  int32_t  code = 0;
  int32_t  len;

  // compute the encoded size first; sets len and code
  tEncodeSize(tEncodeStreamTaskCheckRsp, pRsp, len, code);
  if (code < 0) {
    stError("vgId:%d failed to encode task check rsp, s-task:0x%x", pMeta->vgId, taskId);
    return TSDB_CODE_INVALID_MSG;
  }

  void* buf = rpcMallocCont(sizeof(SMsgHead) + len);
  if (buf == NULL) {
    // NOTE(review): code is still 0 here, so tstrerror(code) does not carry the
    // allocation error; terrno is returned below instead
    stError("s-task:0x%x vgId:%d failed prepare msg, %s at line:%d code:%s", taskId, pMeta->vgId, __func__, __LINE__,
            tstrerror(code));
    return terrno;
  }

  ((SMsgHead*)buf)->vgId = htonl(vgId);

  // encode the payload right after the message head
  void* abuf = POINTER_SHIFT(buf, sizeof(SMsgHead));
  tEncoderInit(&encoder, (uint8_t*)abuf, len);
  code = tEncodeStreamTaskCheckRsp(&encoder, pRsp);
  tEncoderClear(&encoder);

  // the rsp is sent unconditionally; buf ownership passes to the rpc layer
  SRpcMsg rspMsg = {.code = 0, .pCont = buf, .contLen = sizeof(SMsgHead) + len, .info = *pRpcInfo};
  tmsgSendRsp(&rspMsg);

  // normalize a positive encode byte-count to success, keep negative errors
  code = TMIN(code, 0);
  return code;
}
306

307
// Start the check-rsp monitor: mark the task as in-check, reset the check
// bookkeeping, and arm the rspMonitorFn timer. All under checkInfoLock; a
// dropping task or an already-running check procedure aborts the start.
void streamTaskStartMonitorCheckRsp(SStreamTask* pTask) {
  int32_t         vgId = pTask->pMeta->vgId;
  STaskCheckInfo* pInfo = &pTask->taskCheckInfo;

  streamMutexLock(&pInfo->checkInfoLock);

  // drop procedure already started, not start check downstream now
  ETaskStatus s = streamTaskGetStatus(pTask).state;
  if (s == TASK_STATUS__DROPPING) {
    stDebug("s-task:%s task not in uninit status, status:%s not start monitor check-rsp", pTask->id.idStr,
            streamTaskGetStatusStr(s));
    streamMutexUnlock(&pInfo->checkInfoLock);
    return;
  }

  // sets the inCheckProcess flag; fails if a check is already running
  int32_t code = streamTaskStartCheckDownstream(pInfo, pTask->id.idStr);
  if (code != TSDB_CODE_SUCCESS) {
    streamMutexUnlock(&pInfo->checkInfoLock);
    return;
  }

  streamTaskInitTaskCheckInfo(pInfo, &pTask->outputInfo, taosGetTimestampMs());

  // the timer callback owns pTaskRefId; arm it only if the ref was allocated
  int64_t* pTaskRefId = NULL;
  code = streamTaskAllocRefId(pTask, &pTaskRefId);
  if (code == 0) {
    streamTmrStart(rspMonitorFn, CHECK_RSP_CHECK_INTERVAL, pTaskRefId, streamTimer, &pInfo->checkRspTmr, vgId,
                   "check-status-monitor");
  }

  streamMutexUnlock(&pInfo->checkInfoLock);
}
339

340
// Ask the check-rsp monitor to stop: raise stopCheckProcess under the lock;
// the monitor timer observes the flag and terminates itself on its next tick.
void streamTaskStopMonitorCheckRsp(STaskCheckInfo* pInfo, const char* id) {
  streamMutexLock(&pInfo->checkInfoLock);
  pInfo->stopCheckProcess = 1;
  streamMutexUnlock(&pInfo->checkInfoLock);

  stDebug("s-task:%s set stop check-rsp monitor flag", id);
}
347

348
// Release the resources held by the check info: the monitor timer (if it was
// ever armed) and the per-downstream status list. Pointers are NULL-ed so a
// second cleanup is harmless.
void streamTaskCleanupCheckInfo(STaskCheckInfo* pInfo) {
  if (pInfo->checkRspTmr != NULL) {
    streamTmrStop(pInfo->checkRspTmr);
    pInfo->checkRspTmr = NULL;
  }

  taosArrayDestroy(pInfo->pList);
  pInfo->pList = NULL;
}
357

358
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
359
// All downstream tasks reported ready: drive the task state machine with the
// matching INIT event, record the launch result in the meta store, halt the
// task if required, and launch the related fill-history task if one exists.
// `lock` selects the locking vs. lock-free meta helpers (caller may already
// hold the meta lock).
void processDownstreamReadyRsp(SStreamTask* pTask, bool lock) {
  // history-scan tasks complete init via a different event
  EStreamTaskEvent event = (pTask->info.fillHistory != STREAM_HISTORY_TASK) ? TASK_EVENT_INIT : TASK_EVENT_INIT_SCANHIST;
  int32_t          code = streamTaskOnHandleEventSuccess(pTask->status.pSM, event, NULL, NULL);
  if (code) {
    stError("s-task:%s failed to set event succ, code:%s", pTask->id.idStr, tstrerror(code));
  }

  int64_t checkTs = pTask->execInfo.checkTs;
  int64_t readyTs = pTask->execInfo.readyTs;
  if (lock) {
    code = streamMetaAddTaskLaunchResult(pTask->pMeta, pTask->id.streamId, pTask->id.taskId, checkTs, readyTs, true);
  } else {
    code = streamMetaAddTaskLaunchResultNoLock(pTask->pMeta, pTask->id.streamId, pTask->id.taskId, checkTs, readyTs, true);
  }

  if (code) {
    stError("s-task:%s failed to record the downstream task status, code:%s", pTask->id.idStr, tstrerror(code));
  }

  if (pTask->status.taskStatus == TASK_STATUS__HALT) {
    // halt is only expected for a task that has a related fill-history task
    // and is not itself a fill-history task
    if (!HAS_RELATED_FILLHISTORY_TASK(pTask) || (pTask->info.fillHistory != 0)) {
      stError("s-task:%s status:halt fillhistory:%d not handle the ready rsp", pTask->id.idStr,
              pTask->info.fillHistory);
    }

    // halt itself for count window stream task until the related fill history task completed.
    stDebug("s-task:%s level:%d initial status is %s from mnode, set it to be halt", pTask->id.idStr,
            pTask->info.taskLevel, streamTaskGetStatusStr(pTask->status.taskStatus));
    code = streamTaskHandleEvent(pTask->status.pSM, TASK_EVENT_HALT);
    if (code != 0) {  // todo: handle error
      stError("s-task:%s failed to handle halt event, code:%s", pTask->id.idStr, tstrerror(code));
    }
  }

  // start the related fill-history task, when current task is ready
  // not invoke in success callback due to the deadlock.
  // todo: let's retry
  if (HAS_RELATED_FILLHISTORY_TASK(pTask)) {
    stDebug("s-task:%s try to launch related task", pTask->id.idStr);
    code = streamLaunchFillHistoryTask(pTask, lock);
    if (code) {
      stError("s-task:%s failed to launch related task, code:%s", pTask->id.idStr, tstrerror(code));
    }
  }
}
404

405
// Record nodeId in the task's epset-update list (deduplicated), under the
// task lock. Returns 0, or terrno if appending to the list fails.
int32_t streamTaskAddIntoNodeUpdateList(SStreamTask* pTask, int32_t nodeId) {
  int32_t vgId = pTask->pMeta->vgId;
  int32_t code = 0;

  streamMutexLock(&pTask->lock);

  SArray* pList = pTask->outputInfo.pNodeEpsetUpdateList;
  int32_t num = taosArrayGetSize(pList);

  // scan for an existing entry with the same node id
  bool existed = false;
  for (int32_t idx = 0; idx < num && !existed; ++idx) {
    SDownstreamTaskEpset* pEntry = taosArrayGet(pList, idx);
    if (pEntry != NULL && pEntry->nodeId == nodeId) {
      existed = true;
    }
  }

  if (!existed) {
    SDownstreamTaskEpset t = {.nodeId = nodeId};

    void* p = taosArrayPush(pList, &t);
    if (p == NULL) {
      code = terrno;
      stError("s-task:%s vgId:%d failed to update epset, code:%s", pTask->id.idStr, vgId, tstrerror(code));
    } else {
      stInfo("s-task:%s vgId:%d nodeId:%d needs to be updated, total needs updated:%d", pTask->id.idStr,
             vgId, t.nodeId, (num + 1));
    }
  }

  streamMutexUnlock(&pTask->lock);
  return code;
}
441

442
// Reset the check bookkeeping for a new check round: clear the per-downstream
// status list, derive the number of downstream tasks that must report ready
// from the output type, and stamp the start time.
void streamTaskInitTaskCheckInfo(STaskCheckInfo* pInfo, STaskOutputInfo* pOutputInfo, int64_t startTs) {
  taosArrayClear(pInfo->pList);

  int32_t outputType = pOutputInfo->type;
  if (outputType == TASK_OUTPUT__FIXED_DISPATCH) {
    // exactly one downstream target
    pInfo->notReadyTasks = 1;
  } else if (outputType == TASK_OUTPUT__SHUFFLE_DISPATCH) {
    pInfo->notReadyTasks = taosArrayGetSize(pOutputInfo->shuffleDispatcher.dbInfo.pVgroupInfos);
  } else if (outputType == TASK_OUTPUT__VTABLE_MAP) {
    pInfo->notReadyTasks = taosArrayGetSize(pOutputInfo->vtableMapDispatcher.taskInfos);
  }

  pInfo->startTs = startTs;
  pInfo->timeoutStartTs = startTs;
  pInfo->stopCheckProcess = 0;
}
457

458
// Locate the per-downstream status entry for taskId in pInfo->pList and store
// it in *pStatusInfo (NULL when absent). Caller must hold checkInfoLock.
// Fixed: the original kept scanning after a match; task ids are unique in
// pList (streamTaskAddReqInfo rejects duplicates), so return on first hit.
void findCheckRspStatus(STaskCheckInfo* pInfo, int32_t taskId, SDownstreamStatusInfo** pStatusInfo) {
  if (pStatusInfo == NULL) {
    return;
  }

  *pStatusInfo = NULL;
  int32_t num = taosArrayGetSize(pInfo->pList);
  for (int32_t j = 0; j < num; ++j) {
    SDownstreamStatusInfo* p = taosArrayGet(pInfo->pList, j);
    if (p == NULL) {
      continue;
    }

    if (p->taskId == taskId) {
      *pStatusInfo = p;
      return;  // ids are unique; no need to scan the rest
    }
  }
}
475

476
// Record a check response for downstream taskId: validate the request id,
// update the entry's status/rspTs, and report the remaining not-ready count
// via *pNotReady. Returns TSDB_CODE_FAILED for a stale reqId or an unknown
// task, TSDB_CODE_SUCCESS otherwise. All under checkInfoLock.
// Fixed: missing separator in the stale-rsp log format ("%sQID" -> "%s QID").
int32_t streamTaskUpdateCheckInfo(STaskCheckInfo* pInfo, int32_t taskId, int32_t status, int64_t rspTs, int64_t reqId,
                                  int32_t* pNotReady, const char* id) {
  SDownstreamStatusInfo* p = NULL;

  streamMutexLock(&pInfo->checkInfoLock);
  findCheckRspStatus(pInfo, taskId, &p);
  if (p != NULL) {
    // a rsp for an older request round is discarded
    if (reqId != p->reqId) {
      stError("s-task:%s QID:0x%" PRIx64 " expected:0x%" PRIx64
              " expired check-rsp recv from downstream task:0x%x, discarded",
              id, reqId, p->reqId, taskId);
      streamMutexUnlock(&pInfo->checkInfoLock);
      return TSDB_CODE_FAILED;
    }

    // subtract one not-ready-task, since it is ready now
    if ((p->status != TASK_DOWNSTREAM_READY) && (status == TASK_DOWNSTREAM_READY)) {
      *pNotReady = atomic_sub_fetch_32(&pInfo->notReadyTasks, 1);
    } else {
      *pNotReady = pInfo->notReadyTasks;
    }

    p->status = status;
    p->rspTs = rspTs;

    streamMutexUnlock(&pInfo->checkInfoLock);
    return TSDB_CODE_SUCCESS;
  }

  streamMutexUnlock(&pInfo->checkInfoLock);
  stError("s-task:%s unexpected check rsp msg, invalid downstream task:0x%x, QID:0x%" PRIx64 " discarded", id, taskId,
          reqId);
  return TSDB_CODE_FAILED;
}
510

511
// Try to enter the check-downstream procedure by setting inCheckProcess.
// Fails (TSDB_CODE_FAILED) if a check round is already running; in that case
// the stop flag is cleared so the running round is not auto-stopped.
// Caller must hold checkInfoLock.
int32_t streamTaskStartCheckDownstream(STaskCheckInfo* pInfo, const char* id) {
  if (pInfo->inCheckProcess != 0) {
    stError("s-task:%s already in check procedure, checkTs:%" PRId64 ", start monitor check rsp failed", id,
            pInfo->startTs);
    pInfo->stopCheckProcess = 0;  // disable auto stop of check process
    return TSDB_CODE_FAILED;
  }

  pInfo->inCheckProcess = 1;
  stDebug("s-task:%s set the in check-rsp flag", id);
  return TSDB_CODE_SUCCESS;
}
524

525
// Mark the check-rsp procedure finished: clear every counter/flag and the
// status list. `lock` indicates whether checkInfoLock must be taken here
// (false when the caller already holds it). A no-op if no check is running.
void streamTaskCompleteCheckRsp(STaskCheckInfo* pInfo, bool lock, const char* id) {
  if (lock) {
    streamMutexLock(&pInfo->checkInfoLock);
  }

  if (!pInfo->inCheckProcess) {
    stDebug("s-task:%s already not in check-rsp procedure", id);
  } else {
    int64_t el = (pInfo->startTs != 0) ? (taosGetTimestampMs() - pInfo->startTs) : 0;
    stDebug("s-task:%s clear the in check-rsp flag, set the check-rsp done, elapsed time:%" PRId64 " ms", id, el);

    // reset all round-state back to idle
    pInfo->startTs = 0;
    pInfo->timeoutStartTs = 0;
    pInfo->notReadyTasks = 0;
    pInfo->inCheckProcess = 0;
    pInfo->stopCheckProcess = 0;
    pInfo->notReadyRetryCount = 0;
    pInfo->timeoutRetryCount = 0;

    taosArrayClear(pInfo->pList);
  }

  if (lock) {
    streamMutexUnlock(&pInfo->checkInfoLock);
  }
}
552

553
// todo: retry until success
554
// Register an outgoing check request for downstream taskId in pInfo->pList so
// its response can later be matched by reqId. A duplicate taskId is ignored
// (the request was already sent). Fixed: a failed taosArrayPush was silently
// swallowed; it is now logged so a lost downstream entry is visible.
// todo: retry until success
void streamTaskAddReqInfo(STaskCheckInfo* pInfo, int64_t reqId, int32_t taskId, int32_t vgId, const char* id) {
  SDownstreamStatusInfo info = {.taskId = taskId, .status = -1, .vgId = vgId, .reqId = reqId, .rspTs = 0};
  streamMutexLock(&pInfo->checkInfoLock);

  SDownstreamStatusInfo* p = NULL;
  findCheckRspStatus(pInfo, taskId, &p);
  if (p != NULL) {
    stDebug("s-task:%s check info to task:0x%x already sent", id, taskId);
    streamMutexUnlock(&pInfo->checkInfoLock);
    return;
  }

  void* px = taosArrayPush(pInfo->pList, &info);
  if (px == NULL) {
    // todo: retry adding the entry; without it, this downstream's rsp will be
    // rejected as unknown
    stError("s-task:%s failed to add check info for downstream task:0x%x, code:%s", id, taskId, tstrerror(terrno));
  }

  streamMutexUnlock(&pInfo->checkInfoLock);
}
573

574
// Re-send a check request to the single downstream task described by *p,
// generating a fresh reqId (which invalidates any in-flight rsp for the old
// one). Dispatch target is located by p->taskId within the task's output
// configuration. Returns the send result.
int32_t doSendCheckMsg(SStreamTask* pTask, SDownstreamStatusInfo* p) {
  const char* id = pTask->id.idStr;
  int32_t     code = 0;

  SStreamTaskCheckReq req = {
      .streamId = pTask->id.streamId,
      .upstreamTaskId = pTask->id.taskId,
      .upstreamNodeId = pTask->info.nodeId,
      .childId = pTask->info.selfChildId,
      .stage = pTask->pMeta->stage,
  };

  // update the reqId for the new check msg
  p->reqId = tGenIdPI64();

  STaskOutputInfo* pOutputInfo = &pTask->outputInfo;
  if (pOutputInfo->type == TASK_OUTPUT__FIXED_DISPATCH) {
    STaskDispatcherFixed* pDispatch = &pOutputInfo->fixedDispatcher;
    setCheckDownstreamReqInfo(&req, p->reqId, pDispatch->taskId, pDispatch->nodeId);

    stDebug("s-task:%s (vgId:%d) stage:%" PRId64 " re-send check downstream task:0x%x(vgId:%d) QID:0x%" PRIx64, id,
            pTask->info.nodeId, req.stage, req.downstreamTaskId, req.downstreamNodeId, req.reqId);

    code = streamSendCheckMsg(pTask, &req, pOutputInfo->fixedDispatcher.nodeId, &pOutputInfo->fixedDispatcher.epSet);
  } else if (pOutputInfo->type == TASK_OUTPUT__SHUFFLE_DISPATCH) {
    // find the vgroup that hosts p->taskId and send only to it
    SArray* vgInfo = pOutputInfo->shuffleDispatcher.dbInfo.pVgroupInfos;
    int32_t numOfVgs = taosArrayGetSize(vgInfo);

    for (int32_t i = 0; i < numOfVgs; i++) {
      SVgroupInfo* pVgInfo = taosArrayGet(vgInfo, i);
      if (pVgInfo == NULL) {
        continue;
      }

      if (p->taskId == pVgInfo->taskId) {
        setCheckDownstreamReqInfo(&req, p->reqId, pVgInfo->taskId, pVgInfo->vgId);

        stDebug("s-task:%s (vgId:%d) stage:%" PRId64
                " re-send check downstream task:0x%x(vgId:%d) (shuffle), idx:%d QID:0x%" PRIx64,
                id, pTask->info.nodeId, req.stage, req.downstreamTaskId, req.downstreamNodeId, i, p->reqId);
        code = streamSendCheckMsg(pTask, &req, pVgInfo->vgId, &pVgInfo->epSet);
        break;
      }
    }
  } else if (pOutputInfo->type == TASK_OUTPUT__VTABLE_MAP) {
    // find the vtable-mapped downstream entry that matches p->taskId
    SArray* pTaskInfos = pTask->outputInfo.vtableMapDispatcher.taskInfos;
    int32_t numTasks = taosArrayGetSize(pTaskInfos);

    for (int32_t i = 0; i < numTasks; ++i) {
      STaskDispatcherFixed* pAddr = taosArrayGet(pTaskInfos, i);
      if (pAddr == NULL) {
        continue;
      }

      if (p->taskId == pAddr->taskId) {
        setCheckDownstreamReqInfo(&req, p->reqId, pAddr->taskId, pAddr->nodeId);

        stDebug("s-task:%s (vgId:%d) stage:%" PRId64
                " re-send check vtable downstream task:0x%x(vgId:%d), QID:0x%" PRIx64,
                id, pTask->info.nodeId, req.stage, req.downstreamTaskId, req.downstreamNodeId, p->reqId);
        code = streamSendCheckMsg(pTask, &req, pAddr->nodeId, &pAddr->epSet);
        break;
      }
    }
  }

  if (code) {
    stError("s-task:%s failed to send check msg to downstream, code:%s", pTask->id.idStr, tstrerror(code));
  }
  return code;
}
645

646
// Classify every downstream entry in pInfo->pList: count ready and faulted
// (NEW_STAGE/NOT_LEADER) tasks, count tasks still within the no-rsp grace
// period (el < CHECK_NOT_RSP_DURATION), and collect task ids into
// pTimeoutList (no rsp for too long) or pNotReadyList (answered not-ready).
// `el` is the elapsed time of the current check round. Caller must hold
// checkInfoLock.
void getCheckRspStatus(STaskCheckInfo* pInfo, int64_t el, int32_t* numOfReady, int32_t* numOfFault,
                       int32_t* numOfNotRsp, SArray* pTimeoutList, SArray* pNotReadyList, const char* id) {
  for (int32_t i = 0; i < taosArrayGetSize(pInfo->pList); ++i) {
    SDownstreamStatusInfo* p = taosArrayGet(pInfo->pList, i);
    if (p == NULL) {
      continue;
    }

    if (p->status == TASK_DOWNSTREAM_READY) {
      (*numOfReady) += 1;
    } else if (p->status == TASK_UPSTREAM_NEW_STAGE || p->status == TASK_DOWNSTREAM_NOT_LEADER) {
      stDebug("s-task:%s recv status:NEW_STAGE/NOT_LEADER from downstream, task:0x%x, quit from check downstream", id,
              p->taskId);
      (*numOfFault) += 1;
    } else {                                 // TASK_DOWNSTREAM_NOT_READY
      if (p->rspTs == 0) {                   // not response yet
        if (el >= CHECK_NOT_RSP_DURATION) {  // no rsp for 60 sec (CHECK_NOT_RSP_DURATION): treat as timed out
          void* px = taosArrayPush(pTimeoutList, &p->taskId);
          if (px == NULL) {
            stError("s-task:%s failed to record time out task:0x%x", id, p->taskId);
          }
        } else {                // el < CHECK_NOT_RSP_DURATION
          (*numOfNotRsp) += 1;  // do nothing and continue waiting for their rsp
        }
      } else {
        // downstream answered but is not ready yet: schedule a retry
        void* px = taosArrayPush(pNotReadyList, &p->taskId);
        if (px == NULL) {
          stError("s-task:%s failed to record not ready task:0x%x", id, p->taskId);
        }
      }
    }
  }
}
679

680
// Fill in the per-target fields of an otherwise prepared check request.
void setCheckDownstreamReqInfo(SStreamTaskCheckReq* pReq, int64_t reqId, int32_t dstTaskId, int32_t dstNodeId) {
  pReq->downstreamNodeId = dstNodeId;
  pReq->downstreamTaskId = dstTaskId;
  pReq->reqId = reqId;
}
685

686
// Handle downstream tasks that did not respond to the check msg within CHECK_NOT_RSP_DURATION:
// restart the timeout clock, resend the check msg to each timed-out task, and after more than
// 10 consecutive timeout rounds (> 600 sec in total) add the related vnodes into the
// node-update list so the topology can be refreshed.
// NOTE(review): appears to be invoked with pInfo->checkInfoLock held (from rspMonitorFn) — confirm.
void handleTimeoutDownstreamTasks(SStreamTask* pTask, SArray* pTimeoutList) {
  STaskCheckInfo* pInfo = &pTask->taskCheckInfo;
  const char*     id = pTask->id.idStr;
  int32_t         vgId = pTask->pMeta->vgId;
  int32_t         numOfTimeout = taosArrayGetSize(pTimeoutList);
  int32_t         code = 0;

  // restart the timeout clock for the next round of waiting
  pInfo->timeoutStartTs = taosGetTimestampMs();

  for (int32_t i = 0; i < numOfTimeout; ++i) {
    int32_t* px = taosArrayGet(pTimeoutList, i);
    if (px == NULL) {
      continue;
    }

    int32_t                taskId = *px;
    SDownstreamStatusInfo* p = NULL;
    findCheckRspStatus(pInfo, taskId, &p);

    if (p != NULL) {
      // a timed-out entry must still be in its initial state: no status and no rsp timestamp
      if (p->status != -1 || p->rspTs != 0) {
        stError("s-task:%s invalid rsp record entry, index:%d, status:%d, rspTs:%" PRId64, id, i, p->status, p->rspTs);
        continue;
      }

      code = doSendCheckMsg(pTask, p);
      if (code) {  // log the failure and keep processing the remaining timed-out tasks
        stError("s-task:%s failed to send check msg to downstream task:0x%x, code:%s", id, p->taskId, tstrerror(code));
      }
    }
  }

  pInfo->timeoutRetryCount += 1;

  // timeout more than 600 sec, add into node update list
  if (pInfo->timeoutRetryCount > 10) {
    pInfo->timeoutRetryCount = 0;

    for (int32_t i = 0; i < numOfTimeout; ++i) {
      int32_t* pTaskId = taosArrayGet(pTimeoutList, i);
      if (pTaskId == NULL) {
        continue;
      }

      SDownstreamStatusInfo* p = NULL;
      findCheckRspStatus(pInfo, *pTaskId, &p);
      if (p != NULL) {
        code = streamTaskAddIntoNodeUpdateList(pTask, p->vgId);
        if (code) {  // log the failure; the next timeout round will retry the same vnode
          stError("s-task:%s failed to add vgId:%d into nodeUpdate list, code:%s", id, p->vgId, tstrerror(code));
        }
        stDebug("s-task:%s vgId:%d downstream task:0x%x (vgId:%d) timeout more than 600sec, add into nodeUpdate list",
                id, vgId, p->taskId, p->vgId);
      }
    }

    stDebug("s-task:%s vgId:%d %d downstream task(s) all add into nodeUpate list", id, vgId, numOfTimeout);
  } else {
    stDebug("s-task:%s vgId:%d %d downstream task(s) timeout, send check msg again, retry:%d start time:%" PRId64, id,
            vgId, numOfTimeout, pInfo->timeoutRetryCount, pInfo->timeoutStartTs);
  }
}

741
// Reset the rsp record of each not-ready downstream task and resend the check msg to it,
// so its readiness is evaluated again in the next timer round.
void handleNotReadyDownstreamTask(SStreamTask* pTask, SArray* pNotReadyList) {
  STaskCheckInfo* pInfo = &pTask->taskCheckInfo;
  const char*     id = pTask->id.idStr;
  int32_t         vgId = pTask->pMeta->vgId;
  int32_t         numOfNotReady = taosArrayGetSize(pNotReadyList);

  // reset the info, and send the check msg to failure downstream again
  for (int32_t i = 0; i < numOfNotReady; ++i) {
    int32_t* pTaskId = taosArrayGet(pNotReadyList, i);
    if (pTaskId == NULL) {
      continue;
    }

    SDownstreamStatusInfo* p = NULL;
    findCheckRspStatus(pInfo, *pTaskId, &p);
    if (p != NULL) {
      // clear the previous rsp so the entry is treated as "not responded yet" again
      p->rspTs = 0;
      p->status = -1;
      int32_t code = doSendCheckMsg(pTask, p);
      if (code) {  // log the failure and continue with the remaining not-ready tasks
        stError("s-task:%s failed to send check msg to downstream task:0x%x, code:%s", id, p->taskId, tstrerror(code));
      }
    }
  }

  pInfo->notReadyRetryCount += 1;
  stDebug("s-task:%s vgId:%d %d downstream task(s) not ready, send check msg again, retry:%d start time:%" PRId64, id,
          vgId, numOfNotReady, pInfo->notReadyRetryCount, pInfo->startTs);
}

768
// The action of adding the failed status may incur the restart procedure, which should NEVER be
// executed in the timer thread. The restart of all tasks requires that no task still has an
// active timer; executing the restart inside the timer thread would therefore deadlock.
// Hence, schedule the add-failed-status action asynchronously via the stream task sched queue.
int32_t addDownstreamFailedStatusResultAsync(SMsgCb* pMsgCb, int32_t vgId, int64_t streamId, int32_t taskId) {
  return streamTaskSchedTask(pMsgCb, vgId, streamId, taskId, STREAM_EXEC_T_ADD_FAILED_TASK, false);
}

775
// Release the per-round resources of the check-rsp monitor: the acquired task reference,
// the two temporary taskId lists (taosArrayDestroy tolerates NULL), and — when param is
// non-NULL — the heap-allocated task ref id that was passed to the timer.
static void doCleanup(SStreamTask* pTask, SArray* pNotReadyList, SArray* pTimeoutList, void* param) {
  streamMetaReleaseTask(pTask->pMeta, pTask);

  taosArrayDestroy(pNotReadyList);
  taosArrayDestroy(pTimeoutList);
  streamTaskFreeRefId(param);
}

783
// Timer callback that periodically inspects the check-downstream responses of one task.
// It classifies each downstream entry (ready / fault / not-ready / timed-out / still waiting),
// resends check msgs where needed, and either re-arms itself (CHECK_RSP_CHECK_INTERVAL) or
// quits and completes the check procedure. This function is executed in the timer thread,
// so anything that may trigger a task restart is dispatched asynchronously
// (addDownstreamFailedStatusResultAsync) instead of being run inline.
void rspMonitorFn(void* param, void* tmrId) {
  int32_t         numOfReady = 0;
  int32_t         numOfFault = 0;
  int32_t         numOfNotRsp = 0;
  int32_t         numOfNotReady = 0;
  int32_t         numOfTimeout = 0;
  int64_t         taskRefId = *(int64_t*)param;   // param owns a heap-allocated task ref id
  int64_t         now = taosGetTimestampMs();
  SArray*         pNotReadyList = NULL;
  SArray*         pTimeoutList = NULL;
  SStreamMeta*    pMeta = NULL;
  STaskCheckInfo* pInfo = NULL;
  int32_t         vgId = -1;
  int64_t         timeoutDuration = 0;
  const char*     id = NULL;
  int32_t         total = 0;

  SStreamTask* pTask = taosAcquireRef(streamTaskRefPool, taskRefId);
  if (pTask == NULL) {
    // task already released elsewhere; only the ref id itself must be freed
    stError("invalid task rid:%" PRId64 " failed to acquired stream-task at %s", taskRefId, __func__);
    streamTaskFreeRefId(param);
    return;
  }

  pMeta = pTask->pMeta;
  pInfo = &pTask->taskCheckInfo;
  vgId = pTask->pMeta->vgId;
  timeoutDuration = now - pInfo->timeoutStartTs;  // elapsed since the timeout clock last (re)started
  id = pTask->id.idStr;
  total = (int32_t) taosArrayGetSize(pInfo->pList);

  stDebug("s-task:%s start to do check-downstream-rsp check in tmr", id);

  // read the task state under the task lock; acted upon without the lock below
  streamMutexLock(&pTask->lock);
  SStreamTaskState state = streamTaskGetStatus(pTask);
  streamMutexUnlock(&pTask->lock);

  if (state.state == TASK_STATUS__STOP) {
    stDebug("s-task:%s status:%s vgId:%d quit from monitor check-rsp tmr", id, state.name, vgId);
    streamTaskCompleteCheckRsp(pInfo, true, id);

    // not record the failure of the current task if try to close current vnode
    // otherwise, the put of message operation may incur invalid read of message queue.
    if (!pMeta->closeFlag) {
      int32_t code = addDownstreamFailedStatusResultAsync(pTask->pMsgCb, vgId, pTask->id.streamId, pTask->id.taskId);
      if (code) {
        stError("s-task:%s failed to create async record start failed task, code:%s", id, tstrerror(code));
      }
    }

    doCleanup(pTask, pNotReadyList, pTimeoutList, param);
    return;
  }

  // DROPPING or READY: the check procedure is over (or moot); stop the timer chain
  if (state.state == TASK_STATUS__DROPPING || state.state == TASK_STATUS__READY) {
    stDebug("s-task:%s status:%s vgId:%d quit from monitor check-rsp tmr", id, state.name, vgId);

    streamTaskCompleteCheckRsp(pInfo, true, id);
    doCleanup(pTask, pNotReadyList, pTimeoutList, param);
    return;
  }

  // from here on, pInfo is examined/updated under checkInfoLock
  streamMutexLock(&pInfo->checkInfoLock);
  if (pInfo->notReadyTasks == 0) {
    stDebug("s-task:%s status:%s vgId:%d all downstream ready, quit from monitor rsp tmr", id, state.name, vgId);

    streamTaskCompleteCheckRsp(pInfo, false, id);
    streamMutexUnlock(&pInfo->checkInfoLock);
    doCleanup(pTask, pNotReadyList, pTimeoutList, param);
    return;
  }

  pNotReadyList = taosArrayInit(4, sizeof(int64_t));
  pTimeoutList = taosArrayInit(4, sizeof(int64_t));

  if (state.state == TASK_STATUS__UNINIT) {
    getCheckRspStatus(pInfo, timeoutDuration, &numOfReady, &numOfFault, &numOfNotRsp, pTimeoutList, pNotReadyList, id);

    numOfNotReady = (int32_t)taosArrayGetSize(pNotReadyList);
    numOfTimeout = (int32_t)taosArrayGetSize(pTimeoutList);

    // fault tasks detected, not try anymore
    bool jumpOut = false;
    // sanity check: the five categories must account for every downstream entry
    if ((numOfReady + numOfFault + numOfNotReady + numOfTimeout + numOfNotRsp) != total) {
      stError(
          "s-task:%s vgId:%d internal error in handling the check downstream procedure, rsp number is inconsistent, "
          "stop rspMonitor tmr, total:%d, notRsp:%d, notReady:%d, fault:%d, timeout:%d, ready:%d",
          id, vgId, total, numOfNotRsp, numOfNotReady, numOfFault, numOfTimeout, numOfReady);
      jumpOut = true;
    }

    if (numOfFault > 0) {
      stDebug(
          "s-task:%s status:%s vgId:%d all rsp. quit from monitor rsp tmr, since vnode-transfer/leader-change/restart "
          "detected, total:%d, notRsp:%d, notReady:%d, fault:%d, timeout:%d, ready:%d",
          id, state.name, vgId, total, numOfNotRsp, numOfNotReady, numOfFault, numOfTimeout, numOfReady);
      jumpOut = true;
    }

    if (jumpOut) {
      streamTaskCompleteCheckRsp(pInfo, false, id);
      streamMutexUnlock(&pInfo->checkInfoLock);
      doCleanup(pTask, pNotReadyList, pTimeoutList, param);
      return;
    }
  } else {  // unexpected status
    stError("s-task:%s unexpected task status:%s during waiting for check rsp", id, state.name);
  }

  // checking of downstream tasks has been stopped by other threads
  if (pInfo->stopCheckProcess == 1) {
    stDebug(
        "s-task:%s status:%s vgId:%d stopped by other threads to check downstream process, total:%d, notRsp:%d, "
        "notReady:%d, fault:%d, timeout:%d, ready:%d",
        id, state.name, vgId, total, numOfNotRsp, numOfNotReady, numOfFault, numOfTimeout, numOfReady);

    streamTaskCompleteCheckRsp(pInfo, false, id);
    streamMutexUnlock(&pInfo->checkInfoLock);

    // record the failed-start asynchronously; never restart from the timer thread
    int32_t code = addDownstreamFailedStatusResultAsync(pTask->pMsgCb, vgId, pTask->id.streamId, pTask->id.taskId);
    if (code) {
      stError("s-task:%s failed to create async record start failed task, code:%s", id, tstrerror(code));
    }

    doCleanup(pTask, pNotReadyList, pTimeoutList, param);
    return;
  }

  if (numOfNotReady > 0) {  // check to make sure not in recheck timer
    handleNotReadyDownstreamTask(pTask, pNotReadyList);
  }

  if (numOfTimeout > 0) {
    handleTimeoutDownstreamTasks(pTask, pTimeoutList);
  }

  // re-arm the timer; param (the ref id) is handed to the next round, so it is NOT freed below
  streamTmrStart(rspMonitorFn, CHECK_RSP_CHECK_INTERVAL, param, streamTimer, &pInfo->checkRspTmr, vgId,
                 "check-status-monitor");
  streamMutexUnlock(&pInfo->checkInfoLock);

  stDebug(
      "s-task:%s vgId:%d continue checking rsp in 300ms, total:%d, notRsp:%d, notReady:%d, fault:%d, timeout:%d, "
      "ready:%d",
      id, vgId, total, numOfNotRsp, numOfNotReady, numOfFault, numOfTimeout, numOfReady);
  doCleanup(pTask, pNotReadyList, pTimeoutList, NULL);
}
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc