
taosdata / TDengine / build #4791 (push, via travis-ci / web-flow)

13 Oct 2025 06:50AM UTC coverage: 57.628% (-0.8%) from 58.476%

Merge pull request #33213 from taosdata/fix/huoh/timemoe_model_directory
fix: fix tdgpt timemoe model directory

136628 of 303332 branches covered (45.04%); branch coverage is included in the aggregate %.
208121 of 294900 relevant lines covered (70.57%)
4250784.02 hits per line

Source File: /source/dnode/snode/src/snode.c (73.75% covered)
/*
 * Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
 *
 * This program is free software: you can use, redistribute, and/or modify
 * it under the terms of the GNU Affero General Public License, version 3
 * or later ("AGPL"), as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.
 *
 * You should have received a copy of the GNU Affero General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "executor.h"
#include "sndInt.h"
#include "tdatablock.h"
#include "tuuid.h"
#include "stream.h"
#include "streamRunner.h"

// clang-format off
#define sndError(...) do {  if (sndDebugFlag & DEBUG_ERROR) { taosPrintLog("SND ERROR ", DEBUG_ERROR, sndDebugFlag, __VA_ARGS__);}} while (0)
#define sndInfo(...)  do {  if (sndDebugFlag & DEBUG_INFO)  { taosPrintLog("SND INFO  ", DEBUG_INFO,  sndDebugFlag, __VA_ARGS__);}} while (0)
#define sndDebug(...) do {  if (sndDebugFlag & DEBUG_DEBUG) { taosPrintLog("SND DEBUG ", DEBUG_DEBUG, sndDebugFlag, __VA_ARGS__);}} while (0)

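// Create the snode instance: allocate the SSnode and record the message
// callbacks passed in through the options. Returns NULL on allocation failure.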
SSnode *sndOpen(const char *path, const SSnodeOpt *pOption) {
  int32_t code = 0;
  SSnode *pSnode = taosMemoryCalloc(1, sizeof(SSnode));
  if (pSnode == NULL) {
    return NULL;
  }

  pSnode->msgCb = pOption->msgCb;

  return pSnode;
}

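// Mark the snode as enabled in the stream module once the instance is ready.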
int32_t sndInit(SSnode *pSnode) {
  streamSetSnodeEnabled(&pSnode->msgCb);
  return 0;
}

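// Tear down the snode: flag it as disabled in the stream module and free the
// instance itself.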
void sndClose(SSnode *pSnode) {
  streamSetSnodeDisabled(false);
  taosMemoryFree(pSnode);
}

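// TDMT_STREAM_TRIGGER_CALC: deserialize the calc request, acquire the target
// runner task, execute it, and always answer with TDMT_STREAM_TRIGGER_CALC_RSP.
// TSDB_CODE_MND_STREAM_TABLE_NOT_CREATE is downgraded to success below, since
// it only informs the trigger that the target table does not exist yet.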
static int32_t handleTriggerCalcReq(SSnode* pSnode, void* pWorkerCb, SRpcMsg* pRpcMsg) {
  SSTriggerCalcRequest req = {0};
  SStreamRunnerTask* pTask = NULL;
  void* taskAddr = NULL;
  int32_t code = 0, lino = 0;
  TAOS_CHECK_EXIT(tDeserializeSTriggerCalcRequest(POINTER_SHIFT(pRpcMsg->pCont, sizeof(SMsgHead)), pRpcMsg->contLen - sizeof(SMsgHead), &req));
  TAOS_CHECK_EXIT(streamAcquireTask(req.streamId, req.runnerTaskId, (SStreamTask**)&pTask, &taskAddr));

  req.brandNew = true;
  req.execId = -1;
  pTask->msgCb = pSnode->msgCb;
  //pTask->pMsgCb = &pSnode->msgCb;
  pTask->pWorkerCb = pWorkerCb;
  req.curWinIdx = 0;
  TAOS_CHECK_EXIT(stRunnerTaskExecute(pTask, &req));

_exit:

  tDestroySTriggerCalcRequest(&req);
  SRpcMsg rsp = {.code = code, .msgType = TDMT_STREAM_TRIGGER_CALC_RSP, .contLen = 0, .pCont = NULL, .info = pRpcMsg->info};
  if (rpcSendResponse(&rsp) != 0) {
    sndError("failed to send response, msg:%p", &rsp);
  }

  streamReleaseTask(taskAddr);

  if (code == TSDB_CODE_MND_STREAM_TABLE_NOT_CREATE) {
    code = 0;  // not a real error; it just notifies the trigger that the table is not created
  }

  if (code) {
    sndError("%s failed at line %d, error:%s", __FUNCTION__, lino, tstrerror(code));
  }

  return code;
}

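// TDMT_STREAM_DELETE_CHECKPOINT: the payload after the message head is the id
// of the stream whose checkpoint should be deleted.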
static int32_t handleSyncDeleteCheckPointReq(SSnode* pSnode, SRpcMsg* pRpcMsg) {
  int64_t streamId = *(int64_t*)POINTER_SHIFT(pRpcMsg->pCont, sizeof(SMsgHead));
  streamDeleteCheckPoint(streamId);
  return 0;
}

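// TDMT_STREAM_SYNC_CHECKPOINT: compare the version carried in the request with
// the local checkpoint. A newer remote checkpoint is persisted locally; the
// local copy is sent back only when the requester is behind, otherwise an
// empty response (ver = -1 plus the stream id) is returned.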
static int32_t handleSyncWriteCheckPointReq(SSnode* pSnode, SRpcMsg* pRpcMsg) {
  int32_t ver = *(int32_t*)POINTER_SHIFT(pRpcMsg->pCont, sizeof(SMsgHead) + INT_BYTES);
  int64_t streamId = *(int64_t*)POINTER_SHIFT(pRpcMsg->pCont, sizeof(SMsgHead) + 2 * INT_BYTES);
  SRpcMsg rsp = {.code = 0, .msgType = TDMT_STREAM_SYNC_CHECKPOINT_RSP, .info = pRpcMsg->info};

  stDebug("[checkpoint] handleSyncWriteCheckPointReq streamId:%" PRIx64 ",ver:%d", streamId, ver);
  void*   data = NULL;
  int64_t dataLen = 0;
  int32_t code = streamReadCheckPoint(streamId, &data, &dataLen);
  if (code != 0 || (terrno == TAOS_SYSTEM_ERROR(ENOENT) && ver == -1)) {
    goto end;
  }
  if (terrno == TAOS_SYSTEM_ERROR(ENOENT) || ver > *(int32_t*)POINTER_SHIFT(data, INT_BYTES)) {
    int32_t ret = streamWriteCheckPoint(streamId, POINTER_SHIFT(pRpcMsg->pCont, sizeof(SMsgHead)), pRpcMsg->contLen - sizeof(SMsgHead));
    stDebug("[checkpoint] streamId:%" PRIx64 ", checkpoint local updated, ver:%d, dataLen:%" PRId64 ", ret:%d", streamId, ver, dataLen, ret);
  }
  if (terrno == TAOS_SYSTEM_ERROR(ENOENT) || ver >= *(int32_t*)POINTER_SHIFT(data, INT_BYTES)) {
    stDebug("[checkpoint] streamId:%" PRIx64 ", checkpoint no need send back, ver:%d, dataLen:%" PRId64, streamId, ver, dataLen);
    dataLen = 0;
    taosMemoryFreeClear(data);
  }

end:
  if (data == NULL) {
    rsp.contLen = 2 * INT_BYTES + LONG_BYTES;
    rsp.pCont = rpcMallocCont(rsp.contLen);
    if (rsp.pCont == NULL) {
      rsp.code = TSDB_CODE_OUT_OF_MEMORY;
    } else {
      *(int32_t*)(POINTER_SHIFT(rsp.pCont, INT_BYTES)) = -1;  // no checkpoint
      *(int64_t*)(POINTER_SHIFT(rsp.pCont, 2 * INT_BYTES)) = streamId;
    }
  } else {
    rsp.pCont = rpcMallocCont(dataLen);
    if (rsp.pCont == NULL) {
      rsp.code = TSDB_CODE_OUT_OF_MEMORY;
    } else {
      memcpy(rsp.pCont, data, dataLen);
      rsp.contLen = dataLen;
      taosMemoryFreeClear(data);
    }
  }

  if (rpcSendResponse(&rsp) != 0) {
    sndError("failed to send write checkpoint response, msg:%p", &rsp);
  }

  return 0;
}

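// TDMT_STREAM_SYNC_CHECKPOINT_RSP: if the peer returned a checkpoint
// (ver != -1), persist it locally, then mark the stream checkpoint as ready.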
static int32_t handleSyncWriteCheckPointRsp(SSnode* pSnode, SRpcMsg* pRpcMsg) {
  if (pRpcMsg->code != 0) {
    stError("[checkpoint] handleSyncWriteCheckPointRsp, code:%d, msgType:%d", pRpcMsg->code, pRpcMsg->msgType);
    return pRpcMsg->code;
  }
  void* data = pRpcMsg->pCont;
  int32_t dataLen = pRpcMsg->contLen;

  int32_t ver = *(int32_t*)(POINTER_SHIFT(data, INT_BYTES));
  int64_t streamId = *(int64_t*)(POINTER_SHIFT(data, 2 * INT_BYTES));
  stDebug("[checkpoint] handleSyncWriteCheckPointRsp, ver:%d, streamId:%"PRIx64",dataLen:%d",
          ver, streamId, dataLen);

  if (ver != -1) {
    (void)streamWriteCheckPoint(streamId, data, dataLen);
  }
  return streamCheckpointSetReady(streamId);
}

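// Encode an optional data block into an SRetrieveTableRsp buffer. The block is
// serialized after two int32 length prefixes; a NULL or empty block yields a
// zero-row response that is marked completed.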
static int32_t buildFetchRsp(SSDataBlock* pBlock, void** data, size_t* size, int8_t precision, bool finished) {
  int32_t code = 0;
  int32_t lino = 0;
  void*   buf = NULL;

  int32_t blockSize = pBlock == NULL ? 0 : blockGetEncodeSize(pBlock);
  size_t dataEncodeBufSize = sizeof(SRetrieveTableRsp) + INT_BYTES * 2 + blockSize;
  buf = rpcMallocCont(dataEncodeBufSize);
  if (!buf) {
    code = terrno;
    goto end;
  }

  SRetrieveTableRsp* pRetrieve = (SRetrieveTableRsp*)buf;
  pRetrieve->version = 0;
  pRetrieve->precision = precision;
  pRetrieve->compressed = 0;
  *((int32_t*)(pRetrieve->data)) = blockSize;
  *((int32_t*)(pRetrieve->data + INT_BYTES)) = blockSize;
  if (pBlock == NULL || pBlock->info.rows == 0) {
    pRetrieve->numOfRows = 0;
    pRetrieve->numOfBlocks = 0;
    pRetrieve->completed = 1;
  } else {
    pRetrieve->numOfRows = htobe64((int64_t)pBlock->info.rows);
    pRetrieve->numOfBlocks = htonl(1);
    int32_t actualLen = blockEncode(pBlock, pRetrieve->data + INT_BYTES * 2, blockSize, taosArrayGetSize(pBlock->pDataBlock));
    if (actualLen < 0) {
      code = terrno;
      goto end;
    }
  }
  if (finished) {
    pRetrieve->completed = 1;
  }

  *data = buf;
  *size = dataEncodeBufSize;
  buf = NULL;

end:
  rpcFreeCont(buf);
  return code;
}

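// TDMT_STREAM_FETCH_FROM_RUNNER: translate the fetch request into a trigger
// calc request, run it on the runner task, and reply with the encoded result
// block as TDMT_STREAM_FETCH_FROM_RUNNER_RSP.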
static int32_t handleStreamFetchData(SSnode* pSnode, void *pWorkerCb, SRpcMsg* pRpcMsg) {
  int32_t code = 0, lino = 0;
  void* taskAddr = NULL;
  SResFetchReq req = {0};
  SSTriggerCalcRequest calcReq = {0};
  SStreamRunnerTask* pTask = NULL;
  void* buf = NULL;
  size_t size = 0;

  stDebug("handleStreamFetchData, msgType:%s, contLen:%d 0x%" PRIx64 ":0x%" PRIx64,
      TMSG_INFO(pRpcMsg->msgType), pRpcMsg->contLen, TRACE_GET_ROOTID(&pRpcMsg->info.traceId), TRACE_GET_MSGID(&pRpcMsg->info.traceId));

  TAOS_CHECK_EXIT(tDeserializeSResFetchReq(pRpcMsg->pCont, pRpcMsg->contLen, &req));

  calcReq.streamId = req.queryId;
  calcReq.runnerTaskId = req.taskId;
  calcReq.brandNew = req.reset;
  calcReq.execId = req.execId;
  calcReq.sessionId = req.pStRtFuncInfo->sessionId;
  calcReq.triggerType = req.pStRtFuncInfo->triggerType;
  TSWAP(calcReq.groupColVals, req.pStRtFuncInfo->pStreamPartColVals);
  TSWAP(calcReq.params, req.pStRtFuncInfo->pStreamPesudoFuncVals);
  calcReq.gid = req.pStRtFuncInfo->groupId;
  calcReq.curWinIdx = req.pStRtFuncInfo->curIdx;
  calcReq.pOutBlock = NULL;

  TAOS_CHECK_EXIT(streamAcquireTask(calcReq.streamId, calcReq.runnerTaskId, (SStreamTask**)&pTask, &taskAddr));

  pTask->msgCb = pSnode->msgCb;
  //pTask->pMsgCb = &pSnode->msgCb;
  pTask->pWorkerCb = pWorkerCb;

  TAOS_CHECK_EXIT(stRunnerTaskExecute(pTask, &calcReq));

  TAOS_CHECK_EXIT(buildFetchRsp(calcReq.pOutBlock, &buf, &size, 0, false));

_exit:

  tDestroySTriggerCalcRequest(&calcReq);
  tDestroySResFetchReq(&req);
  SRpcMsg rsp = {.code = code, .msgType = TDMT_STREAM_FETCH_FROM_RUNNER_RSP, .contLen = size, .pCont = buf, .info = pRpcMsg->info};
  tmsgSendRsp(&rsp);

  streamReleaseTask(taskAddr);

  if (code) {
    sndError("%s failed at line %d, error:%s", __FUNCTION__, lino, tstrerror(code));
  }

  return code;
}

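// TDMT_STREAM_FETCH_FROM_CACHE: read the requested time window for the group
// from the runner's cache and reply with the encoded block as
// TDMT_STREAM_FETCH_FROM_CACHE_RSP.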
static int32_t handleStreamFetchFromCache(SSnode* pSnode, SRpcMsg* pRpcMsg) {
  int32_t code = 0, lino = 0;
  SResFetchReq req = {0};
  SStreamCacheReadInfo readInfo = {0};
  void* buf = NULL;
  int64_t streamId = 0;
  size_t size = 0;
  TAOS_CHECK_EXIT(tDeserializeSResFetchReq(pRpcMsg->pCont, pRpcMsg->contLen, &req));

  streamId = req.queryId;
  readInfo.taskInfo.streamId = req.queryId;
  readInfo.taskInfo.taskId = req.taskId;
  readInfo.taskInfo.sessionId = req.pStRtFuncInfo->sessionId;
  readInfo.gid = req.pStRtFuncInfo->groupId;
  //SSTriggerCalcParam* pParam = taosArrayGet(req.pStRtFuncInfo->pStreamPesudoFuncVals, req.pStRtFuncInfo->curIdx);
  readInfo.start = req.pStRtFuncInfo->curWindow.skey;
  readInfo.end = req.pStRtFuncInfo->curWindow.ekey;
  bool finished;
  TAOS_CHECK_EXIT(stRunnerFetchDataFromCache(&readInfo, &finished));

  TAOS_CHECK_EXIT(buildFetchRsp(readInfo.pBlock, &buf, &size, 0, finished));

_exit:

  printDataBlock(readInfo.pBlock, __func__, "fetchFromCache", streamId);

  stsDebug("task %" PRIx64 " TDMT_STREAM_FETCH_FROM_CACHE_RSP with code:%d rows:%" PRId64 ", size:%d, time range:[%" PRId64 ", %" PRId64 "]",
      req.taskId, code, readInfo.pBlock ? readInfo.pBlock->info.rows : 0, (int32_t)size, readInfo.start, readInfo.end);

  SRpcMsg rsp = {.code = code, .msgType = TDMT_STREAM_FETCH_FROM_CACHE_RSP, .contLen = size, .pCont = buf, .info = pRpcMsg->info};
  tmsgSendRsp(&rsp);

  if (code) {
    sndError("%s failed at line %d, error:%s", __FUNCTION__, lino, tstrerror(code));
  }

  blockDataDestroy(readInfo.pBlock);
  tDestroySResFetchReq(&req);

  return code;
}

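// Send an empty response that carries only the given error code back to the
// sender of pMsg.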
static void sndSendErrorRrsp(SRpcMsg *pMsg, int32_t errCode) {
  SRpcMsg rspMsg = {0};

  rspMsg.info = pMsg->info;
  rspMsg.pCont = NULL;
  rspMsg.contLen = 0;
  rspMsg.code = errCode;
  rspMsg.msgType = pMsg->msgType;

  tmsgSendRsp(&rspMsg);
}

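// TDMT_STREAM_TRIGGER_DROP: deserialize the drop-table request, run it on the
// runner task, and answer with TDMT_STREAM_TRIGGER_DROP_RSP on success or an
// error response on failure.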
static int32_t handleStreamDropTableReq(SSnode* pSnode, SRpcMsg* pRpcMsg) {
  SSTriggerDropRequest req = {0};
  SStreamRunnerTask* pTask = NULL;
  void* taskAddr = NULL;
  int32_t code = 0, lino = 0;
  TAOS_CHECK_EXIT(tDeserializeSTriggerDropTableRequest(POINTER_SHIFT(pRpcMsg->pCont, sizeof(SMsgHead)), pRpcMsg->contLen - sizeof(SMsgHead), &req));
  TAOS_CHECK_EXIT(streamAcquireTask(req.streamId, req.runnerTaskId, (SStreamTask**)&pTask, &taskAddr));

  pTask->msgCb = pSnode->msgCb;
  TAOS_CHECK_EXIT(stRunnerTaskDropTable(pTask, &req));

_exit:
  tDestroySSTriggerDropRequest(&req);
  if (code) {
    sndError("%s failed at line %d, error:%s", __FUNCTION__, lino, tstrerror(code));
    sndSendErrorRrsp(pRpcMsg, code);
  } else {
    SRpcMsg rsp = {.code = 0, .msgType = TDMT_STREAM_TRIGGER_DROP_RSP, .contLen = 0, .pCont = NULL, .info = pRpcMsg->info};
    tmsgSendRsp(&rsp);
  }
  streamReleaseTask(taskAddr);

  return code;
}

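// Entry point for stream messages routed to the snode: dispatch each message
// type to its handler above and log any failure.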
int32_t sndProcessStreamMsg(SSnode *pSnode, void *pWorkerCb, SRpcMsg *pMsg) {
  int32_t code = 0, lino = 0;
  switch (pMsg->msgType) {
    case TDMT_STREAM_TRIGGER_CALC:
      TAOS_CHECK_EXIT(handleTriggerCalcReq(pSnode, pWorkerCb, pMsg));
      break;
    case TDMT_STREAM_DELETE_CHECKPOINT:
      TAOS_CHECK_EXIT(handleSyncDeleteCheckPointReq(pSnode, pMsg));
      break;
    case TDMT_STREAM_SYNC_CHECKPOINT:
      TAOS_CHECK_EXIT(handleSyncWriteCheckPointReq(pSnode, pMsg));
      break;
    case TDMT_STREAM_SYNC_CHECKPOINT_RSP:
      TAOS_CHECK_EXIT(handleSyncWriteCheckPointRsp(pSnode, pMsg));
      break;
    case TDMT_STREAM_FETCH_FROM_RUNNER:
      TAOS_CHECK_EXIT(handleStreamFetchData(pSnode, pWorkerCb, pMsg));
      break;
    case TDMT_STREAM_FETCH_FROM_CACHE:
      TAOS_CHECK_EXIT(handleStreamFetchFromCache(pSnode, pMsg));
      break;
    case TDMT_STREAM_TRIGGER_DROP:
      TAOS_CHECK_EXIT(handleStreamDropTableReq(pSnode, pMsg));
      break;
    default:
      sndError("invalid snode msg:%d", pMsg->msgType);
      TAOS_CHECK_EXIT(TSDB_CODE_INVALID_MSG);
  }

_exit:

  if (code) {
    sndError("%s failed at line %d, error:%s", __FUNCTION__, lino, tstrerror(code));
  }

  return code;
}