• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

taosdata / TDengine / #5000

22 Mar 2026 10:21AM UTC coverage: 72.307% (-0.003%) from 72.31%
#5000

push

travis-ci

web-flow
feat(subq/some): some/any/exists for stream subq (#34860)

50 of 68 new or added lines in 1 file covered. (73.53%)

614 existing lines in 138 files now uncovered.

253462 of 350536 relevant lines covered (72.31%)

134798164.58 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

81.92
/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c
1
/*
2
 * Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
3
 *
4
 * This program is free software: you can use, redistribute, and/or modify
5
 * it under the terms of the GNU Affero General Public License, version 3
6
 * or later ("AGPL"), as published by the Free Software Foundation.
7
 *
8
 * This program is distributed in the hope that it will be useful, but WITHOUT
9
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10
 * FITNESS FOR A PARTICULAR PURPOSE.
11
 *
12
 * You should have received a copy of the GNU Affero General Public License
13
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
14
 */
15

16
#define _DEFAULT_SOURCE
17
#include "vmInt.h"
18
#include "vnodeInt.h"
19

20
// Reply to a request using the response payload already attached to the
// message's rpc info; a no-op when the message carries no rpc handle.
static inline void vmSendRsp(SRpcMsg *pMsg, int32_t code) {
  if (pMsg->info.handle == NULL) {
    return;
  }

  SRpcMsg rsp = {0};
  rsp.code = code;
  rsp.pCont = pMsg->info.rsp;
  rsp.contLen = pMsg->info.rspLen;
  rsp.info = pMsg->info;
  tmsgSendRsp(&rsp);
}
30

31
/**
 * Worker callback for the vnode-multi-mgmt queue: dispatch one management
 * request to its handler, send a response for request messages, then free
 * the message.
 *
 * Fix: the switch previously had no default case, so an unexpected message
 * type left code at -1 with whatever stale terrno happened to be set and no
 * diagnostic identifying the queue. Now mirrors vmProcessMgmtQueue.
 */
static void vmProcessMultiMgmtQueue(SQueueInfo *pInfo, SRpcMsg *pMsg) {
  SVnodeMgmt     *pMgmt = pInfo->ahandle;
  int32_t         code = -1;
  const STraceId *trace = &pMsg->info.traceId;

  dGTrace("msg:%p, get from vnode-multi-mgmt queue", pMsg);
  switch (pMsg->msgType) {
    case TDMT_DND_CREATE_VNODE:
      code = vmProcessCreateVnodeReq(pMgmt, pMsg);
      break;
#ifdef USE_MOUNT
    case TDMT_DND_RETRIEVE_MOUNT_PATH:
      code = vmProcessRetrieveMountPathReq(pMgmt, pMsg);
      break;
    case TDMT_DND_MOUNT_VNODE:
      code = vmProcessMountVnodeReq(pMgmt, pMsg);
      break;
#endif
    default:
      // Consistent with vmProcessMgmtQueue: flag unroutable message types.
      terrno = TSDB_CODE_MSG_NOT_PROCESSED;
      dGError("msg:%p, not processed in vnode-multi-mgmt queue", pMsg);
  }

  if (IsReq(pMsg)) {
    if (code != 0) {
      if (terrno != 0) code = terrno;
      dGError("msg:%p, failed to process since %s, type:%s", pMsg, tstrerror(code), TMSG_INFO(pMsg->msgType));
    }
    vmSendRsp(pMsg, code);
  }

  dGTrace("msg:%p, is freed, code:0x%x", pMsg, code);
  rpcFreeCont(pMsg->pCont);
  taosFreeQitem(pMsg);
}
63

64
// Worker callback for the single-threaded vnode-mgmt queue: dispatch one
// management request (create/drop/alter vnode, arbitration heartbeat, ...)
// to its handler, reply to request messages, then free the message.
static void vmProcessMgmtQueue(SQueueInfo *pInfo, SRpcMsg *pMsg) {
  SVnodeMgmt     *pMgmt = pInfo->ahandle;
  int32_t         code = -1;  // default failure until a handler runs
  const STraceId *trace = &pMsg->info.traceId;

  dGTrace("msg:%p, get from vnode-mgmt queue", pMsg);
  switch (pMsg->msgType) {
    case TDMT_DND_CREATE_VNODE:
      code = vmProcessCreateVnodeReq(pMgmt, pMsg);
      break;
    case TDMT_DND_DROP_VNODE:
      code = vmProcessDropVnodeReq(pMgmt, pMsg);
      break;
    case TDMT_VND_ALTER_REPLICA:
      code = vmProcessAlterVnodeReplicaReq(pMgmt, pMsg);
      break;
    case TDMT_VND_DISABLE_WRITE:
      code = vmProcessDisableVnodeWriteReq(pMgmt, pMsg);
      break;
    case TDMT_VND_SET_KEEP_VERSION:
      code = vmProcessSetKeepVersionReq(pMgmt, pMsg);
      break;
    case TDMT_VND_ALTER_HASHRANGE:
      code = vmProcessAlterHashRangeReq(pMgmt, pMsg);
      break;
    case TDMT_DND_ALTER_VNODE_TYPE:
      code = vmProcessAlterVnodeTypeReq(pMgmt, pMsg);
      break;
    case TDMT_DND_CHECK_VNODE_LEARNER_CATCHUP:
      code = vmProcessCheckLearnCatchupReq(pMgmt, pMsg);
      break;
    case TDMT_VND_ARB_HEARTBEAT:
      code = vmProcessArbHeartBeatReq(pMgmt, pMsg);
      break;
    case TDMT_VND_ALTER_ELECTBASELINE:
      code = vmProcessAlterVnodeElectBaselineReq(pMgmt, pMsg);
      break;
    default:
      // Unknown type: leave code at -1 so the terrno set here is reported below.
      terrno = TSDB_CODE_MSG_NOT_PROCESSED;
      dGError("msg:%p, not processed in vnode-mgmt queue", pMsg);
  }

  if (IsReq(pMsg)) {
    if (code != 0) {
      // Prefer terrno when set: handlers may signal failure via terrno rather
      // than their return value.
      if (terrno != 0) code = terrno;
      dGError("msg:%p, failed to process since %s, type:%s", pMsg, tstrerror(code), TMSG_INFO(pMsg->msgType));
    }
    vmSendRsp(pMsg, code);
  }

  dGTrace("msg:%p, is freed, code:0x%x", pMsg, code);
  rpcFreeCont(pMsg->pCont);
  taosFreeQitem(pMsg);
}
118

119
// Worker callback for the vnode-query queue: execute one query message,
// reply only on failure (success replies are handled inside the query
// machinery), then release the message.
static void vmProcessQueryQueue(SQueueInfo *pInfo, SRpcMsg *pMsg) {
  SVnodeObj      *pVnode = pInfo->ahandle;
  const STraceId *trace = &pMsg->info.traceId;

  dGTrace("vgId:%d, msg:%p, get from vnode-query queue", pVnode->vgId, pMsg);

  int32_t code = vnodeProcessQueryMsg(pVnode->pImpl, pMsg, pInfo);
  if (code != 0) {
    code = (terrno != 0) ? terrno : code;
    dGError("vgId:%d, msg:%p, failed to query since %s", pVnode->vgId, pMsg, tstrerror(code));
    vmSendRsp(pMsg, code);
  }

  dGTrace("vgId:%d, msg:%p, is freed, code:0x%x", pVnode->vgId, pMsg, code);
  rpcFreeCont(pMsg->pCont);
  taosFreeQitem(pMsg);
}
135

136
/**
 * Worker callback for the vnode-fetch queue: drain up to numOfMsgs messages,
 * process each with vnodeProcessFetchMsg, reply on failure, and free every
 * dequeued message.
 *
 * Fix: failure logs previously printed terrstr(). Since terrno is reset to 0
 * before the call and code may come directly from the return value, the log
 * could read "success" while reporting a failure. Log the resolved code.
 */
static void vmProcessFetchQueue(SQueueInfo *pInfo, STaosQall *qall, int32_t numOfMsgs) {
  SVnodeObj *pVnode = pInfo->ahandle;
  SRpcMsg   *pMsg = NULL;

  for (int32_t i = 0; i < numOfMsgs; ++i) {
    if (taosGetQitem(qall, (void **)&pMsg) == 0) continue;
    const STraceId *trace = &pMsg->info.traceId;
    dGTrace("vgId:%d, msg:%p, get from vnode-fetch queue", pVnode->vgId, pMsg);

    terrno = 0;
    int32_t code = vnodeProcessFetchMsg(pVnode->pImpl, pMsg, pInfo);
    if (code != 0) {
      // -1 is the "see terrno" sentinel; resolve it to the real error code.
      if (code == -1 && terrno != 0) {
        code = terrno;
      }

      // Missing WAL entries are an expected condition; keep them at debug level.
      if (code == TSDB_CODE_WAL_LOG_NOT_EXIST) {
        dGDebug("vgId:%d, msg:%p, failed to fetch since %s [vnodeProcessFetchMsg]", pVnode->vgId, pMsg,
                tstrerror(code));
      } else {
        dGError("vgId:%d, msg:%p, failed to fetch since %s [vnodeProcessFetchMsg]", pVnode->vgId, pMsg,
                tstrerror(code));
      }

      vmSendRsp(pMsg, code);
    }

    dGTrace("vgId:%d, msg:%p, is freed, code:0x%x [vnodeProcessFetchMsg]", pVnode->vgId, pMsg, code);
    rpcFreeCont(pMsg->pCont);
    taosFreeQitem(pMsg);
  }
}
166

167
/**
 * Worker callback for the vnode-stream-reader queue: process one stream
 * reader message and free it. Failures are logged only; no rpc response is
 * sent from this path.
 *
 * Fixes: removed the unreachable `if (code == 0)` success branch (we are
 * inside `code != 0`, and the -1/terrno rewrite can only keep code nonzero),
 * and corrected the entry trace log which said "vnode-fetch queue" (a
 * copy-paste from vmProcessFetchQueue).
 */
static void vmProcessStreamReaderQueue(SQueueInfo *pInfo, SRpcMsg *pMsg) {
  SVnodeObj      *pVnode = pInfo->ahandle;
  const STraceId *trace = &pMsg->info.traceId;
  dGTrace("vgId:%d, msg:%p, get from vnode-stream-reader queue", pVnode->vgId, pMsg);

  terrno = 0;
  int32_t code = vnodeProcessStreamReaderMsg(pVnode->pImpl, pMsg, pInfo);
  if (code != 0) {
    // -1 is the "see terrno" sentinel; resolve it to the real error code.
    if (code == -1 && terrno != 0) {
      code = terrno;
    }
    dGError("vgId:%d, msg:%p, failed to stream reader since %s [vmProcessStreamReaderQueue]", pVnode->vgId, pMsg,
            terrstr());
  }

  dGTrace("vgId:%d, msg:%p, is freed, code:0x%x [vmProcessStreamReaderQueue]", pVnode->vgId, pMsg, code);
  rpcFreeCont(pMsg->pCont);
  taosFreeQitem(pMsg);
}
190

191
// Drain the vnode-sync queue: each dequeued message is handed to
// vnodeProcessSyncMsg (which replies on its own; none is sent here) and then
// freed, regardless of the processing result.
static void vmProcessSyncQueue(SQueueInfo *pInfo, STaosQall *qall, int32_t numOfMsgs) {
  SVnodeObj *pVnode = pInfo->ahandle;

  for (int32_t i = 0; i < numOfMsgs; ++i) {
    SRpcMsg *pMsg = NULL;
    if (taosGetQitem(qall, (void **)&pMsg) == 0) {
      continue;
    }

    const STraceId *trace = &pMsg->info.traceId;
    dGTrace("vgId:%d, msg:%p, get from vnode-sync queue", pVnode->vgId, pMsg);

    int32_t code = vnodeProcessSyncMsg(pVnode->pImpl, pMsg, NULL);  // no response here
    dGTrace("vgId:%d, msg:%p, is freed, code:0x%x", pVnode->vgId, pMsg, code);
    rpcFreeCont(pMsg->pCont);
    taosFreeQitem(pMsg);
  }
}
206

207
// Best-effort reply carrying the current terrno as the response code; skipped
// entirely when the message has no rpc handle to answer on.
static void vmSendResponse(SRpcMsg *pMsg) {
  if (pMsg->info.handle == NULL) {
    return;
  }

  SRpcMsg rsp = {.info = pMsg->info, .code = terrno};
  if (rpcSendResponse(&rsp) != 0) {
    dError("failed to send response since %s", terrstr());
  }
}
215

216
// Report whether the storage backing this vnode still has room for writes;
// falls back to the OS-level data-dir check when no tfs instance is present.
static bool vmDataSpaceSufficient(SVnodeObj *pVnode) {
  STfs *pTfs = pVnode->pImpl->pTfs;
  if (pTfs == NULL) {
    return osDataSpaceSufficient();
  }
  return tfsDiskSpaceSufficient(pTfs, 0, pVnode->diskPrimary);
}
224

225
// Acquire the vnode for vgId into *pNode. Returns 0 on success, or the
// terrno set by vmAcquireVnode when the vnode cannot be acquired.
static int32_t vmAcquireVnodeWrapper(SVnodeMgmt *pMgt, int32_t vgId, SVnodeObj **pNode) {
  SVnodeObj *pVnode = vmAcquireVnode(pMgt, vgId);
  *pNode = pVnode;
  return (pVnode == NULL) ? terrno : 0;
}
static int32_t vmPutMsgToQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg, EQueueType qtype) {
2,147,483,647✔
233
  int32_t         code = 0;
2,147,483,647✔
234
  SMsgHead *      pHead = pMsg->pCont;
2,147,483,647✔
235
  SVnodeObj *     pVnode = NULL;
2,147,483,647✔
236
  const STraceId *trace = &pMsg->info.traceId;
2,147,483,647✔
237

238
  if (pMsg->contLen < sizeof(SMsgHead)) {
2,147,483,647✔
239
    dGError("invalid rpc msg with no msg head at pCont. pMsg:%p, type:%s, contLen:%d", pMsg, TMSG_INFO(pMsg->msgType),
×
240
            pMsg->contLen);
241
    return TSDB_CODE_INVALID_MSG;
×
242
  }
243

244
  pHead->contLen = ntohl(pHead->contLen);
2,147,483,647✔
245
  pHead->vgId = ntohl(pHead->vgId);
2,147,483,647✔
246
  
247
  code = vmAcquireVnodeWrapper(pMgmt, pHead->vgId, &pVnode);
2,147,483,647✔
248
  if (code != 0) {
2,147,483,647✔
249
    dGDebug("vgId:%d, msg:%p, failed to put into vnode queue since %s, type:%s qtype:%d contLen:%d", pHead->vgId, pMsg,
3,079,893✔
250
            tstrerror(code), TMSG_INFO(pMsg->msgType), qtype, pHead->contLen);
251
    return code;
3,079,168✔
252
  }
253

254
  switch (qtype) {
2,147,483,647✔
255
    case QUERY_QUEUE:
423,167,109✔
256
      // let's put into different query processing queue. The query type is extracted during preprocessing procedure,
257
      // mquery-queue for meta info query, and query-queue for ordinary users' queries.
258
      code = vnodePreprocessQueryMsg(pVnode->pImpl, pMsg);
423,167,109✔
259
      if (code) {
423,147,358✔
260
        dError("vgId:%d, msg:%p, preprocess query msg failed since %s", pVnode->vgId, pMsg, tstrerror(code));
×
261
      } else {
262
        dGTrace("vgId:%d, msg:%p, put into vnode-query queue, type:%s", pVnode->vgId, pMsg, TMSG_INFO(pMsg->msgType));
423,147,358✔
263
        code = taosWriteQitem(pVnode->pQueryQ, pMsg);
423,147,226✔
264
      }
265
      break;
423,216,536✔
266
    case FETCH_QUEUE:
1,081,072,312✔
267
      dGTrace("vgId:%d, msg:%p, put into vnode-fetch queue, type:%s", pVnode->vgId, pMsg, TMSG_INFO(pMsg->msgType));
1,081,072,312✔
268
      code = taosWriteQitem(pVnode->pFetchQ, pMsg);
1,081,072,312✔
269
      break;
1,081,561,957✔
270
    case WRITE_QUEUE:
587,292,612✔
271
      if (!vmDataSpaceSufficient(pVnode)) {
587,292,612✔
272
        code = TSDB_CODE_NO_ENOUGH_DISKSPACE;
×
273
        dError("vgId:%d, msg:%p, failed to put into vnode-write queue since %s, type:%s", pVnode->vgId, pMsg,
×
274
               tstrerror(code), TMSG_INFO(pMsg->msgType));
275
        break;
×
276
      }
277
#if 0
278
      if (pMsg->msgType == TDMT_VND_SUBMIT && (grantCheck(TSDB_GRANT_STORAGE) != TSDB_CODE_SUCCESS)) {
279
        code = TSDB_CODE_VND_NO_WRITE_AUTH;
280
        dDebug("vgId:%d, msg:%p, failed to put into vnode-write queue since %s, type:%s", pVnode->vgId, pMsg,
281
               tstrerror(code), TMSG_INFO(pMsg->msgType));
282
        break;
283
      }
284
#endif
285
      if (pMsg->msgType != TDMT_VND_ALTER_CONFIRM && pVnode->disable) {
587,305,440✔
286
        dDebug("vgId:%d, msg:%p, failed to put into vnode-write queue since its disable, type:%s", pVnode->vgId, pMsg,
94,411✔
287
               TMSG_INFO(pMsg->msgType));
288
        code = TSDB_CODE_VND_STOPPED;
94,411✔
289
        break;
94,411✔
290
      }
291
      dGDebug("vgId:%d, msg:%p, put into vnode-write queue, type:%s", pVnode->vgId, pMsg, TMSG_INFO(pMsg->msgType));
587,191,585✔
292
      code = taosWriteQitem(pVnode->pWriteW.queue, pMsg);
587,193,295✔
293
      break;
587,241,281✔
294
    case SYNC_QUEUE:
269,884,369✔
295
      dGDebug("vgId:%d, msg:%p, put into vnode-sync queue, type:%s", pVnode->vgId, pMsg, TMSG_INFO(pMsg->msgType));
269,884,369✔
296
      code = taosWriteQitem(pVnode->pSyncW.queue, pMsg);
269,884,369✔
297
      break;
269,898,865✔
298
    case SYNC_RD_QUEUE:
42,914,938✔
299
      if(tsSyncLogHeartbeat){
42,914,938✔
300
        dGInfo("vgId:%d, msg:%p, put into vnode-sync-rd queue, type:%s", pVnode->vgId, pMsg, TMSG_INFO(pMsg->msgType));
×
301
      }
302
      else{
303
        dGDebug("vgId:%d, msg:%p, put into vnode-sync-rd queue, type:%s", pVnode->vgId, pMsg, TMSG_INFO(pMsg->msgType));
42,914,938✔
304
      }
305
      code = taosWriteQitem(pVnode->pSyncRdW.queue, pMsg);
42,914,938✔
306
      break;
42,920,716✔
307
    case APPLY_QUEUE:
181,058,881✔
308
      dGDebug("vgId:%d, msg:%p, put into vnode-apply queue, type:%s", pVnode->vgId, pMsg, TMSG_INFO(pMsg->msgType));
181,058,881✔
309
      code = taosWriteQitem(pVnode->pApplyW.queue, pMsg);
181,058,881✔
310
      break;
181,062,767✔
311
    case STREAM_READER_QUEUE:
41,082,419✔
312
      dGDebug("vgId:%d, msg:%p, put into vnode-stream-reader queue, type:%s", pVnode->vgId, pMsg,
41,082,419✔
313
              TMSG_INFO(pMsg->msgType));
314
      code = taosWriteQitem(pVnode->pStreamReaderQ, pMsg);
41,082,103✔
315
      break;
41,090,039✔
UNCOV
316
    default:
×
UNCOV
317
      code = TSDB_CODE_INVALID_MSG;
×
UNCOV
318
      break;
×
319
  }
320

321
  vmReleaseVnode(pMgmt, pVnode);
2,147,483,647✔
322
  return code;
2,147,483,647✔
323
}
324

325
// Route a message to the target vnode's sync-rd queue.
int32_t vmPutMsgToSyncRdQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
  return vmPutMsgToQueue(pMgmt, pMsg, SYNC_RD_QUEUE);
}
326

327
// Route a message to the target vnode's sync queue.
int32_t vmPutMsgToSyncQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
  return vmPutMsgToQueue(pMgmt, pMsg, SYNC_QUEUE);
}
328

329
// Route a message to the target vnode's write queue.
int32_t vmPutMsgToWriteQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
  return vmPutMsgToQueue(pMgmt, pMsg, WRITE_QUEUE);
}
330

331
// Route a message to the target vnode's query queue.
int32_t vmPutMsgToQueryQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
  return vmPutMsgToQueue(pMgmt, pMsg, QUERY_QUEUE);
}
332

333
// Route a message to the target vnode's fetch queue.
int32_t vmPutMsgToFetchQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
  return vmPutMsgToQueue(pMgmt, pMsg, FETCH_QUEUE);
}
334

335
// Route a message to the target vnode's stream-reader queue.
int32_t vmPutMsgToStreamReaderQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
  return vmPutMsgToQueue(pMgmt, pMsg, STREAM_READER_QUEUE);
}
336

337
// Enqueue a dnode-level request onto the shared vnode-multi-mgmt worker
// queue (not tied to a specific vnode).
int32_t vmPutMsgToMultiMgmtQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
  const STraceId *trace = &pMsg->info.traceId;

  dGTrace("msg:%p, put into vnode-multi-mgmt queue", pMsg);
  return taosWriteQitem(pMgmt->mgmtMultiWorker.queue, pMsg);
}
342

343
// Enqueue a dnode-level request onto the shared single-threaded vnode-mgmt
// worker queue.
int32_t vmPutMsgToMgmtQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
  const STraceId *trace = &pMsg->info.traceId;

  dGTrace("msg:%p, put into vnode-mgmt queue", pMsg);
  return taosWriteQitem(pMgmt->mgmtWorker.queue, pMsg);
}
348

349
// Wrap a raw rpc message in a queue item, move ownership of its payload into
// the new item, and hand it to vmPutMsgToQueue. On every failure path the
// payload (and the item, once allocated) is freed and pRpc->pCont is NULLed.
int32_t vmPutRpcMsgToQueue(SVnodeMgmt *pMgmt, EQueueType qtype, SRpcMsg *pRpc) {
  if (pRpc->contLen < sizeof(SMsgHead)) {
    dError("invalid rpc msg with no msg head at pCont. pRpc:%p, type:%s, len:%d", pRpc, TMSG_INFO(pRpc->msgType),
           pRpc->contLen);
    rpcFreeCont(pRpc->pCont);
    pRpc->pCont = NULL;
    return TSDB_CODE_INVALID_MSG;
  }

  EQItype  itype = (qtype == APPLY_QUEUE) ? APPLY_QITEM : RPC_QITEM;
  SRpcMsg *pMsg = NULL;
  int32_t  code = taosAllocateQitem(sizeof(SRpcMsg), itype, pRpc->contLen, (void **)&pMsg);
  if (code != 0) {
    rpcFreeCont(pRpc->pCont);
    pRpc->pCont = NULL;
    return code;
  }

  SMsgHead *pHead = pRpc->pCont;
  dTrace("vgId:%d, msg:%p, is created, type:%s len:%d", pHead->vgId, pMsg, TMSG_INFO(pRpc->msgType), pRpc->contLen);

  // Convert the head fields to network order so vmPutMsgToQueue's ntohl
  // restores them on the consumer side.
  pHead->contLen = htonl(pHead->contLen);
  pHead->vgId = htonl(pHead->vgId);
  memcpy(pMsg, pRpc, sizeof(SRpcMsg));
  pRpc->pCont = NULL;  // ownership of the payload has moved into pMsg

  code = vmPutMsgToQueue(pMgmt, pMsg, qtype);
  if (code != 0) {
    dTrace("msg:%p, is freed", pMsg);
    rpcFreeCont(pMsg->pCont);
    taosFreeQitem(pMsg);
  }

  return code;
}
385

386
// Report the number of items pending in one of the vnode's queues. Returns 0
// (after a trace log) when the vnode cannot be acquired or the queue type is
// not tracked here.
int32_t vmGetQueueSize(SVnodeMgmt *pMgmt, int32_t vgId, EQueueType qtype) {
  int32_t    size = -1;
  SVnodeObj *pVnode = vmAcquireVnode(pMgmt, vgId);

  if (pVnode != NULL) {
    switch (qtype) {
      case WRITE_QUEUE:
        size = taosQueueItemSize(pVnode->pWriteW.queue);
        break;
      case SYNC_QUEUE:
        size = taosQueueItemSize(pVnode->pSyncW.queue);
        break;
      case APPLY_QUEUE:
        size = taosQueueItemSize(pVnode->pApplyW.queue);
        break;
      case QUERY_QUEUE:
        size = taosQueueItemSize(pVnode->pQueryQ);
        break;
      case FETCH_QUEUE:
        size = taosQueueItemSize(pVnode->pFetchQ);
        break;
      case STREAM_READER_QUEUE:
        size = taosQueueItemSize(pVnode->pStreamReaderQ);
        break;
      default:
        break;
    }
    vmReleaseVnode(pMgmt, pVnode);
  }

  if (size < 0) {
    dTrace("vgId:%d, can't get size from queue since %s, qtype:%d", vgId, terrstr(), qtype);
    size = 0;
  }
  return size;
}
420

421
/**
 * Allocate all per-vnode queues: the four multi-workers (write, sync,
 * sync-rd, apply) plus the pool-backed query, stream-reader and fetch queues.
 * Returns 0 on success or an error code with everything allocated so far
 * released.
 *
 * Fix: the final NULL check previously returned TSDB_CODE_OUT_OF_MEMORY
 * without cleaning up the four already-initialized multi-workers or any
 * pool queue that did come up — a resource leak on partial failure.
 */
int32_t vmAllocQueue(SVnodeMgmt *pMgmt, SVnodeObj *pVnode) {
  int32_t         code = 0;
  SMultiWorkerCfg wcfg = {.max = 1, .name = "vnode-write", .fp = (FItems)vnodeProposeWriteMsg, .param = pVnode->pImpl};
  SMultiWorkerCfg scfg = {.max = 1, .name = "vnode-sync", .fp = (FItems)vmProcessSyncQueue, .param = pVnode};
  SMultiWorkerCfg sccfg = {.max = 1, .name = "vnode-sync-rd", .fp = (FItems)vmProcessSyncQueue, .param = pVnode};
  SMultiWorkerCfg acfg = {.max = 1, .name = "vnode-apply", .fp = (FItems)vnodeApplyWriteMsg, .param = pVnode->pImpl};

  code = tMultiWorkerInit(&pVnode->pWriteW, &wcfg);
  if (code) {
    return code;
  }
  code = tMultiWorkerInit(&pVnode->pSyncW, &scfg);
  if (code) {
    goto _err_write;
  }
  code = tMultiWorkerInit(&pVnode->pSyncRdW, &sccfg);
  if (code) {
    goto _err_sync;
  }
  code = tMultiWorkerInit(&pVnode->pApplyW, &acfg);
  if (code) {
    goto _err_sync_rd;
  }

  pVnode->pQueryQ = tQueryAutoQWorkerAllocQueue(&pMgmt->queryPool, pVnode, (FItem)vmProcessQueryQueue);
  pVnode->pStreamReaderQ = tQueryAutoQWorkerAllocQueue(&pMgmt->streamReaderPool, pVnode, (FItem)vmProcessStreamReaderQueue);
  pVnode->pFetchQ = tWWorkerAllocQueue(&pMgmt->fetchPool, pVnode, (FItems)vmProcessFetchQueue);

  if (pVnode->pWriteW.queue == NULL || pVnode->pSyncW.queue == NULL || pVnode->pSyncRdW.queue == NULL ||
      pVnode->pApplyW.queue == NULL || pVnode->pQueryQ == NULL || pVnode->pFetchQ == NULL ||
      pVnode->pStreamReaderQ == NULL) {
    // Release whichever pool queues were allocated before unwinding the workers.
    if (pVnode->pQueryQ != NULL) tQueryAutoQWorkerFreeQueue(&pMgmt->queryPool, pVnode->pQueryQ);
    if (pVnode->pStreamReaderQ != NULL) tQueryAutoQWorkerFreeQueue(&pMgmt->streamReaderPool, pVnode->pStreamReaderQ);
    if (pVnode->pFetchQ != NULL) tWWorkerFreeQueue(&pMgmt->fetchPool, pVnode->pFetchQ);
    pVnode->pQueryQ = NULL;
    pVnode->pStreamReaderQ = NULL;
    pVnode->pFetchQ = NULL;
    code = TSDB_CODE_OUT_OF_MEMORY;
    goto _err_apply;
  }

  dInfo("vgId:%d, write-queue:%p is alloced, thread:%08" PRId64, pVnode->vgId, pVnode->pWriteW.queue,
        taosQueueGetThreadId(pVnode->pWriteW.queue));
  dInfo("vgId:%d, sync-queue:%p is alloced, thread:%08" PRId64, pVnode->vgId, pVnode->pSyncW.queue,
        taosQueueGetThreadId(pVnode->pSyncW.queue));
  dInfo("vgId:%d, sync-rd-queue:%p is alloced, thread:%08" PRId64, pVnode->vgId, pVnode->pSyncRdW.queue,
        taosQueueGetThreadId(pVnode->pSyncRdW.queue));
  dInfo("vgId:%d, apply-queue:%p is alloced, thread:%08" PRId64, pVnode->vgId, pVnode->pApplyW.queue,
        taosQueueGetThreadId(pVnode->pApplyW.queue));
  dInfo("vgId:%d, query-queue:%p is alloced", pVnode->vgId, pVnode->pQueryQ);
  dInfo("vgId:%d, stream-reader-queue:%p is alloced", pVnode->vgId, pVnode->pStreamReaderQ);
  dInfo("vgId:%d, fetch-queue:%p is alloced, thread:%08" PRId64, pVnode->vgId, pVnode->pFetchQ,
        taosQueueGetThreadId(pVnode->pFetchQ));
  return 0;

  // Unwind the multi-workers in reverse order of initialization.
_err_apply:
  tMultiWorkerCleanup(&pVnode->pApplyW);
_err_sync_rd:
  tMultiWorkerCleanup(&pVnode->pSyncRdW);
_err_sync:
  tMultiWorkerCleanup(&pVnode->pSyncW);
_err_write:
  tMultiWorkerCleanup(&pVnode->pWriteW);
  return code;
}
473

474
// Return this vnode's pool-backed queues (query, stream-reader, fetch) to
// their worker pools and clear the pointers. The multi-worker queues are not
// touched here.
void vmFreeQueue(SVnodeMgmt *pMgmt, SVnodeObj *pVnode) {
  tQueryAutoQWorkerFreeQueue(&pMgmt->queryPool, pVnode->pQueryQ);
  tQueryAutoQWorkerFreeQueue(&pMgmt->streamReaderPool, pVnode->pStreamReaderQ);
  tWWorkerFreeQueue(&pMgmt->fetchPool, pVnode->pFetchQ);

  pVnode->pQueryQ = NULL;
  pVnode->pStreamReaderQ = NULL;
  pVnode->pFetchQ = NULL;
  dDebug("vgId:%d, queue is freed", pVnode->vgId);
}
484

485
// Bring up the shared vnode worker pools: the auto-scaling query and
// stream-reader pools, the fetch pool, and the two mgmt single-workers.
// Any init failure is returned immediately; pools started earlier are not
// torn down here (presumably the caller invokes vmStopWorker — confirm).
int32_t vmStartWorker(SVnodeMgmt *pMgmt) {
  int32_t code = 0;

  SQueryAutoQWorkerPool *pQPool = &pMgmt->queryPool;
  pQPool->name = "vnode-query";
  pQPool->min = tsNumOfVnodeQueryThreads;
  pQPool->max = tsNumOfVnodeQueryThreads;
  code = tQueryAutoQWorkerInit(pQPool);
  if (code != 0) return code;

  SQueryAutoQWorkerPool *pSPool = &pMgmt->streamReaderPool;
  pSPool->name = "vnode-st-reader";
  pSPool->min = tsNumOfVnodeStreamReaderThreads;
  pSPool->max = tsNumOfVnodeStreamReaderThreads;
  code = tQueryAutoQWorkerInit(pSPool);
  if (code != 0) return code;

  tsNumOfQueryThreads += tsNumOfVnodeQueryThreads;

  SWWorkerPool *pFPool = &pMgmt->fetchPool;
  pFPool->name = "vnode-fetch";
  pFPool->max = tsNumOfVnodeFetchThreads;
  code = tWWorkerInit(pFPool);
  if (code != 0) return code;

  SSingleWorkerCfg mgmtCfg = {
      .min = 1, .max = 1, .name = "vnode-mgmt", .fp = (FItem)vmProcessMgmtQueue, .param = pMgmt};
  code = tSingleWorkerInit(&pMgmt->mgmtWorker, &mgmtCfg);
  if (code != 0) return code;

  // Use at least two threads for the multi-mgmt worker even on one core.
  int32_t threadNum = (tsNumOfCores == 1) ? 2 : tsNumOfCores;
  SSingleWorkerCfg multiMgmtCfg = {.min = threadNum,
                                   .max = threadNum,
                                   .name = "vnode-multi-mgmt",
                                   .fp = (FItem)vmProcessMultiMgmtQueue,
                                   .param = pMgmt};
  code = tSingleWorkerInit(&pMgmt->mgmtMultiWorker, &multiMgmtCfg);
  if (code != 0) return code;

  dDebug("vnode workers are initialized");
  return 0;
}
528

529
// Shut down the shared query, stream-reader and fetch worker pools.
void vmStopWorker(SVnodeMgmt *pMgmt) {
  tQueryAutoQWorkerCleanup(&pMgmt->queryPool);
  tQueryAutoQWorkerCleanup(&pMgmt->streamReaderPool);
  tWWorkerCleanup(&pMgmt->fetchPool);
  dDebug("vnode workers are closed");
}
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc