• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

taosdata / TDengine / #5043

29 Apr 2026 11:44AM UTC coverage: 73.107% (-0.06%) from 73.17%
#5043

push

travis-ci

web-flow
feat(statewindow): support multi columns (#35136)

1563 of 1828 new or added lines in 18 files covered. (85.5%)

7490 existing lines in 148 files now uncovered.

277321 of 379338 relevant lines covered (73.11%)

131116908.85 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

82.07
/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c
1
/*
2
 * Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
3
 *
4
 * This program is free software: you can use, redistribute, and/or modify
5
 * it under the terms of the GNU Affero General Public License, version 3
6
 * or later ("AGPL"), as published by the Free Software Foundation.
7
 *
8
 * This program is distributed in the hope that it will be useful, but WITHOUT
9
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10
 * FITNESS FOR A PARTICULAR PURPOSE.
11
 *
12
 * You should have received a copy of the GNU Affero General Public License
13
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
14
 */
15

16
#define _DEFAULT_SOURCE
17
#include "vmInt.h"
18
#include "vnodeInt.h"
19

20
// Reply to a request message, reusing the response payload (if any) that the
// handler attached to pMsg->info. Silently skipped when there is no rpc
// handle to answer on.
static inline void vmSendRsp(SRpcMsg *pMsg, int32_t code) {
  if (pMsg->info.handle == NULL) {
    return;
  }

  SRpcMsg rsp = {0};
  rsp.code = code;
  rsp.pCont = pMsg->info.rsp;
  rsp.contLen = pMsg->info.rspLen;
  rsp.info = pMsg->info;
  tmsgSendRsp(&rsp);
}
30

31
// Worker callback for the multi-threaded vnode-mgmt queue: dispatch one
// dnode-level request (vnode creation and, when USE_MOUNT is defined, mount
// handling) to its handler, reply to the sender when it is a request, then
// free the message.
static void vmProcessMultiMgmtQueue(SQueueInfo *pInfo, SRpcMsg *pMsg) {
  SVnodeMgmt     *pMgmt = pInfo->ahandle;
  int32_t         code = -1;  // default: fail unless a handler runs
  const STraceId *trace = &pMsg->info.traceId;

  dGTrace("msg:%p, get from vnode-multi-mgmt queue", pMsg);
  switch (pMsg->msgType) {
    case TDMT_DND_CREATE_VNODE:
      code = vmProcessCreateVnodeReq(pMgmt, pMsg);
      break;
#ifdef USE_MOUNT
    case TDMT_DND_RETRIEVE_MOUNT_PATH:
      code = vmProcessRetrieveMountPathReq(pMgmt, pMsg);
      break;
    case TDMT_DND_MOUNT_VNODE:
      code = vmProcessMountVnodeReq(pMgmt, pMsg);
      break;
#endif
    default:
      // Previously an unhandled type fell through silently with code == -1
      // and whatever stale terrno happened to be set; report it explicitly,
      // matching vmProcessMgmtQueue.
      terrno = TSDB_CODE_MSG_NOT_PROCESSED;
      dGError("msg:%p, not processed in vnode-multi-mgmt queue", pMsg);
  }

  if (IsReq(pMsg)) {
    if (code != 0) {
      // prefer the thread-local error when the handler left one behind
      if (terrno != 0) code = terrno;
      dGError("msg:%p, failed to process since %s, type:%s", pMsg, tstrerror(code), TMSG_INFO(pMsg->msgType));
    }
    vmSendRsp(pMsg, code);
  }

  dGTrace("msg:%p, is freed, code:0x%x", pMsg, code);
  rpcFreeCont(pMsg->pCont);
  taosFreeQitem(pMsg);
}
63

64
// Worker callback for the serial (single-threaded) vnode-mgmt queue:
// dispatch one management request (create/drop/alter vnode, arb heartbeat,
// compact progress, ...) to its handler, reply to the sender when the
// message is a request, then free the message.
static void vmProcessMgmtQueue(SQueueInfo *pInfo, SRpcMsg *pMsg) {
  SVnodeMgmt     *pMgmt = pInfo->ahandle;
  int32_t         code = -1;  // default: fail unless a handler runs
  const STraceId *trace = &pMsg->info.traceId;

  dGTrace("msg:%p, get from vnode-mgmt queue", pMsg);
  switch (pMsg->msgType) {
    case TDMT_DND_CREATE_VNODE:
      code = vmProcessCreateVnodeReq(pMgmt, pMsg);
      break;
    case TDMT_DND_DROP_VNODE:
      code = vmProcessDropVnodeReq(pMgmt, pMsg);
      break;
    case TDMT_VND_ALTER_REPLICA:
      code = vmProcessAlterVnodeReplicaReq(pMgmt, pMsg);
      break;
    case TDMT_VND_DISABLE_WRITE:
      code = vmProcessDisableVnodeWriteReq(pMgmt, pMsg);
      break;
    case TDMT_VND_SET_KEEP_VERSION:
      code = vmProcessSetKeepVersionReq(pMgmt, pMsg);
      break;
    case TDMT_VND_ALTER_HASHRANGE:
      code = vmProcessAlterHashRangeReq(pMgmt, pMsg);
      break;
    case TDMT_DND_ALTER_VNODE_TYPE:
      code = vmProcessAlterVnodeTypeReq(pMgmt, pMsg);
      break;
    case TDMT_DND_CHECK_VNODE_LEARNER_CATCHUP:
      code = vmProcessCheckLearnCatchupReq(pMgmt, pMsg);
      break;
    case TDMT_VND_ARB_HEARTBEAT:
      code = vmProcessArbHeartBeatReq(pMgmt, pMsg);
      break;
    case TDMT_VND_ALTER_ELECTBASELINE:
      code = vmProcessAlterVnodeElectBaselineReq(pMgmt, pMsg);
      break;
    case TDMT_DND_QUERY_COMPACT_PROGRESS:
      code = vmProcessDnodeQueryCompactProgressReq(pMgmt, pMsg);
      break;
    default:
      terrno = TSDB_CODE_MSG_NOT_PROCESSED;
      dGError("msg:%p, not processed in vnode-mgmt queue", pMsg);
  }

  if (IsReq(pMsg)) {
    if (code != 0) {
      // prefer the thread-local error when the handler left one behind
      if (terrno != 0) code = terrno;
      dGError("msg:%p, failed to process since %s, type:%s", pMsg, tstrerror(code), TMSG_INFO(pMsg->msgType));
    }
    vmSendRsp(pMsg, code);
  }

  dGTrace("msg:%p, is freed, code:0x%x", pMsg, code);
  rpcFreeCont(pMsg->pCont);
  taosFreeQitem(pMsg);
}
121

122
// Worker callback for the vnode-query queue: run the query message and send
// an rpc reply only when processing fails; the message is freed either way.
static void vmProcessQueryQueue(SQueueInfo *pInfo, SRpcMsg *pMsg) {
  SVnodeObj      *pVnode = pInfo->ahandle;
  const STraceId *trace = &pMsg->info.traceId;

  dGTrace("vgId:%d, msg:%p, get from vnode-query queue", pVnode->vgId, pMsg);

  int32_t ret = vnodeProcessQueryMsg(pVnode->pImpl, pMsg, pInfo);
  if (ret != 0) {
    // prefer the thread-local error when one was left behind
    if (terrno != 0) {
      ret = terrno;
    }
    dGError("vgId:%d, msg:%p, failed to query since %s", pVnode->vgId, pMsg, tstrerror(ret));
    vmSendRsp(pMsg, ret);
  }

  dGTrace("vgId:%d, msg:%p, is freed, code:0x%x", pVnode->vgId, pMsg, ret);
  rpcFreeCont(pMsg->pCont);
  taosFreeQitem(pMsg);
}
138

139
// Worker callback for the vnode-fetch queue: drain up to numOfMsgs items,
// run each through the fetch path, reply only on failure, and free each
// message afterwards.
static void vmProcessFetchQueue(SQueueInfo *pInfo, STaosQall *qall, int32_t numOfMsgs) {
  SVnodeObj *pVnode = pInfo->ahandle;
  SRpcMsg   *pMsg = NULL;

  for (int32_t i = 0; i < numOfMsgs; ++i) {
    if (taosGetQitem(qall, (void **)&pMsg) == 0) continue;  // queue drained early
    const STraceId *trace = &pMsg->info.traceId;
    dGTrace("vgId:%d, msg:%p, get from vnode-fetch queue", pVnode->vgId, pMsg);

    terrno = 0;  // clear stale errors so a -1 result maps to a real code below
    int32_t code = vnodeProcessFetchMsg(pVnode->pImpl, pMsg, pInfo);
    if (code != 0) {
      if (code == -1 && terrno != 0) {
        code = terrno;
      }

      // a missing WAL entry is an expected condition; log it quietly
      if (code == TSDB_CODE_WAL_LOG_NOT_EXIST) {
        dGDebug("vgId:%d, msg:%p, failed to fetch since %s [vnodeProcessFetchMsg]", pVnode->vgId, pMsg, terrstr());
      } else {
        dGError("vgId:%d, msg:%p, failed to fetch since %s [vnodeProcessFetchMsg]", pVnode->vgId, pMsg, terrstr());
      }

      vmSendRsp(pMsg, code);
    }

    dGTrace("vgId:%d, msg:%p, is freed, code:0x%x [vnodeProcessFetchMsg]", pVnode->vgId, pMsg, code);
    rpcFreeCont(pMsg->pCont);
    taosFreeQitem(pMsg);
  }
}
169

170
// Worker callback for the vnode stream-reader queue: run the message through
// the stream-reader path and free it. Unlike the query/fetch handlers, no
// rpc response is sent from here on failure.
static void vmProcessStreamReaderQueue(SQueueInfo *pInfo, SRpcMsg *pMsg) {
  SVnodeObj *pVnode = pInfo->ahandle;
  const STraceId *trace = &pMsg->info.traceId;
  // fixed log text: this is the stream-reader queue, not the fetch queue
  dGTrace("vgId:%d, msg:%p, get from vnode-stream-reader queue", pVnode->vgId, pMsg);

  terrno = 0;  // clear stale errors so a -1 result maps to a real code below
  int32_t code = vnodeProcessStreamReaderMsg(pVnode->pImpl, pMsg, pInfo);
  if (code != 0) {
    if (code == -1 && terrno != 0) {
      code = terrno;
    }
    // The old "code == 0" success branch here was unreachable: we are inside
    // code != 0 and the terrno fallback above never assigns 0. Removed.
    dGError("vgId:%d, msg:%p, failed to stream reader since %s [vmProcessStreamReaderQueue]", pVnode->vgId, pMsg, terrstr());
  }

  dGTrace("vgId:%d, msg:%p, is freed, code:0x%x [vmProcessStreamReaderQueue]", pVnode->vgId, pMsg, code);
  rpcFreeCont(pMsg->pCont);
  taosFreeQitem(pMsg);
}
193

194
// Worker callback for the vnode-sync queue: drain up to numOfMsgs items and
// hand each to the sync layer. Responses are not sent from this function.
static void vmProcessSyncQueue(SQueueInfo *pInfo, STaosQall *qall, int32_t numOfMsgs) {
  SVnodeObj *pVnode = pInfo->ahandle;

  for (int32_t i = 0; i < numOfMsgs; ++i) {
    SRpcMsg *pMsg = NULL;
    if (taosGetQitem(qall, (void **)&pMsg) == 0) {
      continue;  // queue drained early
    }
    const STraceId *trace = &pMsg->info.traceId;
    dGTrace("vgId:%d, msg:%p, get from vnode-sync queue", pVnode->vgId, pMsg);

    int32_t code = vnodeProcessSyncMsg(pVnode->pImpl, pMsg, NULL);  // no response here
    dGTrace("vgId:%d, msg:%p, is freed, code:0x%x", pVnode->vgId, pMsg, code);
    rpcFreeCont(pMsg->pCont);
    taosFreeQitem(pMsg);
  }
}
209

210
// Best-effort reply carrying the current terrno as the response code;
// skipped when the message has no rpc handle to answer on.
static void vmSendResponse(SRpcMsg *pMsg) {
  if (pMsg->info.handle == NULL) {
    return;
  }
  SRpcMsg rsp = {.info = pMsg->info, .code = terrno};
  if (rpcSendResponse(&rsp) != 0) {
    dError("failed to send response since %s", terrstr());
  }
}
218

219
// Report whether the storage backing this vnode still has sufficient free
// space. Uses the vnode's tfs when present, otherwise falls back to the
// global data-directory check.
static bool vmDataSpaceSufficient(SVnodeObj *pVnode) {
  STfs *pTfs = pVnode->pImpl->pTfs;
  if (pTfs == NULL) {
    return osDataSpaceSufficient();
  }
  return tfsDiskSpaceSufficient(pTfs, 0, pVnode->diskPrimary);
}
227

228
// Acquire a reference to the vnode identified by vgId.
// Stores the object in *pNode and returns 0, or returns terrno on failure
// (with *pNode set to NULL).
static int32_t vmAcquireVnodeWrapper(SVnodeMgmt *pMgt, int32_t vgId, SVnodeObj **pNode) {
  SVnodeObj *pObj = vmAcquireVnode(pMgt, vgId);
  *pNode = pObj;
  return (pObj == NULL) ? terrno : 0;
}
235
// Resolve the target vnode from the message head and enqueue the message on
// the queue selected by qtype. Takes and releases a vnode reference; when a
// non-zero code is returned the message was NOT enqueued and the caller
// keeps ownership of pMsg.
static int32_t vmPutMsgToQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg, EQueueType qtype) {
  int32_t         code = 0;
  SMsgHead *      pHead = pMsg->pCont;
  SVnodeObj *     pVnode = NULL;
  const STraceId *trace = &pMsg->info.traceId;

  if (pMsg->contLen < sizeof(SMsgHead)) {
    dGError("invalid rpc msg with no msg head at pCont. pMsg:%p, type:%s, contLen:%d", pMsg, TMSG_INFO(pMsg->msgType),
            pMsg->contLen);
    return TSDB_CODE_INVALID_MSG;
  }

  // the head arrives in network byte order; convert in place
  pHead->contLen = ntohl(pHead->contLen);
  pHead->vgId = ntohl(pHead->vgId);

  code = vmAcquireVnodeWrapper(pMgmt, pHead->vgId, &pVnode);
  if (code != 0) {
    dGDebug("vgId:%d, msg:%p, failed to put into vnode queue since %s, type:%s qtype:%d contLen:%d", pHead->vgId, pMsg,
            tstrerror(code), TMSG_INFO(pMsg->msgType), qtype, pHead->contLen);
    return code;
  }

  switch (qtype) {
    case QUERY_QUEUE:
      // let's put into different query processing queue. The query type is extracted during preprocessing procedure,
      // mquery-queue for meta info query, and query-queue for ordinary users' queries.
      code = vnodePreprocessQueryMsg(pVnode->pImpl, pMsg);
      if (code) {
        dError("vgId:%d, msg:%p, preprocess query msg failed since %s", pVnode->vgId, pMsg, tstrerror(code));
      } else {
        dGTrace("vgId:%d, msg:%p, put into vnode-query queue, type:%s", pVnode->vgId, pMsg, TMSG_INFO(pMsg->msgType));
        code = taosWriteQitem(pVnode->pQueryQ, pMsg);
      }
      break;
    case FETCH_QUEUE:
      dGTrace("vgId:%d, msg:%p, put into vnode-fetch queue, type:%s", pVnode->vgId, pMsg, TMSG_INFO(pMsg->msgType));
      code = taosWriteQitem(pVnode->pFetchQ, pMsg);
      break;
    case WRITE_QUEUE:
      // refuse writes when the primary disk lacks free space
      if (!vmDataSpaceSufficient(pVnode)) {
        code = TSDB_CODE_NO_ENOUGH_DISKSPACE;
        dError("vgId:%d, msg:%p, failed to put into vnode-write queue since %s, type:%s", pVnode->vgId, pMsg,
               tstrerror(code), TMSG_INFO(pMsg->msgType));
        break;
      }
#if 0
      if (pMsg->msgType == TDMT_VND_SUBMIT && (grantCheck(TSDB_GRANT_STORAGE) != TSDB_CODE_SUCCESS)) {
        code = TSDB_CODE_VND_NO_WRITE_AUTH;
        dDebug("vgId:%d, msg:%p, failed to put into vnode-write queue since %s, type:%s", pVnode->vgId, pMsg,
               tstrerror(code), TMSG_INFO(pMsg->msgType));
        break;
      }
#endif
      // a disabled vnode only accepts the alter-confirm message
      if (pMsg->msgType != TDMT_VND_ALTER_CONFIRM && pVnode->disable) {
        dDebug("vgId:%d, msg:%p, failed to put into vnode-write queue since its disable, type:%s", pVnode->vgId, pMsg,
               TMSG_INFO(pMsg->msgType));
        code = TSDB_CODE_VND_STOPPED;
        break;
      }
      dGDebug("vgId:%d, msg:%p, put into vnode-write queue, type:%s", pVnode->vgId, pMsg, TMSG_INFO(pMsg->msgType));
      code = taosWriteQitem(pVnode->pWriteW.queue, pMsg);
      break;
    case SYNC_QUEUE:
      dGDebug("vgId:%d, msg:%p, put into vnode-sync queue, type:%s", pVnode->vgId, pMsg, TMSG_INFO(pMsg->msgType));
      code = taosWriteQitem(pVnode->pSyncW.queue, pMsg);
      break;
    case SYNC_RD_QUEUE:
      // heartbeat traffic is promoted to info level when tsSyncLogHeartbeat is on
      if(tsSyncLogHeartbeat){
        dGInfo("vgId:%d, msg:%p, put into vnode-sync-rd queue, type:%s", pVnode->vgId, pMsg, TMSG_INFO(pMsg->msgType));
      }
      else{
        dGDebug("vgId:%d, msg:%p, put into vnode-sync-rd queue, type:%s", pVnode->vgId, pMsg, TMSG_INFO(pMsg->msgType));
      }
      code = taosWriteQitem(pVnode->pSyncRdW.queue, pMsg);
      break;
    case APPLY_QUEUE:
      dGDebug("vgId:%d, msg:%p, put into vnode-apply queue, type:%s", pVnode->vgId, pMsg, TMSG_INFO(pMsg->msgType));
      code = taosWriteQitem(pVnode->pApplyW.queue, pMsg);
      break;
    case STREAM_READER_QUEUE:
      dGDebug("vgId:%d, msg:%p, put into vnode-stream-reader queue, type:%s", pVnode->vgId, pMsg,
              TMSG_INFO(pMsg->msgType));
      code = taosWriteQitem(pVnode->pStreamReaderQ, pMsg);
      break;
    default:
      code = TSDB_CODE_INVALID_MSG;
      break;
  }

  vmReleaseVnode(pMgmt, pVnode);
  return code;
}
327

328
// Thin wrapper: route pMsg to the owning vnode's sync-rd queue.
int32_t vmPutMsgToSyncRdQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) { return vmPutMsgToQueue(pMgmt, pMsg, SYNC_RD_QUEUE); }
329

330
// Thin wrapper: route pMsg to the owning vnode's sync queue.
int32_t vmPutMsgToSyncQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) { return vmPutMsgToQueue(pMgmt, pMsg, SYNC_QUEUE); }
331

332
// Thin wrapper: route pMsg to the owning vnode's write queue.
int32_t vmPutMsgToWriteQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) { return vmPutMsgToQueue(pMgmt, pMsg, WRITE_QUEUE); }
333

334
// Thin wrapper: route pMsg to the owning vnode's query queue.
int32_t vmPutMsgToQueryQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) { return vmPutMsgToQueue(pMgmt, pMsg, QUERY_QUEUE); }
335

336
// Thin wrapper: route pMsg to the owning vnode's fetch queue.
int32_t vmPutMsgToFetchQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) { return vmPutMsgToQueue(pMgmt, pMsg, FETCH_QUEUE); }
337

338
// Thin wrapper: route pMsg to the owning vnode's stream-reader queue.
int32_t vmPutMsgToStreamReaderQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) { return vmPutMsgToQueue(pMgmt, pMsg, STREAM_READER_QUEUE); }
339

340
// Enqueue a dnode-level management message onto the multi-threaded
// mgmt worker (handled by vmProcessMultiMgmtQueue).
int32_t vmPutMsgToMultiMgmtQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
  const STraceId *trace = &pMsg->info.traceId;
  dGTrace("msg:%p, put into vnode-multi-mgmt queue", pMsg);
  return taosWriteQitem(pMgmt->mgmtMultiWorker.queue, pMsg);
}
345

346
// Enqueue a management message onto the serial mgmt worker
// (handled by vmProcessMgmtQueue).
int32_t vmPutMsgToMgmtQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
  const STraceId *trace = &pMsg->info.traceId;
  dGTrace("msg:%p, put into vnode-mgmt queue", pMsg);
  return taosWriteQitem(pMgmt->mgmtWorker.queue, pMsg);
}
351

352
// Copy an incoming rpc message into a freshly allocated queue item and route
// it via vmPutMsgToQueue. Ownership of pRpc->pCont always leaves the caller:
// on success it moves into the queued item (pRpc->pCont is NULLed), and on
// every failure path it is freed here.
int32_t vmPutRpcMsgToQueue(SVnodeMgmt *pMgmt, EQueueType qtype, SRpcMsg *pRpc) {
  int32_t code;
  if (pRpc->contLen < sizeof(SMsgHead)) {
    dError("invalid rpc msg with no msg head at pCont. pRpc:%p, type:%s, len:%d", pRpc, TMSG_INFO(pRpc->msgType),
           pRpc->contLen);
    rpcFreeCont(pRpc->pCont);
    pRpc->pCont = NULL;
    return TSDB_CODE_INVALID_MSG;
  }

  // apply-queue items use a dedicated quota class
  EQItype  itype = APPLY_QUEUE == qtype ? APPLY_QITEM : RPC_QITEM;
  SRpcMsg *pMsg;
  code = taosAllocateQitem(sizeof(SRpcMsg), itype, pRpc->contLen, (void **)&pMsg);
  if (code) {
    rpcFreeCont(pRpc->pCont);
    pRpc->pCont = NULL;
    return code;
  }

  SMsgHead *pHead = pRpc->pCont;
  dTrace("vgId:%d, msg:%p, is created, type:%s len:%d", pHead->vgId, pMsg, TMSG_INFO(pRpc->msgType), pRpc->contLen);

  // put the head back into network byte order; vmPutMsgToQueue ntohl()s it
  pHead->contLen = htonl(pHead->contLen);
  pHead->vgId = htonl(pHead->vgId);
  memcpy(pMsg, pRpc, sizeof(SRpcMsg));
  pRpc->pCont = NULL;  // ownership of the cont transferred to pMsg

  code = vmPutMsgToQueue(pMgmt, pMsg, qtype);
  if (code != 0) {
    // not enqueued: release the copied item and its cont here
    dTrace("msg:%p, is freed", pMsg);
    rpcFreeCont(pMsg->pCont);
    taosFreeQitem(pMsg);
  }

  return code;
}
388

389
// Return the number of pending items on the requested queue of vnode vgId,
// or 0 when the vnode cannot be acquired or the queue type is unknown.
int32_t vmGetQueueSize(SVnodeMgmt *pMgmt, int32_t vgId, EQueueType qtype) {
  int32_t    count = -1;  // -1 marks "could not resolve"
  SVnodeObj *pVnode = vmAcquireVnode(pMgmt, vgId);

  if (pVnode != NULL) {
    switch (qtype) {
      case WRITE_QUEUE:
        count = taosQueueItemSize(pVnode->pWriteW.queue);
        break;
      case SYNC_QUEUE:
        count = taosQueueItemSize(pVnode->pSyncW.queue);
        break;
      case APPLY_QUEUE:
        count = taosQueueItemSize(pVnode->pApplyW.queue);
        break;
      case QUERY_QUEUE:
        count = taosQueueItemSize(pVnode->pQueryQ);
        break;
      case FETCH_QUEUE:
        count = taosQueueItemSize(pVnode->pFetchQ);
        break;
      case STREAM_READER_QUEUE:
        count = taosQueueItemSize(pVnode->pStreamReaderQ);
        break;
      default:
        break;
    }
    vmReleaseVnode(pMgmt, pVnode);
  }

  if (count < 0) {
    dTrace("vgId:%d, can't get size from queue since %s, qtype:%d", vgId, terrstr(), qtype);
    count = 0;
  }
  return count;
}
423

424
// Create the per-vnode workers and queues: the write/sync/sync-rd/apply
// multi-workers plus the query, stream-reader and fetch queues drawn from
// the shared pools. Returns 0, or an error code after unwinding the
// multi-workers created so far when a later tMultiWorkerInit fails.
int32_t vmAllocQueue(SVnodeMgmt *pMgmt, SVnodeObj *pVnode) {
  int32_t         code = 0;
  SMultiWorkerCfg wcfg = {.max = 1, .name = "vnode-write", .fp = (FItems)vnodeProposeWriteMsg, .param = pVnode->pImpl};
  SMultiWorkerCfg scfg = {.max = 1, .name = "vnode-sync", .fp = (FItems)vmProcessSyncQueue, .param = pVnode};
  SMultiWorkerCfg sccfg = {.max = 1, .name = "vnode-sync-rd", .fp = (FItems)vmProcessSyncQueue, .param = pVnode};
  SMultiWorkerCfg acfg = {.max = 1, .name = "vnode-apply", .fp = (FItems)vnodeApplyWriteMsg, .param = pVnode->pImpl};
  code = tMultiWorkerInit(&pVnode->pWriteW, &wcfg);
  if (code) {
    return code;
  }
  code = tMultiWorkerInit(&pVnode->pSyncW, &scfg);
  if (code) {
    tMultiWorkerCleanup(&pVnode->pWriteW);
    return code;
  }
  code = tMultiWorkerInit(&pVnode->pSyncRdW, &sccfg);
  if (code) {
    tMultiWorkerCleanup(&pVnode->pWriteW);
    tMultiWorkerCleanup(&pVnode->pSyncW);
    return code;
  }
  code = tMultiWorkerInit(&pVnode->pApplyW, &acfg);
  if (code) {
    tMultiWorkerCleanup(&pVnode->pWriteW);
    tMultiWorkerCleanup(&pVnode->pSyncW);
    tMultiWorkerCleanup(&pVnode->pSyncRdW);
    return code;
  }

  pVnode->pQueryQ = tQueryAutoQWorkerAllocQueue(&pMgmt->queryPool, pVnode, (FItem)vmProcessQueryQueue);
  pVnode->pStreamReaderQ = tQueryAutoQWorkerAllocQueue(&pMgmt->streamReaderPool, pVnode, (FItem)vmProcessStreamReaderQueue);
  pVnode->pFetchQ = tWWorkerAllocQueue(&pMgmt->fetchPool, pVnode, (FItems)vmProcessFetchQueue);

  // NOTE(review): on this OOM path the four multi-workers initialized above
  // are NOT cleaned up here — verify the caller releases them on failure.
  if (pVnode->pWriteW.queue == NULL || pVnode->pSyncW.queue == NULL || pVnode->pSyncRdW.queue == NULL ||
      pVnode->pApplyW.queue == NULL || pVnode->pQueryQ == NULL || pVnode->pFetchQ == NULL || !pVnode->pStreamReaderQ) {
    return TSDB_CODE_OUT_OF_MEMORY;
  }

  dInfo("vgId:%d, write-queue:%p is alloced, thread:%08" PRId64, pVnode->vgId, pVnode->pWriteW.queue,
        taosQueueGetThreadId(pVnode->pWriteW.queue));
  dInfo("vgId:%d, sync-queue:%p is alloced, thread:%08" PRId64, pVnode->vgId, pVnode->pSyncW.queue,
        taosQueueGetThreadId(pVnode->pSyncW.queue));
  dInfo("vgId:%d, sync-rd-queue:%p is alloced, thread:%08" PRId64, pVnode->vgId, pVnode->pSyncRdW.queue,
        taosQueueGetThreadId(pVnode->pSyncRdW.queue));
  dInfo("vgId:%d, apply-queue:%p is alloced, thread:%08" PRId64, pVnode->vgId, pVnode->pApplyW.queue,
        taosQueueGetThreadId(pVnode->pApplyW.queue));
  dInfo("vgId:%d, query-queue:%p is alloced", pVnode->vgId, pVnode->pQueryQ);
  dInfo("vgId:%d, stream-reader-queue:%p is alloced", pVnode->vgId, pVnode->pStreamReaderQ);
  dInfo("vgId:%d, fetch-queue:%p is alloced, thread:%08" PRId64, pVnode->vgId, pVnode->pFetchQ,
        taosQueueGetThreadId(pVnode->pFetchQ));
  return 0;
}
476

477
// Release this vnode's query, stream-reader and fetch queues back to their
// shared pools and clear the now-dangling pointers.
// NOTE(review): the write/sync/sync-rd/apply multi-worker queues are not
// freed here — presumably handled elsewhere; verify against callers.
void vmFreeQueue(SVnodeMgmt *pMgmt, SVnodeObj *pVnode) {
  tQueryAutoQWorkerFreeQueue(&pMgmt->queryPool, pVnode->pQueryQ);
  tQueryAutoQWorkerFreeQueue(&pMgmt->streamReaderPool, pVnode->pStreamReaderQ);
  tWWorkerFreeQueue(&pMgmt->fetchPool, pVnode->pFetchQ);

  pVnode->pQueryQ = NULL;
  pVnode->pStreamReaderQ = NULL;
  pVnode->pFetchQ = NULL;
  dDebug("vgId:%d, queue is freed", pVnode->vgId);
}
487

488
// Initialize the worker pools shared by all vnodes: the query, stream-reader
// and fetch pools, the single-threaded mgmt worker, and the multi-threaded
// mgmt worker (one thread per core, minimum two). Returns 0 or an error code
// from the first pool init that fails.
int32_t vmStartWorker(SVnodeMgmt *pMgmt) {
  int32_t code = 0;

  SQueryAutoQWorkerPool *pQPool = &pMgmt->queryPool;
  pQPool->name = "vnode-query";
  pQPool->min = tsNumOfVnodeQueryThreads;
  pQPool->max = tsNumOfVnodeQueryThreads;
  if ((code = tQueryAutoQWorkerInit(pQPool)) != 0) return code;

  SQueryAutoQWorkerPool *pSPool = &pMgmt->streamReaderPool;
  pSPool->name = "vnode-st-reader";
  pSPool->min = tsNumOfVnodeStreamReaderThreads;
  pSPool->max = tsNumOfVnodeStreamReaderThreads;
  if ((code = tQueryAutoQWorkerInit(pSPool)) != 0) return code;

  // keep the global query-thread tally in sync with the pool created above
  tsNumOfQueryThreads += tsNumOfVnodeQueryThreads;

  SWWorkerPool *pFPool = &pMgmt->fetchPool;
  pFPool->name = "vnode-fetch";
  pFPool->max = tsNumOfVnodeFetchThreads;
  if ((code = tWWorkerInit(pFPool)) != 0) return code;

  SSingleWorkerCfg mgmtCfg = {
      .min = 1, .max = 1, .name = "vnode-mgmt", .fp = (FItem)vmProcessMgmtQueue, .param = pMgmt};

  if ((code = tSingleWorkerInit(&pMgmt->mgmtWorker, &mgmtCfg)) != 0) return code;

  // size the multi-mgmt worker by core count, but never below two threads
  int32_t threadNum = 0;
  if (tsNumOfCores == 1) {
    threadNum = 2;
  } else {
    threadNum = tsNumOfCores;
  }
  SSingleWorkerCfg multiMgmtCfg = {.min = threadNum,
                                   .max = threadNum,
                                   .name = "vnode-multi-mgmt",
                                   .fp = (FItem)vmProcessMultiMgmtQueue,
                                   .param = pMgmt};

  if ((code = tSingleWorkerInit(&pMgmt->mgmtMultiWorker, &multiMgmtCfg)) != 0) return code;
  dDebug("vnode workers are initialized");
  return 0;
}
531

532
// Tear down the shared worker pools created by vmStartWorker.
// NOTE(review): the mgmt/multi-mgmt single workers are not cleaned up here —
// presumably handled elsewhere; verify against callers.
void vmStopWorker(SVnodeMgmt *pMgmt) {
  tQueryAutoQWorkerCleanup(&pMgmt->queryPool);
  tWWorkerCleanup(&pMgmt->fetchPool);
  tQueryAutoQWorkerCleanup(&pMgmt->streamReaderPool);
  dDebug("vnode workers are closed");
}
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc