taosdata / TDengine, build #4847 (push, travis-ci, web-flow)
Merge e78cd6509 into 47a2ea7a0
11 Nov 2025 05:50AM UTC, coverage: 62.651% (+0.3%) from 62.306%

542 of 650 new or added lines in 16 files covered (83.38%)
1515 existing lines in 91 files now uncovered
113826 of 181682 relevant lines covered (62.65%)
113230552.12 hits per line

Source File

/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c (82.04% of lines covered)
/*
 * Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
 *
 * This program is free software: you can use, redistribute, and/or modify
 * it under the terms of the GNU Affero General Public License, version 3
 * or later ("AGPL"), as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.
 *
 * You should have received a copy of the GNU Affero General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#define _DEFAULT_SOURCE
#include "vmInt.h"
#include "vnodeInt.h"

static inline void vmSendRsp(SRpcMsg *pMsg, int32_t code) {
  if (pMsg->info.handle == NULL) return;
  SRpcMsg rsp = {
      .code = code,
      .pCont = pMsg->info.rsp,
      .contLen = pMsg->info.rspLen,
      .info = pMsg->info,
  };
  tmsgSendRsp(&rsp);
}

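// Worker callback for the vnode-multi-mgmt queue: handles vnode-create requests (and,
// when USE_MOUNT is defined, mount-path retrieval and vnode-mount requests), replies to
// the sender if the message is a request, then frees the message.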
static void vmProcessMultiMgmtQueue(SQueueInfo *pInfo, SRpcMsg *pMsg) {
  SVnodeMgmt     *pMgmt = pInfo->ahandle;
  int32_t         code = -1;
  const STraceId *trace = &pMsg->info.traceId;

  dGTrace("msg:%p, get from vnode-multi-mgmt queue", pMsg);
  switch (pMsg->msgType) {
    case TDMT_DND_CREATE_VNODE:
      code = vmProcessCreateVnodeReq(pMgmt, pMsg);
      break;
#ifdef USE_MOUNT
    case TDMT_DND_RETRIEVE_MOUNT_PATH:
      code = vmProcessRetrieveMountPathReq(pMgmt, pMsg);
      break;
    case TDMT_DND_MOUNT_VNODE:
      code = vmProcessMountVnodeReq(pMgmt, pMsg);
      break;
#endif
  }

  if (IsReq(pMsg)) {
    if (code != 0) {
      if (terrno != 0) code = terrno;
      dGError("msg:%p, failed to process since %s, type:%s", pMsg, tstrerror(code), TMSG_INFO(pMsg->msgType));
    }
    vmSendRsp(pMsg, code);
  }

  dGTrace("msg:%p, is freed, code:0x%x", pMsg, code);
  rpcFreeCont(pMsg->pCont);
  taosFreeQitem(pMsg);
}

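// Worker callback for the single-threaded vnode-mgmt queue: dispatches vnode management
// requests (create, drop, alter-replica, disable-write, alter-hashrange, alter-type,
// learner-catchup check, arbitration heartbeat, elect-baseline) to their handlers,
// replies when the message is a request, then frees the message.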
static void vmProcessMgmtQueue(SQueueInfo *pInfo, SRpcMsg *pMsg) {
  SVnodeMgmt     *pMgmt = pInfo->ahandle;
  int32_t         code = -1;
  const STraceId *trace = &pMsg->info.traceId;

  dGTrace("msg:%p, get from vnode-mgmt queue", pMsg);
  switch (pMsg->msgType) {
    case TDMT_DND_CREATE_VNODE:
      code = vmProcessCreateVnodeReq(pMgmt, pMsg);
      break;
    case TDMT_DND_DROP_VNODE:
      code = vmProcessDropVnodeReq(pMgmt, pMsg);
      break;
    case TDMT_VND_ALTER_REPLICA:
      code = vmProcessAlterVnodeReplicaReq(pMgmt, pMsg);
      break;
    case TDMT_VND_DISABLE_WRITE:
      code = vmProcessDisableVnodeWriteReq(pMgmt, pMsg);
      break;
    case TDMT_VND_ALTER_HASHRANGE:
      code = vmProcessAlterHashRangeReq(pMgmt, pMsg);
      break;
    case TDMT_DND_ALTER_VNODE_TYPE:
      code = vmProcessAlterVnodeTypeReq(pMgmt, pMsg);
      break;
    case TDMT_DND_CHECK_VNODE_LEARNER_CATCHUP:
      code = vmProcessCheckLearnCatchupReq(pMgmt, pMsg);
      break;
    case TDMT_VND_ARB_HEARTBEAT:
      code = vmProcessArbHeartBeatReq(pMgmt, pMsg);
      break;
    case TDMT_VND_ALTER_ELECTBASELINE:
      code = vmProcessAlterVnodeElectBaselineReq(pMgmt, pMsg);
      break;
    default:
      terrno = TSDB_CODE_MSG_NOT_PROCESSED;
      dGError("msg:%p, not processed in vnode-mgmt queue", pMsg);
  }

  if (IsReq(pMsg)) {
    if (code != 0) {
      if (terrno != 0) code = terrno;
      dGError("msg:%p, failed to process since %s, type:%s", pMsg, tstrerror(code), TMSG_INFO(pMsg->msgType));
    }
    vmSendRsp(pMsg, code);
  }

  dGTrace("msg:%p, is freed, code:0x%x", pMsg, code);
  rpcFreeCont(pMsg->pCont);
  taosFreeQitem(pMsg);
}

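// Worker callback for a vnode's query queue: runs the message through
// vnodeProcessQueryMsg and sends a response from here only when processing fails.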
static void vmProcessQueryQueue(SQueueInfo *pInfo, SRpcMsg *pMsg) {
  SVnodeObj      *pVnode = pInfo->ahandle;
  const STraceId *trace = &pMsg->info.traceId;

  dGTrace("vgId:%d, msg:%p, get from vnode-query queue", pVnode->vgId, pMsg);
  int32_t code = vnodeProcessQueryMsg(pVnode->pImpl, pMsg, pInfo);
  if (code != 0) {
    if (terrno != 0) code = terrno;
    dGError("vgId:%d, msg:%p, failed to query since %s", pVnode->vgId, pMsg, tstrerror(code));
    vmSendRsp(pMsg, code);
  }

  dGTrace("vgId:%d, msg:%p, is freed, code:0x%x", pVnode->vgId, pMsg, code);
  rpcFreeCont(pMsg->pCont);
  taosFreeQitem(pMsg);
}

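// Worker callback for a vnode's fetch queue: drains up to numOfMsgs items, processes
// each with vnodeProcessFetchMsg, logs WAL-log-not-exist failures at debug level and
// other failures as errors, replies only on failure, then frees each message.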
static void vmProcessFetchQueue(SQueueInfo *pInfo, STaosQall *qall, int32_t numOfMsgs) {
  SVnodeObj *pVnode = pInfo->ahandle;
  SRpcMsg   *pMsg = NULL;

  for (int32_t i = 0; i < numOfMsgs; ++i) {
    if (taosGetQitem(qall, (void **)&pMsg) == 0) continue;
    const STraceId *trace = &pMsg->info.traceId;
    dGTrace("vgId:%d, msg:%p, get from vnode-fetch queue", pVnode->vgId, pMsg);

    terrno = 0;
    int32_t code = vnodeProcessFetchMsg(pVnode->pImpl, pMsg, pInfo);
    if (code != 0) {
      if (code == -1 && terrno != 0) {
        code = terrno;
      }

      if (code == TSDB_CODE_WAL_LOG_NOT_EXIST) {
        dGDebug("vgId:%d, msg:%p, failed to fetch since %s [vnodeProcessFetchMsg]", pVnode->vgId, pMsg, terrstr());
      } else {
        dGError("vgId:%d, msg:%p, failed to fetch since %s [vnodeProcessFetchMsg]", pVnode->vgId, pMsg, terrstr());
      }

      vmSendRsp(pMsg, code);
    }

    dGTrace("vgId:%d, msg:%p, is freed, code:0x%x [vnodeProcessFetchMsg]", pVnode->vgId, pMsg, code);
    rpcFreeCont(pMsg->pCont);
    taosFreeQitem(pMsg);
  }
}

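// Worker callback for a vnode's stream-reader queue: forwards the message to
// vnodeProcessStreamReaderMsg and logs failures; no response is sent from here.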
static void vmProcessStreamReaderQueue(SQueueInfo *pInfo, SRpcMsg *pMsg) {
  SVnodeObj *pVnode = pInfo->ahandle;
  const STraceId *trace = &pMsg->info.traceId;
  dGTrace("vgId:%d, msg:%p, get from vnode-stream-reader queue", pVnode->vgId, pMsg);

  terrno = 0;
  int32_t code = vnodeProcessStreamReaderMsg(pVnode->pImpl, pMsg);
  if (code != 0) {
    if (code == -1 && terrno != 0) {
      code = terrno;
    }
    dGError("vgId:%d, msg:%p, failed to stream reader since %s [vmProcessStreamReaderQueue]", pVnode->vgId, pMsg, terrstr());
  }

  dGTrace("vgId:%d, msg:%p, is freed, code:0x%x [vmProcessStreamReaderQueue]", pVnode->vgId, pMsg, code);
  rpcFreeCont(pMsg->pCont);
  taosFreeQitem(pMsg);
}

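// Worker callback shared by the vnode-sync and vnode-sync-rd queues: drains the batch
// and hands each message to vnodeProcessSyncMsg without sending a response from here.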
static void vmProcessSyncQueue(SQueueInfo *pInfo, STaosQall *qall, int32_t numOfMsgs) {
  SVnodeObj *pVnode = pInfo->ahandle;
  SRpcMsg   *pMsg = NULL;

  for (int32_t i = 0; i < numOfMsgs; ++i) {
    if (taosGetQitem(qall, (void **)&pMsg) == 0) continue;
    const STraceId *trace = &pMsg->info.traceId;
    dGTrace("vgId:%d, msg:%p, get from vnode-sync queue", pVnode->vgId, pMsg);

    int32_t code = vnodeProcessSyncMsg(pVnode->pImpl, pMsg, NULL);  // no response here
    dGTrace("vgId:%d, msg:%p, is freed, code:0x%x", pVnode->vgId, pMsg, code);
    rpcFreeCont(pMsg->pCont);
    taosFreeQitem(pMsg);
  }
}

static void vmSendResponse(SRpcMsg *pMsg) {
  if (pMsg->info.handle) {
    SRpcMsg rsp = {.info = pMsg->info, .code = terrno};
    if (rpcSendResponse(&rsp) != 0) {
      dError("failed to send response since %s", terrstr());
    }
  }
}

static bool vmDataSpaceSufficient(SVnodeObj *pVnode) {
  STfs *pTfs = pVnode->pImpl->pTfs;
  if (pTfs) {
    return tfsDiskSpaceSufficient(pTfs, 0, pVnode->diskPrimary);
  } else {
    return osDataSpaceSufficient();
  }
}

static int32_t vmAcquireVnodeWrapper(SVnodeMgmt *pMgt, int32_t vgId, SVnodeObj **pNode) {
  *pNode = vmAcquireVnode(pMgt, vgId);
  if (*pNode == NULL) {
    return terrno;
  }
  return 0;
}

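// Routes an already-allocated queue item to the target queue of the vnode named by the
// message head (decoded from network byte order). The vnode is acquired for the duration
// of the enqueue, and per-queue checks apply: query messages are preprocessed first, and
// write messages are rejected when disk space is insufficient or the vnode is disabled.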
static int32_t vmPutMsgToQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg, EQueueType qtype) {
  int32_t         code = 0;
  SMsgHead       *pHead = pMsg->pCont;
  SVnodeObj      *pVnode = NULL;
  const STraceId *trace = &pMsg->info.traceId;

  if (pMsg->contLen < sizeof(SMsgHead)) {
    dGError("invalid rpc msg with no msg head at pCont. pMsg:%p, type:%s, contLen:%d", pMsg, TMSG_INFO(pMsg->msgType),
            pMsg->contLen);
    return TSDB_CODE_INVALID_MSG;
  }

  pHead->contLen = ntohl(pHead->contLen);
  pHead->vgId = ntohl(pHead->vgId);

  code = vmAcquireVnodeWrapper(pMgmt, pHead->vgId, &pVnode);
  if (code != 0) {
    dGDebug("vgId:%d, msg:%p, failed to put into vnode queue since %s, type:%s qtype:%d contLen:%d", pHead->vgId, pMsg,
            tstrerror(code), TMSG_INFO(pMsg->msgType), qtype, pHead->contLen);
    return code;
  }

  switch (qtype) {
    case QUERY_QUEUE:
      // let's put into different query processing queue. The query type is extracted during preprocessing procedure,
      // mquery-queue for meta info query, and query-queue for ordinary users' queries.
      code = vnodePreprocessQueryMsg(pVnode->pImpl, pMsg);
      if (code) {
        dError("vgId:%d, msg:%p, preprocess query msg failed since %s", pVnode->vgId, pMsg, tstrerror(code));
      } else {
        dGTrace("vgId:%d, msg:%p, put into vnode-query queue, type:%s", pVnode->vgId, pMsg, TMSG_INFO(pMsg->msgType));
        code = taosWriteQitem(pVnode->pQueryQ, pMsg);
      }
      break;
    case FETCH_QUEUE:
      dGTrace("vgId:%d, msg:%p, put into vnode-fetch queue, type:%s", pVnode->vgId, pMsg, TMSG_INFO(pMsg->msgType));
      code = taosWriteQitem(pVnode->pFetchQ, pMsg);
      break;
    case WRITE_QUEUE:
      if (!vmDataSpaceSufficient(pVnode)) {
        code = TSDB_CODE_NO_ENOUGH_DISKSPACE;
        dError("vgId:%d, msg:%p, failed to put into vnode-write queue since %s, type:%s", pVnode->vgId, pMsg,
               tstrerror(code), TMSG_INFO(pMsg->msgType));
        break;
      }
#if 0
      if (pMsg->msgType == TDMT_VND_SUBMIT && (grantCheck(TSDB_GRANT_STORAGE) != TSDB_CODE_SUCCESS)) {
        code = TSDB_CODE_VND_NO_WRITE_AUTH;
        dDebug("vgId:%d, msg:%p, failed to put into vnode-write queue since %s, type:%s", pVnode->vgId, pMsg,
               tstrerror(code), TMSG_INFO(pMsg->msgType));
        break;
      }
#endif
      if (pMsg->msgType != TDMT_VND_ALTER_CONFIRM && pVnode->disable) {
        dDebug("vgId:%d, msg:%p, failed to put into vnode-write queue since its disable, type:%s", pVnode->vgId, pMsg,
               TMSG_INFO(pMsg->msgType));
        code = TSDB_CODE_VND_STOPPED;
        break;
      }
      dGDebug("vgId:%d, msg:%p, put into vnode-write queue, type:%s", pVnode->vgId, pMsg, TMSG_INFO(pMsg->msgType));
      code = taosWriteQitem(pVnode->pWriteW.queue, pMsg);
      break;
    case SYNC_QUEUE:
      dGDebug("vgId:%d, msg:%p, put into vnode-sync queue, type:%s", pVnode->vgId, pMsg, TMSG_INFO(pMsg->msgType));
      code = taosWriteQitem(pVnode->pSyncW.queue, pMsg);
      break;
    case SYNC_RD_QUEUE:
      if (tsSyncLogHeartbeat) {
        dGInfo("vgId:%d, msg:%p, put into vnode-sync-rd queue, type:%s", pVnode->vgId, pMsg, TMSG_INFO(pMsg->msgType));
      } else {
        dGDebug("vgId:%d, msg:%p, put into vnode-sync-rd queue, type:%s", pVnode->vgId, pMsg, TMSG_INFO(pMsg->msgType));
      }
      code = taosWriteQitem(pVnode->pSyncRdW.queue, pMsg);
      break;
    case APPLY_QUEUE:
      dGDebug("vgId:%d, msg:%p, put into vnode-apply queue, type:%s", pVnode->vgId, pMsg, TMSG_INFO(pMsg->msgType));
      code = taosWriteQitem(pVnode->pApplyW.queue, pMsg);
      break;
    case STREAM_READER_QUEUE:
      dGDebug("vgId:%d, msg:%p, put into vnode-stream-reader queue, type:%s", pVnode->vgId, pMsg,
              TMSG_INFO(pMsg->msgType));
      code = taosWriteQitem(pVnode->pStreamReaderQ, pMsg);
      break;
    default:
      code = TSDB_CODE_INVALID_MSG;
      break;
  }

  vmReleaseVnode(pMgmt, pVnode);
  return code;
}

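// Thin public wrappers that bind vmPutMsgToQueue to a specific queue type, plus direct
// enqueue helpers for the vnode-mgmt and vnode-multi-mgmt queues.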
int32_t vmPutMsgToSyncRdQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) { return vmPutMsgToQueue(pMgmt, pMsg, SYNC_RD_QUEUE); }

int32_t vmPutMsgToSyncQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) { return vmPutMsgToQueue(pMgmt, pMsg, SYNC_QUEUE); }

int32_t vmPutMsgToWriteQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) { return vmPutMsgToQueue(pMgmt, pMsg, WRITE_QUEUE); }

int32_t vmPutMsgToQueryQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) { return vmPutMsgToQueue(pMgmt, pMsg, QUERY_QUEUE); }

int32_t vmPutMsgToFetchQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) { return vmPutMsgToQueue(pMgmt, pMsg, FETCH_QUEUE); }

int32_t vmPutMsgToStreamReaderQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) { return vmPutMsgToQueue(pMgmt, pMsg, STREAM_READER_QUEUE); }

int32_t vmPutMsgToMultiMgmtQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
  const STraceId *trace = &pMsg->info.traceId;
  dGTrace("msg:%p, put into vnode-multi-mgmt queue", pMsg);
  return taosWriteQitem(pMgmt->mgmtMultiWorker.queue, pMsg);
}

int32_t vmPutMsgToMgmtQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
  const STraceId *trace = &pMsg->info.traceId;
  dGTrace("msg:%p, put into vnode-mgmt queue", pMsg);
  return taosWriteQitem(pMgmt->mgmtWorker.queue, pMsg);
}

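// Wraps a raw RPC message into a queue item (APPLY_QITEM for the apply queue, RPC_QITEM
// otherwise), converts the head fields with htonl before handing the item to
// vmPutMsgToQueue (which applies ntohl), takes ownership of the rpc payload, and frees
// the item again if enqueueing fails.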
int32_t vmPutRpcMsgToQueue(SVnodeMgmt *pMgmt, EQueueType qtype, SRpcMsg *pRpc) {
  int32_t code;
  if (pRpc->contLen < sizeof(SMsgHead)) {
    dError("invalid rpc msg with no msg head at pCont. pRpc:%p, type:%s, len:%d", pRpc, TMSG_INFO(pRpc->msgType),
           pRpc->contLen);
    rpcFreeCont(pRpc->pCont);
    pRpc->pCont = NULL;
    return TSDB_CODE_INVALID_MSG;
  }

  EQItype  itype = APPLY_QUEUE == qtype ? APPLY_QITEM : RPC_QITEM;
  SRpcMsg *pMsg;
  code = taosAllocateQitem(sizeof(SRpcMsg), itype, pRpc->contLen, (void **)&pMsg);
  if (code) {
    rpcFreeCont(pRpc->pCont);
    pRpc->pCont = NULL;
    return code;
  }

  SMsgHead *pHead = pRpc->pCont;
  dTrace("vgId:%d, msg:%p, is created, type:%s len:%d", pHead->vgId, pMsg, TMSG_INFO(pRpc->msgType), pRpc->contLen);

  pHead->contLen = htonl(pHead->contLen);
  pHead->vgId = htonl(pHead->vgId);
  memcpy(pMsg, pRpc, sizeof(SRpcMsg));
  pRpc->pCont = NULL;

  code = vmPutMsgToQueue(pMgmt, pMsg, qtype);
  if (code != 0) {
    dTrace("msg:%p, is freed", pMsg);
    rpcFreeCont(pMsg->pCont);
    taosFreeQitem(pMsg);
  }

  return code;
}

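// Returns the current number of items in one of a vnode's queues, or 0 when the vnode
// cannot be acquired or the queue type is not tracked here.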
int32_t vmGetQueueSize(SVnodeMgmt *pMgmt, int32_t vgId, EQueueType qtype) {
  int32_t    size = -1;
  SVnodeObj *pVnode = vmAcquireVnode(pMgmt, vgId);
  if (pVnode != NULL) {
    switch (qtype) {
      case WRITE_QUEUE:
        size = taosQueueItemSize(pVnode->pWriteW.queue);
        break;
      case SYNC_QUEUE:
        size = taosQueueItemSize(pVnode->pSyncW.queue);
        break;
      case APPLY_QUEUE:
        size = taosQueueItemSize(pVnode->pApplyW.queue);
        break;
      case QUERY_QUEUE:
        size = taosQueueItemSize(pVnode->pQueryQ);
        break;
      case FETCH_QUEUE:
        size = taosQueueItemSize(pVnode->pFetchQ);
        break;
      case STREAM_READER_QUEUE:
        size = taosQueueItemSize(pVnode->pStreamReaderQ);
        break;
      default:
        break;
    }
  }
  if (pVnode) vmReleaseVnode(pMgmt, pVnode);
  if (size < 0) {
    dTrace("vgId:%d, can't get size from queue since %s, qtype:%d", vgId, terrstr(), qtype);
    size = 0;
  }
  return size;
}

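// Creates the per-vnode workers and queues: dedicated single-thread writers for the
// write, sync, sync-rd and apply pipelines (cleaning up already-created ones on
// failure), plus query, stream-reader and fetch queues allocated from the shared pools.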
int32_t vmAllocQueue(SVnodeMgmt *pMgmt, SVnodeObj *pVnode) {
  int32_t         code = 0;
  SMultiWorkerCfg wcfg = {.max = 1, .name = "vnode-write", .fp = (FItems)vnodeProposeWriteMsg, .param = pVnode->pImpl};
  SMultiWorkerCfg scfg = {.max = 1, .name = "vnode-sync", .fp = (FItems)vmProcessSyncQueue, .param = pVnode};
  SMultiWorkerCfg sccfg = {.max = 1, .name = "vnode-sync-rd", .fp = (FItems)vmProcessSyncQueue, .param = pVnode};
  SMultiWorkerCfg acfg = {.max = 1, .name = "vnode-apply", .fp = (FItems)vnodeApplyWriteMsg, .param = pVnode->pImpl};
  code = tMultiWorkerInit(&pVnode->pWriteW, &wcfg);
  if (code) {
    return code;
  }
  code = tMultiWorkerInit(&pVnode->pSyncW, &scfg);
  if (code) {
    tMultiWorkerCleanup(&pVnode->pWriteW);
    return code;
  }
  code = tMultiWorkerInit(&pVnode->pSyncRdW, &sccfg);
  if (code) {
    tMultiWorkerCleanup(&pVnode->pWriteW);
    tMultiWorkerCleanup(&pVnode->pSyncW);
    return code;
  }
  code = tMultiWorkerInit(&pVnode->pApplyW, &acfg);
  if (code) {
    tMultiWorkerCleanup(&pVnode->pWriteW);
    tMultiWorkerCleanup(&pVnode->pSyncW);
    tMultiWorkerCleanup(&pVnode->pSyncRdW);
    return code;
  }

  pVnode->pQueryQ = tQueryAutoQWorkerAllocQueue(&pMgmt->queryPool, pVnode, (FItem)vmProcessQueryQueue);
  pVnode->pStreamReaderQ = tQueryAutoQWorkerAllocQueue(&pMgmt->streamReaderPool, pVnode, (FItem)vmProcessStreamReaderQueue);
  pVnode->pFetchQ = tWWorkerAllocQueue(&pMgmt->fetchPool, pVnode, (FItems)vmProcessFetchQueue);

  if (pVnode->pWriteW.queue == NULL || pVnode->pSyncW.queue == NULL || pVnode->pSyncRdW.queue == NULL ||
      pVnode->pApplyW.queue == NULL || pVnode->pQueryQ == NULL || pVnode->pFetchQ == NULL || !pVnode->pStreamReaderQ) {
    return TSDB_CODE_OUT_OF_MEMORY;
  }

  dInfo("vgId:%d, write-queue:%p is alloced, thread:%08" PRId64, pVnode->vgId, pVnode->pWriteW.queue,
        taosQueueGetThreadId(pVnode->pWriteW.queue));
  dInfo("vgId:%d, sync-queue:%p is alloced, thread:%08" PRId64, pVnode->vgId, pVnode->pSyncW.queue,
        taosQueueGetThreadId(pVnode->pSyncW.queue));
  dInfo("vgId:%d, sync-rd-queue:%p is alloced, thread:%08" PRId64, pVnode->vgId, pVnode->pSyncRdW.queue,
        taosQueueGetThreadId(pVnode->pSyncRdW.queue));
  dInfo("vgId:%d, apply-queue:%p is alloced, thread:%08" PRId64, pVnode->vgId, pVnode->pApplyW.queue,
        taosQueueGetThreadId(pVnode->pApplyW.queue));
  dInfo("vgId:%d, query-queue:%p is alloced", pVnode->vgId, pVnode->pQueryQ);
  dInfo("vgId:%d, stream-reader-queue:%p is alloced", pVnode->vgId, pVnode->pStreamReaderQ);
  dInfo("vgId:%d, fetch-queue:%p is alloced, thread:%08" PRId64, pVnode->vgId, pVnode->pFetchQ,
        taosQueueGetThreadId(pVnode->pFetchQ));
  return 0;
}

void vmFreeQueue(SVnodeMgmt *pMgmt, SVnodeObj *pVnode) {
  tQueryAutoQWorkerFreeQueue(&pMgmt->queryPool, pVnode->pQueryQ);
  tQueryAutoQWorkerFreeQueue(&pMgmt->streamReaderPool, pVnode->pStreamReaderQ);
  tWWorkerFreeQueue(&pMgmt->fetchPool, pVnode->pFetchQ);

  pVnode->pQueryQ = NULL;
  pVnode->pFetchQ = NULL;
  pVnode->pStreamReaderQ = NULL;
  dDebug("vgId:%d, queue is freed", pVnode->vgId);
}

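// Initializes the shared worker pools for the vnode management module: the query and
// stream-reader auto-queue pools, the fetch pool, the single-threaded vnode-mgmt worker,
// and a vnode-multi-mgmt worker sized to the number of cores (at least two threads).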
int32_t vmStartWorker(SVnodeMgmt *pMgmt) {
  int32_t code = 0;

  SQueryAutoQWorkerPool *pQPool = &pMgmt->queryPool;
  pQPool->name = "vnode-query";
  pQPool->min = tsNumOfVnodeQueryThreads;
  pQPool->max = tsNumOfVnodeQueryThreads;
  if ((code = tQueryAutoQWorkerInit(pQPool)) != 0) return code;

  SQueryAutoQWorkerPool *pSPool = &pMgmt->streamReaderPool;
  pSPool->name = "vnode-st-reader";
  pSPool->min = tsNumOfVnodeStreamReaderThreads;
  pSPool->max = tsNumOfVnodeStreamReaderThreads;
  if ((code = tQueryAutoQWorkerInit(pSPool)) != 0) return code;

  tsNumOfQueryThreads += tsNumOfVnodeQueryThreads;

  SWWorkerPool *pFPool = &pMgmt->fetchPool;
  pFPool->name = "vnode-fetch";
  pFPool->max = tsNumOfVnodeFetchThreads;
  if ((code = tWWorkerInit(pFPool)) != 0) return code;

  SSingleWorkerCfg mgmtCfg = {
      .min = 1, .max = 1, .name = "vnode-mgmt", .fp = (FItem)vmProcessMgmtQueue, .param = pMgmt};

  if ((code = tSingleWorkerInit(&pMgmt->mgmtWorker, &mgmtCfg)) != 0) return code;

  int32_t threadNum = 0;
  if (tsNumOfCores == 1) {
    threadNum = 2;
  } else {
    threadNum = tsNumOfCores;
  }
  SSingleWorkerCfg multiMgmtCfg = {.min = threadNum,
                                   .max = threadNum,
                                   .name = "vnode-multi-mgmt",
                                   .fp = (FItem)vmProcessMultiMgmtQueue,
                                   .param = pMgmt};

  if ((code = tSingleWorkerInit(&pMgmt->mgmtMultiWorker, &multiMgmtCfg)) != 0) return code;
  dDebug("vnode workers are initialized");
  return 0;
}

void vmStopWorker(SVnodeMgmt *pMgmt) {
  tQueryAutoQWorkerCleanup(&pMgmt->queryPool);
  tWWorkerCleanup(&pMgmt->fetchPool);
  tQueryAutoQWorkerCleanup(&pMgmt->streamReaderPool);
  dDebug("vnode workers are closed");
}