• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

taosdata / TDengine / #4761

28 Sep 2025 10:49AM UTC coverage: 57.837% (-1.0%) from 58.866%
#4761

push

travis-ci

web-flow
merge: set version (#33122)

136913 of 302095 branches covered (45.32%)

Branch coverage included in aggregate %.

207750 of 293830 relevant lines covered (70.7%)

5673932.16 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

66.26
/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c
1
/*
2
 * Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
3
 *
4
 * This program is free software: you can use, redistribute, and/or modify
5
 * it under the terms of the GNU Affero General Public License, version 3
6
 * or later ("AGPL"), as published by the Free Software Foundation.
7
 *
8
 * This program is distributed in the hope that it will be useful, but WITHOUT
9
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10
 * FITNESS FOR A PARTICULAR PURPOSE.
11
 *
12
 * You should have received a copy of the GNU Affero General Public License
13
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
14
 */
15

16
#define _DEFAULT_SOURCE
17
#include "vmInt.h"
18
#include "vnodeInt.h"
19

20
// Reply to a request message with the given result code.
// Messages carrying no rpc handle (no reply expected) are silently skipped.
static inline void vmSendRsp(SRpcMsg *pMsg, int32_t code) {
  if (pMsg->info.handle == NULL) {
    return;
  }

  SRpcMsg rsp = {0};
  rsp.code = code;
  rsp.pCont = pMsg->info.rsp;
  rsp.contLen = pMsg->info.rspLen;
  rsp.info = pMsg->info;
  tmsgSendRsp(&rsp);
}
30

31
// Worker callback for the multi-threaded vnode-mgmt queue.
// Dispatches one dnode-level request (create/mount vnode), replies to the
// sender when the message is a request, then frees the message.
static void vmProcessMultiMgmtQueue(SQueueInfo *pInfo, SRpcMsg *pMsg) {
  SVnodeMgmt     *pMgmt = pInfo->ahandle;
  int32_t         code = -1;
  const STraceId *trace = &pMsg->info.traceId;  // referenced implicitly by the dG* logging macros

  dGTrace("msg:%p, get from vnode-multi-mgmt queue", pMsg);
  switch (pMsg->msgType) {
    case TDMT_DND_CREATE_VNODE:
      code = vmProcessCreateVnodeReq(pMgmt, pMsg);
      break;
#ifdef USE_MOUNT
    case TDMT_DND_RETRIEVE_MOUNT_PATH:
      code = vmProcessRetrieveMountPathReq(pMgmt, pMsg);
      break;
    case TDMT_DND_MOUNT_VNODE:
      code = vmProcessMountVnodeReq(pMgmt, pMsg);
      break;
#endif
    default:
      // Fix: previously unknown message types fell through silently with a
      // stale code/terrno. Report them explicitly, matching vmProcessMgmtQueue.
      terrno = TSDB_CODE_MSG_NOT_PROCESSED;
      dGError("msg:%p, not processed in vnode-multi-mgmt queue", pMsg);
  }

  if (IsReq(pMsg)) {
    if (code != 0) {
      if (terrno != 0) code = terrno;  // prefer the detailed thread-local error
      dGError("msg:%p, failed to process since %s, type:%s", pMsg, tstrerror(code), TMSG_INFO(pMsg->msgType));
    }
    vmSendRsp(pMsg, code);
  }

  dGTrace("msg:%p, is freed, code:0x%x", pMsg, code);
  rpcFreeCont(pMsg->pCont);
  taosFreeQitem(pMsg);
}
63

64
// Worker callback for the single-threaded vnode-mgmt queue.
// Dispatches one management request (drop/alter/heartbeat/...), replies to the
// sender when the message is a request, then frees the message.
static void vmProcessMgmtQueue(SQueueInfo *pInfo, SRpcMsg *pMsg) {
  SVnodeMgmt     *pMgmt = pInfo->ahandle;
  int32_t         code = -1;
  const STraceId *trace = &pMsg->info.traceId;  // referenced implicitly by the dG* logging macros

  dGTrace("msg:%p, get from vnode-mgmt queue", pMsg);
  switch (pMsg->msgType) {
    case TDMT_DND_CREATE_VNODE:
      code = vmProcessCreateVnodeReq(pMgmt, pMsg);
      break;
    case TDMT_DND_DROP_VNODE:
      code = vmProcessDropVnodeReq(pMgmt, pMsg);
      break;
    case TDMT_VND_ALTER_REPLICA:
      code = vmProcessAlterVnodeReplicaReq(pMgmt, pMsg);
      break;
    case TDMT_VND_DISABLE_WRITE:
      code = vmProcessDisableVnodeWriteReq(pMgmt, pMsg);
      break;
    case TDMT_VND_ALTER_HASHRANGE:
      code = vmProcessAlterHashRangeReq(pMgmt, pMsg);
      break;
    case TDMT_DND_ALTER_VNODE_TYPE:
      code = vmProcessAlterVnodeTypeReq(pMgmt, pMsg);
      break;
    case TDMT_DND_CHECK_VNODE_LEARNER_CATCHUP:
      code = vmProcessCheckLearnCatchupReq(pMgmt, pMsg);
      break;
    case TDMT_VND_ARB_HEARTBEAT:
      code = vmProcessArbHeartBeatReq(pMgmt, pMsg);
      break;
    case TDMT_VND_ALTER_ELECTBASELINE:
      code = vmProcessAlterVnodeElectBaselineReq(pMgmt, pMsg);
      break;
    default:
      // Unknown types are reported; code stays -1 so the reply below carries terrno.
      terrno = TSDB_CODE_MSG_NOT_PROCESSED;
      dGError("msg:%p, not processed in vnode-mgmt queue", pMsg);
  }

  if (IsReq(pMsg)) {
    if (code != 0) {
      if (terrno != 0) code = terrno;  // prefer the detailed thread-local error over the raw return
      dGError("msg:%p, failed to process since %s, type:%s", pMsg, tstrerror(code), TMSG_INFO(pMsg->msgType));
    }
    vmSendRsp(pMsg, code);
  }

  dGTrace("msg:%p, is freed, code:0x%x", pMsg, code);
  rpcFreeCont(pMsg->pCont);
  taosFreeQitem(pMsg);
}
115

116
// Worker callback for the vnode-query queue: execute one query message,
// send a reply only on failure (success replies are produced downstream),
// then release the message.
static void vmProcessQueryQueue(SQueueInfo *pInfo, SRpcMsg *pMsg) {
  SVnodeObj      *pVnodeObj = pInfo->ahandle;
  const STraceId *trace = &pMsg->info.traceId;  // referenced implicitly by the dG* logging macros

  dGTrace("vgId:%d, msg:%p, get from vnode-query queue", pVnodeObj->vgId, pMsg);

  int32_t code = vnodeProcessQueryMsg(pVnodeObj->pImpl, pMsg, pInfo);
  if (code != 0) {
    code = (terrno != 0) ? terrno : code;
    dGError("vgId:%d, msg:%p, failed to query since %s", pVnodeObj->vgId, pMsg, tstrerror(code));
    vmSendRsp(pMsg, code);
  }

  dGTrace("vgId:%d, msg:%p, is freed, code:0x%x", pVnodeObj->vgId, pMsg, code);
  rpcFreeCont(pMsg->pCont);
  taosFreeQitem(pMsg);
}
132

133
// Worker callback for the vnode-fetch queue: drain one batch of fetch
// messages, replying only on failure, then release each message.
static void vmProcessFetchQueue(SQueueInfo *pInfo, STaosQall *qall, int32_t numOfMsgs) {
  SVnodeObj *pVnode = pInfo->ahandle;
  SRpcMsg   *pMsg = NULL;

  for (int32_t i = 0; i < numOfMsgs; ++i) {
    if (taosGetQitem(qall, (void **)&pMsg) == 0) continue;
    const STraceId *trace = &pMsg->info.traceId;  // referenced implicitly by the dG* logging macros
    dGTrace("vgId:%d, msg:%p, get from vnode-fetch queue", pVnode->vgId, pMsg);

    // Reset terrno so a stale value from a previous message is not picked up below.
    terrno = 0;
    int32_t code = vnodeProcessFetchMsg(pVnode->pImpl, pMsg, pInfo);
    if (code != 0) {
      // A bare -1 means "see terrno"; translate it into the real error code.
      if (code == -1 && terrno != 0) {
        code = terrno;
      }

      // Missing WAL entries are expected during catch-up; log them quietly.
      if (code == TSDB_CODE_WAL_LOG_NOT_EXIST) {
        dGDebug("vgId:%d, msg:%p, failed to fetch since %s [vnodeProcessFetchMsg]", pVnode->vgId, pMsg, terrstr());
      } else {
        dGError("vgId:%d, msg:%p, failed to fetch since %s [vnodeProcessFetchMsg]", pVnode->vgId, pMsg, terrstr());
      }

      vmSendRsp(pMsg, code);
    }

    dGTrace("vgId:%d, msg:%p, is freed, code:0x%x [vnodeProcessFetchMsg]", pVnode->vgId, pMsg, code);
    rpcFreeCont(pMsg->pCont);
    taosFreeQitem(pMsg);
  }
}
163

164
// Worker callback for the vnode stream-reader queue: process one stream
// reader message, log failures, then release the message.
// NOTE(review): unlike the other queue handlers, no response is sent on
// failure here — confirm that is intended by the stream protocol.
static void vmProcessStreamReaderQueue(SQueueInfo *pInfo, SRpcMsg *pMsg) {
  SVnodeObj      *pVnode = pInfo->ahandle;
  const STraceId *trace = &pMsg->info.traceId;  // referenced implicitly by the dG* logging macros
  dGTrace("vgId:%d, msg:%p, get from vnode-fetch queue", pVnode->vgId, pMsg);

  // Reset terrno so a stale value is not mistaken for this message's error.
  terrno = 0;
  int32_t code = vnodeProcessStreamReaderMsg(pVnode->pImpl, pMsg);
  if (code != 0) {
    // A bare -1 means "see terrno"; translate it into the real error code.
    if (code == -1 && terrno != 0) {
      code = terrno;
    }

    // Fix: the former `if (code == 0)` success branch inside this failure
    // path was unreachable (code was non-zero and is only ever replaced by a
    // non-zero terrno), so it has been removed.
    dGError("vgId:%d, msg:%p, failed to stream reader since %s [vmProcessStreamReaderQueue]", pVnode->vgId, pMsg, terrstr());
  }

  dGTrace("vgId:%d, msg:%p, is freed, code:0x%x [vmProcessStreamReaderQueue]", pVnode->vgId, pMsg, code);
  rpcFreeCont(pMsg->pCont);
  taosFreeQitem(pMsg);
}
187

188
// Drain one batch from the vnode-sync queue. Any responses are produced
// inside vnodeProcessSyncMsg itself, so none are sent from this loop.
static void vmProcessSyncQueue(SQueueInfo *pInfo, STaosQall *qall, int32_t numOfMsgs) {
  SVnodeObj *pVnode = pInfo->ahandle;

  for (int32_t idx = 0; idx < numOfMsgs; ++idx) {
    SRpcMsg *pMsg = NULL;
    if (taosGetQitem(qall, (void **)&pMsg) == 0) continue;

    const STraceId *trace = &pMsg->info.traceId;  // referenced implicitly by the dG* logging macros
    dGTrace("vgId:%d, msg:%p, get from vnode-sync queue", pVnode->vgId, pMsg);

    int32_t code = vnodeProcessSyncMsg(pVnode->pImpl, pMsg, NULL);  // no response here

    dGTrace("vgId:%d, msg:%p, is freed, code:0x%x", pVnode->vgId, pMsg, code);
    rpcFreeCont(pMsg->pCont);
    taosFreeQitem(pMsg);
  }
}
203

204
// Best-effort reply that uses the current terrno as the response code.
// Does nothing when the message carries no rpc handle.
static void vmSendResponse(SRpcMsg *pMsg) {
  if (pMsg->info.handle == NULL) {
    return;
  }

  SRpcMsg rsp = {.info = pMsg->info, .code = terrno};
  if (rpcSendResponse(&rsp) != 0) {
    dError("failed to send response since %s", terrstr());
  }
}
212

213
// Report whether the storage backing this vnode still has enough free space:
// the vnode's tiered file system when present, otherwise the OS data dir.
static bool vmDataSpaceSufficient(SVnodeObj *pVnode) {
  STfs *pTfs = pVnode->pImpl->pTfs;
  return (pTfs != NULL) ? tfsDiskSpaceSufficient(pTfs, 0, pVnode->diskPrimary)
                        : osDataSpaceSufficient();
}
221

222
static int32_t vmAcquireVnodeWrapper(SVnodeMgmt *pMgt, int32_t vgId, SVnodeObj **pNode) {
8,261,805✔
223
  *pNode = vmAcquireVnode(pMgt, vgId);
8,261,805✔
224
  if (*pNode == NULL) {
8,262,735✔
225
    return terrno;
9,017✔
226
  }
227
  return 0;
8,253,718✔
228
}
229
// Route an rpc message to the target vnode's queue of the given type.
// Converts the message head from network to host byte order, acquires the
// vnode reference for the duration of the enqueue, and returns 0 on success
// or an error code (the caller keeps ownership of pMsg on failure).
static int32_t vmPutMsgToQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg, EQueueType qtype) {
  int32_t         code = 0;
  const STraceId *trace = &pMsg->info.traceId;  // referenced implicitly by the dG* logging macros
  if (pMsg->contLen < sizeof(SMsgHead)) {
    dGError("invalid rpc msg with no msg head at pCont. pMsg:%p, type:%s, contLen:%d", pMsg, TMSG_INFO(pMsg->msgType),
            pMsg->contLen);
    return TSDB_CODE_INVALID_MSG;
  }

  SMsgHead *pHead = pMsg->pCont;

  // The head arrives in network byte order; convert in place before use.
  pHead->contLen = ntohl(pHead->contLen);
  pHead->vgId = ntohl(pHead->vgId);

  SVnodeObj *pVnode = NULL;
  code = vmAcquireVnodeWrapper(pMgmt, pHead->vgId, &pVnode);
  if (code != 0) {
    dGDebug("vgId:%d, msg:%p, failed to put into vnode queue since %s, type:%s qtype:%d contLen:%d", pHead->vgId, pMsg,
            tstrerror(code), TMSG_INFO(pMsg->msgType), qtype, pHead->contLen);
    return code;
  }

  switch (qtype) {
    case QUERY_QUEUE:
      // Queries are preprocessed (validated) before they may be enqueued.
      code = vnodePreprocessQueryMsg(pVnode->pImpl, pMsg);
      if (code) {
        dError("vgId:%d, msg:%p, preprocess query msg failed since %s", pVnode->vgId, pMsg, tstrerror(code));
      } else {
        dGTrace("vgId:%d, msg:%p, put into vnode-query queue, type:%s", pVnode->vgId, pMsg, TMSG_INFO(pMsg->msgType));
        code = taosWriteQitem(pVnode->pQueryQ, pMsg);
      }
      break;
    case FETCH_QUEUE:
      dGTrace("vgId:%d, msg:%p, put into vnode-fetch queue, type:%s", pVnode->vgId, pMsg, TMSG_INFO(pMsg->msgType));
      code = taosWriteQitem(pVnode->pFetchQ, pMsg);
      break;
    case WRITE_QUEUE:
      // Writes are refused when the vnode's disk is (nearly) full.
      if (!vmDataSpaceSufficient(pVnode)) {
        code = TSDB_CODE_NO_ENOUGH_DISKSPACE;
        dError("vgId:%d, msg:%p, failed to put into vnode-write queue since %s, type:%s", pVnode->vgId, pMsg,
               tstrerror(code), TMSG_INFO(pMsg->msgType));
        break;
      }
#if 0
      if (pMsg->msgType == TDMT_VND_SUBMIT && (grantCheck(TSDB_GRANT_STORAGE) != TSDB_CODE_SUCCESS)) {
        code = TSDB_CODE_VND_NO_WRITE_AUTH;
        dDebug("vgId:%d, msg:%p, failed to put into vnode-write queue since %s, type:%s", pVnode->vgId, pMsg,
               tstrerror(code), TMSG_INFO(pMsg->msgType));
        break;
      }
#endif
      // A disabled vnode accepts only the ALTER_CONFIRM message.
      if (pMsg->msgType != TDMT_VND_ALTER_CONFIRM && pVnode->disable) {
        dDebug("vgId:%d, msg:%p, failed to put into vnode-write queue since its disable, type:%s", pVnode->vgId, pMsg,
               TMSG_INFO(pMsg->msgType));
        code = TSDB_CODE_VND_STOPPED;
        break;
      }
      dGDebug("vgId:%d, msg:%p, put into vnode-write queue, type:%s", pVnode->vgId, pMsg, TMSG_INFO(pMsg->msgType));
      code = taosWriteQitem(pVnode->pWriteW.queue, pMsg);
      break;
    case SYNC_QUEUE:
      dGDebug("vgId:%d, msg:%p, put into vnode-sync queue, type:%s", pVnode->vgId, pMsg, TMSG_INFO(pMsg->msgType));
      code = taosWriteQitem(pVnode->pSyncW.queue, pMsg);
      break;
    case SYNC_RD_QUEUE:
      // Heartbeats are logged at info level only when explicitly enabled.
      if(tsSyncLogHeartbeat){
        dGInfo("vgId:%d, msg:%p, put into vnode-sync-rd queue, type:%s", pVnode->vgId, pMsg, TMSG_INFO(pMsg->msgType));
      }
      else{
        dGDebug("vgId:%d, msg:%p, put into vnode-sync-rd queue, type:%s", pVnode->vgId, pMsg, TMSG_INFO(pMsg->msgType));
      }
      code = taosWriteQitem(pVnode->pSyncRdW.queue, pMsg);
      break;
    case APPLY_QUEUE:
      dGDebug("vgId:%d, msg:%p, put into vnode-apply queue, type:%s", pVnode->vgId, pMsg, TMSG_INFO(pMsg->msgType));
      code = taosWriteQitem(pVnode->pApplyW.queue, pMsg);
      break;
    case STREAM_READER_QUEUE:
      dGDebug("vgId:%d, msg:%p, put into vnode-stream-reader queue, type:%s", pVnode->vgId, pMsg,
              TMSG_INFO(pMsg->msgType));
      code = taosWriteQitem(pVnode->pStreamReaderQ, pMsg);
      break;
    default:
      code = TSDB_CODE_INVALID_MSG;
      break;
  }

  vmReleaseVnode(pMgmt, pVnode);
  return code;
}
319

320
// Enqueue pMsg on the target vnode's sync-read queue (see vmPutMsgToQueue).
int32_t vmPutMsgToSyncRdQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) { return vmPutMsgToQueue(pMgmt, pMsg, SYNC_RD_QUEUE); }
321

322
// Enqueue pMsg on the target vnode's sync queue (see vmPutMsgToQueue).
int32_t vmPutMsgToSyncQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) { return vmPutMsgToQueue(pMgmt, pMsg, SYNC_QUEUE); }
323

324
// Enqueue pMsg on the target vnode's write queue (see vmPutMsgToQueue).
int32_t vmPutMsgToWriteQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) { return vmPutMsgToQueue(pMgmt, pMsg, WRITE_QUEUE); }
325

326
// Enqueue pMsg on the target vnode's query queue (see vmPutMsgToQueue).
int32_t vmPutMsgToQueryQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) { return vmPutMsgToQueue(pMgmt, pMsg, QUERY_QUEUE); }
327

328
// Enqueue pMsg on the target vnode's fetch queue (see vmPutMsgToQueue).
int32_t vmPutMsgToFetchQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) { return vmPutMsgToQueue(pMgmt, pMsg, FETCH_QUEUE); }
329

330
// Enqueue pMsg on the target vnode's stream-reader queue (see vmPutMsgToQueue).
int32_t vmPutMsgToStreamReaderQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) { return vmPutMsgToQueue(pMgmt, pMsg, STREAM_READER_QUEUE); }
331

332
// Enqueue a dnode-level management message on the multi-threaded mgmt worker.
int32_t vmPutMsgToMultiMgmtQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
  const STraceId *trace = &pMsg->info.traceId;  // referenced implicitly by dGTrace
  dGTrace("msg:%p, put into vnode-multi-mgmt queue", pMsg);
  return taosWriteQitem(pMgmt->mgmtMultiWorker.queue, pMsg);
}
337

338
// Enqueue a management message on the single-threaded mgmt worker.
int32_t vmPutMsgToMgmtQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
  const STraceId *trace = &pMsg->info.traceId;  // referenced implicitly by dGTrace
  dGTrace("msg:%p, put into vnode-mgmt queue", pMsg);
  return taosWriteQitem(pMgmt->mgmtWorker.queue, pMsg);
}
343

344
// Wrap a raw rpc message in a queue item and route it to the requested queue.
// Ownership of pRpc->pCont transfers to the new queue item: on every path
// (success or failure) pRpc->pCont is set to NULL so the caller must not
// free it again. Returns 0 on success or an error code.
int32_t vmPutRpcMsgToQueue(SVnodeMgmt *pMgmt, EQueueType qtype, SRpcMsg *pRpc) {
  int32_t code;
  if (pRpc->contLen < sizeof(SMsgHead)) {
    dError("invalid rpc msg with no msg head at pCont. pRpc:%p, type:%s, len:%d", pRpc, TMSG_INFO(pRpc->msgType),
           pRpc->contLen);
    rpcFreeCont(pRpc->pCont);
    pRpc->pCont = NULL;
    return TSDB_CODE_INVALID_MSG;
  }

  // Apply-queue items use a dedicated allocation class.
  EQItype  itype = APPLY_QUEUE == qtype ? APPLY_QITEM : RPC_QITEM;
  SRpcMsg *pMsg;
  code = taosAllocateQitem(sizeof(SRpcMsg), itype, pRpc->contLen, (void **)&pMsg);
  if (code) {
    rpcFreeCont(pRpc->pCont);
    pRpc->pCont = NULL;
    return code;
  }

  SMsgHead *pHead = pRpc->pCont;
  dTrace("vgId:%d, msg:%p, is created, type:%s len:%d", pHead->vgId, pMsg, TMSG_INFO(pRpc->msgType), pRpc->contLen);

  // Convert back to network byte order: vmPutMsgToQueue converts to host
  // order itself, so the head must be in network order when handed over.
  pHead->contLen = htonl(pHead->contLen);
  pHead->vgId = htonl(pHead->vgId);
  memcpy(pMsg, pRpc, sizeof(SRpcMsg));
  pRpc->pCont = NULL;  // ownership transferred to pMsg

  code = vmPutMsgToQueue(pMgmt, pMsg, qtype);
  if (code != 0) {
    // Enqueue failed: the item was never handed to a worker, free it here.
    dTrace("msg:%p, is freed", pMsg);
    rpcFreeCont(pMsg->pCont);
    taosFreeQitem(pMsg);
  }

  return code;
}
380

381
// Return the number of pending items in one of a vnode's queues, or 0 when
// the vnode cannot be acquired or the queue type is not tracked here.
int32_t vmGetQueueSize(SVnodeMgmt *pMgmt, int32_t vgId, EQueueType qtype) {
  int32_t    size = -1;
  SVnodeObj *pVnode = vmAcquireVnode(pMgmt, vgId);

  if (pVnode != NULL) {
    switch (qtype) {
      case WRITE_QUEUE:
        size = taosQueueItemSize(pVnode->pWriteW.queue);
        break;
      case SYNC_QUEUE:
        size = taosQueueItemSize(pVnode->pSyncW.queue);
        break;
      case APPLY_QUEUE:
        size = taosQueueItemSize(pVnode->pApplyW.queue);
        break;
      case QUERY_QUEUE:
        size = taosQueueItemSize(pVnode->pQueryQ);
        break;
      case FETCH_QUEUE:
        size = taosQueueItemSize(pVnode->pFetchQ);
        break;
      case STREAM_READER_QUEUE:
        size = taosQueueItemSize(pVnode->pStreamReaderQ);
        break;
      default:
        break;
    }
    vmReleaseVnode(pMgmt, pVnode);
  }

  if (size < 0) {
    dTrace("vgId:%d, can't get size from queue since %s, qtype:%d", vgId, terrstr(), qtype);
    size = 0;
  }
  return size;
}
415

416
// Create all per-vnode queues and workers: write/sync/sync-rd/apply
// multi-workers plus the pooled query, stream-reader and fetch queues.
// Returns 0 on success; on failure everything initialized so far is
// cleaned up before returning.
int32_t vmAllocQueue(SVnodeMgmt *pMgmt, SVnodeObj *pVnode) {
  int32_t         code = 0;
  SMultiWorkerCfg wcfg = {.max = 1, .name = "vnode-write", .fp = (FItems)vnodeProposeWriteMsg, .param = pVnode->pImpl};
  SMultiWorkerCfg scfg = {.max = 1, .name = "vnode-sync", .fp = (FItems)vmProcessSyncQueue, .param = pVnode};
  SMultiWorkerCfg sccfg = {.max = 1, .name = "vnode-sync-rd", .fp = (FItems)vmProcessSyncQueue, .param = pVnode};
  SMultiWorkerCfg acfg = {.max = 1, .name = "vnode-apply", .fp = (FItems)vnodeApplyWriteMsg, .param = pVnode->pImpl};
  code = tMultiWorkerInit(&pVnode->pWriteW, &wcfg);
  if (code) {
    return code;
  }
  code = tMultiWorkerInit(&pVnode->pSyncW, &scfg);
  if (code) {
    tMultiWorkerCleanup(&pVnode->pWriteW);
    return code;
  }
  code = tMultiWorkerInit(&pVnode->pSyncRdW, &sccfg);
  if (code) {
    tMultiWorkerCleanup(&pVnode->pWriteW);
    tMultiWorkerCleanup(&pVnode->pSyncW);
    return code;
  }
  code = tMultiWorkerInit(&pVnode->pApplyW, &acfg);
  if (code) {
    tMultiWorkerCleanup(&pVnode->pWriteW);
    tMultiWorkerCleanup(&pVnode->pSyncW);
    tMultiWorkerCleanup(&pVnode->pSyncRdW);
    return code;
  }

  pVnode->pQueryQ = tQueryAutoQWorkerAllocQueue(&pMgmt->queryPool, pVnode, (FItem)vmProcessQueryQueue);
  pVnode->pStreamReaderQ = tQueryAutoQWorkerAllocQueue(&pMgmt->streamReaderPool, pVnode, (FItem)vmProcessStreamReaderQueue);
  pVnode->pFetchQ = tWWorkerAllocQueue(&pMgmt->fetchPool, pVnode, (FItems)vmProcessFetchQueue);

  if (pVnode->pWriteW.queue == NULL || pVnode->pSyncW.queue == NULL || pVnode->pSyncRdW.queue == NULL ||
      pVnode->pApplyW.queue == NULL || pVnode->pQueryQ == NULL || pVnode->pFetchQ == NULL ||
      pVnode->pStreamReaderQ == NULL) {
    // Fix: previously the already-initialized workers and any successfully
    // allocated pooled queues were leaked on this out-of-memory path.
    if (pVnode->pQueryQ != NULL) tQueryAutoQWorkerFreeQueue(&pMgmt->queryPool, pVnode->pQueryQ);
    if (pVnode->pStreamReaderQ != NULL) tQueryAutoQWorkerFreeQueue(&pMgmt->streamReaderPool, pVnode->pStreamReaderQ);
    if (pVnode->pFetchQ != NULL) tWWorkerFreeQueue(&pMgmt->fetchPool, pVnode->pFetchQ);
    pVnode->pQueryQ = NULL;
    pVnode->pStreamReaderQ = NULL;
    pVnode->pFetchQ = NULL;
    tMultiWorkerCleanup(&pVnode->pWriteW);
    tMultiWorkerCleanup(&pVnode->pSyncW);
    tMultiWorkerCleanup(&pVnode->pSyncRdW);
    tMultiWorkerCleanup(&pVnode->pApplyW);
    return TSDB_CODE_OUT_OF_MEMORY;
  }

  dInfo("vgId:%d, write-queue:%p is alloced, thread:%08" PRId64, pVnode->vgId, pVnode->pWriteW.queue,
        taosQueueGetThreadId(pVnode->pWriteW.queue));
  dInfo("vgId:%d, sync-queue:%p is alloced, thread:%08" PRId64, pVnode->vgId, pVnode->pSyncW.queue,
        taosQueueGetThreadId(pVnode->pSyncW.queue));
  dInfo("vgId:%d, sync-rd-queue:%p is alloced, thread:%08" PRId64, pVnode->vgId, pVnode->pSyncRdW.queue,
        taosQueueGetThreadId(pVnode->pSyncRdW.queue));
  dInfo("vgId:%d, apply-queue:%p is alloced, thread:%08" PRId64, pVnode->vgId, pVnode->pApplyW.queue,
        taosQueueGetThreadId(pVnode->pApplyW.queue));
  dInfo("vgId:%d, query-queue:%p is alloced", pVnode->vgId, pVnode->pQueryQ);
  dInfo("vgId:%d, stream-reader-queue:%p is alloced", pVnode->vgId, pVnode->pStreamReaderQ);
  dInfo("vgId:%d, fetch-queue:%p is alloced, thread:%08" PRId64, pVnode->vgId, pVnode->pFetchQ,
        taosQueueGetThreadId(pVnode->pFetchQ));
  return 0;
}
468

469
// Release the per-vnode pooled queues created in vmAllocQueue and clear the
// pointers. The write/sync/apply multi-workers are cleaned up elsewhere.
void vmFreeQueue(SVnodeMgmt *pMgmt, SVnodeObj *pVnode) {
  tQueryAutoQWorkerFreeQueue(&pMgmt->queryPool, pVnode->pQueryQ);
  tQueryAutoQWorkerFreeQueue(&pMgmt->streamReaderPool, pVnode->pStreamReaderQ);
  tWWorkerFreeQueue(&pMgmt->fetchPool, pVnode->pFetchQ);
  pVnode->pQueryQ = NULL;
  pVnode->pFetchQ = NULL;  // fix: was redundantly assigned NULL twice
  pVnode->pStreamReaderQ = NULL;
  dDebug("vgId:%d, queue is freed", pVnode->vgId);
}
480

481
// Initialize the shared vnode worker pools: query, stream-reader and fetch
// pools plus the single- and multi-threaded mgmt workers.
// Returns 0 on success or the first initialization error.
// NOTE(review): on a mid-sequence failure the pools initialized earlier are
// not torn down here — confirm the caller invokes vmStopWorker on failure.
int32_t vmStartWorker(SVnodeMgmt *pMgmt) {
  int32_t code = 0;

  SQueryAutoQWorkerPool *pQPool = &pMgmt->queryPool;
  pQPool->name = "vnode-query";
  pQPool->min = tsNumOfVnodeQueryThreads;
  pQPool->max = tsNumOfVnodeQueryThreads;
  if ((code = tQueryAutoQWorkerInit(pQPool)) != 0) return code;

  SQueryAutoQWorkerPool *pSPool = &pMgmt->streamReaderPool;
  pSPool->name = "vnode-st-reader";
  pSPool->min = tsNumOfVnodeStreamReaderThreads;
  pSPool->max = tsNumOfVnodeStreamReaderThreads;
  if ((code = tQueryAutoQWorkerInit(pSPool)) != 0) return code;

  // Account the vnode query threads in the process-wide query thread total.
  tsNumOfQueryThreads += tsNumOfVnodeQueryThreads;

  SWWorkerPool *pFPool = &pMgmt->fetchPool;
  pFPool->name = "vnode-fetch";
  pFPool->max = tsNumOfVnodeFetchThreads;
  if ((code = tWWorkerInit(pFPool)) != 0) return code;

  // Management messages are processed strictly serially (min = max = 1).
  SSingleWorkerCfg mgmtCfg = {
      .min = 1, .max = 1, .name = "vnode-mgmt", .fp = (FItem)vmProcessMgmtQueue, .param = pMgmt};

  if ((code = tSingleWorkerInit(&pMgmt->mgmtWorker, &mgmtCfg)) != 0) return code;

  // The multi-mgmt worker scales with core count; keep at least 2 threads.
  int32_t threadNum = 0;
  if (tsNumOfCores == 1) {
    threadNum = 2;
  } else {
    threadNum = tsNumOfCores;
  }
  SSingleWorkerCfg multiMgmtCfg = {.min = threadNum,
                                   .max = threadNum,
                                   .name = "vnode-multi-mgmt",
                                   .fp = (FItem)vmProcessMultiMgmtQueue,
                                   .param = pMgmt};

  if ((code = tSingleWorkerInit(&pMgmt->mgmtMultiWorker, &multiMgmtCfg)) != 0) return code;
  dDebug("vnode workers are initialized");
  return 0;
}
524

525
// Tear down the shared vnode worker pools created in vmStartWorker.
// NOTE(review): mgmtWorker/mgmtMultiWorker are not cleaned up here — confirm
// they are released elsewhere during shutdown.
void vmStopWorker(SVnodeMgmt *pMgmt) {
  tQueryAutoQWorkerCleanup(&pMgmt->queryPool);
  tWWorkerCleanup(&pMgmt->fetchPool);
  tQueryAutoQWorkerCleanup(&pMgmt->streamReaderPool);
  dDebug("vnode workers are closed");
}
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc