• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

taosdata / TDengine / #4740

18 Sep 2025 04:31AM UTC coverage: 58.139% (-0.9%) from 59.028%
#4740

push

travis-ci

web-flow
fix: clear parse csv error syntax error msg (#33000)

133663 of 293099 branches covered (45.6%)

Branch coverage included in aggregate %.

0 of 2 new or added lines in 1 file covered. (0.0%)

4143 existing lines in 175 files now uncovered.

202241 of 284660 relevant lines covered (71.05%)

5584206.0 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

66.34
/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c
1
/*
2
 * Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
3
 *
4
 * This program is free software: you can use, redistribute, and/or modify
5
 * it under the terms of the GNU Affero General Public License, version 3
6
 * or later ("AGPL"), as published by the Free Software Foundation.
7
 *
8
 * This program is distributed in the hope that it will be useful, but WITHOUT
9
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10
 * FITNESS FOR A PARTICULAR PURPOSE.
11
 *
12
 * You should have received a copy of the GNU Affero General Public License
13
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
14
 */
15

16
#define _DEFAULT_SOURCE
17
#include "vmInt.h"
18
#include "vnodeInt.h"
19

20
// Reply to an RPC request with the given result code. Silently returns when
// the message carries no reply handle (nothing to respond to).
static inline void vmSendRsp(SRpcMsg *pMsg, int32_t code) {
  if (pMsg->info.handle == NULL) return;

  SRpcMsg rsp = {0};  // remaining fields zeroed, same as designated init
  rsp.code = code;
  rsp.pCont = pMsg->info.rsp;
  rsp.contLen = pMsg->info.rspLen;
  rsp.info = pMsg->info;
  tmsgSendRsp(&rsp);
}
30

31
// Worker callback for the vnode-multi-mgmt queue: dispatches management
// requests (vnode creation / mount handling) and always replies to requests.
// Consumes pMsg: frees its content and the queue item before returning.
static void vmProcessMultiMgmtQueue(SQueueInfo *pInfo, SRpcMsg *pMsg) {
  SVnodeMgmt     *pMgmt = pInfo->ahandle;
  int32_t         code = -1;
  const STraceId *trace = &pMsg->info.traceId;

  dGTrace("msg:%p, get from vnode-multi-mgmt queue", pMsg);
  switch (pMsg->msgType) {
    case TDMT_DND_CREATE_VNODE:
      code = vmProcessCreateVnodeReq(pMgmt, pMsg);
      break;
#ifdef USE_MOUNT
    case TDMT_DND_RETRIEVE_MOUNT_PATH:
      code = vmProcessRetrieveMountPathReq(pMgmt, pMsg);
      break;
    case TDMT_DND_MOUNT_VNODE:
      code = vmProcessMountVnodeReq(pMgmt, pMsg);
      break;
#endif
    default:
      // Keep parity with vmProcessMgmtQueue: flag unhandled message types
      // explicitly instead of silently replying with the initial -1.
      terrno = TSDB_CODE_MSG_NOT_PROCESSED;
      dGError("msg:%p, not processed in vnode-multi-mgmt queue", pMsg);
  }

  if (IsReq(pMsg)) {
    if (code != 0) {
      if (terrno != 0) code = terrno;  // prefer the detailed thread-local error
      dGError("msg:%p, failed to process since %s, type:%s", pMsg, tstrerror(code), TMSG_INFO(pMsg->msgType));
    }
    vmSendRsp(pMsg, code);
  }

  dGTrace("msg:%p, is freed, code:0x%x", pMsg, code);
  rpcFreeCont(pMsg->pCont);
  taosFreeQitem(pMsg);
}
63

64
// Worker callback for the single-threaded vnode-mgmt queue. Routes each
// management request to its handler, replies to requests, then releases
// ownership of pMsg (content + queue item).
static void vmProcessMgmtQueue(SQueueInfo *pInfo, SRpcMsg *pMsg) {
  SVnodeMgmt     *pMgmt = pInfo->ahandle;
  const STraceId *trace = &pMsg->info.traceId;
  int32_t         rc = -1;

  dGTrace("msg:%p, get from vnode-mgmt queue", pMsg);

  switch (pMsg->msgType) {
    case TDMT_DND_CREATE_VNODE:
      rc = vmProcessCreateVnodeReq(pMgmt, pMsg);
      break;
    case TDMT_DND_DROP_VNODE:
      rc = vmProcessDropVnodeReq(pMgmt, pMsg);
      break;
    case TDMT_VND_ALTER_REPLICA:
      rc = vmProcessAlterVnodeReplicaReq(pMgmt, pMsg);
      break;
    case TDMT_VND_DISABLE_WRITE:
      rc = vmProcessDisableVnodeWriteReq(pMgmt, pMsg);
      break;
    case TDMT_VND_ALTER_HASHRANGE:
      rc = vmProcessAlterHashRangeReq(pMgmt, pMsg);
      break;
    case TDMT_DND_ALTER_VNODE_TYPE:
      rc = vmProcessAlterVnodeTypeReq(pMgmt, pMsg);
      break;
    case TDMT_DND_CHECK_VNODE_LEARNER_CATCHUP:
      rc = vmProcessCheckLearnCatchupReq(pMgmt, pMsg);
      break;
    case TDMT_VND_ARB_HEARTBEAT:
      rc = vmProcessArbHeartBeatReq(pMgmt, pMsg);
      break;
    default:
      terrno = TSDB_CODE_MSG_NOT_PROCESSED;
      dGError("msg:%p, not processed in vnode-mgmt queue", pMsg);
  }

  if (IsReq(pMsg)) {
    if (rc != 0) {
      if (terrno != 0) rc = terrno;  // prefer the detailed thread-local error
      dGError("msg:%p, failed to process since %s, type:%s", pMsg, tstrerror(rc), TMSG_INFO(pMsg->msgType));
    }
    vmSendRsp(pMsg, rc);
  }

  dGTrace("msg:%p, is freed, code:0x%x", pMsg, rc);
  rpcFreeCont(pMsg->pCont);
  taosFreeQitem(pMsg);
}
112

113
// Worker callback for the vnode-query queue: runs one query message and
// replies only on failure (success responses are produced inside the query
// path). Consumes pMsg.
static void vmProcessQueryQueue(SQueueInfo *pInfo, SRpcMsg *pMsg) {
  SVnodeObj      *pVnode = pInfo->ahandle;
  const STraceId *trace = &pMsg->info.traceId;

  dGTrace("vgId:%d, msg:%p, get from vnode-query queue", pVnode->vgId, pMsg);

  int32_t code = vnodeProcessQueryMsg(pVnode->pImpl, pMsg, pInfo);
  if (code != 0) {
    if (terrno != 0) code = terrno;  // surface the thread-local error detail
    dGError("vgId:%d, msg:%p, failed to query since %s", pVnode->vgId, pMsg, tstrerror(code));
    vmSendRsp(pMsg, code);
  }

  dGTrace("vgId:%d, msg:%p, is freed, code:0x%x", pVnode->vgId, pMsg, code);
  rpcFreeCont(pMsg->pCont);
  taosFreeQitem(pMsg);
}
129

130
// Batch worker callback for the vnode-fetch queue: drains up to numOfMsgs
// items, replying on failure. WAL-log-not-exist is an expected condition and
// is logged at debug level only. Each drained message is freed here.
static void vmProcessFetchQueue(SQueueInfo *pInfo, STaosQall *qall, int32_t numOfMsgs) {
  SVnodeObj *pVnode = pInfo->ahandle;
  SRpcMsg   *pMsg = NULL;

  for (int32_t idx = 0; idx < numOfMsgs; ++idx) {
    if (taosGetQitem(qall, (void **)&pMsg) == 0) continue;

    const STraceId *trace = &pMsg->info.traceId;
    dGTrace("vgId:%d, msg:%p, get from vnode-fetch queue", pVnode->vgId, pMsg);

    terrno = 0;  // reset so a legacy -1 return can be mapped to terrno below
    int32_t code = vnodeProcessFetchMsg(pVnode->pImpl, pMsg, pInfo);
    if (code != 0) {
      if (code == -1 && terrno != 0) {
        code = terrno;
      }

      if (code == TSDB_CODE_WAL_LOG_NOT_EXIST) {
        dGDebug("vgId:%d, msg:%p, failed to fetch since %s [vnodeProcessFetchMsg]", pVnode->vgId, pMsg, terrstr());
      } else {
        dGError("vgId:%d, msg:%p, failed to fetch since %s [vnodeProcessFetchMsg]", pVnode->vgId, pMsg, terrstr());
      }

      vmSendRsp(pMsg, code);
    }

    dGTrace("vgId:%d, msg:%p, is freed, code:0x%x [vnodeProcessFetchMsg]", pVnode->vgId, pMsg, code);
    rpcFreeCont(pMsg->pCont);
    taosFreeQitem(pMsg);
  }
}
160

161
// Batch worker callback for the vnode-stream-reader queue: drains up to
// numOfMsgs items and logs failures. No response is sent from here.
// Each drained message is freed before the next iteration.
static void vmProcessStreamReaderQueue(SQueueInfo *pInfo, STaosQall *qall, int32_t numOfMsgs) {
  SVnodeObj *pVnode = pInfo->ahandle;
  SRpcMsg   *pMsg = NULL;

  for (int32_t i = 0; i < numOfMsgs; ++i) {
    if (taosGetQitem(qall, (void **)&pMsg) == 0) continue;
    const STraceId *trace = &pMsg->info.traceId;
    // Fixed copy-paste: this is the stream-reader queue, not the fetch queue.
    dGTrace("vgId:%d, msg:%p, get from vnode-stream-reader queue", pVnode->vgId, pMsg);

    terrno = 0;  // reset so a legacy -1 return can be mapped to terrno below
    int32_t code = vnodeProcessStreamReaderMsg(pVnode->pImpl, pMsg);
    if (code != 0) {
      if (code == -1 && terrno != 0) {
        code = terrno;
      }
      // code is guaranteed non-zero here; the former `if (code == 0)`
      // "success" branch was unreachable and has been removed.
      dGError("vgId:%d, msg:%p, failed to stream reader since %s [vmProcessStreamReaderQueue]", pVnode->vgId, pMsg, terrstr());
    }

    dGTrace("vgId:%d, msg:%p, is freed, code:0x%x [vmProcessStreamReaderQueue]", pVnode->vgId, pMsg, code);
    rpcFreeCont(pMsg->pCont);
    taosFreeQitem(pMsg);
  }
}
189

190
// Batch worker callback for the vnode-sync queue. Sync messages never get a
// direct response here (the sync layer replies on its own); each drained
// message is simply processed and freed.
static void vmProcessSyncQueue(SQueueInfo *pInfo, STaosQall *qall, int32_t numOfMsgs) {
  SVnodeObj *pVnode = pInfo->ahandle;
  SRpcMsg   *pMsg = NULL;

  for (int32_t idx = 0; idx < numOfMsgs; ++idx) {
    if (taosGetQitem(qall, (void **)&pMsg) == 0) continue;

    const STraceId *trace = &pMsg->info.traceId;
    dGTrace("vgId:%d, msg:%p, get from vnode-sync queue", pVnode->vgId, pMsg);

    int32_t code = vnodeProcessSyncMsg(pVnode->pImpl, pMsg, NULL);  // no response here

    dGTrace("vgId:%d, msg:%p, is freed, code:0x%x", pVnode->vgId, pMsg, code);
    rpcFreeCont(pMsg->pCont);
    taosFreeQitem(pMsg);
  }
}
205

206
// Best-effort reply carrying the current terrno; does nothing when the
// message has no reply handle. Send failures are only logged.
static void vmSendResponse(SRpcMsg *pMsg) {
  if (pMsg->info.handle == NULL) return;  // nothing to reply to

  SRpcMsg rsp = {.info = pMsg->info, .code = terrno};
  if (rpcSendResponse(&rsp) != 0) {
    dError("failed to send response since %s", terrstr());
  }
}
214

215
// True when the storage backing this vnode still has enough free space:
// checks the vnode's tfs primary disk when a tfs is attached, otherwise
// falls back to the OS-level data-dir check.
static bool vmDataSpaceSufficient(SVnodeObj *pVnode) {
  STfs *pTfs = pVnode->pImpl->pTfs;
  return (pTfs != NULL) ? tfsDiskSpaceSufficient(pTfs, 0, pVnode->diskPrimary)
                        : osDataSpaceSufficient();
}
223

224
// Acquire the vnode for vgId into *pNode; returns 0 on success or the
// thread-local error code (terrno) when the vnode cannot be acquired.
static int32_t vmAcquireVnodeWrapper(SVnodeMgmt *pMgt, int32_t vgId, SVnodeObj **pNode) {
  *pNode = vmAcquireVnode(pMgt, vgId);
  return (*pNode == NULL) ? terrno : 0;
}
231
static int32_t vmPutMsgToQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg, EQueueType qtype) {
8,061,829✔
232
  int32_t         code = 0;
8,061,829✔
233
  const STraceId *trace = &pMsg->info.traceId;
8,061,829✔
234
  if (pMsg->contLen < sizeof(SMsgHead)) {
8,061,829!
235
    dGError("invalid rpc msg with no msg head at pCont. pMsg:%p, type:%s, contLen:%d", pMsg, TMSG_INFO(pMsg->msgType),
×
236
            pMsg->contLen);
237
    return TSDB_CODE_INVALID_MSG;
×
238
  }
239

240
  SMsgHead *pHead = pMsg->pCont;
8,061,829✔
241

242
  pHead->contLen = ntohl(pHead->contLen);
8,061,829✔
243
  pHead->vgId = ntohl(pHead->vgId);
8,061,829✔
244

245
  SVnodeObj *pVnode = NULL;
8,061,829✔
246
  code = vmAcquireVnodeWrapper(pMgmt, pHead->vgId, &pVnode);
8,061,829✔
247
  if (code != 0) {
8,062,707✔
248
    dGDebug("vgId:%d, msg:%p, failed to put into vnode queue since %s, type:%s qtype:%d contLen:%d", pHead->vgId, pMsg,
8,751!
249
            tstrerror(code), TMSG_INFO(pMsg->msgType), qtype, pHead->contLen);
250
    return code;
8,751✔
251
  }
252

253
  switch (qtype) {
8,053,956✔
254
    case QUERY_QUEUE:
1,335,828✔
255
      code = vnodePreprocessQueryMsg(pVnode->pImpl, pMsg);
1,335,828✔
256
      if (code) {
1,335,825!
UNCOV
257
        dError("vgId:%d, msg:%p, preprocess query msg failed since %s", pVnode->vgId, pMsg, tstrerror(code));
×
258
      } else {
259
        dGTrace("vgId:%d, msg:%p, put into vnode-query queue, type:%s", pVnode->vgId, pMsg, TMSG_INFO(pMsg->msgType));
1,335,825!
260
        code = taosWriteQitem(pVnode->pQueryQ, pMsg);
1,335,825✔
261
      }
262
      break;
1,335,911✔
263
    case FETCH_QUEUE:
3,530,033✔
264
      dGTrace("vgId:%d, msg:%p, put into vnode-fetch queue, type:%s", pVnode->vgId, pMsg, TMSG_INFO(pMsg->msgType));
3,530,033!
265
      code = taosWriteQitem(pVnode->pFetchQ, pMsg);
3,530,033✔
266
      break;
3,530,353✔
267
    case WRITE_QUEUE:
1,882,471✔
268
      if (!vmDataSpaceSufficient(pVnode)) {
1,882,471!
269
        code = TSDB_CODE_NO_ENOUGH_DISKSPACE;
×
270
        dError("vgId:%d, msg:%p, failed to put into vnode-write queue since %s, type:%s", pVnode->vgId, pMsg,
×
271
               tstrerror(code), TMSG_INFO(pMsg->msgType));
272
        break;
×
273
      }
274
#if 0
275
      if (pMsg->msgType == TDMT_VND_SUBMIT && (grantCheck(TSDB_GRANT_STORAGE) != TSDB_CODE_SUCCESS)) {
276
        code = TSDB_CODE_VND_NO_WRITE_AUTH;
277
        dDebug("vgId:%d, msg:%p, failed to put into vnode-write queue since %s, type:%s", pVnode->vgId, pMsg,
278
               tstrerror(code), TMSG_INFO(pMsg->msgType));
279
        break;
280
      }
281
#endif
282
      if (pMsg->msgType != TDMT_VND_ALTER_CONFIRM && pVnode->disable) {
1,882,479✔
283
        dDebug("vgId:%d, msg:%p, failed to put into vnode-write queue since its disable, type:%s", pVnode->vgId, pMsg,
585!
284
               TMSG_INFO(pMsg->msgType));
285
        code = TSDB_CODE_VND_STOPPED;
585✔
286
        break;
585✔
287
      }
288
      dGDebug("vgId:%d, msg:%p, put into vnode-write queue, type:%s", pVnode->vgId, pMsg, TMSG_INFO(pMsg->msgType));
1,881,894!
289
      code = taosWriteQitem(pVnode->pWriteW.queue, pMsg);
1,881,896✔
290
      break;
1,881,891✔
291
    case SYNC_QUEUE:
657,746✔
292
      dGDebug("vgId:%d, msg:%p, put into vnode-sync queue, type:%s", pVnode->vgId, pMsg, TMSG_INFO(pMsg->msgType));
657,746!
293
      code = taosWriteQitem(pVnode->pSyncW.queue, pMsg);
657,747✔
294
      break;
657,761✔
295
    case SYNC_RD_QUEUE:
106,403✔
296
      if(tsSyncLogHeartbeat){
106,403!
297
        dGInfo("vgId:%d, msg:%p, put into vnode-sync-rd queue, type:%s", pVnode->vgId, pMsg, TMSG_INFO(pMsg->msgType));
×
298
      }
299
      else{
300
        dGDebug("vgId:%d, msg:%p, put into vnode-sync-rd queue, type:%s", pVnode->vgId, pMsg, TMSG_INFO(pMsg->msgType));
106,403!
301
      }
302
      code = taosWriteQitem(pVnode->pSyncRdW.queue, pMsg);
106,403✔
303
      break;
106,401✔
304
    case APPLY_QUEUE:
461,839✔
305
      dGDebug("vgId:%d, msg:%p, put into vnode-apply queue, type:%s", pVnode->vgId, pMsg, TMSG_INFO(pMsg->msgType));
461,839!
306
      code = taosWriteQitem(pVnode->pApplyW.queue, pMsg);
461,839✔
307
      break;
461,841✔
308
    case STREAM_READER_QUEUE:
79,013✔
309
      dGDebug("vgId:%d, msg:%p, put into vnode-stream-reader queue, type:%s", pVnode->vgId, pMsg,
79,013!
310
              TMSG_INFO(pMsg->msgType));
311
      code = taosWriteQitem(pVnode->pStreamReaderQ, pMsg);
79,014✔
312
      break;
79,039✔
313
    default:
623✔
314
      code = TSDB_CODE_INVALID_MSG;
623✔
315
      break;
623✔
316
  }
317

318
  vmReleaseVnode(pMgmt, pVnode);
8,054,405✔
319
  return code;
8,054,128✔
320
}
321

322
// Enqueue pMsg on the target vnode's sync-rd queue.
int32_t vmPutMsgToSyncRdQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
  return vmPutMsgToQueue(pMgmt, pMsg, SYNC_RD_QUEUE);
}
323

324
// Enqueue pMsg on the target vnode's sync queue.
int32_t vmPutMsgToSyncQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
  return vmPutMsgToQueue(pMgmt, pMsg, SYNC_QUEUE);
}
325

326
// Enqueue pMsg on the target vnode's write queue.
int32_t vmPutMsgToWriteQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
  return vmPutMsgToQueue(pMgmt, pMsg, WRITE_QUEUE);
}
327

328
// Enqueue pMsg on the target vnode's query queue.
int32_t vmPutMsgToQueryQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
  return vmPutMsgToQueue(pMgmt, pMsg, QUERY_QUEUE);
}
329

330
// Enqueue pMsg on the target vnode's fetch queue.
int32_t vmPutMsgToFetchQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
  return vmPutMsgToQueue(pMgmt, pMsg, FETCH_QUEUE);
}
331

332
// Enqueue pMsg on the target vnode's stream-reader queue.
int32_t vmPutMsgToStreamReaderQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
  return vmPutMsgToQueue(pMgmt, pMsg, STREAM_READER_QUEUE);
}
333

334
// Enqueue pMsg on the shared multi-threaded management queue (not bound to
// a specific vnode).
int32_t vmPutMsgToMultiMgmtQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
  const STraceId *trace = &pMsg->info.traceId;

  dGTrace("msg:%p, put into vnode-multi-mgmt queue", pMsg);
  return taosWriteQitem(pMgmt->mgmtMultiWorker.queue, pMsg);
}
339

340
// Enqueue pMsg on the single-threaded management queue (not bound to a
// specific vnode).
int32_t vmPutMsgToMgmtQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
  const STraceId *trace = &pMsg->info.traceId;

  dGTrace("msg:%p, put into vnode-mgmt queue", pMsg);
  return taosWriteQitem(pMgmt->mgmtWorker.queue, pMsg);
}
345

346
// Wrap a raw RPC message into a queue item and dispatch it via
// vmPutMsgToQueue. On every path pRpc->pCont ownership is resolved: it is
// either handed to the queued item or freed here before returning.
int32_t vmPutRpcMsgToQueue(SVnodeMgmt *pMgmt, EQueueType qtype, SRpcMsg *pRpc) {
  int32_t code;

  // Reject messages too short to carry an SMsgHead.
  if (pRpc->contLen < sizeof(SMsgHead)) {
    dError("invalid rpc msg with no msg head at pCont. pRpc:%p, type:%s, len:%d", pRpc, TMSG_INFO(pRpc->msgType),
           pRpc->contLen);
    rpcFreeCont(pRpc->pCont);
    pRpc->pCont = NULL;
    return TSDB_CODE_INVALID_MSG;
  }

  // Apply-queue items are accounted separately from generic RPC items.
  EQItype  itype = (APPLY_QUEUE == qtype) ? APPLY_QITEM : RPC_QITEM;
  SRpcMsg *pMsg;
  code = taosAllocateQitem(sizeof(SRpcMsg), itype, pRpc->contLen, (void **)&pMsg);
  if (code) {
    rpcFreeCont(pRpc->pCont);
    pRpc->pCont = NULL;
    return code;
  }

  SMsgHead *pHead = pRpc->pCont;
  dTrace("vgId:%d, msg:%p, is created, type:%s len:%d", pHead->vgId, pMsg, TMSG_INFO(pRpc->msgType), pRpc->contLen);

  // Pre-convert to network order; vmPutMsgToQueue converts back with ntohl.
  pHead->contLen = htonl(pHead->contLen);
  pHead->vgId = htonl(pHead->vgId);
  memcpy(pMsg, pRpc, sizeof(SRpcMsg));
  pRpc->pCont = NULL;  // content ownership moved into pMsg

  code = vmPutMsgToQueue(pMgmt, pMsg, qtype);
  if (code != 0) {
    dTrace("msg:%p, is freed", pMsg);
    rpcFreeCont(pMsg->pCont);
    taosFreeQitem(pMsg);
  }

  return code;
}
382

383
// Return the number of items pending in the given queue of vnode vgId.
// Returns 0 (never negative) when the vnode is missing or qtype has no
// per-vnode queue.
int32_t vmGetQueueSize(SVnodeMgmt *pMgmt, int32_t vgId, EQueueType qtype) {
  int32_t    size = -1;  // sentinel: "no queue found"
  SVnodeObj *pVnode = vmAcquireVnode(pMgmt, vgId);

  if (pVnode != NULL) {
    switch (qtype) {
      case WRITE_QUEUE:
        size = taosQueueItemSize(pVnode->pWriteW.queue);
        break;
      case SYNC_QUEUE:
        size = taosQueueItemSize(pVnode->pSyncW.queue);
        break;
      case APPLY_QUEUE:
        size = taosQueueItemSize(pVnode->pApplyW.queue);
        break;
      case QUERY_QUEUE:
        size = taosQueueItemSize(pVnode->pQueryQ);
        break;
      case FETCH_QUEUE:
        size = taosQueueItemSize(pVnode->pFetchQ);
        break;
      case STREAM_READER_QUEUE:
        size = taosQueueItemSize(pVnode->pStreamReaderQ);
        break;
      default:
        break;
    }
    vmReleaseVnode(pMgmt, pVnode);
  }

  if (size < 0) {
    dTrace("vgId:%d, can't get size from queue since %s, qtype:%d", vgId, terrstr(), qtype);
    size = 0;
  }
  return size;
}
417

418
// Create all per-vnode worker queues: dedicated multi-workers for write /
// sync / sync-rd / apply, plus pool-backed query, fetch and stream-reader
// queues. On multi-worker init failure, previously created workers are torn
// down before returning. Returns 0 on success or an error code.
int32_t vmAllocQueue(SVnodeMgmt *pMgmt, SVnodeObj *pVnode) {
  int32_t code = 0;

  SMultiWorkerCfg wcfg = {.max = 1, .name = "vnode-write", .fp = (FItems)vnodeProposeWriteMsg, .param = pVnode->pImpl};
  SMultiWorkerCfg scfg = {.max = 1, .name = "vnode-sync", .fp = (FItems)vmProcessSyncQueue, .param = pVnode};
  SMultiWorkerCfg sccfg = {.max = 1, .name = "vnode-sync-rd", .fp = (FItems)vmProcessSyncQueue, .param = pVnode};
  SMultiWorkerCfg acfg = {.max = 1, .name = "vnode-apply", .fp = (FItems)vnodeApplyWriteMsg, .param = pVnode->pImpl};

  code = tMultiWorkerInit(&pVnode->pWriteW, &wcfg);
  if (code) {
    return code;
  }
  code = tMultiWorkerInit(&pVnode->pSyncW, &scfg);
  if (code) {
    tMultiWorkerCleanup(&pVnode->pWriteW);
    return code;
  }
  code = tMultiWorkerInit(&pVnode->pSyncRdW, &sccfg);
  if (code) {
    tMultiWorkerCleanup(&pVnode->pWriteW);
    tMultiWorkerCleanup(&pVnode->pSyncW);
    return code;
  }
  code = tMultiWorkerInit(&pVnode->pApplyW, &acfg);
  if (code) {
    tMultiWorkerCleanup(&pVnode->pWriteW);
    tMultiWorkerCleanup(&pVnode->pSyncW);
    tMultiWorkerCleanup(&pVnode->pSyncRdW);
    return code;
  }

  // Pool-backed queues share threads managed by the vnode-mgmt pools.
  pVnode->pQueryQ = tQueryAutoQWorkerAllocQueue(&pMgmt->queryPool, pVnode, (FItem)vmProcessQueryQueue);
  pVnode->pFetchQ = tWWorkerAllocQueue(&pMgmt->fetchPool, pVnode, (FItems)vmProcessFetchQueue);

  // init stream msg processing queue family
  pVnode->pStreamReaderQ = tWWorkerAllocQueue(&pMgmt->streamReaderPool, pVnode, vmProcessStreamReaderQueue);

  if (pVnode->pWriteW.queue == NULL || pVnode->pSyncW.queue == NULL || pVnode->pSyncRdW.queue == NULL ||
      pVnode->pApplyW.queue == NULL || pVnode->pQueryQ == NULL || pVnode->pFetchQ == NULL || !pVnode->pStreamReaderQ) {
    return TSDB_CODE_OUT_OF_MEMORY;
  }

  dInfo("vgId:%d, write-queue:%p is alloced, thread:%08" PRId64, pVnode->vgId, pVnode->pWriteW.queue,
        taosQueueGetThreadId(pVnode->pWriteW.queue));
  dInfo("vgId:%d, sync-queue:%p is alloced, thread:%08" PRId64, pVnode->vgId, pVnode->pSyncW.queue,
        taosQueueGetThreadId(pVnode->pSyncW.queue));
  dInfo("vgId:%d, sync-rd-queue:%p is alloced, thread:%08" PRId64, pVnode->vgId, pVnode->pSyncRdW.queue,
        taosQueueGetThreadId(pVnode->pSyncRdW.queue));
  dInfo("vgId:%d, apply-queue:%p is alloced, thread:%08" PRId64, pVnode->vgId, pVnode->pApplyW.queue,
        taosQueueGetThreadId(pVnode->pApplyW.queue));
  dInfo("vgId:%d, query-queue:%p is alloced", pVnode->vgId, pVnode->pQueryQ);
  dInfo("vgId:%d, fetch-queue:%p is alloced, thread:%08" PRId64, pVnode->vgId, pVnode->pFetchQ,
        taosQueueGetThreadId(pVnode->pFetchQ));
  dInfo("vgId:%d, stream-reader-queue:%p is alloced, thread:%08" PRId64, pVnode->vgId, pVnode->pStreamReaderQ,
        taosQueueGetThreadId(pVnode->pStreamReaderQ));
  return 0;
}
473

474
// Release the pool-backed queues of a vnode (query, fetch, stream-reader)
// and clear the stale pointers. The dedicated multi-worker queues (write /
// sync / apply) are cleaned up elsewhere via tMultiWorkerCleanup.
void vmFreeQueue(SVnodeMgmt *pMgmt, SVnodeObj *pVnode) {
  tQueryAutoQWorkerFreeQueue(&pMgmt->queryPool, pVnode->pQueryQ);
  tWWorkerFreeQueue(&pMgmt->fetchPool, pVnode->pFetchQ);
  tWWorkerFreeQueue(&pMgmt->streamReaderPool, pVnode->pStreamReaderQ);
  pVnode->pQueryQ = NULL;
  pVnode->pFetchQ = NULL;  // duplicate `pFetchQ = NULL` assignment removed
  pVnode->pStreamReaderQ = NULL;
  dDebug("vgId:%d, queue is freed", pVnode->vgId);
}
485

486
// Initialize all shared worker pools and management workers for the vnode
// module: query pool, fetch pool, single mgmt worker, multi-threaded mgmt
// worker, and the stream-reader pool. Returns 0 on success or the first
// init error encountered.
int32_t vmStartWorker(SVnodeMgmt *pMgmt) {
  int32_t code = 0;

  SQueryAutoQWorkerPool *pQPool = &pMgmt->queryPool;
  pQPool->name = "vnode-query";
  pQPool->min = tsNumOfVnodeQueryThreads;
  pQPool->max = tsNumOfVnodeQueryThreads;
  if ((code = tQueryAutoQWorkerInit(pQPool)) != 0) return code;

  tsNumOfQueryThreads += tsNumOfVnodeQueryThreads;

  SWWorkerPool *pFPool = &pMgmt->fetchPool;
  pFPool->name = "vnode-fetch";
  pFPool->max = tsNumOfVnodeFetchThreads;
  if ((code = tWWorkerInit(pFPool)) != 0) return code;

  SSingleWorkerCfg mgmtCfg = {
      .min = 1, .max = 1, .name = "vnode-mgmt", .fp = (FItem)vmProcessMgmtQueue, .param = pMgmt};
  if ((code = tSingleWorkerInit(&pMgmt->mgmtWorker, &mgmtCfg)) != 0) return code;

  // Guarantee at least two threads for the multi-mgmt worker on single-core
  // hosts; otherwise scale with the core count.
  int32_t threadNum = (tsNumOfCores == 1) ? 2 : tsNumOfCores;
  SSingleWorkerCfg multiMgmtCfg = {.min = threadNum,
                                   .max = threadNum,
                                   .name = "vnode-multi-mgmt",
                                   .fp = (FItem)vmProcessMultiMgmtQueue,
                                   .param = pMgmt};
  if ((code = tSingleWorkerInit(&pMgmt->mgmtMultiWorker, &multiMgmtCfg)) != 0) return code;

  SWWorkerPool *pStreamReaderPool = &pMgmt->streamReaderPool;
  pStreamReaderPool->name = "vnode-st-reader";
  pStreamReaderPool->max = tsNumOfVnodeStreamReaderThreads;
  if ((code = tWWorkerInit(pStreamReaderPool)) != 0) return code;

/*
  SSingleWorkerCfg runnerWorkerCfg = {.min = tsNumOfStreamRunnerThreads,
                                      .max = tsNumOfStreamRunnerThreads,
                                      .name = "vnode-st-runner",
                                      .fp = (FItem)NULL,
                                      .param = pMgmt};
  if ((code = tSingleWorkerInit(&pMgmt->streamRunnerWorker, &runnerWorkerCfg)) != 0) return code;
*/

  dDebug("vnode workers are initialized");
  return 0;
}
538

539
// Tear down the shared worker pools created by vmStartWorker.
// Note: the mgmt single workers are not cleaned up here (matches existing
// shutdown ordering elsewhere in the module).
void vmStopWorker(SVnodeMgmt *pMgmt) {
  tQueryAutoQWorkerCleanup(&pMgmt->queryPool);
  tWWorkerCleanup(&pMgmt->fetchPool);
  tWWorkerCleanup(&pMgmt->streamReaderPool);
//  tSingleWorkerCleanup(&pMgmt->streamRunnerWorker);
  dDebug("vnode workers are closed");
}
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc