• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

taosdata / TDengine / #3523

06 Nov 2024 02:29AM UTC coverage: 55.861% (-2.4%) from 58.216%
#3523

push

travis-ci

web-flow
Merge pull request #28551 from taosdata/feat/TS-5215-2

test(blob): testing & fixes for blob

106075 of 245834 branches covered (43.15%)

Branch coverage included in aggregate %.

0 of 15 new or added lines in 2 files covered. (0.0%)

17003 existing lines in 254 files now uncovered.

181910 of 269703 relevant lines covered (67.45%)

1527639.59 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

58.6
/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c
1
/*
2
 * Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
3
 *
4
 * This program is free software: you can use, redistribute, and/or modify
5
 * it under the terms of the GNU Affero General Public License, version 3
6
 * or later ("AGPL"), as published by the Free Software Foundation.
7
 *
8
 * This program is distributed in the hope that it will be useful, but WITHOUT
9
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10
 * FITNESS FOR A PARTICULAR PURPOSE.
11
 *
12
 * You should have received a copy of the GNU Affero General Public License
13
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
14
 */
15

16
#define _DEFAULT_SOURCE
17
#include "vmInt.h"
18
#include "vnodeInt.h"
19

20
// Reply to pMsg with the given result code.
// A message without an rpc handle expects no answer and is skipped.
static inline void vmSendRsp(SRpcMsg *pMsg, int32_t code) {
  if (pMsg->info.handle == NULL) {
    return;  // fire-and-forget message: nothing to respond to
  }

  SRpcMsg rsp = {0};
  rsp.code = code;
  rsp.pCont = pMsg->info.rsp;
  rsp.contLen = pMsg->info.rspLen;
  rsp.info = pMsg->info;
  tmsgSendRsp(&rsp);
}
30

31
// Worker callback for the multi-threaded vnode-multi-mgmt queue.
// Currently only TDMT_DND_CREATE_VNODE is routed here; replies when the
// message is a request, then frees the message.
static void vmProcessMultiMgmtQueue(SQueueInfo *pInfo, SRpcMsg *pMsg) {
  SVnodeMgmt     *pMgmt = pInfo->ahandle;
  int32_t         code = -1;
  const STraceId *trace = &pMsg->info.traceId;  // presumably consumed by the dG* log macros — TODO confirm

  dGTrace("msg:%p, get from vnode-multi-mgmt queue", pMsg);
  switch (pMsg->msgType) {
    case TDMT_DND_CREATE_VNODE:
      code = vmProcessCreateVnodeReq(pMgmt, pMsg);
      break;
    default:
      // Fix: the switch previously had no default, so an unexpected msgType
      // fell through with code=-1 and reported whatever stale terrno was left
      // over. Mirror vmProcessMgmtQueue and report MSG_NOT_PROCESSED instead.
      terrno = TSDB_CODE_MSG_NOT_PROCESSED;
      dGError("msg:%p, not processed in vnode-multi-mgmt queue", pMsg);
  }

  if (IsReq(pMsg)) {
    if (code != 0) {
      if (terrno != 0) code = terrno;  // prefer the thread-local error if set
      dGError("msg:%p, failed to process since %s, type:%s", pMsg, tstrerror(code), TMSG_INFO(pMsg->msgType));
    }
    vmSendRsp(pMsg, code);
  }

  dGTrace("msg:%p, is freed, code:0x%x", pMsg, code);
  rpcFreeCont(pMsg->pCont);
  taosFreeQitem(pMsg);
}
55

56
// Worker callback for the serial vnode-mgmt queue.
// Dispatches node-management requests (create/drop vnode, replica/hash-range
// alterations, write disable, vnode type change, learner catchup check, and
// arbitration heartbeat), replies when the message is a request, then frees
// the message.
static void vmProcessMgmtQueue(SQueueInfo *pInfo, SRpcMsg *pMsg) {
  SVnodeMgmt     *pMgmt = pInfo->ahandle;
  int32_t         code = -1;  // default "failed"; refined by the dispatch below
  const STraceId *trace = &pMsg->info.traceId;  // presumably consumed by the dG* log macros — TODO confirm

  dGTrace("msg:%p, get from vnode-mgmt queue", pMsg);
  switch (pMsg->msgType) {
    case TDMT_DND_CREATE_VNODE:
      code = vmProcessCreateVnodeReq(pMgmt, pMsg);
      break;
    case TDMT_DND_DROP_VNODE:
      code = vmProcessDropVnodeReq(pMgmt, pMsg);
      break;
    case TDMT_VND_ALTER_REPLICA:
      code = vmProcessAlterVnodeReplicaReq(pMgmt, pMsg);
      break;
    case TDMT_VND_DISABLE_WRITE:
      code = vmProcessDisableVnodeWriteReq(pMgmt, pMsg);
      break;
    case TDMT_VND_ALTER_HASHRANGE:
      code = vmProcessAlterHashRangeReq(pMgmt, pMsg);
      break;
    case TDMT_DND_ALTER_VNODE_TYPE:
      code = vmProcessAlterVnodeTypeReq(pMgmt, pMsg);
      break;
    case TDMT_DND_CHECK_VNODE_LEARNER_CATCHUP:
      code = vmProcessCheckLearnCatchupReq(pMgmt, pMsg);
      break;
    case TDMT_VND_ARB_HEARTBEAT:
      code = vmProcessArbHeartBeatReq(pMgmt, pMsg);
      break;
    default:
      // terrno is picked up as the reply code below (code stays -1 here).
      terrno = TSDB_CODE_MSG_NOT_PROCESSED;
      dGError("msg:%p, not processed in vnode-mgmt queue", pMsg);
  }

  if (IsReq(pMsg)) {
    if (code != 0) {
      if (terrno != 0) code = terrno;  // prefer the thread-local error when set
      dGError("msg:%p, failed to process since %s, type:%s", pMsg, tstrerror(code), TMSG_INFO(pMsg->msgType));
    }
    vmSendRsp(pMsg, code);
  }

  dGTrace("msg:%p, is freed, code:0x%x", pMsg, code);
  rpcFreeCont(pMsg->pCont);
  taosFreeQitem(pMsg);
}
104

105
// Worker callback for a vnode's query queue: run one query message and reply
// only on failure (the query layer answers successful requests itself), then
// release the message.
static void vmProcessQueryQueue(SQueueInfo *pInfo, SRpcMsg *pMsg) {
  SVnodeObj      *pVnode = pInfo->ahandle;
  const STraceId *trace = &pMsg->info.traceId;

  dGTrace("vgId:%d, msg:%p get from vnode-query queue", pVnode->vgId, pMsg);

  int32_t code = vnodeProcessQueryMsg(pVnode->pImpl, pMsg, pInfo);
  if (code != 0) {
    if (terrno != 0) {
      code = terrno;  // prefer the thread-local error when one was raised
    }
    dGError("vgId:%d, msg:%p failed to query since %s", pVnode->vgId, pMsg, tstrerror(code));
    vmSendRsp(pMsg, code);
  }

  dGTrace("vgId:%d, msg:%p is freed, code:0x%x", pVnode->vgId, pMsg, code);
  rpcFreeCont(pMsg->pCont);
  taosFreeQitem(pMsg);
}
121

122
// Worker callback for a vnode's stream queue: run one stream message, reply
// on failure, then release the message.
static void vmProcessStreamQueue(SQueueInfo *pInfo, SRpcMsg *pMsg) {
  SVnodeObj      *pVnode = pInfo->ahandle;
  const STraceId *trace = &pMsg->info.traceId;

  dGTrace("vgId:%d, msg:%p get from vnode-stream queue", pVnode->vgId, pMsg);

  int32_t code = vnodeProcessStreamMsg(pVnode->pImpl, pMsg, pInfo);
  if (code != 0) {
    terrno = code;  // publish the failure in the thread-local error slot
    dGError("vgId:%d, msg:%p failed to process stream msg %s since %s", pVnode->vgId, pMsg, TMSG_INFO(pMsg->msgType),
            tstrerror(code));
    vmSendRsp(pMsg, code);
  }

  dGTrace("vgId:%d, msg:%p is freed, code:0x%x", pVnode->vgId, pMsg, code);
  rpcFreeCont(pMsg->pCont);
  taosFreeQitem(pMsg);
}
139

140
// Batch worker callback for a vnode's fetch queue: drain up to numOfMsgs
// items from qall, run each through vnodeProcessFetchMsg, reply on failure,
// and free every message.
static void vmProcessFetchQueue(SQueueInfo *pInfo, STaosQall *qall, int32_t numOfMsgs) {
  SVnodeObj *pVnode = pInfo->ahandle;
  SRpcMsg   *pMsg = NULL;

  for (int32_t idx = 0; idx < numOfMsgs; ++idx) {
    if (taosGetQitem(qall, (void **)&pMsg) == 0) {
      continue;  // queue yielded nothing for this slot
    }

    const STraceId *trace = &pMsg->info.traceId;
    dGTrace("vgId:%d, msg:%p get from vnode-fetch queue", pVnode->vgId, pMsg);

    terrno = 0;  // clear stale error state before dispatch
    int32_t code = vnodeProcessFetchMsg(pVnode->pImpl, pMsg, pInfo);
    if (code != 0) {
      if (code == -1 && terrno != 0) {
        code = terrno;  // generic failure: take the real reason from terrno
      }

      // A missing WAL entry is logged at debug level rather than error.
      if (code == TSDB_CODE_WAL_LOG_NOT_EXIST) {
        dGDebug("vnodeProcessFetchMsg vgId:%d, msg:%p failed to fetch since %s", pVnode->vgId, pMsg, terrstr());
      } else {
        dGError("vnodeProcessFetchMsg vgId:%d, msg:%p failed to fetch since %s", pVnode->vgId, pMsg, terrstr());
      }

      vmSendRsp(pMsg, code);
    }

    dGTrace("vnodeProcessFetchMsg vgId:%d, msg:%p is freed, code:0x%x", pVnode->vgId, pMsg, code);
    rpcFreeCont(pMsg->pCont);
    taosFreeQitem(pMsg);
  }
}
170

171
// Batch worker callback for a vnode's sync / sync-rd queues: drain up to
// numOfMsgs items and hand each to the sync layer, which owns any response.
static void vmProcessSyncQueue(SQueueInfo *pInfo, STaosQall *qall, int32_t numOfMsgs) {
  SVnodeObj *pVnode = pInfo->ahandle;
  SRpcMsg   *pMsg = NULL;

  for (int32_t idx = 0; idx < numOfMsgs; ++idx) {
    if (taosGetQitem(qall, (void **)&pMsg) == 0) {
      continue;  // queue yielded nothing for this slot
    }

    const STraceId *trace = &pMsg->info.traceId;
    dGTrace("vgId:%d, msg:%p get from vnode-sync queue", pVnode->vgId, pMsg);

    int32_t code = vnodeProcessSyncMsg(pVnode->pImpl, pMsg, NULL);  // no response here
    dGTrace("vgId:%d, msg:%p is freed, code:0x%x", pVnode->vgId, pMsg, code);
    rpcFreeCont(pMsg->pCont);
    taosFreeQitem(pMsg);
  }
}
186

187
// Answer pMsg directly over rpc using the current terrno as the result code.
// Messages without a handle expect no reply.
static void vmSendResponse(SRpcMsg *pMsg) {
  if (pMsg->info.handle == NULL) {
    return;
  }

  SRpcMsg rsp = {.info = pMsg->info, .code = terrno};
  if (rpcSendResponse(&rsp) != 0) {
    dError("failed to send response since %s", terrstr());
  }
}
×
195

196
// Check whether the vnode's data disk has room for more writes: consult the
// tiered filesystem when one is configured, otherwise the OS-level check.
static bool vmDataSpaceSufficient(SVnodeObj *pVnode) {
  STfs *pTfs = pVnode->pImpl->pTfs;
  return (pTfs != NULL) ? tfsDiskSpaceSufficient(pTfs, 0, pVnode->diskPrimary)
                        : osDataSpaceSufficient();
}
204

205
// Acquire a reference to the vnode with the given vgId into *pNode.
// Returns 0 on success, or terrno when the vnode is unavailable.
static int32_t vmAcquireVnodeWrapper(SVnodeMgmt *pMgt, int32_t vgId, SVnodeObj **pNode) {
  SVnodeObj *pVnode = vmAcquireVnode(pMgt, vgId);
  *pNode = pVnode;
  return (pVnode == NULL) ? terrno : 0;
}
212
// Route pMsg to one of the target vnode's queues, selected by qtype.
// Converts the SMsgHead fields from network to host byte order in place,
// holds a vnode reference for the duration of the enqueue, and applies
// per-queue admission checks (query preprocessing; disk space, storage grant
// and write-disabled state for writes). Returns 0 on success or an error
// code; on failure the caller still owns pMsg (see vmPutRpcMsgToQueue).
static int32_t vmPutMsgToQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg, EQueueType qtype) {
  int32_t         code = 0;
  const STraceId *trace = &pMsg->info.traceId;  // presumably consumed by the dG* log macros — TODO confirm
  // A payload smaller than SMsgHead cannot carry a vgId; reject it outright.
  if (pMsg->contLen < sizeof(SMsgHead)) {
    dGError("invalid rpc msg with no msg head at pCont. pMsg:%p, type:%s, contLen:%d", pMsg, TMSG_INFO(pMsg->msgType),
            pMsg->contLen);
    return TSDB_CODE_INVALID_MSG;
  }

  SMsgHead *pHead = pMsg->pCont;

  // Header fields arrive in network byte order; convert before use.
  pHead->contLen = ntohl(pHead->contLen);
  pHead->vgId = ntohl(pHead->vgId);

  SVnodeObj *pVnode = NULL;
  code = vmAcquireVnodeWrapper(pMgmt, pHead->vgId, &pVnode);
  if (code != 0) {
    dGDebug("vgId:%d, msg:%p failed to put into vnode queue since %s, type:%s qtype:%d contLen:%d", pHead->vgId, pMsg,
            tstrerror(code), TMSG_INFO(pMsg->msgType), qtype, pHead->contLen);
    return code;
  }

  switch (qtype) {
    case QUERY_QUEUE:
      // Queries are preprocessed first; enqueue only when that succeeds.
      code = vnodePreprocessQueryMsg(pVnode->pImpl, pMsg);
      if (code) {
        dError("vgId:%d, msg:%p preprocess query msg failed since %s", pVnode->vgId, pMsg, tstrerror(code));
      } else {
        dGTrace("vgId:%d, msg:%p put into vnode-query queue", pVnode->vgId, pMsg);
        code = taosWriteQitem(pVnode->pQueryQ, pMsg);
      }
      break;
    case STREAM_QUEUE:
      dGTrace("vgId:%d, msg:%p put into vnode-stream queue", pVnode->vgId, pMsg);
      code = taosWriteQitem(pVnode->pStreamQ, pMsg);
      break;
    case FETCH_QUEUE:
      dGTrace("vgId:%d, msg:%p put into vnode-fetch queue", pVnode->vgId, pMsg);
      code = taosWriteQitem(pVnode->pFetchQ, pMsg);
      break;
    case WRITE_QUEUE:
      // Admission check 1: enough disk space on the vnode's data disk.
      if (!vmDataSpaceSufficient(pVnode)) {
        code = TSDB_CODE_NO_ENOUGH_DISKSPACE;
        dError("vgId:%d, msg:%p put into vnode-write queue failed since %s", pVnode->vgId, pMsg, tstrerror(code));
        break;
      }
      // Admission check 2: submits require a valid storage grant.
      if (pMsg->msgType == TDMT_VND_SUBMIT && (grantCheck(TSDB_GRANT_STORAGE) != TSDB_CODE_SUCCESS)) {
        code = TSDB_CODE_VND_NO_WRITE_AUTH;
        dDebug("vgId:%d, msg:%p put into vnode-write queue failed since %s", pVnode->vgId, pMsg, tstrerror(code));
        break;
      }
      // Admission check 3: writes are rejected while the vnode is disabled,
      // except TDMT_VND_ALTER_CONFIRM which must always get through.
      if (pMsg->msgType != TDMT_VND_ALTER_CONFIRM && pVnode->disable) {
        dDebug("vgId:%d, msg:%p put into vnode-write queue failed since its disable", pVnode->vgId, pMsg);
        code = TSDB_CODE_VND_STOPPED;
        break;
      }
      dGTrace("vgId:%d, msg:%p put into vnode-write queue", pVnode->vgId, pMsg);
      code = taosWriteQitem(pVnode->pWriteW.queue, pMsg);
      break;
    case SYNC_QUEUE:
      dGTrace("vgId:%d, msg:%p put into vnode-sync queue", pVnode->vgId, pMsg);
      code = taosWriteQitem(pVnode->pSyncW.queue, pMsg);
      break;
    case SYNC_RD_QUEUE:
      dGTrace("vgId:%d, msg:%p put into vnode-sync-rd queue", pVnode->vgId, pMsg);
      code = taosWriteQitem(pVnode->pSyncRdW.queue, pMsg);
      break;
    case APPLY_QUEUE:
      dGTrace("vgId:%d, msg:%p put into vnode-apply queue", pVnode->vgId, pMsg);
      code = taosWriteQitem(pVnode->pApplyW.queue, pMsg);
      break;
    default:
      code = TSDB_CODE_INVALID_MSG;
      break;
  }

  // Release the reference taken by vmAcquireVnodeWrapper above.
  vmReleaseVnode(pMgmt, pVnode);
  return code;
}
291

292
// Enqueue pMsg on the owning vnode's sync-rd queue (see vmPutMsgToQueue).
int32_t vmPutMsgToSyncRdQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) { return vmPutMsgToQueue(pMgmt, pMsg, SYNC_RD_QUEUE); }
293

294
// Enqueue pMsg on the owning vnode's sync queue (see vmPutMsgToQueue).
int32_t vmPutMsgToSyncQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) { return vmPutMsgToQueue(pMgmt, pMsg, SYNC_QUEUE); }
295

296
// Enqueue pMsg on the owning vnode's write queue (see vmPutMsgToQueue).
int32_t vmPutMsgToWriteQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) { return vmPutMsgToQueue(pMgmt, pMsg, WRITE_QUEUE); }
297

298
// Enqueue pMsg on the owning vnode's query queue (see vmPutMsgToQueue).
int32_t vmPutMsgToQueryQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) { return vmPutMsgToQueue(pMgmt, pMsg, QUERY_QUEUE); }
299

300
// Enqueue pMsg on the owning vnode's fetch queue (see vmPutMsgToQueue).
int32_t vmPutMsgToFetchQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) { return vmPutMsgToQueue(pMgmt, pMsg, FETCH_QUEUE); }
301

302
// Enqueue pMsg on the owning vnode's stream queue (see vmPutMsgToQueue).
int32_t vmPutMsgToStreamQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) { return vmPutMsgToQueue(pMgmt, pMsg, STREAM_QUEUE); }
303

304
// Enqueue pMsg on the node-level multi-threaded mgmt queue (not tied to a
// specific vnode); drained by vmProcessMultiMgmtQueue.
int32_t vmPutMsgToMultiMgmtQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
  const STraceId *trace = &pMsg->info.traceId;  // presumably consumed by dGTrace — TODO confirm
  dGTrace("msg:%p, put into vnode-multi-mgmt queue", pMsg);
  return taosWriteQitem(pMgmt->mgmtMultiWorker.queue, pMsg);
}
309

310
// Enqueue pMsg on the node-level serial mgmt queue; drained by
// vmProcessMgmtQueue.
int32_t vmPutMsgToMgmtQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
  const STraceId *trace = &pMsg->info.traceId;  // presumably consumed by dGTrace — TODO confirm
  dGTrace("msg:%p, put into vnode-mgmt queue", pMsg);
  return taosWriteQitem(pMgmt->mgmtWorker.queue, pMsg);
}
315

316
// Wrap an incoming rpc message in a queue item and route it via
// vmPutMsgToQueue. Ownership: pRpc->pCont is always taken over (pRpc->pCont
// is NULL on every return path); on enqueue failure the new item and its
// payload are freed here before the error is returned.
int32_t vmPutRpcMsgToQueue(SVnodeMgmt *pMgmt, EQueueType qtype, SRpcMsg *pRpc) {
  int32_t code;
  // A payload smaller than SMsgHead cannot carry a vgId; drop it.
  if (pRpc->contLen < sizeof(SMsgHead)) {
    dError("invalid rpc msg with no msg head at pCont. pRpc:%p, type:%s, len:%d", pRpc, TMSG_INFO(pRpc->msgType),
           pRpc->contLen);
    rpcFreeCont(pRpc->pCont);
    pRpc->pCont = NULL;
    return TSDB_CODE_INVALID_MSG;
  }

  // Apply-queue items use the default item type; everything else is RPC-typed.
  EQItype  itype = APPLY_QUEUE == qtype ? DEF_QITEM : RPC_QITEM;
  SRpcMsg *pMsg;
  code = taosAllocateQitem(sizeof(SRpcMsg), itype, pRpc->contLen, (void **)&pMsg);
  if (code) {
    rpcFreeCont(pRpc->pCont);
    pRpc->pCont = NULL;
    return code;
  }

  SMsgHead *pHead = pRpc->pCont;
  dTrace("vgId:%d, msg:%p is created, type:%s len:%d", pHead->vgId, pMsg, TMSG_INFO(pRpc->msgType), pRpc->contLen);

  // Re-encode the header to network order; vmPutMsgToQueue converts it back.
  pHead->contLen = htonl(pHead->contLen);
  pHead->vgId = htonl(pHead->vgId);
  // The queue item takes over the rpc message, including its pCont payload.
  memcpy(pMsg, pRpc, sizeof(SRpcMsg));
  pRpc->pCont = NULL;

  code = vmPutMsgToQueue(pMgmt, pMsg, qtype);
  if (code != 0) {
    // Enqueue failed: this function still owns pMsg, so free it here.
    dTrace("msg:%p, is freed", pMsg);
    rpcFreeCont(pMsg->pCont);
    taosFreeQitem(pMsg);
  }

  return code;
}
352

353
// Report the number of pending items in one of a vnode's queues.
// Returns 0 when the vnode cannot be acquired or the queue type is unknown.
int32_t vmGetQueueSize(SVnodeMgmt *pMgmt, int32_t vgId, EQueueType qtype) {
  int32_t    size = -1;  // sentinel: "not measured"
  SVnodeObj *pVnode = vmAcquireVnode(pMgmt, vgId);

  if (pVnode != NULL) {
    switch (qtype) {
      case WRITE_QUEUE:
        size = taosQueueItemSize(pVnode->pWriteW.queue);
        break;
      case SYNC_QUEUE:
        size = taosQueueItemSize(pVnode->pSyncW.queue);
        break;
      case APPLY_QUEUE:
        size = taosQueueItemSize(pVnode->pApplyW.queue);
        break;
      case QUERY_QUEUE:
        size = taosQueueItemSize(pVnode->pQueryQ);
        break;
      case FETCH_QUEUE:
        size = taosQueueItemSize(pVnode->pFetchQ);
        break;
      case STREAM_QUEUE:
        size = taosQueueItemSize(pVnode->pStreamQ);
        break;
      default:
        break;
    }
    vmReleaseVnode(pMgmt, pVnode);
  }

  if (size < 0) {
    dTrace("vgId:%d, can't get size from queue since %s, qtype:%d", vgId, terrstr(), qtype);
    size = 0;
  }
  return size;
}
387

388
// Create all per-vnode workers and queues: the write/sync/sync-rd/apply
// multi-workers owned by the vnode, plus query/stream/fetch queues drawn
// from the shared pools in pMgmt. Returns 0 on success or an error code.
int32_t vmAllocQueue(SVnodeMgmt *pMgmt, SVnodeObj *pVnode) {
  int32_t         code = 0;
  SMultiWorkerCfg wcfg = {.max = 1, .name = "vnode-write", .fp = (FItems)vnodeProposeWriteMsg, .param = pVnode->pImpl};
  SMultiWorkerCfg scfg = {.max = 1, .name = "vnode-sync", .fp = (FItems)vmProcessSyncQueue, .param = pVnode};
  SMultiWorkerCfg sccfg = {.max = 1, .name = "vnode-sync-rd", .fp = (FItems)vmProcessSyncQueue, .param = pVnode};
  SMultiWorkerCfg acfg = {.max = 1, .name = "vnode-apply", .fp = (FItems)vnodeApplyWriteMsg, .param = pVnode->pImpl};
  // Each init failure below unwinds the workers created before it.
  code = tMultiWorkerInit(&pVnode->pWriteW, &wcfg);
  if (code) {
    return code;
  }
  code = tMultiWorkerInit(&pVnode->pSyncW, &scfg);
  if (code) {
    tMultiWorkerCleanup(&pVnode->pWriteW);
    return code;
  }
  code = tMultiWorkerInit(&pVnode->pSyncRdW, &sccfg);
  if (code) {
    tMultiWorkerCleanup(&pVnode->pWriteW);
    tMultiWorkerCleanup(&pVnode->pSyncW);
    return code;
  }
  code = tMultiWorkerInit(&pVnode->pApplyW, &acfg);
  if (code) {
    tMultiWorkerCleanup(&pVnode->pWriteW);
    tMultiWorkerCleanup(&pVnode->pSyncW);
    tMultiWorkerCleanup(&pVnode->pSyncRdW);
    return code;
  }

  // Read-side queues come from the pools shared by all vnodes.
  pVnode->pQueryQ = tQueryAutoQWorkerAllocQueue(&pMgmt->queryPool, pVnode, (FItem)vmProcessQueryQueue);
  pVnode->pStreamQ = tAutoQWorkerAllocQueue(&pMgmt->streamPool, pVnode, (FItem)vmProcessStreamQueue);
  pVnode->pFetchQ = tWWorkerAllocQueue(&pMgmt->fetchPool, pVnode, (FItems)vmProcessFetchQueue);

  // NOTE(review): unlike the tMultiWorkerInit failures above, this path does
  // not clean up the four workers already created — presumably the caller's
  // vnode teardown path releases them; verify against callers.
  if (pVnode->pWriteW.queue == NULL || pVnode->pSyncW.queue == NULL || pVnode->pSyncRdW.queue == NULL ||
      pVnode->pApplyW.queue == NULL || pVnode->pQueryQ == NULL || pVnode->pStreamQ == NULL || pVnode->pFetchQ == NULL) {
    return TSDB_CODE_OUT_OF_MEMORY;
  }

  dInfo("vgId:%d, write-queue:%p is alloced, thread:%08" PRId64, pVnode->vgId, pVnode->pWriteW.queue,
        taosQueueGetThreadId(pVnode->pWriteW.queue));
  dInfo("vgId:%d, sync-queue:%p is alloced, thread:%08" PRId64, pVnode->vgId, pVnode->pSyncW.queue,
        taosQueueGetThreadId(pVnode->pSyncW.queue));
  dInfo("vgId:%d, sync-rd-queue:%p is alloced, thread:%08" PRId64, pVnode->vgId, pVnode->pSyncRdW.queue,
        taosQueueGetThreadId(pVnode->pSyncRdW.queue));
  dInfo("vgId:%d, apply-queue:%p is alloced, thread:%08" PRId64, pVnode->vgId, pVnode->pApplyW.queue,
        taosQueueGetThreadId(pVnode->pApplyW.queue));
  dInfo("vgId:%d, query-queue:%p is alloced", pVnode->vgId, pVnode->pQueryQ);
  dInfo("vgId:%d, fetch-queue:%p is alloced, thread:%08" PRId64, pVnode->vgId, pVnode->pFetchQ,
        taosQueueGetThreadId(pVnode->pFetchQ));
  dInfo("vgId:%d, stream-queue:%p is alloced", pVnode->vgId, pVnode->pStreamQ);
  return 0;
}
440

441
// Return the vnode's read-side queues to their shared worker pools and clear
// the stale pointers.
void vmFreeQueue(SVnodeMgmt *pMgmt, SVnodeObj *pVnode) {
  tQueryAutoQWorkerFreeQueue(&pMgmt->queryPool, pVnode->pQueryQ);
  pVnode->pQueryQ = NULL;

  tAutoQWorkerFreeQueue(&pMgmt->streamPool, pVnode->pStreamQ);
  pVnode->pStreamQ = NULL;

  tWWorkerFreeQueue(&pMgmt->fetchPool, pVnode->pFetchQ);
  pVnode->pFetchQ = NULL;

  dDebug("vgId:%d, queue is freed", pVnode->vgId);
}
450

451
// Initialize the shared vnode worker pools (query/stream/fetch) and the two
// node-level mgmt workers. Returns 0 on success or the first failing init's
// error code.
// NOTE(review): on a mid-sequence failure, pools initialized earlier are not
// torn down here — presumably vmStopWorker handles that; verify with callers.
int32_t vmStartWorker(SVnodeMgmt *pMgmt) {
  int32_t                code = 0;
  SQueryAutoQWorkerPool *pQPool = &pMgmt->queryPool;
  pQPool->name = "vnode-query";
  pQPool->min = tsNumOfVnodeQueryThreads;
  pQPool->max = tsNumOfVnodeQueryThreads;
  if ((code = tQueryAutoQWorkerInit(pQPool)) != 0) return code;

  SAutoQWorkerPool *pStreamPool = &pMgmt->streamPool;
  pStreamPool->name = "vnode-stream";
  pStreamPool->ratio = tsRatioOfVnodeStreamThreads;
  if ((code = tAutoQWorkerInit(pStreamPool)) != 0) return code;

  SWWorkerPool *pFPool = &pMgmt->fetchPool;
  pFPool->name = "vnode-fetch";
  pFPool->max = tsNumOfVnodeFetchThreads;
  if ((code = tWWorkerInit(pFPool)) != 0) return code;

  // Serial mgmt worker: exactly one thread.
  SSingleWorkerCfg mgmtCfg = {
      .min = 1, .max = 1, .name = "vnode-mgmt", .fp = (FItem)vmProcessMgmtQueue, .param = pMgmt};

  if ((code = tSingleWorkerInit(&pMgmt->mgmtWorker, &mgmtCfg)) != 0) return code;

  // Multi-mgmt worker: one thread per core, with a floor of 2 on a
  // single-core host.
  int32_t threadNum = 0;
  if (tsNumOfCores == 1) {
    threadNum = 2;
  } else {
    threadNum = tsNumOfCores;
  }
  SSingleWorkerCfg multiMgmtCfg = {.min = threadNum,
                                   .max = threadNum,
                                   .name = "vnode-multi-mgmt",
                                   .fp = (FItem)vmProcessMultiMgmtQueue,
                                   .param = pMgmt};

  if ((code = tSingleWorkerInit(&pMgmt->mgmtMultiWorker, &multiMgmtCfg)) != 0) return code;

  dDebug("vnode workers are initialized");
  return 0;
}
491

492
// Tear down the shared vnode worker pools created by vmStartWorker.
// NOTE(review): the mgmt/multi-mgmt single workers started there are not
// cleaned up here — presumably released elsewhere; verify.
void vmStopWorker(SVnodeMgmt *pMgmt) {
  tQueryAutoQWorkerCleanup(&pMgmt->queryPool);
  tAutoQWorkerCleanup(&pMgmt->streamPool);
  tWWorkerCleanup(&pMgmt->fetchPool);
  dDebug("vnode workers are closed");
}
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc