taosdata / TDengine · build #3615 (push, travis-ci, web-flow)
18 Feb 2025 07:41AM UTC — coverage: 62.953% (+1.6%) from 61.4%
Merge pull request #29812 from taosdata/doc/analysis — doc: update tdgpt doc.

146885 of 299602 branches covered (49.03%)
Branch coverage included in aggregate %.
230802 of 300346 relevant lines covered (76.85%)
17263824.17 hits per line
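Assuming the aggregate pools covered lines and covered branches over relevant lines and total branches, the headline figure is consistent with the numbers above: (230802 + 146885) / (300346 + 299602) = 377687 / 599948 ≈ 62.95%.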

Source file: /source/dnode/vnode/src/vnd/vnodeSync.c (64.12% covered)
/*
 * Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
 *
 * This program is free software: you can use, redistribute, and/or modify
 * it under the terms of the GNU Affero General Public License, version 3
 * or later ("AGPL"), as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.
 *
 * You should have received a copy of the GNU Affero General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#define _DEFAULT_SOURCE
#include "sync.h"
#include "tq.h"
#include "tqCommon.h"
#include "tsdb.h"
#include "vnd.h"

#define BATCH_ENABLE 0

static inline bool vnodeIsMsgWeak(tmsg_t type) { return false; }

static inline void vnodeWaitBlockMsg(SVnode *pVnode, const SRpcMsg *pMsg) {
  const STraceId *trace = &pMsg->info.traceId;
  vGTrace("vgId:%d, msg:%p wait block, type:%s sec:%d seq:%" PRId64, pVnode->config.vgId, pMsg,
          TMSG_INFO(pMsg->msgType), pVnode->blockSec, pVnode->blockSeq);
  if (tsem_wait(&pVnode->syncSem) != 0) {
    vError("vgId:%d, failed to wait sem", pVnode->config.vgId);
  }
}

static inline void vnodePostBlockMsg(SVnode *pVnode, const SRpcMsg *pMsg) {
  if (vnodeIsMsgBlock(pMsg->msgType)) {
    const STraceId *trace = &pMsg->info.traceId;
    (void)taosThreadMutexLock(&pVnode->lock);
    if (pVnode->blocked) {
      vGTrace("vgId:%d, msg:%p post block, type:%s sec:%d seq:%" PRId64, pVnode->config.vgId, pMsg,
              TMSG_INFO(pMsg->msgType), pVnode->blockSec, pVnode->blockSeq);
      pVnode->blocked = false;
      pVnode->blockSec = 0;
      pVnode->blockSeq = 0;
      if (tsem_post(&pVnode->syncSem) != 0) {
        vError("vgId:%d, failed to post sem", pVnode->config.vgId);
      }
    }
    (void)taosThreadMutexUnlock(&pVnode->lock);
  }
}

void vnodeRedirectRpcMsg(SVnode *pVnode, SRpcMsg *pMsg, int32_t code) {
  SEpSet newEpSet = {0};
  syncGetRetryEpSet(pVnode->sync, &newEpSet);

  const STraceId *trace = &pMsg->info.traceId;
  vGTrace("vgId:%d, msg:%p is redirect since not leader, numOfEps:%d inUse:%d", pVnode->config.vgId, pMsg,
          newEpSet.numOfEps, newEpSet.inUse);
  for (int32_t i = 0; i < newEpSet.numOfEps; ++i) {
    vGTrace("vgId:%d, msg:%p redirect:%d ep:%s:%u", pVnode->config.vgId, pMsg, i, newEpSet.eps[i].fqdn,
            newEpSet.eps[i].port);
  }
  pMsg->info.hasEpSet = 1;

  if (code == 0) code = TSDB_CODE_SYN_NOT_LEADER;

  SRpcMsg rsp = {.code = code, .info = pMsg->info, .msgType = pMsg->msgType + 1};
  int32_t contLen = tSerializeSEpSet(NULL, 0, &newEpSet);

  rsp.pCont = rpcMallocCont(contLen);
  if (rsp.pCont == NULL) {
    pMsg->code = TSDB_CODE_OUT_OF_MEMORY;
  } else {
    if (tSerializeSEpSet(rsp.pCont, contLen, &newEpSet) < 0) {
      vError("vgId:%d, failed to serialize ep set", pVnode->config.vgId);
    }
    rsp.contLen = contLen;
  }

  tmsgSendRsp(&rsp);
}

static void inline vnodeHandleWriteMsg(SVnode *pVnode, SRpcMsg *pMsg) {
  SRpcMsg rsp = {.code = pMsg->code, .info = pMsg->info};
  if (vnodeProcessWriteMsg(pVnode, pMsg, pMsg->info.conn.applyIndex, &rsp) < 0) {
    rsp.code = terrno;
    const STraceId *trace = &pMsg->info.traceId;
    vGError("vgId:%d, msg:%p failed to apply right now since %s", pVnode->config.vgId, pMsg, terrstr());
  }
  if (rsp.info.handle != NULL) {
    tmsgSendRsp(&rsp);
  } else {
    if (rsp.pCont) {
      rpcFreeCont(rsp.pCont);
    }
  }
}

static void vnodeHandleProposeError(SVnode *pVnode, SRpcMsg *pMsg, int32_t code) {
  if (code == TSDB_CODE_SYN_NOT_LEADER || code == TSDB_CODE_SYN_RESTORING) {
    vnodeRedirectRpcMsg(pVnode, pMsg, code);
  } else if (code == TSDB_CODE_MSG_PREPROCESSED) {
    SRpcMsg rsp = {.code = TSDB_CODE_SUCCESS, .info = pMsg->info};
    if (rsp.info.handle != NULL) {
      tmsgSendRsp(&rsp);
    }
  } else {
    const STraceId *trace = &pMsg->info.traceId;
    vGError("vgId:%d, msg:%p failed to propose since %s, code:0x%x", pVnode->config.vgId, pMsg, tstrerror(code), code);
    SRpcMsg rsp = {.code = code, .info = pMsg->info};
    if (rsp.info.handle != NULL) {
      tmsgSendRsp(&rsp);
    }
  }
}

static int32_t inline vnodeProposeMsg(SVnode *pVnode, SRpcMsg *pMsg, bool isWeak) {
  int64_t seq = 0;

  (void)taosThreadMutexLock(&pVnode->lock);
  int32_t code = syncPropose(pVnode->sync, pMsg, isWeak, &seq);
  bool    wait = (code == 0 && vnodeIsMsgBlock(pMsg->msgType));
  if (wait) {
    if (pVnode->blocked) {
      return TSDB_CODE_INTERNAL_ERROR;
    }
    pVnode->blocked = true;
    pVnode->blockSec = taosGetTimestampSec();
    pVnode->blockSeq = seq;
  }
  (void)taosThreadMutexUnlock(&pVnode->lock);

  if (code > 0) {
    vnodeHandleWriteMsg(pVnode, pMsg);
  } else if (code < 0) {
    if (terrno != 0) code = terrno;
    vnodeHandleProposeError(pVnode, pMsg, code);
  }

  if (wait) vnodeWaitBlockMsg(pVnode, pMsg);
  return code;
}

void vnodeProposeCommitOnNeed(SVnode *pVnode, bool atExit) {
  if (!vnodeShouldCommit(pVnode, atExit)) {
    return;
  }

  int32_t   contLen = sizeof(SMsgHead);
  SMsgHead *pHead = rpcMallocCont(contLen);
  pHead->contLen = contLen;
  pHead->vgId = pVnode->config.vgId;

  SRpcMsg rpcMsg = {0};
  rpcMsg.msgType = TDMT_VND_COMMIT;
  rpcMsg.contLen = contLen;
  rpcMsg.pCont = pHead;
  rpcMsg.info.noResp = 1;

  vInfo("vgId:%d, propose vnode commit", pVnode->config.vgId);
  bool isWeak = false;

  if (!atExit) {
    if (vnodeProposeMsg(pVnode, &rpcMsg, isWeak) < 0) {
      vTrace("vgId:%d, failed to propose vnode commit since %s", pVnode->config.vgId, terrstr());
    }
    rpcFreeCont(rpcMsg.pCont);
    rpcMsg.pCont = NULL;
  } else {
    if (tmsgPutToQueue(&pVnode->msgCb, WRITE_QUEUE, &rpcMsg) < 0) {
      vTrace("vgId:%d, failed to put vnode commit to queue since %s", pVnode->config.vgId, terrstr());
    }
  }
}

#if BATCH_ENABLE

static void inline vnodeProposeBatchMsg(SVnode *pVnode, SRpcMsg **pMsgArr, bool *pIsWeakArr, int32_t *arrSize) {
  if (*arrSize <= 0) return;
  SRpcMsg *pLastMsg = pMsgArr[*arrSize - 1];

  (void)taosThreadMutexLock(&pVnode->lock);
  int32_t code = syncProposeBatch(pVnode->sync, pMsgArr, pIsWeakArr, *arrSize);
  bool    wait = (code == 0 && vnodeIsBlockMsg(pLastMsg->msgType));
  if (wait) {
    pVnode->blocked = true;
  }
  (void)taosThreadMutexUnlock(&pVnode->lock);

  if (code > 0) {
    for (int32_t i = 0; i < *arrSize; ++i) {
      vnodeHandleWriteMsg(pVnode, pMsgArr[i]);
    }
  } else if (code < 0) {
    if (terrno != 0) code = terrno;
    for (int32_t i = 0; i < *arrSize; ++i) {
      vnodeHandleProposeError(pVnode, pMsgArr[i], code);
    }
  }

  if (wait) vnodeWaitBlockMsg(pVnode, pLastMsg);
  pLastMsg = NULL;

  for (int32_t i = 0; i < *arrSize; ++i) {
    SRpcMsg        *pMsg = pMsgArr[i];
    const STraceId *trace = &pMsg->info.traceId;
    vGTrace("vgId:%d, msg:%p is freed, code:0x%x", pVnode->config.vgId, pMsg, code);
    rpcFreeCont(pMsg->pCont);
    taosFreeQitem(pMsg);
  }

  *arrSize = 0;
}

void vnodeProposeWriteMsg(SQueueInfo *pInfo, STaosQall *qall, int32_t numOfMsgs) {
  SVnode   *pVnode = pInfo->ahandle;
  int32_t   vgId = pVnode->config.vgId;
  int32_t   code = 0;
  SRpcMsg  *pMsg = NULL;
  int32_t   arrayPos = 0;
  SRpcMsg **pMsgArr = taosMemoryCalloc(numOfMsgs, sizeof(SRpcMsg *));
  bool     *pIsWeakArr = taosMemoryCalloc(numOfMsgs, sizeof(bool));
  vTrace("vgId:%d, get %d msgs from vnode-write queue", vgId, numOfMsgs);

  for (int32_t msg = 0; msg < numOfMsgs; msg++) {
    if (taosGetQitem(qall, (void **)&pMsg) == 0) continue;
    bool isWeak = vnodeIsMsgWeak(pMsg->msgType);
    bool isBlock = vnodeIsMsgBlock(pMsg->msgType);

    const STraceId *trace = &pMsg->info.traceId;
    vGTrace("vgId:%d, msg:%p get from vnode-write queue, weak:%d block:%d msg:%d:%d pos:%d, handle:%p", vgId, pMsg,
            isWeak, isBlock, msg, numOfMsgs, arrayPos, pMsg->info.handle);

    if (!pVnode->restored) {
      vGWarn("vgId:%d, msg:%p failed to process since restore not finished, type:%s", vgId, pMsg,
             TMSG_INFO(pMsg->msgType));
      terrno = TSDB_CODE_SYN_RESTORING;
      vnodeHandleProposeError(pVnode, pMsg, TSDB_CODE_SYN_RESTORING);
      rpcFreeCont(pMsg->pCont);
      taosFreeQitem(pMsg);
      continue;
    }

    if (pMsgArr == NULL || pIsWeakArr == NULL) {
      vGError("vgId:%d, msg:%p failed to process since out of memory, type:%s", vgId, pMsg, TMSG_INFO(pMsg->msgType));
      terrno = TSDB_CODE_OUT_OF_MEMORY;
      vnodeHandleProposeError(pVnode, pMsg, terrno);
      rpcFreeCont(pMsg->pCont);
      taosFreeQitem(pMsg);
      continue;
    }

    bool atExit = false;
    vnodeProposeCommitOnNeed(pVnode, atExit);

    code = vnodePreProcessWriteMsg(pVnode, pMsg);
    if (code != 0) {
      vGError("vgId:%d, msg:%p failed to pre-process since %s", vgId, pMsg, terrstr());
      rpcFreeCont(pMsg->pCont);
      taosFreeQitem(pMsg);
      continue;
    }

    if (isBlock) {
      vnodeProposeBatchMsg(pVnode, pMsgArr, pIsWeakArr, &arrayPos);
    }

    pMsgArr[arrayPos] = pMsg;
    pIsWeakArr[arrayPos] = isWeak;
    arrayPos++;

    if (isBlock || msg == numOfMsgs - 1) {
      vnodeProposeBatchMsg(pVnode, pMsgArr, pIsWeakArr, &arrayPos);
    }
  }

  taosMemoryFree(pMsgArr);
  taosMemoryFree(pIsWeakArr);
}

#else

void vnodeProposeWriteMsg(SQueueInfo *pInfo, STaosQall *qall, int32_t numOfMsgs) {
  SVnode  *pVnode = pInfo->ahandle;
  int32_t  vgId = pVnode->config.vgId;
  int32_t  code = 0;
  SRpcMsg *pMsg = NULL;
  vTrace("vgId:%d, get %d msgs from vnode-write queue", vgId, numOfMsgs);

  for (int32_t msg = 0; msg < numOfMsgs; msg++) {
    if (taosGetQitem(qall, (void **)&pMsg) == 0) continue;
    bool isWeak = vnodeIsMsgWeak(pMsg->msgType);

    const STraceId *trace = &pMsg->info.traceId;
    vGTrace("vgId:%d, msg:%p get from vnode-write queue, weak:%d block:%d msg:%d:%d, handle:%p", vgId, pMsg, isWeak,
            vnodeIsMsgBlock(pMsg->msgType), msg, numOfMsgs, pMsg->info.handle);

    if (!pVnode->restored) {
      vGWarn("vgId:%d, msg:%p failed to process since restore not finished, type:%s", vgId, pMsg,
             TMSG_INFO(pMsg->msgType));
      vnodeHandleProposeError(pVnode, pMsg, TSDB_CODE_SYN_RESTORING);
      rpcFreeCont(pMsg->pCont);
      taosFreeQitem(pMsg);
      continue;
    }

    bool atExit = false;
    vnodeProposeCommitOnNeed(pVnode, atExit);

    code = vnodePreProcessWriteMsg(pVnode, pMsg);
    if (code != 0) {
      if (code != TSDB_CODE_MSG_PREPROCESSED) {
        vGError("vgId:%d, msg:%p failed to pre-process since %s", vgId, pMsg, tstrerror(code));
      }
      vnodeHandleProposeError(pVnode, pMsg, code);
      rpcFreeCont(pMsg->pCont);
      taosFreeQitem(pMsg);
      continue;
    }

    code = vnodeProposeMsg(pVnode, pMsg, isWeak);

    vGTrace("vgId:%d, msg:%p is freed, code:0x%x", vgId, pMsg, code);
    rpcFreeCont(pMsg->pCont);
    taosFreeQitem(pMsg);
  }
}

#endif

void vnodeApplyWriteMsg(SQueueInfo *pInfo, STaosQall *qall, int32_t numOfMsgs) {
  SVnode  *pVnode = pInfo->ahandle;
  int32_t  vgId = pVnode->config.vgId;
  int32_t  code = 0;
  SRpcMsg *pMsg = NULL;

  for (int32_t i = 0; i < numOfMsgs; ++i) {
    if (taosGetQitem(qall, (void **)&pMsg) == 0) continue;
    const STraceId *trace = &pMsg->info.traceId;

    if (vnodeIsMsgBlock(pMsg->msgType)) {
      vGTrace("vgId:%d, msg:%p get from vnode-apply queue, type:%s handle:%p index:%" PRId64
              ", blocking msg obtained sec:%d seq:%" PRId64,
              vgId, pMsg, TMSG_INFO(pMsg->msgType), pMsg->info.handle, pMsg->info.conn.applyIndex, pVnode->blockSec,
              pVnode->blockSeq);
    } else {
      vGTrace("vgId:%d, msg:%p get from vnode-apply queue, type:%s handle:%p index:%" PRId64, vgId, pMsg,
              TMSG_INFO(pMsg->msgType), pMsg->info.handle, pMsg->info.conn.applyIndex);
    }

    SRpcMsg rsp = {.code = pMsg->code, .info = pMsg->info};
    if (rsp.code == 0) {
      if (vnodeProcessWriteMsg(pVnode, pMsg, pMsg->info.conn.applyIndex, &rsp) < 0) {
        rsp.code = terrno;
        vGError("vgId:%d, msg:%p failed to apply since %s, index:%" PRId64, vgId, pMsg, terrstr(),
                pMsg->info.conn.applyIndex);
      }
    }

    vnodePostBlockMsg(pVnode, pMsg);
    if (rsp.info.handle != NULL) {
      tmsgSendRsp(&rsp);
    } else {
      if (rsp.pCont) {
        rpcFreeCont(rsp.pCont);
      }
    }

    vGTrace("vgId:%d, msg:%p is freed, code:0x%x index:%" PRId64, vgId, pMsg, rsp.code, pMsg->info.conn.applyIndex);
    rpcFreeCont(pMsg->pCont);
    taosFreeQitem(pMsg);
  }
}

int32_t vnodeProcessSyncMsg(SVnode *pVnode, SRpcMsg *pMsg, SRpcMsg **pRsp) {
  const STraceId *trace = &pMsg->info.traceId;
  vGTrace("vgId:%d, sync msg:%p will be processed, type:%s", pVnode->config.vgId, pMsg, TMSG_INFO(pMsg->msgType));

  int32_t code = syncProcessMsg(pVnode->sync, pMsg);
  if (code != 0) {
    vGError("vgId:%d, failed to process sync msg:%p type:%s, reason: %s", pVnode->config.vgId, pMsg,
            TMSG_INFO(pMsg->msgType), tstrerror(code));
  }

  return code;
}

static int32_t vnodeSyncEqCtrlMsg(const SMsgCb *msgcb, SRpcMsg *pMsg) {
  if (pMsg == NULL || pMsg->pCont == NULL) {
    return TSDB_CODE_INVALID_PARA;
  }

  if (msgcb == NULL || msgcb->putToQueueFp == NULL) {
    rpcFreeCont(pMsg->pCont);
    pMsg->pCont = NULL;
    return TSDB_CODE_INVALID_PARA;
  }

  int32_t code = tmsgPutToQueue(msgcb, SYNC_RD_QUEUE, pMsg);
  if (code != 0) {
    rpcFreeCont(pMsg->pCont);
    pMsg->pCont = NULL;
  }
  return code;
}

static int32_t vnodeSyncEqMsg(const SMsgCb *msgcb, SRpcMsg *pMsg) {
  if (pMsg == NULL || pMsg->pCont == NULL) {
    return TSDB_CODE_INVALID_PARA;
  }

  if (msgcb == NULL || msgcb->putToQueueFp == NULL) {
    rpcFreeCont(pMsg->pCont);
    pMsg->pCont = NULL;
    return TSDB_CODE_INVALID_PARA;
  }

  int32_t code = tmsgPutToQueue(msgcb, SYNC_QUEUE, pMsg);
  if (code != 0) {
    rpcFreeCont(pMsg->pCont);
    pMsg->pCont = NULL;
  }
  return code;
}

static int32_t vnodeSyncSendMsg(const SEpSet *pEpSet, SRpcMsg *pMsg) {
  int32_t code = tmsgSendSyncReq(pEpSet, pMsg);
  if (code != 0) {
    rpcFreeCont(pMsg->pCont);
    pMsg->pCont = NULL;
  }
  return code;
}

static int32_t vnodeSyncGetSnapshotInfo(const SSyncFSM *pFsm, SSnapshot *pSnapshot) {
  return vnodeGetSnapshot(pFsm->data, pSnapshot);
}

static int32_t vnodeSyncApplyMsg(const SSyncFSM *pFsm, SRpcMsg *pMsg, const SFsmCbMeta *pMeta) {
  SVnode *pVnode = pFsm->data;
  pMsg->info.conn.applyIndex = pMeta->index;
  pMsg->info.conn.applyTerm = pMeta->term;

  const STraceId *trace = &pMsg->info.traceId;
  vGTrace("vgId:%d, commit-cb is excuted, fsm:%p, index:%" PRId64 ", term:%" PRIu64 ", msg-index:%" PRId64
          ", weak:%d, code:%d, state:%d %s, type:%s code:0x%x",
          pVnode->config.vgId, pFsm, pMeta->index, pMeta->term, pMsg->info.conn.applyIndex, pMeta->isWeak, pMeta->code,
          pMeta->state, syncStr(pMeta->state), TMSG_INFO(pMsg->msgType), pMsg->code);

  return tmsgPutToQueue(&pVnode->msgCb, APPLY_QUEUE, pMsg);
}

static int32_t vnodeSyncCommitMsg(const SSyncFSM *pFsm, SRpcMsg *pMsg, SFsmCbMeta *pMeta) {
  if (pMsg->code == 0) {
    return vnodeSyncApplyMsg(pFsm, pMsg, pMeta);
  }

  const STraceId *trace = &pMsg->info.traceId;
  SVnode         *pVnode = pFsm->data;
  vnodePostBlockMsg(pVnode, pMsg);

  SRpcMsg rsp = {.code = pMsg->code, .info = pMsg->info};
  if (rsp.info.handle != NULL) {
    tmsgSendRsp(&rsp);
  }

  vGTrace("vgId:%d, msg:%p is freed, code:0x%x index:%" PRId64, TD_VID(pVnode), pMsg, rsp.code, pMeta->index);
  rpcFreeCont(pMsg->pCont);
  pMsg->pCont = NULL;
  return 0;
}

static int32_t vnodeSyncPreCommitMsg(const SSyncFSM *pFsm, SRpcMsg *pMsg, SFsmCbMeta *pMeta) {
  if (pMeta->isWeak == 1) {
    return vnodeSyncApplyMsg(pFsm, pMsg, pMeta);
  }
  return 0;
}

static SyncIndex vnodeSyncAppliedIndex(const SSyncFSM *pFSM) {
  SVnode *pVnode = pFSM->data;
  return atomic_load_64(&pVnode->state.applied);
}

static void vnodeSyncRollBackMsg(const SSyncFSM *pFsm, SRpcMsg *pMsg, SFsmCbMeta *pMeta) {
  SVnode *pVnode = pFsm->data;
  vTrace("vgId:%d, rollback-cb is excuted, fsm:%p, index:%" PRId64 ", weak:%d, code:%d, state:%d %s, type:%s",
         pVnode->config.vgId, pFsm, pMeta->index, pMeta->isWeak, pMeta->code, pMeta->state, syncStr(pMeta->state),
         TMSG_INFO(pMsg->msgType));
}

static int32_t vnodeSnapshotStartRead(const SSyncFSM *pFsm, void *pParam, void **ppReader) {
  SVnode *pVnode = pFsm->data;
  return vnodeSnapReaderOpen(pVnode, (SSnapshotParam *)pParam, (SVSnapReader **)ppReader);
}

static void vnodeSnapshotStopRead(const SSyncFSM *pFsm, void *pReader) {
  SVnode *pVnode = pFsm->data;
  vnodeSnapReaderClose(pReader);
}

static int32_t vnodeSnapshotDoRead(const SSyncFSM *pFsm, void *pReader, void **ppBuf, int32_t *len) {
  SVnode *pVnode = pFsm->data;
  return vnodeSnapRead(pReader, (uint8_t **)ppBuf, len);
}

static int32_t vnodeSnapshotStartWrite(const SSyncFSM *pFsm, void *pParam, void **ppWriter) {
  SVnode *pVnode = pFsm->data;

  do {
    int32_t itemSize = tmsgGetQueueSize(&pVnode->msgCb, pVnode->config.vgId, APPLY_QUEUE);
    if (itemSize == 0) {
      vInfo("vgId:%d, start write vnode snapshot since apply queue is empty", pVnode->config.vgId);
      break;
    } else {
      vInfo("vgId:%d, write vnode snapshot later since %d items in apply queue", pVnode->config.vgId, itemSize);
      taosMsleep(10);
    }
  } while (true);

  return vnodeSnapWriterOpen(pVnode, (SSnapshotParam *)pParam, (SVSnapWriter **)ppWriter);
}

static int32_t vnodeSnapshotStopWrite(const SSyncFSM *pFsm, void *pWriter, bool isApply, SSnapshot *pSnapshot) {
  SVnode *pVnode = pFsm->data;
  vInfo("vgId:%d, stop write vnode snapshot, apply:%d, index:%" PRId64 " term:%" PRIu64 " config:%" PRId64,
        pVnode->config.vgId, isApply, pSnapshot->lastApplyIndex, pSnapshot->lastApplyTerm, pSnapshot->lastConfigIndex);

  int32_t code = vnodeSnapWriterClose(pWriter, !isApply, pSnapshot);
  if (code != 0) {
    vError("vgId:%d, failed to finish applying vnode snapshot since %s, code:0x%x", pVnode->config.vgId, terrstr(),
           code);
  }
  return code;
}

static int32_t vnodeSnapshotDoWrite(const SSyncFSM *pFsm, void *pWriter, void *pBuf, int32_t len) {
  SVnode *pVnode = pFsm->data;
  vDebug("vgId:%d, continue write vnode snapshot, blockLen:%d", pVnode->config.vgId, len);
  int32_t code = vnodeSnapWrite(pWriter, pBuf, len);
  vDebug("vgId:%d, continue write vnode snapshot finished, blockLen:%d", pVnode->config.vgId, len);
  return code;
}

static void vnodeRestoreFinish(const SSyncFSM *pFsm, const SyncIndex commitIdx) {
  SVnode   *pVnode = pFsm->data;
  int32_t   vgId = pVnode->config.vgId;
  SyncIndex appliedIdx = -1;

  do {
    appliedIdx = vnodeSyncAppliedIndex(pFsm);
    if (appliedIdx > commitIdx) {
      vError("vgId:%d, restore failed since applied-index:%" PRId64 " is larger than commit-index:%" PRId64, vgId,
             appliedIdx, commitIdx);
      break;
    }
    if (appliedIdx == commitIdx) {
      vInfo("vgId:%d, no items to be applied, restore finish", pVnode->config.vgId);
      break;
    } else {
      vInfo("vgId:%d, restore not finish since %" PRId64 " items to be applied. commit-index:%" PRId64
            ", applied-index:%" PRId64,
            vgId, commitIdx - appliedIdx, commitIdx, appliedIdx);
      taosMsleep(10);
    }
  } while (true);

  walApplyVer(pVnode->pWal, commitIdx);
  pVnode->restored = true;

  SStreamMeta *pMeta = pVnode->pTq->pStreamMeta;
  streamMetaWLock(pMeta);

  if (pMeta->startInfo.tasksWillRestart) {
    vInfo("vgId:%d, sync restore finished, stream tasks will be launched by other thread", vgId);
    streamMetaWUnLock(pMeta);
    return;
  }

  if (vnodeIsRoleLeader(pVnode)) {
    // start to restore all stream tasks
    if (tsDisableStream) {
      vInfo("vgId:%d, sync restore finished, not launch stream tasks, since stream tasks are disabled", vgId);
    } else {
      vInfo("vgId:%d sync restore finished, start to launch stream task(s)", vgId);
      if (pMeta->startInfo.startAllTasks == 1) {
        pMeta->startInfo.restartCount += 1;
        vDebug("vgId:%d in start tasks procedure, inc restartCounter by 1, remaining restart:%d", vgId,
               pMeta->startInfo.restartCount);
      } else {
        pMeta->startInfo.startAllTasks = 1;
        streamMetaWUnLock(pMeta);

        tqInfo("vgId:%d stream task already loaded, start them", vgId);
        int32_t code = streamTaskSchedTask(&pVnode->msgCb, TD_VID(pVnode), 0, 0, STREAM_EXEC_T_START_ALL_TASKS);
        if (code != 0) {
          tqError("vgId:%d failed to sched stream task, code:%s", vgId, tstrerror(code));
        }
        return;
      }
    }
  } else {
    vInfo("vgId:%d, sync restore finished, not launch stream tasks since not leader", vgId);
  }

  streamMetaWUnLock(pMeta);
}

static void vnodeBecomeFollower(const SSyncFSM *pFsm) {
  SVnode *pVnode = pFsm->data;
  vInfo("vgId:%d, become follower", pVnode->config.vgId);

  (void)taosThreadMutexLock(&pVnode->lock);
  if (pVnode->blocked) {
    pVnode->blocked = false;
    vDebug("vgId:%d, become follower and post block", pVnode->config.vgId);
    if (tsem_post(&pVnode->syncSem) != 0) {
      vError("vgId:%d, failed to post sync semaphore", pVnode->config.vgId);
    }
  }
  (void)taosThreadMutexUnlock(&pVnode->lock);

  if (pVnode->pTq) {
    tqUpdateNodeStage(pVnode->pTq, false);
    if (tqStopStreamTasksAsync(pVnode->pTq) != 0) {
      vError("vgId:%d, failed to stop stream tasks", pVnode->config.vgId);
    }
  }
}

static void vnodeBecomeLearner(const SSyncFSM *pFsm) {
  SVnode *pVnode = pFsm->data;
  vInfo("vgId:%d, become learner", pVnode->config.vgId);

  (void)taosThreadMutexLock(&pVnode->lock);
  if (pVnode->blocked) {
    pVnode->blocked = false;
    vDebug("vgId:%d, become learner and post block", pVnode->config.vgId);
    if (tsem_post(&pVnode->syncSem) != 0) {
      vError("vgId:%d, failed to post sync semaphore", pVnode->config.vgId);
    }
  }
  (void)taosThreadMutexUnlock(&pVnode->lock);
}

static void vnodeBecomeLeader(const SSyncFSM *pFsm) {
  SVnode *pVnode = pFsm->data;
  vDebug("vgId:%d, become leader", pVnode->config.vgId);
  if (pVnode->pTq) {
    tqUpdateNodeStage(pVnode->pTq, true);
  }
}

static void vnodeBecomeAssignedLeader(const SSyncFSM *pFsm) {
  SVnode *pVnode = pFsm->data;
  vDebug("vgId:%d, become assigned leader", pVnode->config.vgId);
  if (pVnode->pTq) {
    tqUpdateNodeStage(pVnode->pTq, true);
  }
}

static bool vnodeApplyQueueEmpty(const SSyncFSM *pFsm) {
  SVnode *pVnode = pFsm->data;

  if (pVnode != NULL && pVnode->msgCb.qsizeFp != NULL) {
    int32_t itemSize = tmsgGetQueueSize(&pVnode->msgCb, pVnode->config.vgId, APPLY_QUEUE);
    return (itemSize == 0);
  } else {
    return true;
  }
}

static int32_t vnodeApplyQueueItems(const SSyncFSM *pFsm) {
  SVnode *pVnode = pFsm->data;

  if (pVnode != NULL && pVnode->msgCb.qsizeFp != NULL) {
    int32_t itemSize = tmsgGetQueueSize(&pVnode->msgCb, pVnode->config.vgId, APPLY_QUEUE);
    return itemSize;
  } else {
    return TSDB_CODE_INVALID_PARA;
  }
}

static SSyncFSM *vnodeSyncMakeFsm(SVnode *pVnode) {
  SSyncFSM *pFsm = taosMemoryCalloc(1, sizeof(SSyncFSM));
  if (pFsm == NULL) {
    terrno = TSDB_CODE_OUT_OF_MEMORY;
    return NULL;
  }
  pFsm->data = pVnode;
  pFsm->FpCommitCb = vnodeSyncCommitMsg;
  pFsm->FpAppliedIndexCb = vnodeSyncAppliedIndex;
  pFsm->FpPreCommitCb = vnodeSyncPreCommitMsg;
  pFsm->FpRollBackCb = vnodeSyncRollBackMsg;
  pFsm->FpGetSnapshot = NULL;
  pFsm->FpGetSnapshotInfo = vnodeSyncGetSnapshotInfo;
  pFsm->FpRestoreFinishCb = vnodeRestoreFinish;
  pFsm->FpAfterRestoredCb = NULL;
  pFsm->FpLeaderTransferCb = NULL;
  pFsm->FpApplyQueueEmptyCb = vnodeApplyQueueEmpty;
  pFsm->FpApplyQueueItems = vnodeApplyQueueItems;
  pFsm->FpBecomeLeaderCb = vnodeBecomeLeader;
  pFsm->FpBecomeAssignedLeaderCb = vnodeBecomeAssignedLeader;
  pFsm->FpBecomeFollowerCb = vnodeBecomeFollower;
  pFsm->FpBecomeLearnerCb = vnodeBecomeLearner;
  pFsm->FpReConfigCb = NULL;
  pFsm->FpSnapshotStartRead = vnodeSnapshotStartRead;
  pFsm->FpSnapshotStopRead = vnodeSnapshotStopRead;
  pFsm->FpSnapshotDoRead = vnodeSnapshotDoRead;
  pFsm->FpSnapshotStartWrite = vnodeSnapshotStartWrite;
  pFsm->FpSnapshotStopWrite = vnodeSnapshotStopWrite;
  pFsm->FpSnapshotDoWrite = vnodeSnapshotDoWrite;

  return pFsm;
}

int32_t vnodeSyncOpen(SVnode *pVnode, char *path, int32_t vnodeVersion) {
  SSyncInfo syncInfo = {
      .snapshotStrategy = SYNC_STRATEGY_WAL_FIRST,
      .batchSize = 1,
      .vgId = pVnode->config.vgId,
      .syncCfg = pVnode->config.syncCfg,
      .pWal = pVnode->pWal,
      .msgcb = &pVnode->msgCb,
      .syncSendMSg = vnodeSyncSendMsg,
      .syncEqMsg = vnodeSyncEqMsg,
      .syncEqCtrlMsg = vnodeSyncEqCtrlMsg,
      .pingMs = 5000,
      .electMs = 4000,
      .heartbeatMs = 700,
  };

  snprintf(syncInfo.path, sizeof(syncInfo.path), "%s%ssync", path, TD_DIRSEP);
  syncInfo.pFsm = vnodeSyncMakeFsm(pVnode);

  SSyncCfg *pCfg = &syncInfo.syncCfg;
  vInfo("vgId:%d, start to open sync, replica:%d selfIndex:%d", pVnode->config.vgId, pCfg->replicaNum, pCfg->myIndex);
  for (int32_t i = 0; i < pCfg->totalReplicaNum; ++i) {
    SNodeInfo *pNode = &pCfg->nodeInfo[i];
    vInfo("vgId:%d, index:%d ep:%s:%u dnode:%d cluster:%" PRId64, pVnode->config.vgId, i, pNode->nodeFqdn,
          pNode->nodePort, pNode->nodeId, pNode->clusterId);
  }

  pVnode->sync = syncOpen(&syncInfo, vnodeVersion);
  if (pVnode->sync <= 0) {
    vError("vgId:%d, failed to open sync since %s", pVnode->config.vgId, terrstr());
    return terrno;
  }

  return 0;
}

int32_t vnodeSyncStart(SVnode *pVnode) {
  vInfo("vgId:%d, start sync", pVnode->config.vgId);
  int32_t code = syncStart(pVnode->sync);
  if (code) {
    vError("vgId:%d, failed to start sync subsystem since %s", pVnode->config.vgId, tstrerror(code));
    return code;
  }
  return 0;
}

void vnodeSyncPreClose(SVnode *pVnode) {
  vInfo("vgId:%d, sync pre close", pVnode->config.vgId);
  int32_t code = syncLeaderTransfer(pVnode->sync);
  if (code) {
    vError("vgId:%d, failed to transfer leader since %s", pVnode->config.vgId, tstrerror(code));
  }
  syncPreStop(pVnode->sync);

  (void)taosThreadMutexLock(&pVnode->lock);
  if (pVnode->blocked) {
    vInfo("vgId:%d, post block after close sync", pVnode->config.vgId);
    pVnode->blocked = false;
    if (tsem_post(&pVnode->syncSem) != 0) {
      vError("vgId:%d, failed to post block", pVnode->config.vgId);
    }
  }
  (void)taosThreadMutexUnlock(&pVnode->lock);
}

void vnodeSyncPostClose(SVnode *pVnode) {
  vInfo("vgId:%d, sync post close", pVnode->config.vgId);
  syncPostStop(pVnode->sync);
}

void vnodeSyncClose(SVnode *pVnode) {
  vInfo("vgId:%d, close sync", pVnode->config.vgId);
  syncStop(pVnode->sync);
}

void vnodeSyncCheckTimeout(SVnode *pVnode) {
  vTrace("vgId:%d, check sync timeout msg", pVnode->config.vgId);
  (void)taosThreadMutexLock(&pVnode->lock);
  if (pVnode->blocked) {
    int32_t curSec = taosGetTimestampSec();
    int32_t delta = curSec - pVnode->blockSec;
    if (delta > VNODE_TIMEOUT_SEC) {
      vError("vgId:%d, failed to propose since timeout and post block, start:%d cur:%d delta:%d seq:%" PRId64,
             pVnode->config.vgId, pVnode->blockSec, curSec, delta, pVnode->blockSeq);
      if (syncSendTimeoutRsp(pVnode->sync, pVnode->blockSeq) != 0) {
#if 0
        SRpcMsg rpcMsg = {.code = TSDB_CODE_SYN_TIMEOUT, .info = pVnode->blockInfo};
        vError("send timeout response since its applyed, seq:%" PRId64 " handle:%p ahandle:%p", pVnode->blockSeq,
              rpcMsg.info.handle, rpcMsg.info.ahandle);
        rpcSendResponse(&rpcMsg);
#endif
      }
      pVnode->blocked = false;
      pVnode->blockSec = 0;
      pVnode->blockSeq = 0;
      if (tsem_post(&pVnode->syncSem) != 0) {
        vError("vgId:%d, failed to post block", pVnode->config.vgId);
      }
    }
  }
  (void)taosThreadMutexUnlock(&pVnode->lock);
}

bool vnodeIsRoleLeader(SVnode *pVnode) {
  SSyncState state = syncGetState(pVnode->sync);
  return state.state == TAOS_SYNC_STATE_LEADER;
}

bool vnodeIsLeader(SVnode *pVnode) {
  terrno = 0;
  SSyncState state = syncGetState(pVnode->sync);

  if (terrno != 0) {
    vInfo("vgId:%d, vnode is stopping", pVnode->config.vgId);
    return false;
  }

  if (state.state != TAOS_SYNC_STATE_LEADER) {
    terrno = TSDB_CODE_SYN_NOT_LEADER;
    vInfo("vgId:%d, vnode not leader, state:%s", pVnode->config.vgId, syncStr(state.state));
    return false;
  }

  if (!state.restored || !pVnode->restored) {
    terrno = TSDB_CODE_SYN_RESTORING;
    vInfo("vgId:%d, vnode not restored:%d:%d", pVnode->config.vgId, state.restored, pVnode->restored);
    return false;
  }

  return true;
}

int64_t vnodeClusterId(SVnode *pVnode) {
  SSyncCfg *syncCfg = &pVnode->config.syncCfg;
  return syncCfg->nodeInfo[syncCfg->myIndex].clusterId;
}

int32_t vnodeNodeId(SVnode *pVnode) {
  SSyncCfg *syncCfg = &pVnode->config.syncCfg;
  return syncCfg->nodeInfo[syncCfg->myIndex].nodeId;
}

int32_t vnodeGetSnapshot(SVnode *pVnode, SSnapshot *pSnap) {
  int code = 0;
  pSnap->lastApplyIndex = pVnode->state.committed;
  pSnap->lastApplyTerm = pVnode->state.commitTerm;
  pSnap->lastConfigIndex = -1;
  pSnap->state = SYNC_FSM_STATE_COMPLETE;

  if (tsdbSnapGetFsState(pVnode) != TSDB_FS_STATE_NORMAL) {
    pSnap->state = SYNC_FSM_STATE_INCOMPLETE;
  }

  if (pSnap->type == TDMT_SYNC_PREP_SNAPSHOT || pSnap->type == TDMT_SYNC_PREP_SNAPSHOT_REPLY) {
    code = tsdbSnapPrepDescription(pVnode, pSnap);
  }
  return code;
}
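The blocking handshake in the file above (vnodeProposeMsg marks the vnode as blocked under pVnode->lock and then waits on syncSem via vnodeWaitBlockMsg, while the apply path clears the flag and posts the semaphore in vnodePostBlockMsg) can be illustrated with a minimal standalone sketch. The names below (demo_vnode_t, demo_apply) are illustrative only and are not part of TDengine; the sketch only shows the mutex-plus-semaphore pattern, not the sync propose logic.

/* Minimal sketch of the propose/apply block-and-wake handshake. Compile with -pthread. */
#include <pthread.h>
#include <semaphore.h>
#include <stdbool.h>
#include <stdio.h>

typedef struct {
  pthread_mutex_t lock;     /* guards the blocked flag, like pVnode->lock        */
  sem_t           syncSem;  /* the proposer parks here, like pVnode->syncSem     */
  bool            blocked;  /* true while a blocking proposal is outstanding     */
} demo_vnode_t;

/* Apply side: clear the flag and wake the proposer, but only if one is blocked. */
static void *demo_apply(void *arg) {
  demo_vnode_t *v = arg;
  pthread_mutex_lock(&v->lock);
  if (v->blocked) {
    v->blocked = false;
    sem_post(&v->syncSem);
  }
  pthread_mutex_unlock(&v->lock);
  return NULL;
}

int main(void) {
  demo_vnode_t v = {.blocked = false};
  pthread_mutex_init(&v.lock, NULL);
  sem_init(&v.syncSem, 0, 0);

  /* Propose side: mark ourselves blocked before handing the message to the applier. */
  pthread_mutex_lock(&v.lock);
  v.blocked = true;
  pthread_mutex_unlock(&v.lock);

  pthread_t applier;
  pthread_create(&applier, NULL, demo_apply, &v);

  sem_wait(&v.syncSem);  /* parked until demo_apply posts, like vnodeWaitBlockMsg */
  printf("proposer: woken up after apply\n");

  pthread_join(applier, NULL);
  sem_destroy(&v.syncSem);
  pthread_mutex_destroy(&v.lock);
  return 0;
}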