taosdata / TDengine / #4324 (push · travis-ci · web-flow)
18 Jun 2025 07:25AM UTC coverage: 62.916% (-0.2%) from 63.116%
Commit: docs: add IPv6 support information for taosAdapter (#31362)

158158 of 319881 branches covered (49.44%)
243705 of 318846 relevant lines covered (76.43%)
Branch coverage is included in the aggregate percentage.
17827866.93 hits per line
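For reference, the 62.916% aggregate above is consistent with pooling covered lines and covered branches into a single ratio, as the branch-coverage note suggests. A minimal sketch of that arithmetic (the combining rule is inferred from the figures shown, not a documented Coveralls formula):

#include <stdio.h>

int main(void) {
  // Figures taken from the report above.
  double coveredLines    = 243705, relevantLines = 318846;
  double coveredBranches = 158158, totalBranches = 319881;

  double linePct   = 100.0 * coveredLines / relevantLines;        // ~76.43%
  double branchPct = 100.0 * coveredBranches / totalBranches;     // ~49.44%
  // Assumed combining rule: lines and branches pooled into one ratio.
  double aggregate = 100.0 * (coveredLines + coveredBranches) /
                             (relevantLines + totalBranches);     // ~62.916%

  printf("line: %.2f%%, branch: %.2f%%, aggregate: %.3f%%\n", linePct, branchPct, aggregate);
  return 0;
}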

Source File

/source/libs/sync/src/syncTimeout.c (67.54% covered)
/*
 * Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
 *
 * This program is free software: you can use, redistribute, and/or modify
 * it under the terms of the GNU Affero General Public License, version 3
 * or later ("AGPL"), as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.
 *
 * You should have received a copy of the GNU Affero General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#define _DEFAULT_SOURCE
#include "syncTimeout.h"
#include "syncElection.h"
#include "syncRaftCfg.h"
#include "syncRaftLog.h"
#include "syncReplication.h"
#include "syncRespMgr.h"
#include "syncSnapshot.h"
#include "syncUtil.h"

static void syncNodeCleanConfigIndex(SSyncNode* ths) {
#if 0
  int32_t   newArrIndex = 0;
  SyncIndex newConfigIndexArr[MAX_CONFIG_INDEX_COUNT] = {0};
  SSnapshot snapshot = {0};

  ths->pFsm->FpGetSnapshotInfo(ths->pFsm, &snapshot);
  if (snapshot.lastApplyIndex != SYNC_INDEX_INVALID) {
    for (int32_t i = 0; i < ths->raftCfg.configIndexCount; ++i) {
      if (ths->raftCfg.configIndexArr[i] < snapshot.lastConfigIndex) {
        // pass
      } else {
        // save
        newConfigIndexArr[newArrIndex] = ths->raftCfg.configIndexArr[i];
        ++newArrIndex;
      }
    }

    int32_t oldCnt = ths->raftCfg.configIndexCount;
    ths->raftCfg.configIndexCount = newArrIndex;
    memcpy(ths->raftCfg.configIndexArr, newConfigIndexArr, sizeof(newConfigIndexArr));

    int32_t code = syncWriteCfgFile(ths);
    if (code != 0) {
      sNFatal(ths, "failed to persist cfg");
    } else {
      sNTrace(ths, "clean config index arr, old-cnt:%d, new-cnt:%d", oldCnt, ths->raftCfg.configIndexCount);
    }
  }
#endif
}

static int32_t syncNodeTimerRoutine(SSyncNode* ths) {
  ths->tmrRoutineNum++;

  sDebug("vgId:%d, timer routine, status report", ths->vgId);
  if (ths->tmrRoutineNum % (tsRoutineReportInterval / (ths->pingTimerMS / 1000)) == 0) {
    sNInfo(ths, "timer routines");
  } else {
    sNTrace(ths, "timer routines");
  }

  // timer replicate
  sDebug("vgId:%d, timer routine, node replicate", ths->vgId);
  TAOS_CHECK_RETURN(syncNodeReplicate(ths));

  // clean mnode index
  sDebug("vgId:%d, timer routine, clean config index", ths->vgId);
  if (syncNodeIsMnode(ths)) {
    syncNodeCleanConfigIndex(ths);
  }

  sDebug("vgId:%d, timer routine, snapshot resend", ths->vgId);
  int64_t timeNow = taosGetTimestampMs();

  for (int i = 0; i < ths->peersNum; ++i) {
    SSyncSnapshotSender* pSender = syncNodeGetSnapshotSender(ths, &(ths->peersId[i]));
    if (pSender != NULL) {
      if (ths->isStart && (ths->state == TAOS_SYNC_STATE_LEADER || ths->state == TAOS_SYNC_STATE_ASSIGNED_LEADER) &&
          pSender->start) {
        int64_t elapsedMs = timeNow - pSender->lastSendTime;
        if (elapsedMs < SYNC_SNAP_RESEND_MS) {
          continue;
        }

        if (elapsedMs > SYNC_SNAP_TIMEOUT_MS) {
          sSError(pSender, "snap replication timeout, terminate.");
          snapshotSenderStop(pSender, false);
        } else {
          sSWarn(pSender, "snap replication resend.");
          TAOS_CHECK_RETURN(snapshotReSend(pSender));
        }
      }
    }
  }

  sDebug("vgId:%d, timer routine, resp clean", ths->vgId);
  if (!syncNodeIsMnode(ths)) {
    syncRespClean(ths->pSyncRespMgr);
  }

  return 0;
}
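The snapshot loop above applies a simple elapsed-time window per sender: sends younger than the resend interval are skipped, senders past the timeout are stopped, and anything in between is resent. A standalone restatement of that decision, with placeholder thresholds (the real SYNC_SNAP_RESEND_MS and SYNC_SNAP_TIMEOUT_MS values are defined elsewhere in the sync module):

#include <assert.h>
#include <stdint.h>

enum SnapAction { SNAP_SKIP, SNAP_RESEND, SNAP_STOP };

// Same ordering of checks as the loop above: skip recent sends first,
// then treat anything past the timeout as dead, otherwise resend.
static enum SnapAction snapActionFor(int64_t elapsedMs, int64_t resendMs, int64_t timeoutMs) {
  if (elapsedMs < resendMs) return SNAP_SKIP;
  if (elapsedMs > timeoutMs) return SNAP_STOP;
  return SNAP_RESEND;
}

int main(void) {
  // Placeholder thresholds: 5 s resend interval, 60 s timeout.
  assert(snapActionFor(100, 5000, 60000) == SNAP_SKIP);
  assert(snapActionFor(10000, 5000, 60000) == SNAP_RESEND);
  assert(snapActionFor(120000, 5000, 60000) == SNAP_STOP);
  return 0;
}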

int32_t syncNodeOnTimeout(SSyncNode* ths, const SRpcMsg* pRpc) {
  int32_t      ret = 0;
  SyncTimeout* pMsg = pRpc->pCont;

  syncLogRecvTimer(ths, pMsg, &pRpc->info.traceId);

  if (pMsg->timeoutType == SYNC_TIMEOUT_PING) {
    if (atomic_load_64(&ths->pingTimerLogicClockUser) <= pMsg->logicClock) {
      ++(ths->pingTimerCounter);

      TAOS_CHECK_RETURN(syncNodeTimerRoutine(ths));
    }

  } else if (pMsg->timeoutType == SYNC_TIMEOUT_ELECTION) {
    if (atomic_load_64(&ths->electTimerLogicClock) <= pMsg->logicClock) {
      ++(ths->electTimerCounter);

      TAOS_CHECK_RETURN(syncNodeElect(ths));
    }

  } else if (pMsg->timeoutType == SYNC_TIMEOUT_HEARTBEAT) {
    if (atomic_load_64(&ths->heartbeatTimerLogicClockUser) <= pMsg->logicClock) {
      ++(ths->heartbeatTimerCounter);
      sTrace("vgId:%d, sync timer, type:replicate count:%" PRIu64 ", lc-user:%" PRIu64, ths->vgId,
             ths->heartbeatTimerCounter, ths->heartbeatTimerLogicClockUser);
    }

  } else {
    sError("vgId:%d, recv unknown timer-type:%d", ths->vgId, pMsg->timeoutType);
  }

  return ret;
}
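Each branch of syncNodeOnTimeout accepts a timeout only while the node's current logic clock is still at or below the clock value carried in the message, which discards events that were armed before a timer restart. A minimal, self-contained sketch of that guard (DemoNode, DemoTimeoutMsg, and demoOnPingTimeout are hypothetical names for illustration, not TDengine APIs):

#include <inttypes.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

typedef struct {
  atomic_uint_fast64_t pingClockUser;  // bumped whenever the ping timer is (re)started
  uint64_t             pingCounter;
} DemoNode;

typedef struct {
  uint64_t logicClock;  // clock value captured when this timeout was armed
} DemoTimeoutMsg;

static void demoOnPingTimeout(DemoNode* node, const DemoTimeoutMsg* msg) {
  if (atomic_load(&node->pingClockUser) <= msg->logicClock) {
    ++node->pingCounter;  // fresh event: run the ping routine
    printf("ping routine fired, count=%" PRIu64 "\n", node->pingCounter);
  } else {
    printf("stale timeout dropped (armed at clock %" PRIu64 ")\n", msg->logicClock);
  }
}

int main(void) {
  DemoNode       node = {0};
  DemoTimeoutMsg msg  = {.logicClock = 0};

  demoOnPingTimeout(&node, &msg);            // accepted: clocks match
  atomic_fetch_add(&node.pingClockUser, 1);  // timer restarted: node clock moves ahead
  demoOnPingTimeout(&node, &msg);            // same message is now stale and dropped
  return 0;
}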