• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

taosdata / TDengine / #4106

19 May 2025 07:15AM UTC coverage: 62.857% (-0.2%) from 63.042%
#4106

push

travis-ci

GitHub
Merge pull request #31115 from taosdata/merge/mainto3.0

156749 of 318088 branches covered (49.28%)

Branch coverage included in aggregate %.

242535 of 317143 relevant lines covered (76.47%)

18746393.97 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

65.79
/source/libs/sync/src/syncTimeout.c
1
/*
2
 * Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
3
 *
4
 * This program is free software: you can use, redistribute, and/or modify
5
 * it under the terms of the GNU Affero General Public License, version 3
6
 * or later ("AGPL"), as published by the Free Software Foundation.
7
 *
8
 * This program is distributed in the hope that it will be useful, but WITHOUT
9
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10
 * FITNESS FOR A PARTICULAR PURPOSE.
11
 *
12
 * You should have received a copy of the GNU Affero General Public License
13
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
14
 */
15

16
#define _DEFAULT_SOURCE
17
#include "syncTimeout.h"
18
#include "syncElection.h"
19
#include "syncRaftCfg.h"
20
#include "syncRaftLog.h"
21
#include "syncReplication.h"
22
#include "syncRespMgr.h"
23
#include "syncSnapshot.h"
24
#include "syncUtil.h"
25

26
// Prune raft config-index entries that predate the snapshot's lastConfigIndex
// and persist the trimmed array via syncWriteCfgFile().
// NOTE: the entire body is compiled out with `#if 0`, so this function is
// currently a no-op; the code is kept for reference only.
static void syncNodeCleanConfigIndex(SSyncNode* ths) {
#if 0
  int32_t   newArrIndex = 0;
  SyncIndex newConfigIndexArr[MAX_CONFIG_INDEX_COUNT] = {0};
  SSnapshot snapshot = {0};

  ths->pFsm->FpGetSnapshotInfo(ths->pFsm, &snapshot);
  if (snapshot.lastApplyIndex != SYNC_INDEX_INVALID) {
    // Keep only config indexes at or beyond the snapshot's last config index.
    for (int32_t i = 0; i < ths->raftCfg.configIndexCount; ++i) {
      if (ths->raftCfg.configIndexArr[i] < snapshot.lastConfigIndex) {
        // pass
      } else {
        // save
        newConfigIndexArr[newArrIndex] = ths->raftCfg.configIndexArr[i];
        ++newArrIndex;
      }
    }

    int32_t oldCnt = ths->raftCfg.configIndexCount;
    ths->raftCfg.configIndexCount = newArrIndex;
    memcpy(ths->raftCfg.configIndexArr, newConfigIndexArr, sizeof(newConfigIndexArr));

    // Persist the compacted config; failure to persist is fatal for the node.
    int32_t code = syncWriteCfgFile(ths);
    if (code != 0) {
      sNFatal(ths, "failed to persist cfg");
    } else {
      sNTrace(ths, "clean config index arr, old-cnt:%d, new-cnt:%d", oldCnt, ths->raftCfg.configIndexCount);
    }
  }
#endif
}
57

58
// Periodic (ping-timer) maintenance for a sync node:
//  1. emits a status-report log line (INFO at a throttled interval, TRACE otherwise),
//  2. triggers log replication to peers,
//  3. prunes mnode config indexes (currently a no-op, see syncNodeCleanConfigIndex),
//  4. resends or terminates stalled snapshot transfers when this node is leader,
//  5. cleans the response manager on non-mnode nodes.
// Returns 0 on success, or propagates the first non-zero error code via
// TAOS_CHECK_RETURN.
static int32_t syncNodeTimerRoutine(SSyncNode* ths) {
  ths->tmrRoutineNum++;

  sDebug("vgId:%d, timer routine, status report", ths->vgId);
  // Promote the log line to INFO roughly once every tsRoutineReportInterval
  // seconds. BUGFIX: guard both divisors — the original computed
  // tsRoutineReportInterval / (ths->pingTimerMS / 1000) unconditionally, so a
  // pingTimerMS below 1000 (or a ratio that truncates to 0) caused a
  // division/modulo by zero, which is undefined behavior.
  int32_t pingSec = ths->pingTimerMS / 1000;
  int64_t reportEvery = (pingSec > 0) ? (int64_t)(tsRoutineReportInterval / pingSec) : 0;
  if (reportEvery > 0 && ths->tmrRoutineNum % reportEvery == 0) {
    sNInfo(ths, "timer routines");
  } else {
    sNTrace(ths, "timer routines");
  }

  // timer replicate
  sDebug("vgId:%d, timer routine, node replicate", ths->vgId);
  TAOS_CHECK_RETURN(syncNodeReplicate(ths));

  // clean mnode index
  sDebug("vgId:%d, timer routine, clean config index", ths->vgId);
  if (syncNodeIsMnode(ths)) {
    syncNodeCleanConfigIndex(ths);
  }

  sDebug("vgId:%d, timer routine, snapshot resend", ths->vgId);
  int64_t timeNow = taosGetTimestampMs();

  // For each peer with an active snapshot sender: while this node is a
  // (possibly assigned) leader, resend snapshots idle longer than
  // SYNC_SNAP_RESEND_MS and abort transfers idle beyond SYNC_SNAP_TIMEOUT_MS.
  for (int i = 0; i < ths->peersNum; ++i) {
    SSyncSnapshotSender* pSender = syncNodeGetSnapshotSender(ths, &(ths->peersId[i]));
    if (pSender != NULL) {
      if (ths->isStart && (ths->state == TAOS_SYNC_STATE_LEADER || ths->state == TAOS_SYNC_STATE_ASSIGNED_LEADER) &&
          pSender->start) {
        int64_t elapsedMs = timeNow - pSender->lastSendTime;
        if (elapsedMs < SYNC_SNAP_RESEND_MS) {
          continue;
        }

        if (elapsedMs > SYNC_SNAP_TIMEOUT_MS) {
          sSError(pSender, "snap replication timeout, terminate.");
          snapshotSenderStop(pSender, false);
        } else {
          sSWarn(pSender, "snap replication resend.");
          TAOS_CHECK_RETURN(snapshotReSend(pSender));
        }
      }
    }
  }

  sDebug("vgId:%d, timer routine, resp clean", ths->vgId);
  if (!syncNodeIsMnode(ths)) {
    syncRespClean(ths->pSyncRespMgr);
  }

  return 0;
}
109

110
// Timeout-message dispatcher. Routes ping/election/heartbeat timer events to
// their handlers. Each branch first compares the message's logic clock with
// the node's current timer generation so that stale timer events (fired by an
// already-reset timer) are silently dropped. Unknown timeout types are logged
// as errors. Returns 0, or an error code propagated via TAOS_CHECK_RETURN.
int32_t syncNodeOnTimeout(SSyncNode* ths, const SRpcMsg* pRpc) {
  int32_t      ret = 0;
  SyncTimeout* pMsg = pRpc->pCont;

  syncLogRecvTimer(ths, pMsg, &pRpc->info.traceId);

  switch (pMsg->timeoutType) {
    case SYNC_TIMEOUT_PING:
      if (atomic_load_64(&ths->pingTimerLogicClockUser) <= pMsg->logicClock) {
        ++(ths->pingTimerCounter);
        TAOS_CHECK_RETURN(syncNodeTimerRoutine(ths));
      }
      break;

    case SYNC_TIMEOUT_ELECTION:
      if (atomic_load_64(&ths->electTimerLogicClock) <= pMsg->logicClock) {
        ++(ths->electTimerCounter);
        TAOS_CHECK_RETURN(syncNodeElect(ths));
      }
      break;

    case SYNC_TIMEOUT_HEARTBEAT:
      if (atomic_load_64(&ths->heartbeatTimerLogicClockUser) <= pMsg->logicClock) {
        ++(ths->heartbeatTimerCounter);
        sTrace("vgId:%d, sync timer, type:replicate count:%" PRIu64 ", lc-user:%" PRIu64, ths->vgId,
               ths->heartbeatTimerCounter, ths->heartbeatTimerLogicClockUser);
      }
      break;

    default:
      sError("vgId:%d, recv unknown timer-type:%d", ths->vgId, pMsg->timeoutType);
      break;
  }

  return ret;
}
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc