• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

PowerDNS / pdns / 15920880335

26 Jun 2025 03:30PM UTC coverage: 61.923% (-3.7%) from 65.652%
15920880335

push

github

web-flow
Merge pull request #15669 from miodvallat/serial_keyer

Increase zone serial number after zone key operations

38311 of 91850 branches covered (41.71%)

Branch coverage included in aggregate %.

27 of 29 new or added lines in 1 file covered. (93.1%)

6308 existing lines in 78 files now uncovered.

120482 of 164587 relevant lines covered (73.2%)

5965233.22 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

79.21
/pdns/recursordist/recursor_cache.cc
1
/*
2
 * This file is part of PowerDNS or dnsdist.
3
 * Copyright -- PowerDNS.COM B.V. and its contributors
4
 *
5
 * This program is free software; you can redistribute it and/or modify
6
 * it under the terms of version 2 of the GNU General Public License as
7
 * published by the Free Software Foundation.
8
 *
9
 * In addition, for the avoidance of any doubt, permission is granted to
10
 * link this program with OpenSSL and to (re)distribute the binaries
11
 * produced as the result of such linking.
12
 *
13
 * This program is distributed in the hope that it will be useful,
14
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16
 * GNU General Public License for more details.
17
 *
18
 * You should have received a copy of the GNU General Public License
19
 * along with this program; if not, write to the Free Software
20
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
21
 */
22

23
#include "config.h"
24

25
#include <cinttypes>
26
#include <protozero/pbf_builder.hpp>
27
#include <protozero/pbf_message.hpp>
28

29
#include "recursor_cache.hh"
30
#include "misc.hh"
31
#include "dnsrecords.hh"
32
#include "syncres.hh"
33
#include "namespaces.hh"
34
#include "cachecleaner.hh"
35
#include "rec-taskqueue.hh"
36
#include "version.hh"
37

38
/*
39
 * SERVE-STALE: the general approach
40
 *
41
 * The general switch to enable serve-stale is s_maxServedStaleExtensions. If this value is zero, no
42
 * serve-stale is done. If it is positive, it determines how many times the serve-stale status of a
43
 * record can be extended.
44
 *
45
 * Each record in the cache has a field d_servedStale. If this value is zero, no special handling is
46
 * done. If it is positive, the record is being served stale. The value determines how many times
47
 * the serve-stale status was extended. Each time an extension happens, the value is incremented and
48
 * a task to see if the record resolves will be pushed. When the served-stale status is extended,
49
 * the TTD of a record is also changed so the record will be considered not-expired by the get()
50
 * function. The TTD will be s_serveStaleExtensionPeriod in the future, unless the original TTL was
51
 * smaller than that. If d_servedStale reaches s_maxServedStaleExtensions the serve-stale status
52
 * will no longer be extended and the record will be considered really expired.
53
 *
54
 * With s_serveStaleExtensionPeriod of 30 seconds, setting s_maxServedStaleExtensions to 1440 will
55
 * cause a record to be served stale a maximum of 30s * 1440 = 12 hours. If the original TTL is
56
 * smaller than 30, this period will be shorter. If there was a long time between serve-stale
57
 * extensions, the value of d_servedStale will be incremented by more than one to account for the
58
 * longer period.
59
 *
60
 * If serve-stale is enabled, the resolving process first will try to resolve a record in the
61
 * ordinary way, with the difference that a timeout will not lead to an ImmediateServFailException
62
 * being passed to the caller, but the resolving will be tried again with a flag to allow marking
63
 * records as served-stale. If the second time around a timeout happens, an
64
 * ImmediateServFailException *will* be passed to the caller.
65
 *
66
 * When serving stale, records are only wiped from the cache if they are older than
67
 * s_maxServedStaleExtensions * s_serveStaleExtensionPeriod. See isStale(). This is to have a good
68
 * chance of records being available for marking stale if a name server has an issue.
69
 *
70
 * The tasks to see if nameservers are reachable again do a resolve in refresh mode, considering
71
 * served-stale records as expired. When a record resolves again, the d_servedStale field will be
72
 * reset.
73
 */
74

75
// Maximum number of serve-stale extensions per record; 0 disables serve-stale
// entirely (see the SERVE-STALE explanation at the top of this file).
uint16_t MemRecursorCache::s_maxServedStaleExtensions;
// Upper bound on RRSet sizes handled; handleHit() throws ImmediateServFailException
// when a result would exceed this.
uint16_t MemRecursorCache::s_maxRRSetSize = 256;
// When true, the RRSet size limit is also applied while accumulating results
// (relevant for QType::ANY answers that merge several RRSets).
bool MemRecursorCache::s_limitQTypeAny = true;

// Shared immutable empty vectors handed out instead of a null pointer, so
// callers can always iterate the result without a null check.
const MemRecursorCache::AuthRecs MemRecursorCache::s_emptyAuthRecs = std::make_shared<MemRecursorCache::AuthRecsVec>();
const MemRecursorCache::SigRecs MemRecursorCache::s_emptySigRecs = std::make_shared<MemRecursorCache::SigRecsVec>();
81

82
void MemRecursorCache::resetStaticsForTests()
83
{
636✔
84
  s_maxServedStaleExtensions = 0;
636✔
85
  SyncRes::s_refresh_ttlperc = 0;
636✔
86
  SyncRes::s_locked_ttlperc = 0;
636✔
87
  SyncRes::s_minimumTTL = 0;
636✔
88
  s_maxRRSetSize = 256;
636✔
89
  s_limitQTypeAny = true;
636✔
90
}
636✔
91

92
// Construct the cache with the requested number of shards; a request for
// zero shards is promoted to one, as the cache needs at least a single map.
MemRecursorCache::MemRecursorCache(size_t mapsCount) :
  d_maps(mapsCount != 0 ? mapsCount : 1)
{
}
96

97
size_t MemRecursorCache::size() const
98
{
519✔
99
  size_t count = 0;
519✔
100
  for (const auto& shard : d_maps) {
337,135✔
101
    count += shard.getEntriesCount();
337,135✔
102
  }
337,135✔
103
  return count;
519✔
104
}
519✔
105

106
// Aggregate the lock-contention statistics over all shards.
// Returns {contended acquisitions, total acquisitions}.
pair<uint64_t, uint64_t> MemRecursorCache::stats()
{
  uint64_t totalContended = 0;
  uint64_t totalAcquired = 0;
  for (auto& mapCombo : d_maps) {
    auto locked = mapCombo.lock();
    totalAcquired += locked->d_acquired_count;
    totalContended += locked->d_contended_count;
  }
  return {totalContended, totalAcquired};
}
117

118
size_t MemRecursorCache::ecsIndexSize()
119
{
54✔
120
  // XXX!
121
  size_t count = 0;
54✔
122
  for (auto& shard : d_maps) {
18,468✔
123
    auto lockedShard = shard.lock();
18,468✔
124
    count += lockedShard->d_ecsIndex.size();
18,468✔
125
  }
18,468✔
126
  return count;
54✔
127
}
54✔
128

129
size_t MemRecursorCache::CacheEntry::authRecsSizeEstimate() const
130
{
6✔
131
  size_t ret = 0;
6✔
132
  if (d_authorityRecs) {
6!
133
    for (const auto& record : *d_authorityRecs) {
×
134
      ret += record.sizeEstimate();
×
135
    }
×
136
  }
×
137
  return ret;
6✔
138
}
6✔
139

140
size_t MemRecursorCache::CacheEntry::sigRecsSizeEstimate() const
141
{
6✔
142
  size_t ret = 0;
6✔
143
  if (d_signatures) {
6!
UNCOV
144
    for (const auto& record : *d_signatures) {
×
UNCOV
145
      ret += record->sizeEstimate();
×
UNCOV
146
    }
×
UNCOV
147
  }
×
148
  return ret;
6✔
149
}
6✔
150

151
size_t MemRecursorCache::CacheEntry::sizeEstimate() const
152
{
6✔
153
  auto ret = sizeof(struct CacheEntry);
6✔
154
  ret += d_qname.sizeEstimate();
6✔
155
  ret += d_authZone.sizeEstimate();
6✔
156
  for (const auto& record : d_records) {
6✔
157
    ret += record->sizeEstimate();
6✔
158
  }
6✔
159
  ret += authRecsSizeEstimate();
6✔
160
  ret += sigRecsSizeEstimate();
6✔
161
  return ret;
6✔
162
}
6✔
163

164
// this function is too slow to poll!
165
size_t MemRecursorCache::bytes()
166
{
14✔
167
  size_t ret = 0;
14✔
168
  for (auto& shard : d_maps) {
14,336✔
169
    auto lockedShard = shard.lock();
14,336✔
170
    for (const auto& entry : lockedShard->d_map) {
14,336✔
171
      ret += entry.sizeEstimate();
6✔
172
    }
6✔
173
  }
14,336✔
174
  return ret;
14✔
175
}
14✔
176

177
/* Merge the validation state of one cache entry into the state accumulated
   so far for this lookup. The first update simply takes over; afterwards,
   Bogus/Indeterminate win over Secure/Insecure, and TA/NTA map to
   Secure/Insecure unconditionally. */
static void updateDNSSECValidationStateFromCache(boost::optional<vState>& state, const vState stateUpdate)
{
  // if there was no state it's easy
  if (state == boost::none) {
    state = stateUpdate;
    return;
  }

  if (stateUpdate == vState::TA) {
    // a trust anchor makes the combined state Secure
    state = vState::Secure;
  }
  else if (stateUpdate == vState::NTA) {
    // a negative trust anchor makes the combined state Insecure
    state = vState::Insecure;
  }
  else if (vStateIsBogus(stateUpdate) || stateUpdate == vState::Indeterminate) {
    // Bogus/Indeterminate always override whatever we had
    state = stateUpdate;
  }
  else if (stateUpdate == vState::Insecure || stateUpdate == vState::Secure) {
    // Secure/Insecure only replace a state that is not Bogus/Indeterminate
    if (!vStateIsBogus(*state) && *state != vState::Indeterminate) {
      state = stateUpdate;
    }
  }
}
200

201
// Assign `value` through `ptr` unless the caller passed nullptr to signal
// "not interested in this output parameter".
template <typename T>
static void ptrAssign(T* ptr, const T& value)
{
  if (ptr == nullptr) {
    return;
  }
  *ptr = value;
}
208

209
// Deliver the contents of a cache entry into the caller-provided output
// parameters (null pointers mean "not interested") and return the entry's
// time-to-die (ttd). A return value <= now means the entry is expired and
// nothing was copied. Side effects: merges the entry's DNSSEC state into
// `state`, ANDs its authoritativeness into `wasAuth`, and moves the entry to
// the back of the sequenced index (it was just used, so expire it last).
// Throws ImmediateServFailException when RRSet size limits are hit.
time_t MemRecursorCache::handleHit(time_t now, MapCombo::LockedContent& content, OrderedTagIterator_t& entry, const DNSName& qname, uint32_t& origTTL, vector<DNSRecord>* res, SigRecs* signatures, AuthRecs* authorityRecs, bool* variable, boost::optional<vState>& state, bool* wasAuth, DNSName* fromAuthZone, ComboAddress* fromAuthIP)
{
  // MUTEX SHOULD BE ACQUIRED (as indicated by the reference to the content which is protected by a lock)
  if (entry->d_tooBig) {
    throw ImmediateServFailException("too many records in RRSet");
  }
  time_t ttd = entry->d_ttd;
  if (ttd <= now) {
    // Expired, don't bother returning contents. Callers *MUST* check return value of get(), and only look at the entry
    // if it returned > 0
    return ttd;
  }
  origTTL = entry->d_orig_ttl;

  // An ECS-scoped or routing-tagged answer depends on who asked, so flag it
  // as variable (not suitable for e.g. the packet cache).
  if (!entry->d_netmask.empty() || entry->d_rtag) {
    ptrAssign(variable, true);
  }

  if (res != nullptr) {
    // Guard against assembling an oversized result (relevant for ANY, where
    // several RRSets are appended into the same vector).
    if (s_limitQTypeAny && res->size() + entry->d_records.size() > s_maxRRSetSize) {
      throw ImmediateServFailException("too many records in result");
    }

    res->reserve(res->size() + entry->d_records.size());

    for (const auto& record : entry->d_records) {
      DNSRecord result;
      result.d_name = qname;
      result.d_type = entry->d_qtype;
      result.d_class = QClass::IN;
      result.setContent(record);
      // NOTE(review): d_ttl is set to the absolute ttd here, not a relative
      // TTL — presumably callers convert it; confirm against get()'s callers.
      // coverity[store_truncates_time_t]
      result.d_ttl = static_cast<uint32_t>(entry->d_ttd);
      result.d_place = DNSResourceRecord::ANSWER;
      res->push_back(std::move(result));
    }
  }

  if (signatures != nullptr) {
    if (*signatures && !(*signatures)->empty() && entry->d_signatures && !entry->d_signatures->empty()) {
      // Return a new vec if we need to append to a non-empty vector
      SigRecsVec vec(**signatures);
      vec.insert(vec.end(), entry->d_signatures->cbegin(), entry->d_signatures->cend());
      *signatures = std::make_shared<SigRecsVec>(std::move(vec));
    }
    else {
      // hand out the shared empty vector rather than a null pointer
      *signatures = entry->d_signatures ? entry->d_signatures : s_emptySigRecs;
    }
  }

  if (authorityRecs != nullptr) {
    // XXX Might need to be adapted like sigs to handle a non-empty incoming authorityRecs
    assert(*authorityRecs == nullptr || (*authorityRecs)->empty());
    *authorityRecs = entry->d_authorityRecs ? entry->d_authorityRecs : s_emptyAuthRecs;
  }

  updateDNSSECValidationStateFromCache(state, entry->d_state);

  if (wasAuth != nullptr) {
    // stays true only if every entry seen so far was authoritative
    *wasAuth = *wasAuth && entry->d_auth;
  }
  ptrAssign(fromAuthZone, entry->d_authZone);
  ptrAssign(fromAuthIP, entry->d_from);

  // freshly used: move to the back so it is the last to be cleaned
  moveCacheItemToBack<SequencedTag>(content.d_map, entry);

  return ttd;
}
277

278
static void pushRefreshTask(const DNSName& qname, QType qtype, time_t deadline, const Netmask& netmask)
279
{
30✔
280
  if (qtype == QType::ADDR) {
30!
281
    pushAlmostExpiredTask(qname, QType::A, deadline, netmask);
×
282
    pushAlmostExpiredTask(qname, QType::AAAA, deadline, netmask);
×
283
  }
×
284
  else {
30✔
285
    pushAlmostExpiredTask(qname, qtype, deadline, netmask);
30✔
286
  }
30✔
287
}
30✔
288

289
// Extend the lifetime of an expired entry that is being served stale: bump
// d_servedStale (by more than one if the entry has been expired for a while)
// and push its ttd into the future by one extension period, then schedule a
// task to check whether the name resolves again.
void MemRecursorCache::updateStaleEntry(time_t now, MemRecursorCache::OrderedTagIterator_t& entry)
{
  // We need to take care that an infrequently accessed stale item cannot be extended past
  // s_maxServedStaleExtension * s_serveStaleExtensionPeriod
  // We look how old the entry is, and increase d_servedStale accordingly, taking care not to overflow
  const time_t howlong = std::max(static_cast<time_t>(1), now - entry->d_ttd);
  // extension period is capped by the record's original TTL, but at least 1s
  const uint32_t extension = std::max(1U, std::min(entry->d_orig_ttl, s_serveStaleExtensionPeriod));
  entry->d_servedStale = std::min(entry->d_servedStale + 1 + howlong / extension, static_cast<time_t>(s_maxServedStaleExtensions));
  entry->d_ttd = now + extension;

  pushRefreshTask(entry->d_qname, entry->d_qtype, entry->d_ttd, entry->d_netmask);
}
301

302
// If we are serving this record stale (or *should*) and the ttd has
303
// passed increase ttd to the future and remember that we did. Also
304
// push a refresh task.
305
void MemRecursorCache::handleServeStaleBookkeeping(time_t now, bool serveStale, MemRecursorCache::OrderedTagIterator_t& entry)
306
{
709,268✔
307
  if ((serveStale || entry->d_servedStale > 0) && entry->d_ttd <= now && entry->d_servedStale < s_maxServedStaleExtensions) {
709,268!
308
    updateStaleEntry(now, entry);
26✔
309
  }
26✔
310
}
709,268✔
311

312
// Find the cache entry for qname/qtype that best matches the client address
// `who` via the per-(name, type) ECS netmask index, falling back to the
// generic (empty netmask) entry when no netmask-specific match is usable.
// The index may lag behind the map; stale or dangling index entries are
// pruned as they are encountered. Returns map.d_map.end() on a miss, or when
// auth data is required and the best netmask match is not authoritative.
MemRecursorCache::cache_t::const_iterator MemRecursorCache::getEntryUsingECSIndex(MapCombo::LockedContent& map, time_t now, const DNSName& qname, const QType qtype, bool requireAuth, const ComboAddress& who, bool serveStale)
{
  // MUTEX SHOULD BE ACQUIRED (as indicated by the reference to the content which is protected by a lock)
  auto ecsIndexKey = std::tie(qname, qtype);
  auto ecsIndex = map.d_ecsIndex.find(ecsIndexKey);
  if (ecsIndex != map.d_ecsIndex.end() && !ecsIndex->isEmpty()) {
    /* we have netmask-specific entries, let's see if we match one */
    while (true) {
      const Netmask best = ecsIndex->lookupBestMatch(who);
      if (best.empty()) {
        /* we have nothing more specific for you */
        break;
      }
      auto key = std::tuple(qname, qtype, boost::none, best);
      auto entry = map.d_map.find(key);
      if (entry == map.d_map.end()) {
        /* ecsIndex is not up-to-date */
        ecsIndex->removeNetmask(best);
        if (ecsIndex->isEmpty()) {
          map.d_ecsIndex.erase(ecsIndex);
          break;
        }
        // index pruned, try the next-best netmask
        continue;
      }
      // possibly extend the ttd of a stale entry before the expiry check
      handleServeStaleBookkeeping(now, serveStale, entry);

      if (entry->d_ttd > now) {
        if (!requireAuth || entry->d_auth) {
          return entry;
        }
        /* we need auth data and the best match is not authoritative */
        return map.d_map.end();
      }
      /* this netmask-specific entry has expired */
      // move to the front of the sequenced index so cleaning removes it first
      moveCacheItemToFront<SequencedTag>(map.d_map, entry);
      // XXX when serving stale, it should be kept, but we don't want a match with lookupBestMatch()...
      ecsIndex->removeNetmask(best);
      if (ecsIndex->isEmpty()) {
        map.d_ecsIndex.erase(ecsIndex);
        break;
      }
    }
  }

  /* we have nothing specific, let's see if we have a generic one */
  auto key = std::tuple(qname, qtype, boost::none, Netmask());
  auto entry = map.d_map.find(key);
  if (entry != map.d_map.end()) {
    handleServeStaleBookkeeping(now, serveStale, entry);
    if (entry->d_ttd > now) {
      if (!requireAuth || entry->d_auth) {
        return entry;
      }
    }
    else {
      // expired generic entry: make it the first candidate for cleaning
      moveCacheItemToFront<SequencedTag>(map.d_map, entry);
    }
  }

  /* nothing for you, sorry */
  return map.d_map.end();
}
374

375
// Return the range of entries matching (qname, rtag). The last computed
// range is memoized in the shard ("cache-cache"): repeat lookups for the
// same name/tag reuse it, anything else recomputes and re-memoizes.
MemRecursorCache::Entries MemRecursorCache::getEntries(MapCombo::LockedContent& map, const DNSName& qname, const QType /* qtype */, const OptTag& rtag)
{
  // MUTEX SHOULD BE ACQUIRED
  const bool memoized = map.d_cachecachevalid && map.d_cachedqname == qname && map.d_cachedrtag == rtag;
  if (!memoized) {
    map.d_cachedqname = qname;
    map.d_cachedrtag = rtag;
    const auto& nameTagIndex = map.d_map.get<NameAndRTagOnlyHashedTag>();
    map.d_cachecache = nameTagIndex.equal_range(std::tie(qname, rtag));
    map.d_cachecachevalid = true;
  }
  return map.d_cachecache;
}
387

388
// Decide whether a candidate entry satisfies the request.
// This code assumes that if a routing tag is present, it matches.
// MUTEX SHOULD BE ACQUIRED
bool MemRecursorCache::entryMatches(MemRecursorCache::OrderedTagIterator_t& entry, const QType qtype, bool requireAuth, const ComboAddress& who)
{
  if (requireAuth && !entry->d_auth) {
    return false;
  }

  // Acceptable types: exact match, an ANY request, or an ADDR request
  // matching a stored A or AAAA RRSet.
  const bool addrMatch = qtype == QType::ADDR && (entry->d_qtype == QType::A || entry->d_qtype == QType::AAAA);
  const bool typeMatch = entry->d_qtype == qtype || qtype == QType::ANY || addrMatch;
  if (!typeMatch) {
    return false;
  }

  // An ECS-limited entry additionally has to cover the client address.
  return entry->d_netmask.empty() || entry->d_netmask.match(who);
}
400

401
// Fake a cache miss if more than refreshTTLPerc of the original TTL has passed
// `ret` is the entry's ttd as returned by handleHit(); the result is the
// remaining TTL in seconds, or -1 to make the caller treat this as a miss
// (forcing a real resolve). May schedule a background refresh task instead.
time_t MemRecursorCache::fakeTTD(MemRecursorCache::OrderedTagIterator_t& entry, const DNSName& qname, QType qtype, time_t ret, time_t now, uint32_t origTTL, bool refresh)
{
  time_t ttl = ret - now;
  // If we are checking an entry being served stale in refresh mode,
  // we always consider it stale so a real refresh attempt will be
  // kicked by SyncRes
  if (refresh && entry->d_servedStale > 0) {
    return -1;
  }
  if (ttl > 0 && SyncRes::s_refresh_ttlperc > 0) {
    // an entry is "almost expired" once less than s_refresh_ttlperc percent
    // of its original TTL remains
    const uint32_t deadline = origTTL * SyncRes::s_refresh_ttlperc / 100;
    // coverity[store_truncates_time_t]
    const bool almostExpired = static_cast<uint32_t>(ttl) <= deadline;
    if (almostExpired && qname != g_rootdnsname) {
      if (refresh) {
        // refreshing caller: report a miss so it re-resolves right away
        return -1;
      }
      if (!entry->d_submitted) {
        // otherwise refresh in the background, but only submit one task
        pushRefreshTask(qname, qtype, entry->d_ttd, entry->d_netmask);
        entry->d_submitted = true;
      }
    }
  }
  return ttl;
}
427

428
// returns -1 for no hits
// Main cache lookup: find qname/qtype (honoring ECS netmasks and routing
// tags) and fill the caller's output pointers (any of which may be nullptr).
// On a hit the remaining TTL in seconds is returned (possibly turned into a
// fake miss by fakeTTD()); -1 means a miss. Flags select RequireAuth,
// Refresh and ServeStale behavior.
time_t MemRecursorCache::get(time_t now, const DNSName& qname, const QType qtype, Flags flags, vector<DNSRecord>* res, const ComboAddress& who, const OptTag& routingTag, SigRecs* signatures, AuthRecs* authorityRecs, bool* variable, vState* state, bool* wasAuth, DNSName* fromAuthZone, ComboAddress* fromAuthIP) // NOLINT(readability-function-cognitive-complexity)
{
  bool requireAuth = (flags & RequireAuth) != 0;
  bool refresh = (flags & Refresh) != 0;
  bool serveStale = (flags & ServeStale) != 0;

  boost::optional<vState> cachedState{boost::none};
  uint32_t origTTL = 0;

  if (res != nullptr) {
    res->clear();
  }

  // we might retrieve more than one entry, we need to set that to true
  // so it will be set to false if at least one entry is not auth
  ptrAssign(wasAuth, true);

  // single shard per qname; it stays locked for the whole lookup
  auto& shard = getMap(qname);
  auto lockedShard = shard.lock();

  /* If we don't have any netmask-specific entries at all, let's just skip this
     to be able to use the nice d_cachecache hack. */
  if (qtype != QType::ANY && !lockedShard->d_ecsIndex.empty() && !routingTag) {
    if (qtype == QType::ADDR) {
      // ADDR pseudo-type: look up A and AAAA separately, combine the results
      time_t ret = -1;

      auto entryA = getEntryUsingECSIndex(*lockedShard, now, qname, QType::A, requireAuth, who, serveStale);
      if (entryA != lockedShard->d_map.end()) {
        ret = handleHit(now, *lockedShard, entryA, qname, origTTL, res, signatures, authorityRecs, variable, cachedState, wasAuth, fromAuthZone, fromAuthIP);
      }
      auto entryAAAA = getEntryUsingECSIndex(*lockedShard, now, qname, QType::AAAA, requireAuth, who, serveStale);
      if (entryAAAA != lockedShard->d_map.end()) {
        time_t ttdAAAA = handleHit(now, *lockedShard, entryAAAA, qname, origTTL, res, signatures, authorityRecs, variable, cachedState, wasAuth, fromAuthZone, fromAuthIP);
        if (ret > 0) {
          // both families found: report the earlier of the two ttds
          ret = std::min(ret, ttdAAAA);
        }
        else {
          ret = ttdAAAA;
        }
      }

      if (cachedState && ret > 0) {
        ptrAssign(state, *cachedState);
      }

      // convert ttd to a relative TTL; a miss stays -1
      return ret > 0 ? (ret - now) : ret;
    }
    auto entry = getEntryUsingECSIndex(*lockedShard, now, qname, qtype, requireAuth, who, serveStale);
    if (entry != lockedShard->d_map.end()) {
      time_t ret = handleHit(now, *lockedShard, entry, qname, origTTL, res, signatures, authorityRecs, variable, cachedState, wasAuth, fromAuthZone, fromAuthIP);
      if (cachedState && ret > now) {
        ptrAssign(state, *cachedState);
      }
      return fakeTTD(entry, qname, qtype, ret, now, origTTL, refresh);
    }
    return -1;
  }

  if (routingTag) {
    // first try the entries stored under this routing tag
    auto entries = getEntries(*lockedShard, qname, qtype, routingTag);
    unsigned int found = 0;
    time_t ttd{};

    if (entries.first != entries.second) {
      OrderedTagIterator_t firstIndexIterator;
      for (auto i = entries.first; i != entries.second; ++i) {
        // project the hashed-index iterator onto the ordered index, which is
        // what the helpers below operate on
        firstIndexIterator = lockedShard->d_map.project<OrderedTag>(i);

        // When serving stale, we consider expired records
        if (!i->isEntryUsable(now, serveStale)) {
          moveCacheItemToFront<SequencedTag>(lockedShard->d_map, firstIndexIterator);
          continue;
        }

        if (!entryMatches(firstIndexIterator, qtype, requireAuth, who)) {
          continue;
        }
        ++found;

        handleServeStaleBookkeeping(now, serveStale, firstIndexIterator);

        ttd = handleHit(now, *lockedShard, firstIndexIterator, qname, origTTL, res, signatures, authorityRecs, variable, cachedState, wasAuth, fromAuthZone, fromAuthIP);

        // ADDR needs both an A and a AAAA hit before stopping
        if (qtype == QType::ADDR && found == 2) {
          break;
        }
        if (qtype != QType::ANY) { // normally if we have a hit, we are done
          break;
        }
      }
      if (found > 0) {
        if (cachedState && ttd > now) {
          ptrAssign(state, *cachedState);
        }
        return fakeTTD(firstIndexIterator, qname, qtype, ttd, now, origTTL, refresh);
      }
      // a tagged range existed but nothing matched: do NOT fall through to
      // the untagged entries
      return -1;
    }
  }
  // Try (again) without tag
  auto entries = getEntries(*lockedShard, qname, qtype, boost::none);

  if (entries.first != entries.second) {
    OrderedTagIterator_t firstIndexIterator;
    unsigned int found = 0;
    time_t ttd{};

    for (auto i = entries.first; i != entries.second; ++i) {
      firstIndexIterator = lockedShard->d_map.project<OrderedTag>(i);

      // When serving stale, we consider expired records
      if (!i->isEntryUsable(now, serveStale)) {
        moveCacheItemToFront<SequencedTag>(lockedShard->d_map, firstIndexIterator);
        continue;
      }

      if (!entryMatches(firstIndexIterator, qtype, requireAuth, who)) {
        continue;
      }
      ++found;

      handleServeStaleBookkeeping(now, serveStale, firstIndexIterator);

      ttd = handleHit(now, *lockedShard, firstIndexIterator, qname, origTTL, res, signatures, authorityRecs, variable, cachedState, wasAuth, fromAuthZone, fromAuthIP);

      if (qtype == QType::ADDR && found == 2) {
        break;
      }
      if (qtype != QType::ANY) { // normally if we have a hit, we are done
        break;
      }
    }
    if (found > 0) {
      if (cachedState && ttd > now) {
        ptrAssign(state, *cachedState);
      }
      return fakeTTD(firstIndexIterator, qname, qtype, ttd, now, origTTL, refresh);
    }
  }
  return -1;
}
570

571
// Decide whether this existing cache entry should be overwritten by newly
// arriving data, based on authoritativeness, DNSSEC state and the optional
// TTL-based record-locking feature (SyncRes::s_locked_ttlperc).
// Note: may downgrade d_auth as a side effect when stale auth data is about
// to be replaced by non-auth data.
bool MemRecursorCache::CacheEntry::shouldReplace(time_t now, bool auth, vState state, bool refresh)
{
  if (!auth && d_auth) { // unauth data came in, we have some auth data, but is it fresh?
    // an auth entry that is going to expire while we are resolving can hurt, as it prevents infra
    // records (which might be unauth) to be updated. So apply a safety margin.
    const time_t margin = 5;
    if (d_ttd - margin > now) { // we still have valid data, ignore unauth data
      return false;
    }
    d_auth = false; // new data won't be auth
  }

  if (auth) {
    /* we don't want to keep a non-auth entry while we have an auth one */
    if (vStateIsBogus(state) && (!vStateIsBogus(d_state) && d_state != vState::Indeterminate) && d_ttd > now) {
      /* the new entry is Bogus, the existing one is not and is still valid, let's keep the existing one */
      return false;
    }
    // Always allow upgrading non-auth data to auth
    if (!d_auth) {
      return true;
    }
  }

  if (SyncRes::s_locked_ttlperc > 0) {
    // Override locking if existing data is stale or new data is Secure or refreshing
    if (d_ttd <= now || state == vState::Secure || refresh) {
      return true;
    }
    // the record is "locked" (not replaceable) while more than
    // (100 - s_locked_ttlperc)% of its original TTL remains
    const uint32_t percentage = 100 - SyncRes::s_locked_ttlperc;
    const time_t ttl = d_ttd - now;
    const uint32_t lockline = d_orig_ttl * percentage / 100;
    // We know ttl is > 0 as d_ttd > now
    // coverity[store_truncates_time_t]
    const bool locked = static_cast<uint32_t>(ttl) > lockline;
    if (locked) {
      return false;
    }
  }

  return true;
}
613

614
// Insert a fully prepared entry into the cache. Returns false when the entry
// carries a netmask or routing tag (not supported by this path) or when an
// equivalent entry already exists in the shard map.
bool MemRecursorCache::replace(CacheEntry&& entry)
{
  if (!entry.d_netmask.empty() || entry.d_rtag) {
    // We don't handle that yet
    return false;
  }
  auto& shard = getMap(entry.d_qname);
  auto lockedShard = shard.lock();

  // invalidate the shard's memoized equal_range lookup
  lockedShard->d_cachecachevalid = false;
  entry.d_submitted = false;
  const bool inserted = lockedShard->d_map.emplace(std::move(entry)).second;
  if (inserted) {
    shard.incEntriesCount();
  }
  return inserted;
}
631

632
// Insert or update a record set in the cache.
// `content` holds records whose d_ttl has already been converted into a TTD
// (absolute deadline); `ttl_time` is the time that conversion was done.
// `ednsmask`/`routingTag` select an ECS- or tag-specific cache slot; `auth`
// marks authoritative data, `refresh` indicates a refresh of an almost-expired
// entry, `state` is the DNSSEC validation state to store.
void MemRecursorCache::replace(time_t now, const DNSName& qname, const QType qtype, const vector<DNSRecord>& content, const SigRecsVec& signatures, const AuthRecsVec& authorityRecs, bool auth, const DNSName& authZone, boost::optional<Netmask> ednsmask, const OptTag& routingTag, vState state, boost::optional<ComboAddress> from, bool refresh, time_t ttl_time)
{
  auto& shard = getMap(qname);
  auto lockedShard = shard.lock();

  lockedShard->d_cachecachevalid = false;
  if (ednsmask) {
    ednsmask = ednsmask->getNormalized();
  }

  // We only store with a tag if we have an ednsmask and the tag is available
  // We only store an ednsmask if we do not have a tag and we do have a mask.
  auto key = std::tuple(qname, qtype.getCode(), ednsmask ? routingTag : boost::none, (ednsmask && !routingTag) ? *ednsmask : Netmask());
  bool isNew = false;
  cache_t::iterator stored = lockedShard->d_map.find(key);
  if (stored == lockedShard->d_map.end()) {
    stored = lockedShard->d_map.insert(CacheEntry(key, auth)).first;
    shard.incEntriesCount();
    isNew = true;
  }

  /* if we are inserting a new entry or updating an expired one (in which case the
     ECS index might have been removed but the entry still exists because it has not
     been garbage collected yet) we might need to update the ECS index.
     Otherwise it should already be indexed and we don't need to update it.
  */
  if (isNew || stored->d_ttd <= now) {
    /* don't bother building an ecsIndex if we don't have any netmask-specific entries */
    if (!routingTag && ednsmask && !ednsmask->empty()) {
      auto ecsIndexKey = std::tuple(qname, qtype.getCode());
      auto ecsIndex = lockedShard->d_ecsIndex.find(ecsIndexKey);
      if (ecsIndex == lockedShard->d_ecsIndex.end()) {
        ecsIndex = lockedShard->d_ecsIndex.insert(ECSIndexEntry(qname, qtype.getCode())).first;
      }
      ecsIndex->addMask(*ednsmask);
    }
  }

  time_t maxTTD = std::numeric_limits<time_t>::max();
  CacheEntry cacheEntry = *stored; // this is a COPY
  cacheEntry.d_qtype = qtype.getCode();

  // An existing entry may win over the new data (e.g. valid auth data vs new
  // unauth data, TTL locking); in that case leave the cache untouched.
  if (!isNew && !cacheEntry.shouldReplace(now, auth, state, refresh)) {
    return;
  }

  cacheEntry.d_state = state;

  // refuse any attempt to *raise* the TTL of auth NS records, as it would make it possible
  // for an auth to keep a "ghost" zone alive forever, even after the delegation is gone from
  // the parent
  // BUT make sure that we CAN refresh the root
  if (cacheEntry.d_auth && auth && qtype == QType::NS && !isNew && !qname.isRoot()) {
    maxTTD = cacheEntry.d_ttd;
  }

  if (auth) {
    cacheEntry.d_auth = true;
  }

  // Replace (not merge) signatures and authority records with the new data
  if (!signatures.empty()) {
    cacheEntry.d_signatures = std::make_shared<const SigRecsVec>(signatures);
  }
  else {
    cacheEntry.d_signatures = nullptr;
  }
  if (!authorityRecs.empty()) {
    cacheEntry.d_authorityRecs = std::make_shared<const AuthRecsVec>(authorityRecs);
  }
  else {
    cacheEntry.d_authorityRecs = nullptr;
  }
  cacheEntry.d_records.clear();
  cacheEntry.d_authZone = authZone;
  if (from) {
    cacheEntry.d_from = *from;
  }
  else {
    cacheEntry.d_from = ComboAddress();
  }

  // Oversized RRSets are stored truncated to a single record and flagged,
  // as the record cache does not like empty RRSets.
  size_t toStore = content.size();
  if (toStore <= s_maxRRSetSize) {
    cacheEntry.d_tooBig = false;
  }
  else {
    toStore = 1; // record cache does not like empty RRSets
    cacheEntry.d_tooBig = true;
  }
  cacheEntry.d_records.reserve(toStore);
  for (const auto& record : content) {
    /* Yes, we have altered the d_ttl value by adding time(nullptr) to it
       prior to calling this function, so the TTL actually holds a TTD. */
    cacheEntry.d_ttd = min(maxTTD, static_cast<time_t>(record.d_ttl)); // XXX this does weird things if TTLs differ in the set

    // coverity[store_truncates_time_t]
    cacheEntry.d_orig_ttl = cacheEntry.d_ttd - ttl_time;
    // Even though we record the time the ttd was computed, there still seems to be a case where the computed
    // d_orig_ttl can wrap.
    // So sanitize the computed ce.d_orig_ttl to be on the safe side
    if (cacheEntry.d_orig_ttl < SyncRes::s_minimumTTL || cacheEntry.d_orig_ttl > SyncRes::s_maxcachettl) {
      cacheEntry.d_orig_ttl = SyncRes::s_minimumTTL;
    }
    cacheEntry.d_records.push_back(record.getContent());
    if (--toStore == 0) {
      break;
    }
  }

  // Refreshed entries move to the back of the LRU sequence
  if (!isNew) {
    moveCacheItemToBack<SequencedTag>(lockedShard->d_map, stored);
  }
  cacheEntry.d_submitted = false;
  cacheEntry.d_servedStale = 0;
  lockedShard->d_map.replace(stored, cacheEntry);
}
1,246,328✔
748

749
// Remove entries for `name` (and, if `sub` is true, all names below it) from
// the cache. `qtype` selects a single type, or 0xffff for all types.
// Returns the number of record sets removed. The ECS index is cleaned up to
// match.
size_t MemRecursorCache::doWipeCache(const DNSName& name, bool sub, const QType qtype)
{
  size_t count = 0;

  if (!sub) {
    // Exact-name wipe: only the shard owning `name` needs to be touched
    auto& shard = getMap(name);
    auto lockedShard = shard.lock();
    lockedShard->d_cachecachevalid = false;
    auto& idx = lockedShard->d_map.get<OrderedTag>();
    auto range = idx.equal_range(name);
    auto iter = range.first;
    while (iter != range.second) {
      if (iter->d_qtype == qtype || qtype == 0xffff) {
        iter = idx.erase(iter);
        count++;
        shard.decEntriesCount();
      }
      else {
        ++iter;
      }
    }

    if (qtype == 0xffff) {
      // All types: wipe every ECS index entry for this name
      auto& ecsIdx = lockedShard->d_ecsIndex.get<OrderedTag>();
      auto ecsIndexRange = ecsIdx.equal_range(name);
      ecsIdx.erase(ecsIndexRange.first, ecsIndexRange.second);
    }
    else {
      auto& ecsIdx = lockedShard->d_ecsIndex.get<HashedTag>();
      auto ecsIndexRange = ecsIdx.equal_range(std::tie(name, qtype));
      ecsIdx.erase(ecsIndexRange.first, ecsIndexRange.second);
    }
  }
  else {
    // Subtree wipe: names below `name` can hash to any shard, so scan them all
    for (auto& content : d_maps) {
      auto map = content.lock();
      map->d_cachecachevalid = false;
      auto& idx = map->d_map.get<OrderedTag>();
      // Canonical ordering groups a name with everything below it, so we can
      // start at lower_bound(name) and stop at the first non-descendant
      for (auto i = idx.lower_bound(name); i != idx.end();) {
        if (!i->d_qname.isPartOf(name)) {
          break;
        }
        if (i->d_qtype == qtype || qtype == 0xffff) {
          count++;
          i = idx.erase(i);
          content.decEntriesCount();
        }
        else {
          ++i;
        }
      }
      auto& ecsIdx = map->d_ecsIndex.get<OrderedTag>();
      for (auto i = ecsIdx.lower_bound(name); i != ecsIdx.end();) {
        if (!i->d_qname.isPartOf(name)) {
          break;
        }
        if (i->d_qtype == qtype || qtype == 0xffff) {
          i = ecsIdx.erase(i);
        }
        else {
          ++i;
        }
      }
    }
  }
  return count;
}
606,087✔
816

817
// Name should be doLimitTime or so.
// Cap the remaining TTL of the entry for (name, qtype) to at most `newTTL`
// seconds from `now`. Returns true if the entry existed, was still alive and
// had more time left than `newTTL` (i.e. it was actually capped).
bool MemRecursorCache::doAgeCache(time_t now, const DNSName& name, const QType qtype, uint32_t newTTL)
{
  auto& shard = getMap(name);
  auto lockedShard = shard.lock();
  cache_t::iterator iter = lockedShard->d_map.find(std::tie(name, qtype));
  if (iter == lockedShard->d_map.end()) {
    return false;
  }

  CacheEntry cacheEntry = *iter; // copy; written back via replace() below
  if (cacheEntry.d_ttd < now) {
    return false; // would be dead anyhow
  }

  // coverity[store_truncates_time_t]
  auto maxTTL = static_cast<uint32_t>(cacheEntry.d_ttd - now);
  if (maxTTL > newTTL) {
    lockedShard->d_cachecachevalid = false;

    time_t newTTD = now + newTTL;

    // NOTE: maxTTL > newTTL already implies d_ttd > newTTD; this check is a
    // belt-and-braces guard before mutating the stored entry
    if (cacheEntry.d_ttd > newTTD) {
      cacheEntry.d_ttd = newTTD;
      lockedShard->d_map.replace(iter, cacheEntry);
    }
    return true;
  }
  return false;
}
72✔
847

848
// Update the stored DNSSEC validation state of the cached entry matching
// (qname, qtype, who, routingTag), optionally capping its TTD to `capTTD`.
// `requireAuth` restricts the match to authoritative entries.
// Returns true if an entry was updated. Throws for ANY/ADDR pseudo-types,
// which would match multiple record sets.
bool MemRecursorCache::updateValidationStatus(time_t now, const DNSName& qname, const QType qtype, const ComboAddress& who, const OptTag& routingTag, bool requireAuth, vState newState, boost::optional<time_t> capTTD)
{
  if (qtype == QType::ANY) {
    throw std::runtime_error("Trying to update the DNSSEC validation status of all (via ANY) records for " + qname.toLogString());
  }
  if (qtype == QType::ADDR) {
    throw std::runtime_error("Trying to update the DNSSEC validation status of several (via ADDR) records for " + qname.toLogString());
  }

  auto& content = getMap(qname);
  auto map = content.lock();

  bool updated = false;
  // ECS-aware lookup when netmask-specific entries exist and no routing tag is in play
  if (!map->d_ecsIndex.empty() && !routingTag) {
    auto entry = getEntryUsingECSIndex(*map, now, qname, qtype, requireAuth, who, false); // XXX serveStale?
    if (entry == map->d_map.end()) {
      return false;
    }

    entry->d_state = newState;
    if (capTTD) {
      entry->d_ttd = std::min(entry->d_ttd, *capTTD);
    }
    return true;
  }

  auto entries = getEntries(*map, qname, qtype, routingTag);

  // Only the first matching entry is updated; we stop after one hit
  for (auto i = entries.first; i != entries.second; ++i) {
    auto firstIndexIterator = map->d_map.project<OrderedTag>(i);

    if (!entryMatches(firstIndexIterator, qtype, requireAuth, who)) {
      continue;
    }

    i->d_state = newState;
    if (capTTD) {
      i->d_ttd = std::min(i->d_ttd, *capTTD);
    }
    updated = true;

    break;
  }

  return updated;
}
63✔
894

895
// Write a human-readable dump of the whole record cache to `fileDesc`
// (zone-file-like lines plus per-shard statistics comments). The descriptor
// is dup()ed so the caller keeps ownership of the original.
// Returns the number of records (including signatures) written.
uint64_t MemRecursorCache::doDump(int fileDesc, size_t maxCacheEntries)
{
  int newfd = dup(fileDesc);
  if (newfd == -1) {
    return 0;
  }
  auto filePtr = pdns::UniqueFilePtr(fdopen(newfd, "w"));
  if (!filePtr) { // dup probably failed
    close(newfd); // fdopen did not take ownership, so close explicitly
    return 0;
  }

  fprintf(filePtr.get(), "; main record cache dump follows\n;\n");
  uint64_t count = 0;
  size_t shardNumber = 0;
  // Track smallest/largest shard to report shard balance at the end
  size_t min = std::numeric_limits<size_t>::max();
  size_t max = 0;
  for (auto& shard : d_maps) {
    auto lockedShard = shard.lock();
    const auto shardSize = lockedShard->d_map.size();
    size_t bytes = 0;
    for (const auto& entry : lockedShard->d_map) {
      bytes += entry.sizeEstimate();
    }
    fprintf(filePtr.get(), "; record cache shard %zu; size %zu bytes %zu\n", shardNumber, shardSize, bytes);
    min = std::min(min, shardSize);
    max = std::max(max, shardSize);
    shardNumber++;
    const auto& sidx = lockedShard->d_map.get<SequencedTag>();
    time_t now = time(nullptr);
    for (const auto& recordSet : sidx) {
      for (const auto& record : recordSet.d_records) {
        count++;
        // getZoneRepresentation() can throw on malformed content; keep dumping
        try {
          fprintf(filePtr.get(), "%s %" PRIu32 " %" PRId64 " IN %s %s ; (%s) auth=%i zone=%s from=%s nm=%s rtag=%s ss=%hd%s\n", recordSet.d_qname.toString().c_str(), recordSet.d_orig_ttl, static_cast<int64_t>(recordSet.d_ttd - now), recordSet.d_qtype.toString().c_str(), record->getZoneRepresentation().c_str(), vStateToString(recordSet.d_state).c_str(), static_cast<int>(recordSet.d_auth), recordSet.d_authZone.toLogString().c_str(), recordSet.d_from.toString().c_str(), recordSet.d_netmask.empty() ? "" : recordSet.d_netmask.toString().c_str(), !recordSet.d_rtag ? "" : recordSet.d_rtag.get().c_str(), recordSet.d_servedStale, recordSet.d_tooBig ? " (too big!)" : "");
        }
        catch (...) {
          fprintf(filePtr.get(), "; error printing '%s'\n", recordSet.d_qname.empty() ? "EMPTY" : recordSet.d_qname.toString().c_str());
        }
      }
      if (recordSet.d_signatures) {
        for (const auto& sig : *recordSet.d_signatures) {
          count++;
          try {
            fprintf(filePtr.get(), "%s %" PRIu32 " %" PRId64 " IN RRSIG %s ; %s\n", recordSet.d_qname.toString().c_str(), recordSet.d_orig_ttl, static_cast<int64_t>(recordSet.d_ttd - now), sig->getZoneRepresentation().c_str(), recordSet.d_netmask.empty() ? "" : recordSet.d_netmask.toString().c_str());
          }
          catch (...) {
            fprintf(filePtr.get(), "; error printing '%s'\n", recordSet.d_qname.empty() ? "EMPTY" : recordSet.d_qname.toString().c_str());
          }
        }
      }
    }
  }
  fprintf(filePtr.get(), "; main record cache size: %zu/%zu shards: %zu min/max shard size: %zu/%zu\n", size(), maxCacheEntries, d_maps.size(), min, max);
  return count;
}
×
951

952
void MemRecursorCache::doPrune(time_t now, size_t keep)
953
{
64✔
954
  size_t cacheSize = size();
64✔
955
  pruneMutexCollectionsVector<SequencedTag>(now, d_maps, keep, cacheSize);
64✔
956
}
64✔
957

958
// Protobuf wire-format tags for the binary record-cache dump produced by
// getRecordSets() and consumed by putRecordSets(). Tag numbers are part of
// the on-the-wire protocol: never renumber existing entries, only append.

// Top-level dump message: header fields plus repeated cache entries.
enum class PBCacheDump : protozero::pbf_tag_type
{
  required_string_version = 1,
  required_string_identity = 2,
  required_uint64_protocolVersion = 3,
  required_int64_time = 4,
  required_string_type = 5,
  repeated_message_cacheEntry = 6,
};

// One cached record set. The encoder writes name and qtype first (despite
// their higher tag numbers) because the decoder needs them to deserialize
// the record payloads that follow in stream order.
enum class PBCacheEntry : protozero::pbf_tag_type
{
  repeated_bytes_record = 1,
  repeated_bytes_sig = 2,
  repeated_message_authRecord = 3,
  required_bytes_name = 4,
  required_bytes_authZone = 5,
  required_message_from = 6,
  optional_bytes_netmask = 7,
  optional_bytes_rtag = 8,
  required_uint32_state = 9,
  required_int64_ttd = 10,
  required_uint32_orig_ttl = 11,
  required_uint32_servedStale = 12,
  required_uint32_qtype = 13,
  required_bool_auth = 14,
  required_bool_submitted = 15,
  required_bool_tooBig = 16,
};

// Serialized ComboAddress: port plus raw address bytes; the address family
// is implied by the byte count (4 for IPv4, 16 for IPv6).
enum class PBComboAddress : protozero::pbf_tag_type
{
  required_uint32_port = 1,
  required_bytes_address = 2, // family implicit
};

// One authority-section record attached to a cache entry.
enum class PBAuthRecord : protozero::pbf_tag_type
{
  required_bytes_name = 1,
  required_bytes_rdata = 2,
  required_uint32_type = 3,
  required_uint32_class = 4,
  required_uint32_ttl = 5,
  required_uint32_place = 6,
  required_uint32_clen = 7,
};
1004

1005
// Serialize a ComboAddress as a PBComboAddress sub-message of `writer`.
// Only the port and the raw address bytes are stored; the family is implied
// by the address length on decode.
// NOTE(review): for a family that is neither AF_INET nor AF_INET6 no address
// bytes are written at all, which makes decodeComboAddress() throw — confirm
// such addresses cannot reach this path.
template <typename T>
static void encodeComboAddress(protozero::pbf_builder<T>& writer, T type, const ComboAddress& address)
{
  protozero::pbf_builder<PBComboAddress> message(writer, type);

  // Skip all parts except address and port
  message.add_uint32(PBComboAddress::required_uint32_port, address.getPort());
  if (address.sin4.sin_family == AF_INET) {
    message.add_bytes(PBComboAddress::required_bytes_address, reinterpret_cast<const char*>(&address.sin4.sin_addr.s_addr), sizeof(address.sin4.sin_addr.s_addr)); // NOLINT(cppcoreguidelines-pro-type-reinterpret-cast): it's the API
  }
  else if (address.sin4.sin_family == AF_INET6) {
    message.add_bytes(PBComboAddress::required_bytes_address, reinterpret_cast<const char*>(&address.sin6.sin6_addr.s6_addr), sizeof(address.sin6.sin6_addr)); // NOLINT(cppcoreguidelines-pro-type-reinterpret-cast): it's the API
  }
}
200✔
1019

1020
template <typename T>
1021
static void decodeComboAddress(protozero::pbf_message<T>& reader, ComboAddress& address)
1022
{
200✔
1023
  address.reset();
200✔
1024
  protozero::pbf_message<PBComboAddress> message(reader.get_message());
200✔
1025

1026
  // Skip all parts except address and port
1027
  if (message.next(PBComboAddress::required_uint32_port)) {
200!
1028
    address.setPort(message.get_uint32());
200✔
1029
  }
200✔
1030
  else {
×
1031
    throw std::runtime_error("expected port in protobuf data");
×
1032
  }
×
1033
  constexpr auto inet4size = sizeof(address.sin4.sin_addr);
200✔
1034
  constexpr auto inet6size = sizeof(address.sin6.sin6_addr);
200✔
1035
  if (message.next(PBComboAddress::required_bytes_address)) {
200!
1036
    auto data = message.get_bytes();
200✔
1037
    address.sin4.sin_family = data.size() == inet4size ? AF_INET : AF_INET6;
200!
1038
    if (data.size() == inet4size) {
200!
1039
      address.sin4.sin_family = AF_INET;
×
1040
      memcpy(&address.sin4.sin_addr, data.data(), data.size());
×
1041
    }
×
1042
    else if (data.size() == inet6size) {
200!
1043
      address.sin6.sin6_family = AF_INET6;
200✔
1044
      memcpy(&address.sin6.sin6_addr, data.data(), data.size());
200✔
1045
    }
200✔
1046
    else {
×
1047
      throw std::runtime_error("unexpected address family in protobuf data");
×
1048
    }
×
1049
  }
200✔
1050
  else {
×
1051
    throw std::runtime_error("expected address bytes in protobuf data");
×
1052
  }
×
1053
}
200✔
1054

1055
template <typename T>
1056
static void encodeNetmask(protozero::pbf_builder<T>& writer, T type, const Netmask& subnet)
1057
{
200✔
1058
  if (!subnet.empty()) {
200!
1059
    writer.add_bytes(type, reinterpret_cast<const char*>(&subnet), sizeof(Netmask)); // NOLINT(cppcoreguidelines-pro-type-reinterpret-cast): it's the API
×
1060
  }
×
1061
}
200✔
1062

1063
template <typename T>
1064
static void decodeNetmask(protozero::pbf_message<T>& message, Netmask& subnet)
1065
{
×
1066
  auto data = message.get_bytes();
×
1067
  memcpy(&subnet, data.data(), data.size());
×
1068
}
×
1069

1070
// Serialize one cached record set into a PBCacheEntry protobuf message.
// `recordSet` is an iterator/pointer-like handle to a CacheEntry.
// Field order matters: protobuf decoding in putRecordSet() is stream-order,
// so name and qtype must be emitted before the record payloads that need
// them for deserialization.
template <typename T, typename U>
void MemRecursorCache::getRecordSet(T& message, U recordSet)
{
  // Two fields below must come before the other fields
  message.add_bytes(PBCacheEntry::required_bytes_name, recordSet->d_qname.toString());
  message.add_uint32(PBCacheEntry::required_uint32_qtype, recordSet->d_qtype);
  for (const auto& record : recordSet->d_records) {
    message.add_bytes(PBCacheEntry::repeated_bytes_record, record->serialize(recordSet->d_qname, true));
  }
  if (recordSet->d_signatures) {
    for (const auto& record : *recordSet->d_signatures) {
      message.add_bytes(PBCacheEntry::repeated_bytes_sig, record->serialize(recordSet->d_qname, true));
    }
  }
  if (recordSet->d_authorityRecs) {
    for (const auto& authRec : *recordSet->d_authorityRecs) {
      protozero::pbf_builder<PBAuthRecord> auth(message, PBCacheEntry::repeated_message_authRecord);
      auth.add_bytes(PBAuthRecord::required_bytes_name, authRec.d_name.toString());
      auth.add_bytes(PBAuthRecord::required_bytes_rdata, authRec.getContent()->serialize(authRec.d_name, true));
      auth.add_uint32(PBAuthRecord::required_uint32_type, authRec.d_type);
      auth.add_uint32(PBAuthRecord::required_uint32_class, authRec.d_class);
      auth.add_uint32(PBAuthRecord::required_uint32_ttl, authRec.d_ttl);
      auth.add_uint32(PBAuthRecord::required_uint32_place, authRec.d_place);
      auth.add_uint32(PBAuthRecord::required_uint32_clen, authRec.d_clen);
    }
  }
  message.add_bytes(PBCacheEntry::required_bytes_authZone, recordSet->d_authZone.toString());
  encodeComboAddress(message, PBCacheEntry::required_message_from, recordSet->d_from);
  encodeNetmask(message, PBCacheEntry::optional_bytes_netmask, recordSet->d_netmask);
  if (recordSet->d_rtag) {
    message.add_bytes(PBCacheEntry::optional_bytes_rtag, *recordSet->d_rtag);
  }
  message.add_uint32(PBCacheEntry::required_uint32_state, static_cast<uint32_t>(recordSet->d_state));
  message.add_int64(PBCacheEntry::required_int64_ttd, recordSet->d_ttd);
  message.add_uint32(PBCacheEntry::required_uint32_orig_ttl, recordSet->d_orig_ttl);
  message.add_uint32(PBCacheEntry::required_uint32_servedStale, recordSet->d_servedStale);
  message.add_bool(PBCacheEntry::required_bool_auth, recordSet->d_auth);
  message.add_bool(PBCacheEntry::required_bool_submitted, recordSet->d_submitted);
  message.add_bool(PBCacheEntry::required_bool_tooBig, recordSet->d_tooBig);
}
200✔
1110

1111
// Produce a binary (protobuf) dump of the record cache into `ret`.
// `perShard` limits the number of record sets taken per shard and `maxSize`
// caps the total output size; 0 means unlimited for either. Record sets are
// taken most-recently-used first within each shard.
// Returns the number of record sets written.
size_t MemRecursorCache::getRecordSets(size_t perShard, size_t maxSize, std::string& ret)
{
  auto log = g_slog->withName("recordcache")->withValues("perShard", Logging::Loggable(perShard), "maxSize", Logging::Loggable(maxSize));
  log->info(Logr::Info, "Producing cache dump");

  // A size estimate is hard: size() returns the number of record *sets*. Each record set can have
  // multiple records, plus other associated records like signatures. 150 seems to works ok.
  size_t estimate = maxSize == 0 ? size() * 150 : maxSize + 4096; // We may overshoot (will be rolled back)

  if (perShard == 0) {
    perShard = std::numeric_limits<size_t>::max();
  }
  if (maxSize == 0) {
    maxSize = std::numeric_limits<size_t>::max();
  }
  protozero::pbf_builder<PBCacheDump> full(ret);
  // Header fields; putRecordSets() validates protocolVersion and type
  full.add_string(PBCacheDump::required_string_version, getPDNSVersion());
  full.add_string(PBCacheDump::required_string_identity, SyncRes::s_serverID);
  full.add_uint64(PBCacheDump::required_uint64_protocolVersion, 1);
  full.add_int64(PBCacheDump::required_int64_time, time(nullptr));
  full.add_string(PBCacheDump::required_string_type, "PBCacheDump");

  size_t count = 0;
  ret.reserve(estimate);

  for (auto& shard : d_maps) {
    auto lockedShard = shard.lock();
    const auto& sidx = lockedShard->d_map.get<SequencedTag>();
    size_t thisShardCount = 0;
    // Reverse iteration over the LRU sequence: most recently used first
    for (auto recordSet = sidx.rbegin(); recordSet != sidx.rend(); ++recordSet) {
      protozero::pbf_builder<PBCacheEntry> message(full, PBCacheDump::repeated_message_cacheEntry);
      getRecordSet(message, recordSet);
      if (ret.size() > maxSize) {
        // Undo the entry that pushed us over the limit and stop
        message.rollback();
        log->info(Logr::Info, "Produced cache dump (max size reached)", "size", Logging::Loggable(ret.size()), "count", Logging::Loggable(count));
        return count;
      }
      ++count;
      ++thisShardCount;
      if (thisShardCount >= perShard) {
        break;
      }
    }
  }
  log->info(Logr::Info, "Produced cache dump", "size", Logging::Loggable(ret.size()), "count", Logging::Loggable(count));
  return count;
}
2✔
1158

1159
// Decode one PBAuthRecord sub-message into a DNSRecord and append it to
// `authRecs`. `qname` is the owner name used for rdata deserialization.
// Fix: the rdata field (wire tag 2) precedes the type field (tag 3) in the
// stream, so the original code deserialized the rdata while authRecord.d_type
// was still default-initialized. We now buffer the raw rdata and deserialize
// only after all fields, including the type, have been read.
static void putAuthRecord(protozero::pbf_message<PBCacheEntry>& message, const DNSName& qname, std::vector<DNSRecord>& authRecs)
{
  protozero::pbf_message<PBAuthRecord> auth = message.get_message();
  DNSRecord authRecord;
  std::string rdata; // raw rdata bytes, deserialized after the loop
  while (auth.next()) {
    switch (auth.tag()) {
    case PBAuthRecord::required_bytes_name:
      authRecord.d_name = DNSName(auth.get_bytes());
      break;
    case PBAuthRecord::required_bytes_rdata: {
      auto view = auth.get_bytes();
      rdata.assign(view.data(), view.size());
      break;
    }
    case PBAuthRecord::required_uint32_class:
      authRecord.d_class = auth.get_uint32();
      break;
    case PBAuthRecord::required_uint32_type:
      authRecord.d_type = auth.get_uint32();
      break;
    case PBAuthRecord::required_uint32_ttl:
      authRecord.d_ttl = auth.get_uint32();
      break;
    case PBAuthRecord::required_uint32_place:
      authRecord.d_place = static_cast<DNSResourceRecord::Place>(auth.get_uint32());
      break;
    case PBAuthRecord::required_uint32_clen:
      authRecord.d_clen = auth.get_uint32();
      break;
    default:
      break;
    }
  }
  if (!rdata.empty()) {
    // d_type is now known, whatever order the fields arrived in
    auto ptr = DNSRecordContent::deserialize(qname, authRecord.d_type, rdata);
    authRecord.setContent(ptr);
  }
  authRecs.emplace_back(authRecord);
}
200✔
1194

1195
// Decode one PBCacheEntry message into a CacheEntry and insert it into the
// cache via replace(CacheEntry&&). Relies on the encoder (getRecordSet)
// writing name and qtype before the record/signature payloads, since the
// decode loop processes fields in stream order and needs both to
// deserialize record contents.
// Returns true if the entry was inserted.
template <typename T>
bool MemRecursorCache::putRecordSet(T& message)
{
  AuthRecsVec authRecs;
  SigRecsVec sigRecs;
  // Placeholder key; the real name/qtype are filled in from the stream below
  CacheEntry cacheEntry{{g_rootdnsname, QType::A, boost::none, Netmask()}, false};
  while (message.next()) {
    switch (message.tag()) {
    case PBCacheEntry::repeated_bytes_record: {
      auto ptr = DNSRecordContent::deserialize(cacheEntry.d_qname, cacheEntry.d_qtype, message.get_bytes());
      cacheEntry.d_records.emplace_back(ptr);
      break;
    }
    case PBCacheEntry::repeated_bytes_sig: {
      auto ptr = DNSRecordContent::deserialize(cacheEntry.d_qname, QType::RRSIG, message.get_bytes());
      sigRecs.emplace_back(std::dynamic_pointer_cast<RRSIGRecordContent>(ptr));
      break;
    }
    case PBCacheEntry::repeated_message_authRecord:
      putAuthRecord(message, cacheEntry.d_qname, authRecs);
      break;
    case PBCacheEntry::required_bytes_name:
      cacheEntry.d_qname = DNSName(message.get_bytes());
      break;
    case PBCacheEntry::required_bytes_authZone:
      cacheEntry.d_authZone = DNSName(message.get_bytes());
      break;
    case PBCacheEntry::required_message_from:
      decodeComboAddress(message, cacheEntry.d_from);
      break;
    case PBCacheEntry::optional_bytes_netmask:
      decodeNetmask(message, cacheEntry.d_netmask);
      break;
    case PBCacheEntry::optional_bytes_rtag:
      cacheEntry.d_rtag = message.get_bytes();
      break;
    case PBCacheEntry::required_uint32_state:
      cacheEntry.d_state = static_cast<vState>(message.get_uint32());
      break;
    case PBCacheEntry::required_int64_ttd:
      cacheEntry.d_ttd = message.get_int64();
      break;
    case PBCacheEntry::required_uint32_orig_ttl:
      cacheEntry.d_orig_ttl = message.get_uint32();
      break;
    case PBCacheEntry::required_uint32_servedStale:
      cacheEntry.d_servedStale = message.get_uint32();
      break;
    case PBCacheEntry::required_uint32_qtype:
      cacheEntry.d_qtype = message.get_uint32();
      break;
    case PBCacheEntry::required_bool_auth:
      cacheEntry.d_auth = message.get_bool();
      break;
    case PBCacheEntry::required_bool_submitted:
      // An imported entry is never in-flight for refresh, whatever the dump says
      cacheEntry.d_submitted = message.get_bool();
      cacheEntry.d_submitted = false; // actually not
      break;
    case PBCacheEntry::required_bool_tooBig:
      cacheEntry.d_tooBig = message.get_bool();
      break;
    default:
      break;
    }
  }
  if (!authRecs.empty()) {
    cacheEntry.d_authorityRecs = std::make_shared<const AuthRecsVec>(std::move(authRecs));
  }
  if (!sigRecs.empty()) {
    cacheEntry.d_signatures = std::make_shared<const SigRecsVec>(std::move(sigRecs));
  }
  return replace(std::move(cacheEntry));
}
200✔
1268

1269
// Import a binary cache dump previously produced by getRecordSets().
// Validates the protocol version and dump type before accepting any cache
// entries. Returns the number of record sets actually inserted, or 0 when
// the dump could not be processed (errors are logged, not propagated).
size_t MemRecursorCache::putRecordSets(const std::string& pbuf)
{
  auto log = g_slog->withName("recordcache")->withValues("size", Logging::Loggable(pbuf.size()));
  log->info(Logr::Debug, "Processing cache dump");

  protozero::pbf_message<PBCacheDump> full(pbuf);
  size_t count = 0;
  size_t inserted = 0;
  try {
    // Header fields must precede cache entries; enforced below
    bool protocolVersionSeen = false;
    bool typeSeen = false;
    while (full.next()) {
      switch (full.tag()) {
      case PBCacheDump::required_string_version: {
        auto version = full.get_string();
        log = log->withValues("version", Logging::Loggable(version));
        break;
      }
      case PBCacheDump::required_string_identity: {
        auto identity = full.get_string();
        log = log->withValues("identity", Logging::Loggable(identity));
        break;
      }
      case PBCacheDump::required_uint64_protocolVersion: {
        auto protocolVersion = full.get_uint64();
        log = log->withValues("protocolVersion", Logging::Loggable(protocolVersion));
        if (protocolVersion != 1) {
          throw std::runtime_error("Protocol version mismatch");
        }
        protocolVersionSeen = true;
        break;
      }
      case PBCacheDump::required_int64_time: {
        auto time = full.get_int64();
        log = log->withValues("time", Logging::Loggable(time));
        break;
      }
      case PBCacheDump::required_string_type: {
        auto type = full.get_string();
        if (type != "PBCacheDump") {
          throw std::runtime_error("Data type mismatch");
        }
        typeSeen = true;
        break;
      }
      case PBCacheDump::repeated_message_cacheEntry: {
        if (!protocolVersionSeen || !typeSeen) {
          throw std::runtime_error("Required field missing");
        }
        protozero::pbf_message<PBCacheEntry> message = full.get_message();
        if (putRecordSet(message)) {
          ++inserted;
        }
        ++count;
        break;
      }
      }
    }
    log->info(Logr::Info, "Processed cache dump", "processed", Logging::Loggable(count), "inserted", Logging::Loggable(inserted));
    return inserted;
  }
  catch (const std::runtime_error& e) {
    log->error(Logr::Error, e.what(), "Runtime exception processing cache dump");
  }
  catch (const std::exception& e) {
    log->error(Logr::Error, e.what(), "Exception processing cache dump");
  }
  catch (...) {
    log->error(Logr::Error, "Other exception processing cache dump");
  }
  return 0;
}
2✔
1341

1342
namespace boost
1343
{
1344
size_t hash_value(const MemRecursorCache::OptTag& rtag)
1345
{
2,548,364✔
1346
  return rtag ? hash_value(rtag.get()) : 0xcafebaaf;
2,548,364✔
1347
}
2,548,364✔
1348
}
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2025 Coveralls, Inc