PowerDNS / pdns, build 18370591226

09 Oct 2025 08:40AM UTC coverage: 64.094% (-0.04%) from 64.136%

Pull Request #16224: dnsdist: Fix a typo in the XSK documentation
Merge b58891300 into 152db0df0

42757 of 101504 branches covered (42.12%)

Branch coverage included in aggregate %.

129859 of 167814 relevant lines covered (77.38%)

5755713.48 hits per line

Source File

/pdns/recursordist/recursor_cache.cc: 85.45% covered
1
/*
2
 * This file is part of PowerDNS or dnsdist.
3
 * Copyright -- PowerDNS.COM B.V. and its contributors
4
 *
5
 * This program is free software; you can redistribute it and/or modify
6
 * it under the terms of version 2 of the GNU General Public License as
7
 * published by the Free Software Foundation.
8
 *
9
 * In addition, for the avoidance of any doubt, permission is granted to
10
 * link this program with OpenSSL and to (re)distribute the binaries
11
 * produced as the result of such linking.
12
 *
13
 * This program is distributed in the hope that it will be useful,
14
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16
 * GNU General Public License for more details.
17
 *
18
 * You should have received a copy of the GNU General Public License
19
 * along with this program; if not, write to the Free Software
20
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
21
 */
22

23
#include "config.h"
24

25
#include <cinttypes>
26
#include <protozero/pbf_builder.hpp>
27
#include <protozero/pbf_message.hpp>
28

29
#include "recursor_cache.hh"
30
#include "misc.hh"
31
#include "dnsrecords.hh"
32
#include "syncres.hh"
33
#include "namespaces.hh"
34
#include "cachecleaner.hh"
35
#include "rec-taskqueue.hh"
36
#include "version.hh"
37
#include "protozero-helpers.hh"
38

39
/*
40
 * SERVE-STALE: the general approach
41
 *
42
 * The general switch to enable serve-stale is s_maxServedStaleExtensions. If this value is zero, no
43
 * serve-stale is done. If it is positive, it determines how many times the serve-stale status of a
44
 * record can be extended.
45
 *
46
 * Each record in the cache has a field d_servedStale. If this value is zero, no special handling is
47
 * done. If it is positive, the record is being served stale. The value determines how many times
48
 * the serve-stale status was extended. Each time an extension happens, the value is incremented and
49
 * a task to see if the record resolves will be pushed. When the served-stale status is extended,
50
 * the TTD of a record is also changed so the record will be considered not-expired by the get()
51
 * function. The TTD will be s_serveStaleExtensionPeriod in the future, unless the original TTL was
52
 * smaller than that. If d_servedStale reaches s_maxServedStaleExtensions the serve-stale status
53
 * will no longer be extended and the record will be considered really expired.
54
 *
55
 * With s_serveStaleExtensionPeriod of 30 seconds, setting s_maxServedStaleExtensions to 1440 will
56
 * cause a record to be served stale a maximum of 30s * 1440 = 12 hours. If the original TTL is
57
 * smaller than 30, this period will be shorter. If there was a long time between serve-stale
58
 * extensions, the value of d_servedStale will be incremented by more than one to account for the
59
 * longer period.
60
 *
61
 * If serve-stale is enabled, the resolving process first will try to resolve a record in the
62
 * ordinary way, with the difference that a timeout will not lead to an ImmediateServFailException
63
 * being passed to the caller, but the resolving will be tried again with a flag to allow marking
64
 * records as served-stale. If the second time around a timeout happens, an
65
 * ImmediateServFailException *will* be passed to the caller.
66
 *
67
 * When serving stale, records are only wiped from the cache if they are older than
68
 * s_maxServedStaleExtensions * s_serveStaleExtensionPeriod. See isStale(). This is to have a good
69
 * chance of records being available for marking stale if a name server has an issue.
70
 *
71
 * The tasks to see if nameservers are reachable again do a resolve in refresh mode, considering
72
 * served-stale records as expired. When a record resolves again, the d_servedStale field will be
73
 * reset.
74
 */
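/* A minimal sketch of the arithmetic described above, assuming the example values
 * from this comment (s_serveStaleExtensionPeriod = 30, s_maxServedStaleExtensions = 1440;
 * the helper name is illustrative only and not part of this file's interface):
 *
 *   // each extension adds at most min(origTTL, extensionPeriod) seconds,
 *   // and can happen at most maxExtensions times
 *   constexpr time_t maxStaleWindow(uint32_t origTTL, uint32_t extensionPeriod, uint16_t maxExtensions)
 *   {
 *     return static_cast<time_t>(std::min(origTTL, extensionPeriod)) * maxExtensions;
 *   }
 *
 *   // maxStaleWindow(3600, 30, 1440) == 43200 seconds, i.e. the 12 hours mentioned above
 *   // maxStaleWindow(10, 30, 1440)   == 14400 seconds, i.e. shorter when the original TTL is small
 */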
75

76
uint16_t MemRecursorCache::s_maxServedStaleExtensions;
77
uint16_t MemRecursorCache::s_maxRRSetSize = 256;
78
bool MemRecursorCache::s_limitQTypeAny = true;
79

80
const MemRecursorCache::AuthRecs MemRecursorCache::s_emptyAuthRecs = std::make_shared<MemRecursorCache::AuthRecsVec>();
81
const MemRecursorCache::SigRecs MemRecursorCache::s_emptySigRecs = std::make_shared<MemRecursorCache::SigRecsVec>();
82

83
void MemRecursorCache::resetStaticsForTests()
84
{
638✔
85
  s_maxServedStaleExtensions = 0;
638✔
86
  SyncRes::s_refresh_ttlperc = 0;
638✔
87
  SyncRes::s_locked_ttlperc = 0;
638✔
88
  SyncRes::s_minimumTTL = 0;
638✔
89
  s_maxRRSetSize = 256;
638✔
90
  s_limitQTypeAny = true;
638✔
91
}
638✔
92

93
MemRecursorCache::MemRecursorCache(size_t mapsCount) :
94
  d_maps(mapsCount == 0 ? 1 : mapsCount)
95
{
854✔
96
}
854✔
97

98
size_t MemRecursorCache::size() const
99
{
718✔
100
  size_t count = 0;
718✔
101
  for (const auto& shard : d_maps) {
540,911✔
102
    count += shard.getEntriesCount();
540,911✔
103
  }
540,911✔
104
  return count;
718✔
105
}
718✔
106

107
pair<uint64_t, uint64_t> MemRecursorCache::stats()
108
{
336✔
109
  uint64_t contended = 0;
336✔
110
  uint64_t acquired = 0;
336✔
111
  for (auto& shard : d_maps) {
241,814✔
112
    auto lockedShard = shard.lock();
241,814✔
113
    contended += lockedShard->d_contended_count;
241,814✔
114
    acquired += lockedShard->d_acquired_count;
241,814✔
115
  }
241,814✔
116
  return {contended, acquired};
336✔
117
}
336✔
118

119
size_t MemRecursorCache::ecsIndexSize()
120
{
54✔
121
  // XXX!
122
  size_t count = 0;
54✔
123
  for (auto& shard : d_maps) {
18,468✔
124
    auto lockedShard = shard.lock();
18,468✔
125
    count += lockedShard->d_ecsIndex.size();
18,468✔
126
  }
18,468✔
127
  return count;
54✔
128
}
54✔
129

130
size_t MemRecursorCache::CacheEntry::authRecsSizeEstimate() const
131
{
78✔
132
  size_t ret = 0;
78✔
133
  if (d_authorityRecs) {
78!
134
    for (const auto& record : *d_authorityRecs) {
×
135
      ret += record.sizeEstimate();
×
136
    }
×
137
  }
×
138
  return ret;
78✔
139
}
78✔
140

141
size_t MemRecursorCache::CacheEntry::sigRecsSizeEstimate() const
142
{
78✔
143
  size_t ret = 0;
78✔
144
  if (d_signatures) {
78✔
145
    for (const auto& record : *d_signatures) {
36✔
146
      ret += record->sizeEstimate();
36✔
147
    }
36✔
148
  }
36✔
149
  return ret;
78✔
150
}
78✔
151

152
size_t MemRecursorCache::CacheEntry::sizeEstimate() const
153
{
78✔
154
  auto ret = sizeof(struct CacheEntry);
78✔
155
  ret += d_qname.sizeEstimate();
78✔
156
  ret += d_authZone.sizeEstimate();
78✔
157
  for (const auto& record : d_records) {
88✔
158
    ret += record->sizeEstimate();
88✔
159
  }
88✔
160
  ret += authRecsSizeEstimate();
78✔
161
  ret += sigRecsSizeEstimate();
78✔
162
  return ret;
78✔
163
}
78✔
164

165
// this function is too slow to poll!
166
size_t MemRecursorCache::bytes()
167
{
14✔
168
  size_t ret = 0;
14✔
169
  for (auto& shard : d_maps) {
14,336✔
170
    auto lockedShard = shard.lock();
14,336✔
171
    for (const auto& entry : lockedShard->d_map) {
14,336✔
172
      ret += entry.sizeEstimate();
6✔
173
    }
6✔
174
  }
14,336✔
175
  return ret;
14✔
176
}
14✔
177

178
static void updateDNSSECValidationStateFromCache(boost::optional<vState>& state, const vState stateUpdate)
179
{
727,195✔
180
  // if there was no state it's easy
181
  if (state == boost::none) {
727,195✔
182
    state = stateUpdate;
727,167✔
183
    return;
727,167✔
184
  }
727,167✔
185

186
  if (stateUpdate == vState::TA) {
28!
187
    state = vState::Secure;
×
188
  }
×
189
  else if (stateUpdate == vState::NTA) {
28!
190
    state = vState::Insecure;
×
191
  }
×
192
  else if (vStateIsBogus(stateUpdate) || stateUpdate == vState::Indeterminate) {
28!
193
    state = stateUpdate;
28✔
194
  }
28✔
195
  else if (stateUpdate == vState::Insecure || stateUpdate == vState::Secure) {
×
196
    if (!vStateIsBogus(*state) && *state != vState::Indeterminate) {
×
197
      state = stateUpdate;
×
198
    }
×
199
  }
×
200
}
28✔
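/* Summary of the merge rules implemented above (informational only):
 *   no existing state               -> take stateUpdate as-is
 *   stateUpdate == TA               -> state becomes Secure
 *   stateUpdate == NTA              -> state becomes Insecure
 *   stateUpdate Bogus/Indeterminate -> always overwrites the existing state
 *   stateUpdate Secure/Insecure     -> only overwrites when the existing state is
 *                                      neither Bogus nor Indeterminate
 */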
201

202
template <typename T>
203
static void ptrAssign(T* ptr, const T& value)
204
{
3,049,397✔
205
  if (ptr != nullptr) {
3,049,397✔
206
    *ptr = value;
209,029✔
207
  }
209,029✔
208
}
3,049,397✔
209

210
time_t MemRecursorCache::handleHit(time_t now, MapCombo::LockedContent& content, OrderedTagIterator_t& entry, const DNSName& qname, uint32_t& origTTL, vector<DNSRecord>* res, SigRecs* signatures, AuthRecs* authorityRecs, bool* variable, boost::optional<vState>& state, bool* wasAuth, DNSName* fromAuthZone, ComboAddress* fromAuthIP)
211
{
727,198✔
212
  // MUTEX SHOULD BE ACQUIRED (as indicated by the reference to the content which is protected by a lock)
213
  if (entry->d_tooBig) {
727,198✔
214
    throw ImmediateServFailException("too many records in RRSet");
2✔
215
  }
2✔
216
  time_t ttd = entry->d_ttd;
727,196✔
217
  if (ttd <= now) {
727,196!
218
    // Expired, don't bother returning contents. Callers *MUST* check return value of get(), and only look at the entry
219
    // if it returned > 0
220
    return ttd;
×
221
  }
×
222
  origTTL = entry->d_orig_ttl;
727,196✔
223

224
  if (!entry->d_netmask.empty() || entry->d_rtag) {
727,198✔
225
    ptrAssign(variable, true);
516✔
226
  }
516✔
227

228
  if (res != nullptr) {
727,196✔
229
    if (s_limitQTypeAny && res->size() + entry->d_records.size() > s_maxRRSetSize) {
650,927✔
230
      throw ImmediateServFailException("too many records in result");
4✔
231
    }
4✔
232

233
    res->reserve(res->size() + entry->d_records.size());
650,922✔
234

235
    for (const auto& record : entry->d_records) {
769,144✔
236
      DNSRecord result;
769,144✔
237
      result.d_name = qname;
769,144✔
238
      result.d_type = entry->d_qtype;
769,144✔
239
      result.d_class = QClass::IN;
769,144✔
240
      result.setContent(record);
769,144✔
241
      // coverity[store_truncates_time_t]
242
      result.d_ttl = static_cast<uint32_t>(entry->d_ttd);
769,144✔
243
      result.d_place = DNSResourceRecord::ANSWER;
769,144✔
244
      res->push_back(std::move(result));
769,144✔
245
    }
769,144✔
246
  }
650,922✔
247

248
  if (signatures != nullptr) {
727,192✔
249
    if (*signatures && !(*signatures)->empty() && entry->d_signatures && !entry->d_signatures->empty()) {
9,923!
250
      // Return a new vec if we need to append to a non-empty vector
251
      SigRecsVec vec(**signatures);
2✔
252
      vec.insert(vec.end(), entry->d_signatures->cbegin(), entry->d_signatures->cend());
2✔
253
      *signatures = std::make_shared<SigRecsVec>(std::move(vec));
2✔
254
    }
2✔
255
    else {
9,921✔
256
      *signatures = entry->d_signatures ? entry->d_signatures : s_emptySigRecs;
9,921✔
257
    }
9,921✔
258
  }
9,923✔
259

260
  if (authorityRecs != nullptr) {
727,192✔
261
    // XXX Might need to be adapted like sigs to handle a non-empty incoming authorityRecs
262
    assert(*authorityRecs == nullptr || (*authorityRecs)->empty());
9,676✔
263
    *authorityRecs = entry->d_authorityRecs ? entry->d_authorityRecs : s_emptyAuthRecs;
9,676✔
264
  }
9,676✔
265

266
  updateDNSSECValidationStateFromCache(state, entry->d_state);
×
267

268
  if (wasAuth != nullptr) {
727,192✔
269
    *wasAuth = *wasAuth && entry->d_auth;
37,747✔
270
  }
37,746✔
271
  ptrAssign(fromAuthZone, entry->d_authZone);
727,192✔
272
  ptrAssign(fromAuthIP, entry->d_from);
727,192✔
273

274
  moveCacheItemToBack<SequencedTag>(content.d_map, entry);
727,192✔
275

276
  return ttd;
727,192✔
277
}
727,196✔
278

279
static void pushRefreshTask(const DNSName& qname, QType qtype, time_t deadline, const Netmask& netmask)
280
{
30✔
281
  if (qtype == QType::ADDR) {
30!
282
    pushAlmostExpiredTask(qname, QType::A, deadline, netmask);
×
283
    pushAlmostExpiredTask(qname, QType::AAAA, deadline, netmask);
×
284
  }
×
285
  else {
30✔
286
    pushAlmostExpiredTask(qname, qtype, deadline, netmask);
30✔
287
  }
30✔
288
}
30✔
289

290
void MemRecursorCache::updateStaleEntry(time_t now, MemRecursorCache::OrderedTagIterator_t& entry)
291
{
26✔
292
  // We need to take care that an infrequently accessed stale item cannot be extended past
293
  // s_maxServedStaleExtension * s_serveStaleExtensionPeriod
294
  // We look at how old the entry is, and increase d_servedStale accordingly, taking care not to overflow
295
  const time_t howlong = std::max(static_cast<time_t>(1), now - entry->d_ttd);
26✔
296
  const uint32_t extension = std::max(1U, std::min(entry->d_orig_ttl, s_serveStaleExtensionPeriod));
26✔
297
  entry->d_servedStale = std::min(entry->d_servedStale + 1 + howlong / extension, static_cast<time_t>(s_maxServedStaleExtensions));
26✔
298
  entry->d_ttd = now + extension;
26✔
299

300
  pushRefreshTask(entry->d_qname, entry->d_qtype, entry->d_ttd, entry->d_netmask);
26✔
301
}
26✔
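/* Worked example of the bookkeeping above (assumed values, for illustration only):
 * with d_orig_ttl = 300 and s_serveStaleExtensionPeriod = 30, extension = 30.
 * If the entry's ttd passed 95 seconds ago, howlong = 95 and d_servedStale grows by
 * 1 + 95 / 30 = 4, so an entry that was not touched for a long time consumes several
 * extensions at once and still reaches s_maxServedStaleExtensions on schedule.
 * The new d_ttd becomes now + 30, so get() will serve it for at most one more period.
 */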
302

303
// If we are serving this record stale (or *should*) and the ttd has
304
// passed, increase the ttd to the future and remember that we did. Also
305
// push a refresh task.
306
void MemRecursorCache::handleServeStaleBookkeeping(time_t now, bool serveStale, MemRecursorCache::OrderedTagIterator_t& entry)
307
{
727,221✔
308
  if ((serveStale || entry->d_servedStale > 0) && entry->d_ttd <= now && entry->d_servedStale < s_maxServedStaleExtensions) {
727,222!
309
    updateStaleEntry(now, entry);
26✔
310
  }
26✔
311
}
727,221✔
312

313
MemRecursorCache::cache_t::const_iterator MemRecursorCache::getEntryUsingECSIndex(MapCombo::LockedContent& map, time_t now, const DNSName& qname, const QType qtype, bool requireAuth, const ComboAddress& who, bool serveStale)
314
{
940✔
315
  // MUTEX SHOULD BE ACQUIRED (as indicated by the reference to the content which is protected by a lock)
316
  auto ecsIndexKey = std::tie(qname, qtype);
940✔
317
  auto ecsIndex = map.d_ecsIndex.find(ecsIndexKey);
940✔
318
  if (ecsIndex != map.d_ecsIndex.end() && !ecsIndex->isEmpty()) {
940!
319
    /* we have netmask-specific entries, let's see if we match one */
320
    while (true) {
740✔
321
      const Netmask best = ecsIndex->lookupBestMatch(who);
740✔
322
      if (best.empty()) {
740✔
323
        /* we have nothing more specific for you */
324
        break;
338✔
325
      }
338✔
326
      auto key = std::tuple(qname, qtype, boost::none, best);
402✔
327
      auto entry = map.d_map.find(key);
402✔
328
      if (entry == map.d_map.end()) {
402!
329
        /* ecsIndex is not up-to-date */
330
        ecsIndex->removeNetmask(best);
×
331
        if (ecsIndex->isEmpty()) {
×
332
          map.d_ecsIndex.erase(ecsIndex);
×
333
          break;
×
334
        }
×
335
        continue;
×
336
      }
×
337
      handleServeStaleBookkeeping(now, serveStale, entry);
402✔
338

339
      if (entry->d_ttd > now) {
402✔
340
        if (!requireAuth || entry->d_auth) {
398✔
341
          return entry;
392✔
342
        }
392✔
343
        /* we need auth data and the best match is not authoritative */
344
        return map.d_map.end();
6✔
345
      }
398✔
346
      /* this netmask-specific entry has expired */
347
      moveCacheItemToFront<SequencedTag>(map.d_map, entry);
4✔
348
      // XXX when serving stale, it should be kept, but we don't want a match with lookupBestMatch()...
349
      ecsIndex->removeNetmask(best);
4✔
350
      if (ecsIndex->isEmpty()) {
4✔
351
        map.d_ecsIndex.erase(ecsIndex);
2✔
352
        break;
2✔
353
      }
2✔
354
    }
4✔
355
  }
738✔
356

357
  /* we have nothing specific, let's see if we have a generic one */
358
  auto key = std::tuple(qname, qtype, boost::none, Netmask());
542✔
359
  auto entry = map.d_map.find(key);
542✔
360
  if (entry != map.d_map.end()) {
542✔
361
    handleServeStaleBookkeeping(now, serveStale, entry);
44✔
362
    if (entry->d_ttd > now) {
44✔
363
      if (!requireAuth || entry->d_auth) {
32!
364
        return entry;
32✔
365
      }
32✔
366
    }
32✔
367
    else {
12✔
368
      moveCacheItemToFront<SequencedTag>(map.d_map, entry);
12✔
369
    }
12✔
370
  }
44✔
371

372
  /* nothing for you, sorry */
373
  return map.d_map.end();
510✔
374
}
542✔
375

376
MemRecursorCache::Entries MemRecursorCache::getEntries(MapCombo::LockedContent& map, const DNSName& qname, const QType /* qtype */, const OptTag& rtag)
377
{
867,319✔
378
  // MUTEX SHOULD BE ACQUIRED
379
  if (!map.d_cachecachevalid || map.d_cachedqname != qname || map.d_cachedrtag != rtag) {
867,319✔
380
    map.d_cachedqname = qname;
691,586✔
381
    map.d_cachedrtag = rtag;
691,586✔
382
    const auto& idx = map.d_map.get<NameAndRTagOnlyHashedTag>();
691,586✔
383
    map.d_cachecache = idx.equal_range(std::tie(qname, rtag));
691,586✔
384
    map.d_cachecachevalid = true;
691,586✔
385
  }
691,586✔
386
  return map.d_cachecache;
867,319✔
387
}
867,319✔
388

389
bool MemRecursorCache::entryMatches(MemRecursorCache::OrderedTagIterator_t& entry, const QType qtype, bool requireAuth, const ComboAddress& who)
390
{
884,552✔
391
  // This code assumes that if a routing tag is present, it matches
392
  // MUTEX SHOULD BE ACQUIRED
393
  if (requireAuth && !entry->d_auth) {
884,552✔
394
    return false;
36,855✔
395
  }
36,855✔
396

397
  bool match = (entry->d_qtype == qtype || qtype == QType::ANY || (qtype == QType::ADDR && (entry->d_qtype == QType::A || entry->d_qtype == QType::AAAA)))
847,697!
398
    && (entry->d_netmask.empty() || entry->d_netmask.match(who));
847,697✔
399
  return match;
847,697✔
400
}
884,552✔
401

402
// Fake a cache miss if more than refreshTTLPerc of the original TTL has passed
403
time_t MemRecursorCache::fakeTTD(MemRecursorCache::OrderedTagIterator_t& entry, const DNSName& qname, QType qtype, time_t ret, time_t now, uint32_t origTTL, bool refresh)
404
{
727,148✔
405
  time_t ttl = ret - now;
727,148✔
406
  // If we are checking an entry being served stale in refresh mode,
407
  // we always consider it stale so a real refresh attempt will be
408
  // kicked by SyncRes
409
  if (refresh && entry->d_servedStale > 0) {
727,148✔
410
    return -1;
10✔
411
  }
10✔
412
  if (ttl > 0 && SyncRes::s_refresh_ttlperc > 0) {
727,140✔
413
    const uint32_t deadline = origTTL * SyncRes::s_refresh_ttlperc / 100;
56,594✔
414
    // coverity[store_truncates_time_t]
415
    const bool almostExpired = static_cast<uint32_t>(ttl) <= deadline;
56,594✔
416
    if (almostExpired && qname != g_rootdnsname) {
56,594!
417
      if (refresh) {
6✔
418
        return -1;
2✔
419
      }
2✔
420
      if (!entry->d_submitted) {
4!
421
        pushRefreshTask(qname, qtype, entry->d_ttd, entry->d_netmask);
4✔
422
        entry->d_submitted = true;
4✔
423
      }
4✔
424
    }
4✔
425
  }
56,594✔
426
  return ttl;
727,136✔
427
}
727,138✔
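/* A worked example of the almost-expired logic above (numbers are assumptions):
 * with origTTL = 3600 and SyncRes::s_refresh_ttlperc = 10, the deadline is 360 seconds.
 * Once the remaining ttl is 360 or less, a refresh task is pushed at most once
 * (guarded by d_submitted) while the caller still receives the positive ttl;
 * only a refresh-mode lookup gets -1 and therefore actually re-resolves.
 */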
428

429
// returns -1 for no hits
430
time_t MemRecursorCache::get(time_t now, const DNSName& qname, const QType qtype, Flags flags, vector<DNSRecord>* res, const ComboAddress& who, const OptTag& routingTag, SigRecs* signatures, AuthRecs* authorityRecs, bool* variable, vState* state, bool* wasAuth, DNSName* fromAuthZone, ComboAddress* fromAuthIP) // NOLINT(readability-function-cognitive-complexity)
431
{
867,333✔
432
  bool requireAuth = (flags & RequireAuth) != 0;
867,333✔
433
  bool refresh = (flags & Refresh) != 0;
867,333✔
434
  bool serveStale = (flags & ServeStale) != 0;
867,333✔
435

436
  boost::optional<vState> cachedState{boost::none};
867,333✔
437
  uint32_t origTTL = 0;
867,333✔
438

439
  if (res != nullptr) {
867,333✔
440
    res->clear();
790,736✔
441
  }
790,736✔
442

443
  // we might retrieve more than one entry, we need to set that to true
444
  // so it will be set to false if at least one entry is not auth
445
  ptrAssign(wasAuth, true);
867,333✔
446

447
  auto& shard = getMap(qname);
867,333✔
448
  auto lockedShard = shard.lock();
867,333✔
449

450
  /* If we don't have any netmask-specific entries at all, let's just skip this
451
     to be able to use the nice d_cachecache hack. */
452
  if (qtype != QType::ANY && !lockedShard->d_ecsIndex.empty() && !routingTag) {
867,346✔
453
    if (qtype == QType::ADDR) {
922✔
454
      time_t ret = -1;
18✔
455

456
      auto entryA = getEntryUsingECSIndex(*lockedShard, now, qname, QType::A, requireAuth, who, serveStale);
18✔
457
      if (entryA != lockedShard->d_map.end()) {
18✔
458
        ret = handleHit(now, *lockedShard, entryA, qname, origTTL, res, signatures, authorityRecs, variable, cachedState, wasAuth, fromAuthZone, fromAuthIP);
12✔
459
      }
12✔
460
      auto entryAAAA = getEntryUsingECSIndex(*lockedShard, now, qname, QType::AAAA, requireAuth, who, serveStale);
18✔
461
      if (entryAAAA != lockedShard->d_map.end()) {
18✔
462
        time_t ttdAAAA = handleHit(now, *lockedShard, entryAAAA, qname, origTTL, res, signatures, authorityRecs, variable, cachedState, wasAuth, fromAuthZone, fromAuthIP);
6✔
463
        if (ret > 0) {
6!
464
          ret = std::min(ret, ttdAAAA);
6✔
465
        }
6✔
466
        else {
×
467
          ret = ttdAAAA;
×
468
        }
×
469
      }
6✔
470

471
      if (cachedState && ret > 0) {
18!
472
        ptrAssign(state, *cachedState);
12✔
473
      }
12✔
474

475
      return ret > 0 ? (ret - now) : ret;
18✔
476
    }
18✔
477
    auto entry = getEntryUsingECSIndex(*lockedShard, now, qname, qtype, requireAuth, who, serveStale);
904✔
478
    if (entry != lockedShard->d_map.end()) {
904✔
479
      time_t ret = handleHit(now, *lockedShard, entry, qname, origTTL, res, signatures, authorityRecs, variable, cachedState, wasAuth, fromAuthZone, fromAuthIP);
406✔
480
      if (cachedState && ret > now) {
406!
481
        ptrAssign(state, *cachedState);
406✔
482
      }
406✔
483
      return fakeTTD(entry, qname, qtype, ret, now, origTTL, refresh);
406✔
484
    }
406✔
485
    return -1;
498✔
486
  }
904✔
487

488
  if (routingTag) {
866,411✔
489
    auto entries = getEntries(*lockedShard, qname, qtype, routingTag);
964✔
490
    unsigned int found = 0;
964✔
491
    time_t ttd{};
964✔
492

493
    if (entries.first != entries.second) {
964✔
494
      OrderedTagIterator_t firstIndexIterator;
118✔
495
      for (auto i = entries.first; i != entries.second; ++i) {
124✔
496
        firstIndexIterator = lockedShard->d_map.project<OrderedTag>(i);
118✔
497

498
        // When serving stale, we consider expired records
499
        if (!i->isEntryUsable(now, serveStale)) {
118!
500
          moveCacheItemToFront<SequencedTag>(lockedShard->d_map, firstIndexIterator);
×
501
          continue;
×
502
        }
×
503

504
        if (!entryMatches(firstIndexIterator, qtype, requireAuth, who)) {
118✔
505
          continue;
6✔
506
        }
6✔
507
        ++found;
112✔
508

509
        handleServeStaleBookkeeping(now, serveStale, firstIndexIterator);
112✔
510

511
        ttd = handleHit(now, *lockedShard, firstIndexIterator, qname, origTTL, res, signatures, authorityRecs, variable, cachedState, wasAuth, fromAuthZone, fromAuthIP);
112✔
512

513
        if (qtype == QType::ADDR && found == 2) {
112!
514
          break;
×
515
        }
×
516
        if (qtype != QType::ANY) { // normally if we have a hit, we are done
112!
517
          break;
112✔
518
        }
112✔
519
      }
112✔
520
      if (found > 0) {
118✔
521
        if (cachedState && ttd > now) {
112!
522
          ptrAssign(state, *cachedState);
112✔
523
        }
112✔
524
        return fakeTTD(firstIndexIterator, qname, qtype, ttd, now, origTTL, refresh);
112✔
525
      }
112✔
526
      return -1;
6✔
527
    }
118✔
528
  }
964✔
529
  // Try (again) without tag
530
  auto entries = getEntries(*lockedShard, qname, qtype, boost::none);
866,293✔
531

532
  if (entries.first != entries.second) {
866,293✔
533
    OrderedTagIterator_t firstIndexIterator;
776,210✔
534
    unsigned int found = 0;
776,210✔
535
    time_t ttd{};
776,210✔
536

537
    for (auto i = entries.first; i != entries.second; ++i) {
938,591✔
538
      firstIndexIterator = lockedShard->d_map.project<OrderedTag>(i);
888,996✔
539

540
      // When serving stale, we consider expired records
541
      if (!i->isEntryUsable(now, serveStale)) {
888,996✔
542
        moveCacheItemToFront<SequencedTag>(lockedShard->d_map, firstIndexIterator);
4,646✔
543
        continue;
4,646✔
544
      }
4,646✔
545

546
      if (!entryMatches(firstIndexIterator, qtype, requireAuth, who)) {
884,350✔
547
        continue;
157,689✔
548
      }
157,689✔
549
      ++found;
726,661✔
550

551
      handleServeStaleBookkeeping(now, serveStale, firstIndexIterator);
726,661✔
552

553
      ttd = handleHit(now, *lockedShard, firstIndexIterator, qname, origTTL, res, signatures, authorityRecs, variable, cachedState, wasAuth, fromAuthZone, fromAuthIP);
726,661✔
554

555
      if (qtype == QType::ADDR && found == 2) {
726,661!
556
        break;
×
557
      }
×
558
      if (qtype != QType::ANY) { // normally if we have a hit, we are done
726,667✔
559
        break;
726,615✔
560
      }
726,615✔
561
    }
726,661✔
562
    if (found > 0) {
776,210✔
563
      if (cachedState && ttd > now) {
726,631!
564
        ptrAssign(state, *cachedState);
726,631✔
565
      }
726,631✔
566
      return fakeTTD(firstIndexIterator, qname, qtype, ttd, now, origTTL, refresh);
726,630✔
567
    }
726,630✔
568
  }
776,210✔
569
  return -1;
139,663✔
570
}
866,293✔
571

572
bool MemRecursorCache::CacheEntry::shouldReplace(time_t now, bool auth, vState state, bool refresh)
573
{
17,997✔
574
  if (!auth && d_auth) { // unauth data came in, we have some auth data, but is it fresh?
17,997✔
575
    // an auth entry that is going to expire while we are resolving can hurt, as it prevents infra
576
    // records (which might be unauth) from being updated. So apply a safety margin.
577
    const time_t margin = 5;
429✔
578
    if (d_ttd - margin > now) { // we still have valid data, ignore unauth data
429✔
579
      return false;
421✔
580
    }
421✔
581
    d_auth = false; // new data won't be auth
8✔
582
  }
8✔
583

584
  if (auth) {
17,576✔
585
    /* we don't want to keep a non-auth entry while we have an auth one */
586
    if (vStateIsBogus(state) && (!vStateIsBogus(d_state) && d_state != vState::Indeterminate) && d_ttd > now) {
5,104!
587
      /* the new entry is Bogus, the existing one is not and is still valid, let's keep the existing one */
588
      return false;
8✔
589
    }
8✔
590
    // Always allow upgrading unauth data to auth
591
    if (!d_auth) {
5,096✔
592
      return true;
729✔
593
    }
729✔
594
  }
5,096✔
595

596
  if (SyncRes::s_locked_ttlperc > 0) {
16,839✔
597
    // Override locking if existing data is stale or new data is Secure or refreshing
598
    if (d_ttd <= now || state == vState::Secure || refresh) {
10!
599
      return true;
×
600
    }
×
601
    const uint32_t percentage = 100 - SyncRes::s_locked_ttlperc;
10✔
602
    const time_t ttl = d_ttd - now;
10✔
603
    const uint32_t lockline = d_orig_ttl * percentage / 100;
10✔
604
    // We know ttl is > 0 as d_ttd > now
605
    // coverity[store_truncates_time_t]
606
    const bool locked = static_cast<uint32_t>(ttl) > lockline;
10✔
607
    if (locked) {
10!
608
      return false;
10✔
609
    }
10✔
610
  }
10✔
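  /* Worked example of the lock line above (assumed numbers): with
   * SyncRes::s_locked_ttlperc = 50 and d_orig_ttl = 600, percentage = 50 and
   * lockline = 300. An entry that still has more than 300 seconds of TTL left
   * is considered locked and is not replaced; once it has aged past that point,
   * replacement is allowed again. */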
611

612
  return true;
16,829✔
613
}
16,839✔
614

615
bool MemRecursorCache::replace(CacheEntry&& entry)
616
{
200✔
617
  if (!entry.d_netmask.empty() || entry.d_rtag) {
200!
618
    // We don't handle that yet
619
    return false;
×
620
  }
×
621
  auto& shard = getMap(entry.d_qname);
200✔
622
  auto lockedShard = shard.lock();
200✔
623

624
  lockedShard->d_cachecachevalid = false;
200✔
625
  entry.d_submitted = false;
200✔
626
  if (lockedShard->d_map.emplace(std::move(entry)).second) {
200!
627
    shard.incEntriesCount();
200✔
628
    return true;
200✔
629
  }
200✔
630
  return false;
×
631
}
200✔
632

633
void MemRecursorCache::replace(time_t now, const DNSName& qname, const QType qtype, const vector<DNSRecord>& content, const SigRecsVec& signatures, const AuthRecsVec& authorityRecs, bool auth, const DNSName& authZone, boost::optional<Netmask> ednsmask, const OptTag& routingTag, vState state, boost::optional<ComboAddress> from, bool refresh, time_t ttl_time)
634
{
1,267,254✔
635
  auto& shard = getMap(qname);
1,267,254✔
636
  auto lockedShard = shard.lock();
1,267,254✔
637

638
  lockedShard->d_cachecachevalid = false;
1,267,254✔
639
  if (ednsmask) {
1,267,254✔
640
    ednsmask = ednsmask->getNormalized();
747✔
641
  }
747✔
642

643
  // We only store with a tag if we have an ednsmask and the tag is available
644
  // We only store an ednsmask if we do not have a tag and we do have a mask.
645
  auto key = std::tuple(qname, qtype.getCode(), ednsmask ? routingTag : boost::none, (ednsmask && !routingTag) ? *ednsmask : Netmask());
1,267,254✔
646
  bool isNew = false;
1,267,254✔
647
  cache_t::iterator stored = lockedShard->d_map.find(key);
1,267,254✔
648
  if (stored == lockedShard->d_map.end()) {
1,267,254✔
649
    stored = lockedShard->d_map.insert(CacheEntry(key, auth)).first;
1,249,257✔
650
    shard.incEntriesCount();
1,249,257✔
651
    isNew = true;
1,249,257✔
652
  }
1,249,257✔
653

654
  /* if we are inserting a new entry or updating an expired one (in which case the
655
     ECS index might have been removed but the entry still exists because it has not
656
     been garbage collected yet) we might need to update the ECS index.
657
     Otherwise it should already be indexed and we don't need to update it.
658
  */
659
  if (isNew || stored->d_ttd <= now) {
1,267,254✔
660
    /* don't bother building an ecsIndex if we don't have any netmask-specific entries */
661
    if (!routingTag && ednsmask && !ednsmask->empty()) {
1,249,528!
662
      auto ecsIndexKey = std::tuple(qname, qtype.getCode());
641✔
663
      auto ecsIndex = lockedShard->d_ecsIndex.find(ecsIndexKey);
641✔
664
      if (ecsIndex == lockedShard->d_ecsIndex.end()) {
641✔
665
        ecsIndex = lockedShard->d_ecsIndex.insert(ECSIndexEntry(qname, qtype.getCode())).first;
93✔
666
      }
93✔
667
      ecsIndex->addMask(*ednsmask);
641✔
668
    }
641✔
669
  }
1,249,528✔
670

671
  time_t maxTTD = std::numeric_limits<time_t>::max();
1,267,254✔
672
  CacheEntry cacheEntry = *stored; // this is a COPY
1,267,254✔
673
  cacheEntry.d_qtype = qtype.getCode();
1,267,254✔
674

675
  if (!isNew && !cacheEntry.shouldReplace(now, auth, state, refresh)) {
1,267,254✔
676
    return;
439✔
677
  }
439✔
678

679
  cacheEntry.d_state = state;
1,266,815✔
680

681
  // refuse any attempt to *raise* the TTL of auth NS records, as it would make it possible
682
  // for an auth to keep a "ghost" zone alive forever, even after the delegation is gone from
683
  // the parent
684
  // BUT make sure that we CAN refresh the root
685
  if (cacheEntry.d_auth && auth && qtype == QType::NS && !isNew && !qname.isRoot()) {
1,266,815!
686
    maxTTD = cacheEntry.d_ttd;
18✔
687
  }
18✔
688

689
  if (auth) {
1,266,815✔
690
    cacheEntry.d_auth = true;
1,215,328✔
691
  }
1,215,328✔
692

693
  if (!signatures.empty()) {
1,266,815✔
694
    cacheEntry.d_signatures = std::make_shared<const SigRecsVec>(signatures);
7,573✔
695
  }
7,573✔
696
  else {
1,259,242✔
697
    cacheEntry.d_signatures = nullptr;
1,259,242✔
698
  }
1,259,242✔
699
  if (!authorityRecs.empty()) {
1,266,815✔
700
    cacheEntry.d_authorityRecs = std::make_shared<const AuthRecsVec>(authorityRecs);
1,014✔
701
  }
1,014✔
702
  else {
1,265,801✔
703
    cacheEntry.d_authorityRecs = nullptr;
1,265,801✔
704
  }
1,265,801✔
705
  cacheEntry.d_records.clear();
1,266,815✔
706
  cacheEntry.d_authZone = authZone;
1,266,815✔
707
  if (from) {
1,266,815✔
708
    cacheEntry.d_from = *from;
35,595✔
709
  }
35,595✔
710
  else {
1,231,220✔
711
    cacheEntry.d_from = ComboAddress();
1,231,220✔
712
  }
1,231,220✔
713

714
  size_t toStore = content.size();
1,266,815✔
715
  if (toStore <= s_maxRRSetSize) {
1,266,815✔
716
    cacheEntry.d_tooBig = false;
1,266,813✔
717
  }
1,266,813✔
718
  else {
2✔
719
    toStore = 1; // record cache does not like empty RRSets
2✔
720
    cacheEntry.d_tooBig = true;
2✔
721
  }
2✔
722
  cacheEntry.d_records.reserve(toStore);
1,266,815✔
723
  for (const auto& record : content) {
1,296,802!
724
    /* Yes, we have altered the d_ttl value by adding time(nullptr) to it
725
       prior to calling this function, so the TTL actually holds a TTD. */
726
    cacheEntry.d_ttd = min(maxTTD, static_cast<time_t>(record.d_ttl)); // XXX this does weird things if TTLs differ in the set
1,296,802✔
727

728
    // coverity[store_truncates_time_t]
729
    cacheEntry.d_orig_ttl = cacheEntry.d_ttd - ttl_time;
1,296,802✔
730
    // Even though we record the time the ttd was computed, there still seems to be a case where the computed
731
    // d_orig_ttl can wrap.
732
    // So sanitize the computed cacheEntry.d_orig_ttl to be on the safe side
733
    if (cacheEntry.d_orig_ttl < SyncRes::s_minimumTTL || cacheEntry.d_orig_ttl > SyncRes::s_maxcachettl) {
1,296,803✔
734
      cacheEntry.d_orig_ttl = SyncRes::s_minimumTTL;
845,677✔
735
    }
845,677✔
736
    cacheEntry.d_records.push_back(record.getContent());
1,296,802✔
737
    if (--toStore == 0) {
1,296,802✔
738
      break;
1,266,815✔
739
    }
1,266,815✔
740
  }
1,296,802✔
741

742
  if (!isNew) {
1,266,815✔
743
    moveCacheItemToBack<SequencedTag>(lockedShard->d_map, stored);
17,558✔
744
  }
17,558✔
745
  cacheEntry.d_submitted = false;
1,266,815✔
746
  cacheEntry.d_servedStale = 0;
1,266,815✔
747
  lockedShard->d_map.replace(stored, cacheEntry);
1,266,815✔
748
}
1,266,815✔
749

750
size_t MemRecursorCache::doWipeCache(const DNSName& name, bool sub, const QType qtype)
751
{
607,152✔
752
  size_t count = 0;
607,152✔
753

754
  if (!sub) {
607,152✔
755
    auto& shard = getMap(name);
606,342✔
756
    auto lockedShard = shard.lock();
606,342✔
757
    lockedShard->d_cachecachevalid = false;
606,342✔
758
    auto& idx = lockedShard->d_map.get<OrderedTag>();
606,342✔
759
    auto range = idx.equal_range(name);
606,342✔
760
    auto iter = range.first;
606,342✔
761
    while (iter != range.second) {
1,212,362✔
762
      if (iter->d_qtype == qtype || qtype == 0xffff) {
606,020✔
763
        iter = idx.erase(iter);
606,018✔
764
        count++;
606,018✔
765
        shard.decEntriesCount();
606,018✔
766
      }
606,018✔
767
      else {
2✔
768
        ++iter;
2✔
769
      }
2✔
770
    }
606,020✔
771

772
    if (qtype == 0xffff) {
606,342✔
773
      auto& ecsIdx = lockedShard->d_ecsIndex.get<OrderedTag>();
600,000✔
774
      auto ecsIndexRange = ecsIdx.equal_range(name);
600,000✔
775
      ecsIdx.erase(ecsIndexRange.first, ecsIndexRange.second);
600,000✔
776
    }
600,000✔
777
    else {
6,342✔
778
      auto& ecsIdx = lockedShard->d_ecsIndex.get<HashedTag>();
6,342✔
779
      auto ecsIndexRange = ecsIdx.equal_range(std::tie(name, qtype));
6,342✔
780
      ecsIdx.erase(ecsIndexRange.first, ecsIndexRange.second);
6,342✔
781
    }
6,342✔
782
  }
606,342✔
783
  else {
810✔
784
    for (auto& content : d_maps) {
823,302✔
785
      auto map = content.lock();
823,302✔
786
      map->d_cachecachevalid = false;
823,302✔
787
      auto& idx = map->d_map.get<OrderedTag>();
823,302✔
788
      for (auto i = idx.lower_bound(name); i != idx.end();) {
1,419,852✔
789
        if (!i->d_qname.isPartOf(name)) {
596,913✔
790
          break;
363✔
791
        }
363✔
792
        if (i->d_qtype == qtype || qtype == 0xffff) {
596,550!
793
          count++;
596,550✔
794
          i = idx.erase(i);
596,550✔
795
          content.decEntriesCount();
596,550✔
796
        }
596,550✔
797
        else {
×
798
          ++i;
×
799
        }
×
800
      }
596,550✔
801
      auto& ecsIdx = map->d_ecsIndex.get<OrderedTag>();
823,302✔
802
      for (auto i = ecsIdx.lower_bound(name); i != ecsIdx.end();) {
823,332✔
803
        if (!i->d_qname.isPartOf(name)) {
32✔
804
          break;
2✔
805
        }
2✔
806
        if (i->d_qtype == qtype || qtype == 0xffff) {
30!
807
          i = ecsIdx.erase(i);
30✔
808
        }
30✔
809
        else {
×
810
          ++i;
×
811
        }
×
812
      }
30✔
813
    }
823,302✔
814
  }
810✔
815
  return count;
607,152✔
816
}
607,152✔
817

818
// Name should be doLimitTime or so
819
bool MemRecursorCache::doAgeCache(time_t now, const DNSName& name, const QType qtype, uint32_t newTTL)
820
{
72✔
821
  auto& shard = getMap(name);
72✔
822
  auto lockedShard = shard.lock();
72✔
823
  cache_t::iterator iter = lockedShard->d_map.find(std::tie(name, qtype));
72✔
824
  if (iter == lockedShard->d_map.end()) {
72!
825
    return false;
×
826
  }
×
827

828
  CacheEntry cacheEntry = *iter;
72✔
829
  if (cacheEntry.d_ttd < now) {
72!
830
    return false; // would be dead anyhow
×
831
  }
×
832

833
  // coverity[store_truncates_time_t]
834
  auto maxTTL = static_cast<uint32_t>(cacheEntry.d_ttd - now);
72✔
835
  if (maxTTL > newTTL) {
72✔
836
    lockedShard->d_cachecachevalid = false;
24✔
837

838
    time_t newTTD = now + newTTL;
24✔
839

840
    if (cacheEntry.d_ttd > newTTD) {
24!
841
      cacheEntry.d_ttd = newTTD;
24✔
842
      lockedShard->d_map.replace(iter, cacheEntry);
24✔
843
    }
24✔
844
    return true;
24✔
845
  }
24✔
846
  return false;
48✔
847
}
72✔
848

849
bool MemRecursorCache::updateValidationStatus(time_t now, const DNSName& qname, const QType qtype, const ComboAddress& who, const OptTag& routingTag, bool requireAuth, vState newState, boost::optional<time_t> capTTD)
850
{
54✔
851
  if (qtype == QType::ANY) {
54!
852
    throw std::runtime_error("Trying to update the DNSSEC validation status of all (via ANY) records for " + qname.toLogString());
×
853
  }
×
854
  if (qtype == QType::ADDR) {
54!
855
    throw std::runtime_error("Trying to update the DNSSEC validation status of several (via ADDR) records for " + qname.toLogString());
×
856
  }
×
857

858
  auto& content = getMap(qname);
54✔
859
  auto map = content.lock();
54✔
860

861
  bool updated = false;
54✔
862
  if (!map->d_ecsIndex.empty() && !routingTag) {
54!
863
    auto entry = getEntryUsingECSIndex(*map, now, qname, qtype, requireAuth, who, false); // XXX serveStale?
×
864
    if (entry == map->d_map.end()) {
×
865
      return false;
×
866
    }
×
867

868
    entry->d_state = newState;
×
869
    if (capTTD) {
×
870
      entry->d_ttd = std::min(entry->d_ttd, *capTTD);
×
871
    }
×
872
    return true;
×
873
  }
×
874

875
  auto entries = getEntries(*map, qname, qtype, routingTag);
54✔
876

877
  for (auto i = entries.first; i != entries.second; ++i) {
76!
878
    auto firstIndexIterator = map->d_map.project<OrderedTag>(i);
76✔
879

880
    if (!entryMatches(firstIndexIterator, qtype, requireAuth, who)) {
76✔
881
      continue;
22✔
882
    }
22✔
883

884
    i->d_state = newState;
54✔
885
    if (capTTD) {
54✔
886
      i->d_ttd = std::min(i->d_ttd, *capTTD);
20✔
887
    }
20✔
888
    updated = true;
54✔
889

890
    break;
54✔
891
  }
76✔
892

893
  return updated;
54✔
894
}
54✔
895

896
uint64_t MemRecursorCache::doDump(int fileDesc, size_t maxCacheEntries)
897
{
5✔
898
  int newfd = dup(fileDesc);
5✔
899
  if (newfd == -1) {
5!
900
    return 0;
×
901
  }
×
902
  auto filePtr = pdns::UniqueFilePtr(fdopen(newfd, "w"));
5✔
903
  if (!filePtr) { // dup probably failed
5!
904
    close(newfd);
×
905
    return 0;
×
906
  }
×
907

908
  fprintf(filePtr.get(), "; main record cache dump follows\n;\n");
5✔
909
  uint64_t count = 0;
5✔
910
  size_t shardNumber = 0;
5✔
911
  size_t min = std::numeric_limits<size_t>::max();
5✔
912
  size_t max = 0;
5✔
913
  for (auto& shard : d_maps) {
5,120✔
914
    auto lockedShard = shard.lock();
5,120✔
915
    const auto shardSize = lockedShard->d_map.size();
5,120✔
916
    size_t bytes = 0;
5,120✔
917
    for (const auto& entry : lockedShard->d_map) {
5,120✔
918
      bytes += entry.sizeEstimate();
72✔
919
    }
72✔
920
    fprintf(filePtr.get(), "; record cache shard %zu; size %zu bytes %zu\n", shardNumber, shardSize, bytes);
5,120✔
921
    min = std::min(min, shardSize);
5,120✔
922
    max = std::max(max, shardSize);
5,120✔
923
    shardNumber++;
5,120✔
924
    const auto& sidx = lockedShard->d_map.get<SequencedTag>();
5,120✔
925
    time_t now = time(nullptr);
5,120✔
926
    for (const auto& recordSet : sidx) {
5,120✔
927
      for (const auto& record : recordSet.d_records) {
82✔
928
        count++;
82✔
929
        try {
82✔
930
          fprintf(filePtr.get(), "%s %" PRIu32 " %" PRId64 " IN %s %s ; (%s) auth=%i zone=%s from=%s nm=%s rtag=%s ss=%hd%s\n", recordSet.d_qname.toString().c_str(), recordSet.d_orig_ttl, static_cast<int64_t>(recordSet.d_ttd - now), recordSet.d_qtype.toString().c_str(), record->getZoneRepresentation().c_str(), vStateToString(recordSet.d_state).c_str(), static_cast<int>(recordSet.d_auth), recordSet.d_authZone.toLogString().c_str(), recordSet.d_from.toString().c_str(), recordSet.d_netmask.empty() ? "" : recordSet.d_netmask.toString().c_str(), !recordSet.d_rtag ? "" : recordSet.d_rtag.get().c_str(), recordSet.d_servedStale, recordSet.d_tooBig ? " (too big!)" : "");
82!
931
        }
82✔
932
        catch (...) {
82✔
933
          fprintf(filePtr.get(), "; error printing '%s'\n", recordSet.d_qname.empty() ? "EMPTY" : recordSet.d_qname.toString().c_str());
×
934
        }
×
935
      }
82✔
936
      if (recordSet.d_signatures) {
72✔
937
        for (const auto& sig : *recordSet.d_signatures) {
36✔
938
          count++;
36✔
939
          try {
36✔
940
            fprintf(filePtr.get(), "%s %" PRIu32 " %" PRId64 " IN RRSIG %s ; %s\n", recordSet.d_qname.toString().c_str(), recordSet.d_orig_ttl, static_cast<int64_t>(recordSet.d_ttd - now), sig->getZoneRepresentation().c_str(), recordSet.d_netmask.empty() ? "" : recordSet.d_netmask.toString().c_str());
36!
941
          }
36✔
942
          catch (...) {
36✔
943
            fprintf(filePtr.get(), "; error printing '%s'\n", recordSet.d_qname.empty() ? "EMPTY" : recordSet.d_qname.toString().c_str());
×
944
          }
×
945
        }
36✔
946
      }
36✔
947
    }
72✔
948
  }
5,120✔
949
  fprintf(filePtr.get(), "; main record cache size: %zu/%zu shards: %zu min/max shard size: %zu/%zu\n", size(), maxCacheEntries, d_maps.size(), min, max);
5✔
950
  return count;
5✔
951
}
5✔
952

953
void MemRecursorCache::doPrune(time_t now, size_t keep)
954
{
165✔
955
  size_t cacheSize = size();
165✔
956
  pruneMutexCollectionsVector<SequencedTag>(now, d_maps, keep, cacheSize);
165✔
957
}
165✔
958

959
enum class PBCacheDump : protozero::pbf_tag_type
960
{
961
  required_string_version = 1,
962
  required_string_identity = 2,
963
  required_uint64_protocolVersion = 3,
964
  required_int64_time = 4,
965
  required_string_type = 5,
966
  repeated_message_cacheEntry = 6,
967
};
968

969
enum class PBCacheEntry : protozero::pbf_tag_type
970
{
971
  repeated_bytes_record = 1,
972
  repeated_bytes_sig = 2,
973
  repeated_message_authRecord = 3,
974
  required_bytes_name = 4,
975
  required_bytes_authZone = 5,
976
  required_message_from = 6,
977
  optional_bytes_netmask = 7,
978
  optional_bytes_rtag = 8,
979
  required_uint32_state = 9,
980
  required_int64_ttd = 10,
981
  required_uint32_orig_ttl = 11,
982
  required_uint32_servedStale = 12,
983
  required_uint32_qtype = 13,
984
  required_bool_auth = 14,
985
  required_bool_submitted = 15,
986
  required_bool_tooBig = 16,
987
};
988

989
enum class PBAuthRecord : protozero::pbf_tag_type
990
{
991
  required_bytes_name = 1,
992
  required_bytes_rdata = 2,
993
  required_uint32_type = 3,
994
  required_uint32_class = 4,
995
  required_uint32_ttl = 5,
996
  required_uint32_place = 6,
997
  required_uint32_clen = 7,
998
};
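/* Rough shape of a cache dump, as implied by the tags above (informal sketch,
 * field names abbreviated):
 *
 *   PBCacheDump {
 *     version, identity, protocolVersion, time, type,
 *     repeated cacheEntry {
 *       name, qtype, repeated record, repeated sig,
 *       repeated authRecord { name, rdata, type, class, ttl, place, clen },
 *       authZone, from, netmask?, rtag?, state, ttd, orig_ttl, servedStale,
 *       auth, submitted, tooBig
 *     }
 *   }
 */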
999

1000
template <typename T, typename U>
1001
void MemRecursorCache::getRecordSet(T& message, U recordSet)
1002
{
200✔
1003
  // Two fields below must come before the other fields
1004
  message.add_bytes(PBCacheEntry::required_bytes_name, recordSet->d_qname.toString());
200✔
1005
  message.add_uint32(PBCacheEntry::required_uint32_qtype, recordSet->d_qtype);
200✔
1006
  for (const auto& record : recordSet->d_records) {
200✔
1007
    message.add_bytes(PBCacheEntry::repeated_bytes_record, record->serialize(recordSet->d_qname, true));
200✔
1008
  }
200✔
1009
  if (recordSet->d_signatures) {
200!
1010
    for (const auto& record : *recordSet->d_signatures) {
200✔
1011
      message.add_bytes(PBCacheEntry::repeated_bytes_sig, record->serialize(recordSet->d_qname, true));
200✔
1012
    }
200✔
1013
  }
200✔
1014
  if (recordSet->d_authorityRecs) {
200!
1015
    for (const auto& authRec : *recordSet->d_authorityRecs) {
200✔
1016
      protozero::pbf_builder<PBAuthRecord> auth(message, PBCacheEntry::repeated_message_authRecord);
200✔
1017
      auth.add_bytes(PBAuthRecord::required_bytes_name, authRec.d_name.toString());
200✔
1018
      auth.add_bytes(PBAuthRecord::required_bytes_rdata, authRec.getContent()->serialize(authRec.d_name, true));
200✔
1019
      auth.add_uint32(PBAuthRecord::required_uint32_type, authRec.d_type);
200✔
1020
      auth.add_uint32(PBAuthRecord::required_uint32_class, authRec.d_class);
200✔
1021
      auth.add_uint32(PBAuthRecord::required_uint32_ttl, authRec.d_ttl);
200✔
1022
      auth.add_uint32(PBAuthRecord::required_uint32_place, authRec.d_place);
200✔
1023
      auth.add_uint32(PBAuthRecord::required_uint32_clen, authRec.d_clen);
200✔
1024
    }
200✔
1025
  }
200✔
1026
  message.add_bytes(PBCacheEntry::required_bytes_authZone, recordSet->d_authZone.toString());
200✔
1027
  encodeComboAddress(message, PBCacheEntry::required_message_from, recordSet->d_from);
200✔
1028
  encodeNetmask(message, PBCacheEntry::optional_bytes_netmask, recordSet->d_netmask);
200✔
1029
  if (recordSet->d_rtag) {
200!
1030
    message.add_bytes(PBCacheEntry::optional_bytes_rtag, *recordSet->d_rtag);
×
1031
  }
×
1032
  message.add_uint32(PBCacheEntry::required_uint32_state, static_cast<uint32_t>(recordSet->d_state));
200✔
1033
  message.add_int64(PBCacheEntry::required_int64_ttd, recordSet->d_ttd);
200✔
1034
  message.add_uint32(PBCacheEntry::required_uint32_orig_ttl, recordSet->d_orig_ttl);
200✔
1035
  message.add_uint32(PBCacheEntry::required_uint32_servedStale, recordSet->d_servedStale);
200✔
1036
  message.add_bool(PBCacheEntry::required_bool_auth, recordSet->d_auth);
200✔
1037
  message.add_bool(PBCacheEntry::required_bool_submitted, recordSet->d_submitted);
200✔
1038
  message.add_bool(PBCacheEntry::required_bool_tooBig, recordSet->d_tooBig);
200✔
1039
}
200✔
1040

1041
size_t MemRecursorCache::getRecordSets(size_t perShard, size_t maxSize, std::string& ret)
1042
{
2✔
1043
  auto log = g_slog->withName("recordcache")->withValues("perShard", Logging::Loggable(perShard), "maxSize", Logging::Loggable(maxSize));
2✔
1044
  log->info(Logr::Info, "Producing cache dump");
2✔
1045

1046
  // A size estimate is hard: size() returns the number of record *sets*. Each record set can have
1047
  // multiple records, plus other associated records like signatures. 150 seems to work ok.
1048
  size_t estimate = maxSize == 0 ? size() * 150 : maxSize + 4096; // We may overshoot (will be rolled back)
2!
1049

1050
  if (perShard == 0) {
2!
1051
    perShard = std::numeric_limits<size_t>::max();
2✔
1052
  }
2✔
1053
  if (maxSize == 0) {
2!
1054
    maxSize = std::numeric_limits<size_t>::max();
2✔
1055
  }
2✔
1056
  protozero::pbf_builder<PBCacheDump> full(ret);
2✔
1057
  full.add_string(PBCacheDump::required_string_version, getPDNSVersion());
2✔
1058
  full.add_string(PBCacheDump::required_string_identity, SyncRes::s_serverID);
2✔
1059
  full.add_uint64(PBCacheDump::required_uint64_protocolVersion, 1);
2✔
1060
  full.add_int64(PBCacheDump::required_int64_time, time(nullptr));
2✔
1061
  full.add_string(PBCacheDump::required_string_type, "PBCacheDump");
2✔
1062

1063
  size_t count = 0;
2✔
1064
  ret.reserve(estimate);
2✔
1065

1066
  for (auto& shard : d_maps) {
2,048✔
1067
    auto lockedShard = shard.lock();
2,048✔
1068
    const auto& sidx = lockedShard->d_map.get<SequencedTag>();
2,048✔
1069
    size_t thisShardCount = 0;
2,048✔
1070
    for (auto recordSet = sidx.rbegin(); recordSet != sidx.rend(); ++recordSet) {
2,248✔
1071
      protozero::pbf_builder<PBCacheEntry> message(full, PBCacheDump::repeated_message_cacheEntry);
200✔
1072
      getRecordSet(message, recordSet);
200✔
1073
      if (ret.size() > maxSize) {
200!
1074
        message.rollback();
×
1075
        log->info(Logr::Info, "Produced cache dump (max size reached)", "size", Logging::Loggable(ret.size()), "count", Logging::Loggable(count));
×
1076
        return count;
×
1077
      }
×
1078
      ++count;
200✔
1079
      ++thisShardCount;
200✔
1080
      if (thisShardCount >= perShard) {
200!
1081
        break;
×
1082
      }
×
1083
    }
200✔
1084
  }
2,048✔
1085
  log->info(Logr::Info, "Produced cache dump", "size", Logging::Loggable(ret.size()), "count", Logging::Loggable(count));
2✔
1086
  return count;
2✔
1087
}
2✔
1088

1089
static void putAuthRecord(protozero::pbf_message<PBCacheEntry>& message, const DNSName& qname, std::vector<DNSRecord>& authRecs)
1090
{
200✔
1091
  protozero::pbf_message<PBAuthRecord> auth = message.get_message();
200✔
1092
  DNSRecord authRecord;
200✔
1093
  while (auth.next()) {
1,600✔
1094
    switch (auth.tag()) {
1,400✔
1095
    case PBAuthRecord::required_bytes_name:
200✔
1096
      authRecord.d_name = DNSName(auth.get_bytes());
200✔
1097
      break;
200✔
1098
    case PBAuthRecord::required_bytes_rdata: {
200✔
1099
      auto ptr = DNSRecordContent::deserialize(qname, authRecord.d_type, auth.get_bytes());
200✔
1100
      authRecord.setContent(ptr);
200✔
1101
      break;
200✔
1102
    }
×
1103
    case PBAuthRecord::required_uint32_class:
200✔
1104
      authRecord.d_class = auth.get_uint32();
200✔
1105
      break;
200✔
1106
    case PBAuthRecord::required_uint32_type:
200✔
1107
      authRecord.d_type = auth.get_uint32();
200✔
1108
      break;
200✔
1109
    case PBAuthRecord::required_uint32_ttl:
200✔
1110
      authRecord.d_ttl = auth.get_uint32();
200✔
1111
      break;
200✔
1112
    case PBAuthRecord::required_uint32_place:
200✔
1113
      authRecord.d_place = static_cast<DNSResourceRecord::Place>(auth.get_uint32());
200✔
1114
      break;
200✔
1115
    case PBAuthRecord::required_uint32_clen:
200✔
1116
      authRecord.d_clen = auth.get_uint32();
200✔
1117
      break;
200✔
1118
    default:
×
1119
      break;
×
1120
    }
1,400✔
1121
  }
1,400✔
1122
  authRecs.emplace_back(authRecord);
200✔
1123
}
200✔
1124

1125
template <typename T>
1126
bool MemRecursorCache::putRecordSet(T& message)
1127
{
200✔
1128
  AuthRecsVec authRecs;
200✔
1129
  SigRecsVec sigRecs;
200✔
1130
  CacheEntry cacheEntry{{g_rootdnsname, QType::A, boost::none, Netmask()}, false};
200✔
1131
  while (message.next()) {
3,000✔
1132
    switch (message.tag()) {
2,800✔
1133
    case PBCacheEntry::repeated_bytes_record: {
200✔
1134
      auto ptr = DNSRecordContent::deserialize(cacheEntry.d_qname, cacheEntry.d_qtype, message.get_bytes());
200✔
1135
      cacheEntry.d_records.emplace_back(ptr);
200✔
1136
      break;
200✔
1137
    }
×
1138
    case PBCacheEntry::repeated_bytes_sig: {
200✔
1139
      auto ptr = DNSRecordContent::deserialize(cacheEntry.d_qname, QType::RRSIG, message.get_bytes());
200✔
1140
      sigRecs.emplace_back(std::dynamic_pointer_cast<RRSIGRecordContent>(ptr));
200✔
1141
      break;
200✔
1142
    }
×
1143
    case PBCacheEntry::repeated_message_authRecord:
200✔
1144
      putAuthRecord(message, cacheEntry.d_qname, authRecs);
200✔
1145
      break;
200✔
1146
    case PBCacheEntry::required_bytes_name:
200✔
1147
      cacheEntry.d_qname = DNSName(message.get_bytes());
200✔
1148
      break;
200✔
1149
    case PBCacheEntry::required_bytes_authZone:
200✔
1150
      cacheEntry.d_authZone = DNSName(message.get_bytes());
200✔
1151
      break;
200✔
1152
    case PBCacheEntry::required_message_from:
200✔
1153
      decodeComboAddress(message, cacheEntry.d_from);
200✔
1154
      break;
200✔
1155
    case PBCacheEntry::optional_bytes_netmask:
×
1156
      decodeNetmask(message, cacheEntry.d_netmask);
×
1157
      break;
×
1158
    case PBCacheEntry::optional_bytes_rtag:
×
1159
      cacheEntry.d_rtag = message.get_bytes();
×
1160
      break;
×
1161
    case PBCacheEntry::required_uint32_state:
200✔
1162
      cacheEntry.d_state = static_cast<vState>(message.get_uint32());
200✔
1163
      break;
200✔
1164
    case PBCacheEntry::required_int64_ttd:
200✔
1165
      cacheEntry.d_ttd = message.get_int64();
200✔
1166
      break;
200✔
1167
    case PBCacheEntry::required_uint32_orig_ttl:
200✔
1168
      cacheEntry.d_orig_ttl = message.get_uint32();
200✔
1169
      break;
200✔
1170
    case PBCacheEntry::required_uint32_servedStale:
200✔
1171
      cacheEntry.d_servedStale = message.get_uint32();
200✔
1172
      break;
200✔
1173
    case PBCacheEntry::required_uint32_qtype:
200✔
1174
      cacheEntry.d_qtype = message.get_uint32();
200✔
1175
      break;
200✔
1176
    case PBCacheEntry::required_bool_auth:
200✔
1177
      cacheEntry.d_auth = message.get_bool();
200✔
1178
      break;
200✔
1179
    case PBCacheEntry::required_bool_submitted:
200✔
1180
      cacheEntry.d_submitted = message.get_bool();
200✔
1181
      cacheEntry.d_submitted = false; // actually not
200✔
1182
      break;
200✔
1183
    case PBCacheEntry::required_bool_tooBig:
200✔
1184
      cacheEntry.d_tooBig = message.get_bool();
200✔
1185
      break;
200✔
1186
    default:
×
1187
      break;
×
1188
    }
2,800✔
1189
  }
2,800✔
1190
  if (!authRecs.empty()) {
200!
1191
    cacheEntry.d_authorityRecs = std::make_shared<const AuthRecsVec>(std::move(authRecs));
200✔
1192
  }
200✔
1193
  if (!sigRecs.empty()) {
200!
1194
    cacheEntry.d_signatures = std::make_shared<const SigRecsVec>(std::move(sigRecs));
200✔
1195
  }
200✔
1196
  return replace(std::move(cacheEntry));
200✔
1197
}
200✔
1198

1199
size_t MemRecursorCache::putRecordSets(const std::string& pbuf)
1200
{
2✔
1201
  auto log = g_slog->withName("recordcache")->withValues("size", Logging::Loggable(pbuf.size()));
2✔
1202
  log->info(Logr::Debug, "Processing cache dump");
2✔
1203

1204
  protozero::pbf_message<PBCacheDump> full(pbuf);
2✔
1205
  size_t count = 0;
2✔
1206
  size_t inserted = 0;
2✔
1207
  try {
2✔
1208
    bool protocolVersionSeen = false;
2✔
1209
    bool typeSeen = false;
2✔
1210
    while (full.next()) {
212✔
1211
      switch (full.tag()) {
210!
1212
      case PBCacheDump::required_string_version: {
2✔
1213
        auto version = full.get_string();
2✔
1214
        log = log->withValues("version", Logging::Loggable(version));
2✔
1215
        break;
2✔
1216
      }
×
1217
      case PBCacheDump::required_string_identity: {
2✔
1218
        auto identity = full.get_string();
2✔
1219
        log = log->withValues("identity", Logging::Loggable(identity));
2✔
1220
        break;
2✔
1221
      }
×
1222
      case PBCacheDump::required_uint64_protocolVersion: {
2✔
1223
        auto protocolVersion = full.get_uint64();
2✔
1224
        log = log->withValues("protocolVersion", Logging::Loggable(protocolVersion));
2✔
1225
        if (protocolVersion != 1) {
2!
1226
          throw std::runtime_error("Protocol version mismatch");
×
1227
        }
×
1228
        protocolVersionSeen = true;
2✔
1229
        break;
2✔
1230
      }
2✔
1231
      case PBCacheDump::required_int64_time: {
2✔
1232
        auto time = full.get_int64();
2✔
1233
        log = log->withValues("time", Logging::Loggable(time));
2✔
1234
        break;
2✔
1235
      }
2✔
1236
      case PBCacheDump::required_string_type: {
2✔
1237
        auto type = full.get_string();
2✔
1238
        if (type != "PBCacheDump") {
2!
1239
          throw std::runtime_error("Data type mismatch");
×
1240
        }
×
1241
        typeSeen = true;
2✔
1242
        break;
2✔
1243
      }
2✔
1244
      case PBCacheDump::repeated_message_cacheEntry: {
200✔
1245
        if (!protocolVersionSeen || !typeSeen) {
200!
1246
          throw std::runtime_error("Required field missing");
×
1247
        }
×
1248
        protozero::pbf_message<PBCacheEntry> message = full.get_message();
200✔
1249
        if (putRecordSet(message)) {
200!
1250
          ++inserted;
200✔
1251
        }
200✔
1252
        ++count;
200✔
1253
        break;
200✔
1254
      }
200✔
1255
      }
210✔
1256
    }
210✔
1257
    log->info(Logr::Info, "Processed cache dump", "processed", Logging::Loggable(count), "inserted", Logging::Loggable(inserted));
2✔
1258
    return inserted;
2✔
1259
  }
2✔
1260
  catch (const std::runtime_error& e) {
2✔
1261
    log->error(Logr::Error, e.what(), "Runtime exception processing cache dump");
×
1262
  }
×
1263
  catch (const std::exception& e) {
2✔
1264
    log->error(Logr::Error, e.what(), "Exception processing cache dump");
×
1265
  }
×
1266
  catch (...) {
2✔
1267
    log->error(Logr::Error, "Other exception processing cache dump");
×
1268
  }
×
1269
  return 0;
×
1270
}
2✔
1271

1272
namespace boost
1273
{
1274
size_t hash_value(const MemRecursorCache::OptTag& rtag)
1275
{
2,571,497✔
1276
  return rtag ? hash_value(rtag.get()) : 0xcafebaaf;
2,571,497✔
1277
}
2,571,497✔
1278
}