• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

PowerDNS / pdns / 12595591960

03 Jan 2025 09:27AM UTC coverage: 62.774% (+2.5%) from 60.245%
12595591960

Pull #15008

github

web-flow
Merge c2a2749d3 into 788f396a7
Pull Request #15008: Do not follow CNAME records for ANY or CNAME queries

30393 of 78644 branches covered (38.65%)

Branch coverage included in aggregate %.

105822 of 138350 relevant lines covered (76.49%)

4613078.44 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

84.27
/pdns/recursordist/recursor_cache.cc
1
/*
2
 * This file is part of PowerDNS or dnsdist.
3
 * Copyright -- PowerDNS.COM B.V. and its contributors
4
 *
5
 * This program is free software; you can redistribute it and/or modify
6
 * it under the terms of version 2 of the GNU General Public License as
7
 * published by the Free Software Foundation.
8
 *
9
 * In addition, for the avoidance of any doubt, permission is granted to
10
 * link this program with OpenSSL and to (re)distribute the binaries
11
 * produced as the result of such linking.
12
 *
13
 * This program is distributed in the hope that it will be useful,
14
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16
 * GNU General Public License for more details.
17
 *
18
 * You should have received a copy of the GNU General Public License
19
 * along with this program; if not, write to the Free Software
20
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
21
 */
22

23
#include "config.h"
24

25
#include <cinttypes>
26
#include <protozero/pbf_builder.hpp>
27
#include <protozero/pbf_message.hpp>
28

29
#include "recursor_cache.hh"
30
#include "misc.hh"
31
#include "dnsrecords.hh"
32
#include "syncres.hh"
33
#include "namespaces.hh"
34
#include "cachecleaner.hh"
35
#include "rec-taskqueue.hh"
36
#include "version.hh"
37

38
/*
39
 * SERVE-STALE: the general approach
40
 *
41
 * The general switch to enable serve-stale is s_maxServedStaleExtensions. If this value is zero, no
42
 * serve-stale is done. If it is positive, it determines how many times the serve-stale status of a
43
 * record can be extended.
44
 *
45
 * Each record in the cache has a field d_servedStale. If this value is zero, no special handling is
46
 * done. If it is positive, the record is being served stale. The value determines how many times
47
 * the serve-stale status was extended. Each time an extension happens, the value is incremented and
48
 * a task to see if the record resolves will be pushed. When the served-stale status is extended,
49
 * the TTD of a record is also changed so the record will be considered not-expired by the get()
50
 * function. The TTD will be s_serveStaleExtensionPeriod in the future, unless the original TTL was
51
 * smaller than that. If d_servedStale reaches s_maxServedStaleExtensions the serve-stale status
52
 * will no longer be extended and the record will be considered really expired.
53
 *
54
 * With s_serveStaleExtensionPeriod of 30 seconds, setting s_maxServedStaleExtensions to 1440 will
55
 * cause a record to be served stale a maximum of 30s * 1440 = 12 hours. If the original TTL is
56
 * smaller than 30, this period will be shorter. If there was a long time between serve-stale
57
 * extensions, the value of d_servedStale will be incremented by more than one to account for the
58
 * longer period.
59
 *
60
 * If serve-stale is enabled, the resolving process first will try to resolve a record in the
61
 * ordinary way, with the difference that a timeout will not lead to an ImmediateServFailException
62
 * being passed to the caller, but the resolving will be tried again with a flag to allow marking
63
 * records as served-stale. If the second time around a timeout happens, an
64
 * ImmediateServFailException *will* be passed to the caller.
65
 *
66
 * When serving stale, records are only wiped from the cache if they are older than
67
 * s_maxServedStaleExtensions * s_serveStaleExtensionPeriod. See isStale(). This is to have a good
68
 * chance of records being available for marking stale if a name server has an issue.
69
 *
70
 * The tasks to see if nameservers are reachable again do a resolve in refresh mode, considering
71
 * served-stale records as expired. When a record resolves again, the d_servedStale field will be
72
 * reset.
73
 */
74

75
// Static configuration knobs shared by all cache instances.
uint16_t MemRecursorCache::s_maxServedStaleExtensions; // 0 disables serve-stale entirely (see design comment above)
uint16_t MemRecursorCache::s_maxRRSetSize = 256; // RRSets larger than this are stored truncated and flagged d_tooBig
bool MemRecursorCache::s_limitQTypeAny = true; // also enforce s_maxRRSetSize on results accumulated for QType::ANY

79
void MemRecursorCache::resetStaticsForTests()
80
{
624✔
81
  s_maxServedStaleExtensions = 0;
624✔
82
  SyncRes::s_refresh_ttlperc = 0;
624✔
83
  SyncRes::s_locked_ttlperc = 0;
624✔
84
  SyncRes::s_minimumTTL = 0;
624✔
85
  s_maxRRSetSize = 256;
624✔
86
  s_limitQTypeAny = true;
624✔
87
}
624✔
88

89
// Construct a cache with the requested number of shards; a request for zero
// shards is silently bumped to one so that getMap() always has a target.
MemRecursorCache::MemRecursorCache(size_t mapsCount) :
  d_maps(mapsCount == 0 ? 1 : mapsCount)
{
}
93

94
size_t MemRecursorCache::size() const
95
{
663✔
96
  size_t count = 0;
663✔
97
  for (const auto& shard : d_maps) {
508,106✔
98
    count += shard.getEntriesCount();
508,106✔
99
  }
508,106✔
100
  return count;
663✔
101
}
663✔
102

103
// Aggregate lock statistics over every shard.
// Returns {contended acquisitions, total acquisitions}.
pair<uint64_t, uint64_t> MemRecursorCache::stats()
{
  uint64_t contendedTotal = 0;
  uint64_t acquiredTotal = 0;
  for (auto& mapCombo : d_maps) {
    auto locked = mapCombo.lock();
    contendedTotal += locked->d_contended_count;
    acquiredTotal += locked->d_acquired_count;
  }
  return {contendedTotal, acquiredTotal};
}
114

115
size_t MemRecursorCache::ecsIndexSize()
116
{
54✔
117
  // XXX!
118
  size_t count = 0;
54✔
119
  for (auto& shard : d_maps) {
18,468✔
120
    auto lockedShard = shard.lock();
18,468✔
121
    count += lockedShard->d_ecsIndex.size();
18,468✔
122
  }
18,468✔
123
  return count;
54✔
124
}
54✔
125

126
// this function is too slow to poll!
127
size_t MemRecursorCache::bytes()
128
{
14✔
129
  size_t ret = 0;
14✔
130
  for (auto& shard : d_maps) {
14,336✔
131
    auto lockedShard = shard.lock();
14,336✔
132
    for (const auto& entry : lockedShard->d_map) {
14,336✔
133
      ret += sizeof(struct CacheEntry);
6✔
134
      ret += entry.d_qname.toString().length();
6✔
135
      for (const auto& record : entry.d_records) {
6✔
136
        ret += sizeof(record); // XXX WRONG we don't know the stored size!
6✔
137
      }
6✔
138
    }
6✔
139
  }
14,336✔
140
  return ret;
14✔
141
}
14✔
142

143
// Merge the validation state of one cached RRSet into the state accumulated
// so far for a (possibly multi-RRSet) cache lookup.
static void updateDNSSECValidationStateFromCache(boost::optional<vState>& state, const vState stateUpdate)
{
  // First RRSet seen: adopt its state unchanged.
  if (state == boost::none) {
    state = stateUpdate;
    return;
  }

  if (stateUpdate == vState::TA) {
    state = vState::Secure;
    return;
  }
  if (stateUpdate == vState::NTA) {
    state = vState::Insecure;
    return;
  }
  if (vStateIsBogus(stateUpdate) || stateUpdate == vState::Indeterminate) {
    // Bogus/Indeterminate always overrides whatever we had.
    state = stateUpdate;
    return;
  }
  if (stateUpdate == vState::Insecure || stateUpdate == vState::Secure) {
    // Only adopt Secure/Insecure if the accumulated state is not already
    // Bogus or Indeterminate.
    if (!vStateIsBogus(*state) && *state != vState::Indeterminate) {
      state = stateUpdate;
    }
  }
}
166

167
// Assign `value` through `ptr` when the caller supplied a destination;
// a nullptr destination means the caller is not interested in that value.
template <typename T>
static void ptrAssign(T* ptr, const T& value)
{
  if (ptr == nullptr) {
    return;
  }
  *ptr = value;
}
174

175
// Copy the contents of a cache entry into the caller-provided output
// parameters and mark the entry as recently used for LRU purposes.
// Returns the entry's TTD (time-to-die); a value <= now means the entry was
// expired and nothing was copied — callers must check.
// Throws ImmediateServFailException when the RRSet exceeds the size limits.
time_t MemRecursorCache::handleHit(time_t now, MapCombo::LockedContent& content, MemRecursorCache::OrderedTagIterator_t& entry, const DNSName& qname, uint32_t& origTTL, vector<DNSRecord>* res, vector<std::shared_ptr<const RRSIGRecordContent>>* signatures, std::vector<std::shared_ptr<DNSRecord>>* authorityRecs, bool* variable, boost::optional<vState>& state, bool* wasAuth, DNSName* fromAuthZone, ComboAddress* fromAuthIP)
{
  // MUTEX SHOULD BE ACQUIRED (as indicated by the reference to the content which is protected by a lock)
  if (entry->d_tooBig) {
    // The RRSet was flagged as oversized at insertion time (see replace()).
    throw ImmediateServFailException("too many records in RRSet");
  }
  time_t ttd = entry->d_ttd;
  if (ttd <= now) {
    // Expired, don't bother returning contents. Callers *MUST* check return value of get(), and only look at the entry
    // if it returned > 0
    return ttd;
  }
  origTTL = entry->d_orig_ttl;

  // ECS- or routing-tag-specific answers vary per client, flag them as such.
  if (!entry->d_netmask.empty() || entry->d_rtag) {
    ptrAssign(variable, true);
  }

  if (res != nullptr) {
    // Guard against huge accumulated results (relevant for QType::ANY, where
    // several RRSets end up in the same result vector).
    if (s_limitQTypeAny && res->size() + entry->d_records.size() > s_maxRRSetSize) {
      throw ImmediateServFailException("too many records in result");
    }

    res->reserve(res->size() + entry->d_records.size());

    for (const auto& record : entry->d_records) {
      DNSRecord result;
      result.d_name = qname;
      result.d_type = entry->d_qtype;
      result.d_class = QClass::IN;
      result.setContent(record);
      // coverity[store_truncates_time_t]
      // NB: d_ttl is filled with the absolute TTD here, not a relative TTL.
      result.d_ttl = static_cast<uint32_t>(entry->d_ttd);
      result.d_place = DNSResourceRecord::ANSWER;
      res->push_back(std::move(result));
    }
  }

  if (signatures != nullptr) {
    signatures->insert(signatures->end(), entry->d_signatures.begin(), entry->d_signatures.end());
  }

  if (authorityRecs != nullptr) {
    authorityRecs->insert(authorityRecs->end(), entry->d_authorityRecs.begin(), entry->d_authorityRecs.end());
  }

  // Merge this RRSet's DNSSEC state into the state accumulated by the caller.
  updateDNSSECValidationStateFromCache(state, entry->d_state);

  if (wasAuth != nullptr) {
    // Stays true only if *every* entry seen so far was authoritative.
    *wasAuth = *wasAuth && entry->d_auth;
  }
  ptrAssign(fromAuthZone, entry->d_authZone);
  ptrAssign(fromAuthIP, entry->d_from);

  // Recently used: move to the back of the LRU sequence so it is evicted last.
  moveCacheItemToBack<SequencedTag>(content.d_map, entry);

  return ttd;
}
233

234
static void pushRefreshTask(const DNSName& qname, QType qtype, time_t deadline, const Netmask& netmask)
235
{
30✔
236
  if (qtype == QType::ADDR) {
30!
237
    pushAlmostExpiredTask(qname, QType::A, deadline, netmask);
×
238
    pushAlmostExpiredTask(qname, QType::AAAA, deadline, netmask);
×
239
  }
×
240
  else {
30✔
241
    pushAlmostExpiredTask(qname, qtype, deadline, netmask);
30✔
242
  }
30✔
243
}
30✔
244

245
// Extend the serve-stale lifetime of an expired entry and schedule a task
// that tries to really refresh it.
void MemRecursorCache::updateStaleEntry(time_t now, MemRecursorCache::OrderedTagIterator_t& entry)
{
  // We need to take care that an infrequently accessed stale item cannot be extended past
  // s_maxServedStaleExtension * s_serveStaleExtensionPeriod
  // We look how old the entry is, and increase d_servedStale accordingly, taking care not to overflow
  const time_t howlong = std::max(static_cast<time_t>(1), now - entry->d_ttd);
  // Extension period is the original TTL, capped at s_serveStaleExtensionPeriod, at least 1s.
  const uint32_t extension = std::max(1U, std::min(entry->d_orig_ttl, s_serveStaleExtensionPeriod));
  entry->d_servedStale = std::min(entry->d_servedStale + 1 + howlong / extension, static_cast<time_t>(s_maxServedStaleExtensions));
  entry->d_ttd = now + extension;

  pushRefreshTask(entry->d_qname, entry->d_qtype, entry->d_ttd, entry->d_netmask);
}
257

258
// If we are serving this record stale (or *should*) and the ttd has
259
// passed increase ttd to the future and remember that we did. Also
260
// push a refresh task.
261
void MemRecursorCache::handleServeStaleBookkeeping(time_t now, bool serveStale, MemRecursorCache::OrderedTagIterator_t& entry)
262
{
679,645✔
263
  if ((serveStale || entry->d_servedStale > 0) && entry->d_ttd <= now && entry->d_servedStale < s_maxServedStaleExtensions) {
679,646!
264
    updateStaleEntry(now, entry);
26✔
265
  }
26✔
266
}
679,645✔
267

268
// Look up an entry for (qname, qtype) taking EDNS Client Subnet into account:
// prefer the most specific netmask matching `who`, falling back to the
// generic (no-netmask) entry. Repairs out-of-date ECS index entries as a side
// effect. Returns map.d_map.end() when nothing usable is found.
MemRecursorCache::cache_t::const_iterator MemRecursorCache::getEntryUsingECSIndex(MapCombo::LockedContent& map, time_t now, const DNSName& qname, const QType qtype, bool requireAuth, const ComboAddress& who, bool serveStale)
{
  // MUTEX SHOULD BE ACQUIRED (as indicated by the reference to the content which is protected by a lock)
  auto ecsIndexKey = std::tie(qname, qtype);
  auto ecsIndex = map.d_ecsIndex.find(ecsIndexKey);
  if (ecsIndex != map.d_ecsIndex.end() && !ecsIndex->isEmpty()) {
    /* we have netmask-specific entries, let's see if we match one */
    while (true) {
      const Netmask best = ecsIndex->lookupBestMatch(who);
      if (best.empty()) {
        /* we have nothing more specific for you */
        break;
      }
      auto key = std::tuple(qname, qtype, boost::none, best);
      auto entry = map.d_map.find(key);
      if (entry == map.d_map.end()) {
        /* ecsIndex is not up-to-date */
        ecsIndex->removeNetmask(best);
        if (ecsIndex->isEmpty()) {
          map.d_ecsIndex.erase(ecsIndex);
          break;
        }
        // Try the next-best netmask.
        continue;
      }
      handleServeStaleBookkeeping(now, serveStale, entry);

      if (entry->d_ttd > now) {
        if (!requireAuth || entry->d_auth) {
          return entry;
        }
        /* we need auth data and the best match is not authoritative */
        return map.d_map.end();
      }
      /* this netmask-specific entry has expired */
      moveCacheItemToFront<SequencedTag>(map.d_map, entry);
      // XXX when serving stale, it should be kept, but we don't want a match wth lookupBestMatch()...
      ecsIndex->removeNetmask(best);
      if (ecsIndex->isEmpty()) {
        map.d_ecsIndex.erase(ecsIndex);
        break;
      }
    }
  }

  /* we have nothing specific, let's see if we have a generic one */
  auto key = std::tuple(qname, qtype, boost::none, Netmask());
  auto entry = map.d_map.find(key);
  if (entry != map.d_map.end()) {
    handleServeStaleBookkeeping(now, serveStale, entry);
    if (entry->d_ttd > now) {
      if (!requireAuth || entry->d_auth) {
        return entry;
      }
    }
    else {
      // Expired: move to the front of the LRU sequence, eviction takes it first.
      moveCacheItemToFront<SequencedTag>(map.d_map, entry);
    }
  }

  /* nothing for you, sorry */
  return map.d_map.end();
}
330

331
// Return the range of cache entries for (qname, rtag), using a one-element
// lookup cache: the previous equal_range result is reused when the same
// (qname, rtag) is queried again and nothing invalidated it since.
MemRecursorCache::Entries MemRecursorCache::getEntries(MapCombo::LockedContent& map, const DNSName& qname, const QType /* qtype */, const OptTag& rtag)
{
  // MUTEX SHOULD BE ACQUIRED
  const bool cachedRangeUsable = map.d_cachecachevalid && map.d_cachedqname == qname && map.d_cachedrtag == rtag;
  if (!cachedRangeUsable) {
    map.d_cachedqname = qname;
    map.d_cachedrtag = rtag;
    const auto& idx = map.d_map.get<NameAndRTagOnlyHashedTag>();
    map.d_cachecache = idx.equal_range(std::tie(qname, rtag));
    map.d_cachecachevalid = true;
  }
  return map.d_cachecache;
}
343

344
// Does this entry satisfy a lookup for `qtype` from client `who`?
bool MemRecursorCache::entryMatches(MemRecursorCache::OrderedTagIterator_t& entry, const QType qtype, bool requireAuth, const ComboAddress& who)
{
  // This code assumes that if a routing tag is present, it matches
  // MUTEX SHOULD BE ACQUIRED
  if (requireAuth && !entry->d_auth) {
    return false;
  }

  // ANY matches every type; the pseudo-type ADDR matches A and AAAA.
  const bool typeMatches = entry->d_qtype == qtype || qtype == QType::ANY || (qtype == QType::ADDR && (entry->d_qtype == QType::A || entry->d_qtype == QType::AAAA));
  if (!typeMatches) {
    return false;
  }

  // An ECS-specific entry only matches if the client falls inside its netmask.
  return entry->d_netmask.empty() || entry->d_netmask.match(who);
}
356

357
// Fake a cache miss if more than refreshTTLPerc of the original TTL has passed
// Converts the absolute TTD `ret` into a relative TTL; returns -1 (miss) when
// the entry should be refreshed instead of served, and may schedule a
// background refresh task for almost-expired entries.
time_t MemRecursorCache::fakeTTD(MemRecursorCache::OrderedTagIterator_t& entry, const DNSName& qname, QType qtype, time_t ret, time_t now, uint32_t origTTL, bool refresh)
{
  time_t ttl = ret - now;
  // If we are checking an entry being served stale in refresh mode,
  // we always consider it stale so a real refresh attempt will be
  // kicked by SyncRes
  if (refresh && entry->d_servedStale > 0) {
    return -1;
  }
  if (ttl > 0 && SyncRes::s_refresh_ttlperc > 0) {
    const uint32_t deadline = origTTL * SyncRes::s_refresh_ttlperc / 100;
    // coverity[store_truncates_time_t]
    const bool almostExpired = static_cast<uint32_t>(ttl) <= deadline;
    if (almostExpired && qname != g_rootdnsname) {
      if (refresh) {
        return -1;
      }
      // Not refreshing ourselves: schedule a background refresh task, but
      // only once per entry (d_submitted guards against duplicates).
      if (!entry->d_submitted) {
        pushRefreshTask(qname, qtype, entry->d_ttd, entry->d_netmask);
        entry->d_submitted = true;
      }
    }
  }
  return ttl;
}
383

384
// returns -1 for no hits
// Main cache lookup: fills the caller-provided output pointers (any of which
// may be nullptr) and returns the remaining TTL of the answer, -1 on a miss.
// Handles three paths: ECS-indexed lookups (netmask-specific entries, no
// routing tag), routing-tag lookups, and the plain untagged lookup.
time_t MemRecursorCache::get(time_t now, const DNSName& qname, const QType qtype, Flags flags, vector<DNSRecord>* res, const ComboAddress& who, const OptTag& routingTag, vector<std::shared_ptr<const RRSIGRecordContent>>* signatures, std::vector<std::shared_ptr<DNSRecord>>* authorityRecs, bool* variable, vState* state, bool* wasAuth, DNSName* fromAuthZone, ComboAddress* fromAuthIP) // NOLINT(readability-function-cognitive-complexity)
{
  bool requireAuth = (flags & RequireAuth) != 0;
  bool refresh = (flags & Refresh) != 0;
  bool serveStale = (flags & ServeStale) != 0;

  boost::optional<vState> cachedState{boost::none};
  uint32_t origTTL = 0;

  if (res != nullptr) {
    res->clear();
  }

  // we might retrieve more than one entry, we need to set that to true
  // so it will be set to false if at least one entry is not auth
  ptrAssign(wasAuth, true);

  auto& shard = getMap(qname);
  auto lockedShard = shard.lock();

  /* If we don't have any netmask-specific entries at all, let's just skip this
     to be able to use the nice d_cachecache hack. */
  if (qtype != QType::ANY && !lockedShard->d_ecsIndex.empty() && !routingTag) {
    if (qtype == QType::ADDR) {
      // ADDR: look up A and AAAA separately, combine to the smallest TTD.
      time_t ret = -1;

      auto entryA = getEntryUsingECSIndex(*lockedShard, now, qname, QType::A, requireAuth, who, serveStale);
      if (entryA != lockedShard->d_map.end()) {
        ret = handleHit(now, *lockedShard, entryA, qname, origTTL, res, signatures, authorityRecs, variable, cachedState, wasAuth, fromAuthZone, fromAuthIP);
      }
      auto entryAAAA = getEntryUsingECSIndex(*lockedShard, now, qname, QType::AAAA, requireAuth, who, serveStale);
      if (entryAAAA != lockedShard->d_map.end()) {
        time_t ttdAAAA = handleHit(now, *lockedShard, entryAAAA, qname, origTTL, res, signatures, authorityRecs, variable, cachedState, wasAuth, fromAuthZone, fromAuthIP);
        if (ret > 0) {
          ret = std::min(ret, ttdAAAA);
        }
        else {
          ret = ttdAAAA;
        }
      }

      if (cachedState && ret > 0) {
        ptrAssign(state, *cachedState);
      }

      // Convert absolute TTD to relative TTL on a hit.
      return ret > 0 ? (ret - now) : ret;
    }
    auto entry = getEntryUsingECSIndex(*lockedShard, now, qname, qtype, requireAuth, who, serveStale);
    if (entry != lockedShard->d_map.end()) {
      time_t ret = handleHit(now, *lockedShard, entry, qname, origTTL, res, signatures, authorityRecs, variable, cachedState, wasAuth, fromAuthZone, fromAuthIP);
      if (cachedState && ret > now) {
        ptrAssign(state, *cachedState);
      }
      return fakeTTD(entry, qname, qtype, ret, now, origTTL, refresh);
    }
    return -1;
  }

  if (routingTag) {
    auto entries = getEntries(*lockedShard, qname, qtype, routingTag);
    unsigned int found = 0;
    time_t ttd{};

    if (entries.first != entries.second) {
      OrderedTagIterator_t firstIndexIterator;
      for (auto i = entries.first; i != entries.second; ++i) {
        firstIndexIterator = lockedShard->d_map.project<OrderedTag>(i);

        // When serving stale, we consider expired records
        if (!i->isEntryUsable(now, serveStale)) {
          moveCacheItemToFront<SequencedTag>(lockedShard->d_map, firstIndexIterator);
          continue;
        }

        if (!entryMatches(firstIndexIterator, qtype, requireAuth, who)) {
          continue;
        }
        ++found;

        handleServeStaleBookkeeping(now, serveStale, firstIndexIterator);

        ttd = handleHit(now, *lockedShard, firstIndexIterator, qname, origTTL, res, signatures, authorityRecs, variable, cachedState, wasAuth, fromAuthZone, fromAuthIP);

        // ADDR is complete once both an A and an AAAA RRSet were collected.
        if (qtype == QType::ADDR && found == 2) {
          break;
        }
        if (qtype != QType::ANY) { // normally if we have a hit, we are done
          break;
        }
      }
      if (found > 0) {
        if (cachedState && ttd > now) {
          ptrAssign(state, *cachedState);
        }
        return fakeTTD(firstIndexIterator, qname, qtype, ttd, now, origTTL, refresh);
      }
      // Tagged entries existed for the name but none matched: report a miss
      // without falling through to the untagged lookup.
      return -1;
    }
  }
  // Try (again) without tag
  auto entries = getEntries(*lockedShard, qname, qtype, boost::none);

  if (entries.first != entries.second) {
    OrderedTagIterator_t firstIndexIterator;
    unsigned int found = 0;
    time_t ttd{};

    for (auto i = entries.first; i != entries.second; ++i) {
      firstIndexIterator = lockedShard->d_map.project<OrderedTag>(i);

      // When serving stale, we consider expired records
      if (!i->isEntryUsable(now, serveStale)) {
        moveCacheItemToFront<SequencedTag>(lockedShard->d_map, firstIndexIterator);
        continue;
      }

      if (!entryMatches(firstIndexIterator, qtype, requireAuth, who)) {
        continue;
      }
      ++found;

      handleServeStaleBookkeeping(now, serveStale, firstIndexIterator);

      ttd = handleHit(now, *lockedShard, firstIndexIterator, qname, origTTL, res, signatures, authorityRecs, variable, cachedState, wasAuth, fromAuthZone, fromAuthIP);

      if (qtype == QType::ADDR && found == 2) {
        break;
      }
      if (qtype != QType::ANY) { // normally if we have a hit, we are done
        break;
      }
    }
    if (found > 0) {
      if (cachedState && ttd > now) {
        ptrAssign(state, *cachedState);
      }
      return fakeTTD(firstIndexIterator, qname, qtype, ttd, now, origTTL, refresh);
    }
  }
  return -1;
}
526

527
// Decide whether incoming data (auth flag, validation state, refresh mode)
// may overwrite this existing cache entry: auth data beats unauth, non-Bogus
// beats Bogus, and optional TTL-based "cache locking" (s_locked_ttlperc)
// protects young entries. NOTE: may flip d_auth to false as a side effect
// when stale auth data is about to be replaced by unauth data.
bool MemRecursorCache::CacheEntry::shouldReplace(time_t now, bool auth, vState state, bool refresh)
{
  if (!auth && d_auth) { // unauth data came in, we have some auth data, but is it fresh?
    // an auth entry that is going to expire while we are resolving can hurt, as it prevents infra
    // records (which might be unauth) to be updated. So apply a safety margin.
    const time_t margin = 5;
    if (d_ttd - margin > now) { // we still have valid data, ignore unauth data
      return false;
    }
    d_auth = false; // new data won't be auth
  }

  if (auth) {
    /* we don't want to keep a non-auth entry while we have an auth one */
    if (vStateIsBogus(state) && (!vStateIsBogus(d_state) && d_state != vState::Indeterminate) && d_ttd > now) {
      /* the new entry is Bogus, the existing one is not and is still valid, let's keep the existing one */
      return false;
    }
    // Always allow upgrade unauth data to auth
    if (!d_auth) {
      return true;
    }
  }

  if (SyncRes::s_locked_ttlperc > 0) {
    // Override locking if existing data is stale or new data is Secure or refreshing
    if (d_ttd <= now || state == vState::Secure || refresh) {
      return true;
    }
    // The entry is "locked" (not replaceable) until s_locked_ttlperc percent
    // of its original TTL has elapsed.
    const uint32_t percentage = 100 - SyncRes::s_locked_ttlperc;
    const time_t ttl = d_ttd - now;
    const uint32_t lockline = d_orig_ttl * percentage / 100;
    // We know ttl is > 0 as d_ttd > now
    // coverity[store_truncates_time_t]
    const bool locked = static_cast<uint32_t>(ttl) > lockline;
    if (locked) {
      return false;
    }
  }

  return true;
}
569

570
// Insert a pre-built cache entry. Entries carrying an ECS netmask or routing
// tag are not supported (yet). Returns true when the entry was inserted.
bool MemRecursorCache::replace(CacheEntry&& entry)
{
  if (!entry.d_netmask.empty() || entry.d_rtag) {
    // We don't handle that yet
    return false;
  }
  auto& shard = getMap(entry.d_qname);
  auto lockedShard = shard.lock();

  lockedShard->d_cachecachevalid = false;
  entry.d_submitted = false;
  const bool inserted = lockedShard->d_map.emplace(std::move(entry)).second;
  if (!inserted) {
    return false;
  }
  shard.incEntriesCount();
  return true;
}
587

588
// Insert or update the cached RRSet for (qname, qtype[, routingTag/ednsmask]).
// `content` holds records whose d_ttl was already converted to an absolute
// TTD by the caller. Honors shouldReplace() for existing entries, maintains
// the ECS index for netmask-specific entries, and caps stored RRSets at
// s_maxRRSetSize (flagging oversized ones as d_tooBig).
void MemRecursorCache::replace(time_t now, const DNSName& qname, const QType qtype, const vector<DNSRecord>& content, const vector<shared_ptr<const RRSIGRecordContent>>& signatures, const std::vector<std::shared_ptr<DNSRecord>>& authorityRecs, bool auth, const DNSName& authZone, boost::optional<Netmask> ednsmask, const OptTag& routingTag, vState state, boost::optional<ComboAddress> from, bool refresh, time_t ttl_time)
{
  auto& shard = getMap(qname);
  auto lockedShard = shard.lock();

  lockedShard->d_cachecachevalid = false;
  if (ednsmask) {
    ednsmask = ednsmask->getNormalized();
  }

  // We only store with a tag if we have an ednsmask and the tag is available
  // We only store an ednsmask if we do not have a tag and we do have a mask.
  auto key = std::tuple(qname, qtype.getCode(), ednsmask ? routingTag : boost::none, (ednsmask && !routingTag) ? *ednsmask : Netmask());
  bool isNew = false;
  cache_t::iterator stored = lockedShard->d_map.find(key);
  if (stored == lockedShard->d_map.end()) {
    stored = lockedShard->d_map.insert(CacheEntry(key, auth)).first;
    shard.incEntriesCount();
    isNew = true;
  }

  /* if we are inserting a new entry or updating an expired one (in which case the
     ECS index might have been removed but the entry still exists because it has not
     been garbage collected yet) we might need to update the ECS index.
     Otherwise it should already be indexed and we don't need to update it.
  */
  if (isNew || stored->d_ttd <= now) {
    /* don't bother building an ecsIndex if we don't have any netmask-specific entries */
    if (!routingTag && ednsmask && !ednsmask->empty()) {
      auto ecsIndexKey = std::tuple(qname, qtype.getCode());
      auto ecsIndex = lockedShard->d_ecsIndex.find(ecsIndexKey);
      if (ecsIndex == lockedShard->d_ecsIndex.end()) {
        ecsIndex = lockedShard->d_ecsIndex.insert(ECSIndexEntry(qname, qtype.getCode())).first;
      }
      ecsIndex->addMask(*ednsmask);
    }
  }

  time_t maxTTD = std::numeric_limits<time_t>::max();
  CacheEntry cacheEntry = *stored; // this is a COPY
  cacheEntry.d_qtype = qtype.getCode();

  if (!isNew && !cacheEntry.shouldReplace(now, auth, state, refresh)) {
    // Existing data wins; note a brand-new (empty) entry was still inserted above.
    return;
  }

  cacheEntry.d_state = state;

  // refuse any attempt to *raise* the TTL of auth NS records, as it would make it possible
  // for an auth to keep a "ghost" zone alive forever, even after the delegation is gone from
  // the parent
  // BUT make sure that we CAN refresh the root
  if (cacheEntry.d_auth && auth && qtype == QType::NS && !isNew && !qname.isRoot()) {
    maxTTD = cacheEntry.d_ttd;
  }

  if (auth) {
    cacheEntry.d_auth = true;
  }

  cacheEntry.d_signatures = signatures;
  cacheEntry.d_authorityRecs = authorityRecs;
  cacheEntry.d_records.clear();
  cacheEntry.d_authZone = authZone;
  if (from) {
    cacheEntry.d_from = *from;
  }
  else {
    cacheEntry.d_from = ComboAddress();
  }

  size_t toStore = content.size();
  if (toStore <= s_maxRRSetSize) {
    cacheEntry.d_tooBig = false;
  }
  else {
    toStore = 1; // record cache does not like empty RRSets
    cacheEntry.d_tooBig = true;
  }
  cacheEntry.d_records.reserve(toStore);
  for (const auto& record : content) {
    /* Yes, we have altered the d_ttl value by adding time(nullptr) to it
       prior to calling this function, so the TTL actually holds a TTD. */
    cacheEntry.d_ttd = min(maxTTD, static_cast<time_t>(record.d_ttl)); // XXX this does weird things if TTLs differ in the set

    // coverity[store_truncates_time_t]
    cacheEntry.d_orig_ttl = cacheEntry.d_ttd - ttl_time;
    // Even though we record the time the ttd was computed, there still seems to be a case where the computed
    // d_orig_ttl can wrap.
    // So sanitize the computed ce.d_orig_ttl to be on the safe side
    if (cacheEntry.d_orig_ttl < SyncRes::s_minimumTTL || cacheEntry.d_orig_ttl > SyncRes::s_maxcachettl) {
      cacheEntry.d_orig_ttl = SyncRes::s_minimumTTL;
    }
    cacheEntry.d_records.push_back(record.getContent());
    if (--toStore == 0) {
      break;
    }
  }

  if (!isNew) {
    // A refreshed entry counts as recently used for LRU purposes.
    moveCacheItemToBack<SequencedTag>(lockedShard->d_map, stored);
  }
  cacheEntry.d_submitted = false;
  cacheEntry.d_servedStale = 0;
  lockedShard->d_map.replace(stored, cacheEntry);
}
694

695
// Remove cached entries for `name` (and, when `sub` is true, the whole
// subtree below it) matching `qtype`; the pseudo-qtype 0xffff wipes all
// types. Also drops the corresponding ECS index entries. Returns the number
// of wiped cache entries (ECS index removals are not counted).
size_t MemRecursorCache::doWipeCache(const DNSName& name, bool sub, const QType qtype)
{
  size_t count = 0;

  if (!sub) {
    // Exact-name wipe: only the shard owning `name` is involved.
    auto& shard = getMap(name);
    auto lockedShard = shard.lock();
    lockedShard->d_cachecachevalid = false;
    auto& idx = lockedShard->d_map.get<OrderedTag>();
    auto range = idx.equal_range(name);
    auto iter = range.first;
    while (iter != range.second) {
      if (iter->d_qtype == qtype || qtype == 0xffff) {
        iter = idx.erase(iter);
        count++;
        shard.decEntriesCount();
      }
      else {
        ++iter;
      }
    }

    if (qtype == 0xffff) {
      // Wipe-all: remove every ECS index entry for the name.
      auto& ecsIdx = lockedShard->d_ecsIndex.get<OrderedTag>();
      auto ecsIndexRange = ecsIdx.equal_range(name);
      ecsIdx.erase(ecsIndexRange.first, ecsIndexRange.second);
    }
    else {
      // Type-specific wipe: remove only the (name, qtype) index entries.
      auto& ecsIdx = lockedShard->d_ecsIndex.get<HashedTag>();
      auto ecsIndexRange = ecsIdx.equal_range(std::tie(name, qtype));
      ecsIdx.erase(ecsIndexRange.first, ecsIndexRange.second);
    }
  }
  else {
    // Subtree wipe: names below `name` hash to arbitrary shards, scan them all.
    for (auto& content : d_maps) {
      auto map = content.lock();
      map->d_cachecachevalid = false;
      auto& idx = map->d_map.get<OrderedTag>();
      for (auto i = idx.lower_bound(name); i != idx.end();) {
        if (!i->d_qname.isPartOf(name)) {
          // Past the subtree in canonical order.
          break;
        }
        if (i->d_qtype == qtype || qtype == 0xffff) {
          count++;
          i = idx.erase(i);
          content.decEntriesCount();
        }
        else {
          ++i;
        }
      }
      // Same walk for the ECS index of this shard.
      auto& ecsIdx = map->d_ecsIndex.get<OrderedTag>();
      for (auto i = ecsIdx.lower_bound(name); i != ecsIdx.end();) {
        if (!i->d_qname.isPartOf(name)) {
          break;
        }
        if (i->d_qtype == qtype || qtype == 0xffff) {
          i = ecsIdx.erase(i);
        }
        else {
          ++i;
        }
      }
    }
  }
  return count;
}
762

763
// Name should be doLimitTime or so
bool MemRecursorCache::doAgeCache(time_t now, const DNSName& name, const QType qtype, uint32_t newTTL)
{
  // Cap the remaining TTL of the (name, qtype) record set to newTTL.
  // Returns true if a live entry was found whose remaining TTL exceeds
  // newTTL (it gets aged down); false if no entry exists, it is already
  // expired, or its remaining TTL is already <= newTTL.
  auto& shard = getMap(name);
  auto lockedShard = shard.lock();
  cache_t::iterator iter = lockedShard->d_map.find(std::tie(name, qtype));
  if (iter == lockedShard->d_map.end()) {
    return false;
  }

  // Work on a copy: elements of a multi_index container are const when
  // reached through an iterator, so the modified copy is written back
  // via replace() below.
  CacheEntry cacheEntry = *iter;
  if (cacheEntry.d_ttd < now) {
    return false; // would be dead anyhow
  }

  // coverity[store_truncates_time_t]
  auto maxTTL = static_cast<uint32_t>(cacheEntry.d_ttd - now);
  if (maxTTL > newTTL) {
    // We are mutating the map, so the one-entry lookup cache is stale.
    lockedShard->d_cachecachevalid = false;

    time_t newTTD = now + newTTL;

    if (cacheEntry.d_ttd > newTTD) {
      cacheEntry.d_ttd = newTTD;
      lockedShard->d_map.replace(iter, cacheEntry);
    }
    return true;
  }
  return false;
}
793

794
// Set the DNSSEC validation state of the cached record set matching
// (qname, qtype, who, routingTag), optionally capping its time-to-die.
// Returns true if a matching entry was found and updated.  ANY and ADDR
// pseudo-types cover several record sets at once and are rejected.
bool MemRecursorCache::updateValidationStatus(time_t now, const DNSName& qname, const QType qtype, const ComboAddress& who, const OptTag& routingTag, bool requireAuth, vState newState, boost::optional<time_t> capTTD)
{
  if (qtype == QType::ANY) {
    throw std::runtime_error("Trying to update the DNSSEC validation status of all (via ANY) records for " + qname.toLogString());
  }
  if (qtype == QType::ADDR) {
    throw std::runtime_error("Trying to update the DNSSEC validation status of several (via ADDR) records for " + qname.toLogString());
  }

  auto& content = getMap(qname);
  auto map = content.lock();

  bool updated = false;
  // When ECS-specific entries exist (and no routing tag narrows the
  // lookup), the ECS index picks the single entry that covers `who`.
  if (!map->d_ecsIndex.empty() && !routingTag) {
    auto entry = getEntryUsingECSIndex(*map, now, qname, qtype, requireAuth, who, false); // XXX serveStale?
    if (entry == map->d_map.end()) {
      return false;
    }

    entry->d_state = newState;
    if (capTTD) {
      entry->d_ttd = std::min(entry->d_ttd, *capTTD);
    }
    return true;
  }

  auto entries = getEntries(*map, qname, qtype, routingTag);

  // Scan candidates; only the first entry that matches qtype/auth/netmask
  // is updated (hence the break below).
  for (auto i = entries.first; i != entries.second; ++i) {
    // Project the secondary-index iterator back to the primary index, as
    // entryMatches expects an OrderedTag iterator.
    auto firstIndexIterator = map->d_map.project<OrderedTag>(i);

    if (!entryMatches(firstIndexIterator, qtype, requireAuth, who)) {
      continue;
    }

    i->d_state = newState;
    if (capTTD) {
      i->d_ttd = std::min(i->d_ttd, *capTTD);
    }
    updated = true;

    break;
  }

  return updated;
}
840

841
// Write a human-readable dump of the whole record cache to the given file
// descriptor (which is dup()ed, so the caller keeps ownership).  Returns
// the number of records (including signatures) written; 0 on I/O setup
// failure.
uint64_t MemRecursorCache::doDump(int fileDesc, size_t maxCacheEntries)
{
  int newfd = dup(fileDesc);
  if (newfd == -1) {
    return 0;
  }
  // UniqueFilePtr closes the FILE* (and thus newfd) on scope exit.
  auto filePtr = pdns::UniqueFilePtr(fdopen(newfd, "w"));
  if (!filePtr) { // dup probably failed
    close(newfd);
    return 0;
  }

  fprintf(filePtr.get(), "; main record cache dump follows\n;\n");
  uint64_t count = 0;
  size_t shardNumber = 0;
  // Track the smallest and largest shard for the summary line at the end.
  size_t min = std::numeric_limits<size_t>::max();
  size_t max = 0;
  for (auto& shard : d_maps) {
    // Each shard is locked only while being dumped.
    auto lockedShard = shard.lock();
    const auto shardSize = lockedShard->d_map.size();
    fprintf(filePtr.get(), "; record cache shard %zu; size %zu\n", shardNumber, shardSize);
    min = std::min(min, shardSize);
    max = std::max(max, shardSize);
    shardNumber++;
    // Dump in LRU (sequence) order rather than name order.
    const auto& sidx = lockedShard->d_map.get<SequencedTag>();
    time_t now = time(nullptr);
    for (const auto& recordSet : sidx) {
      for (const auto& record : recordSet.d_records) {
        count++;
        try {
          fprintf(filePtr.get(), "%s %" PRIu32 " %" PRId64 " IN %s %s ; (%s) auth=%i zone=%s from=%s nm=%s rtag=%s ss=%hd%s\n", recordSet.d_qname.toString().c_str(), recordSet.d_orig_ttl, static_cast<int64_t>(recordSet.d_ttd - now), recordSet.d_qtype.toString().c_str(), record->getZoneRepresentation().c_str(), vStateToString(recordSet.d_state).c_str(), static_cast<int>(recordSet.d_auth), recordSet.d_authZone.toLogString().c_str(), recordSet.d_from.toString().c_str(), recordSet.d_netmask.empty() ? "" : recordSet.d_netmask.toString().c_str(), !recordSet.d_rtag ? "" : recordSet.d_rtag.get().c_str(), recordSet.d_servedStale, recordSet.d_tooBig ? " (too big!)" : "");
        }
        catch (...) {
          // Rendering a record can throw (e.g. malformed content); keep
          // dumping the rest and leave a marker line instead.
          fprintf(filePtr.get(), "; error printing '%s'\n", recordSet.d_qname.empty() ? "EMPTY" : recordSet.d_qname.toString().c_str());
        }
      }
      for (const auto& sig : recordSet.d_signatures) {
        count++;
        try {
          fprintf(filePtr.get(), "%s %" PRIu32 " %" PRId64 " IN RRSIG %s ; %s\n", recordSet.d_qname.toString().c_str(), recordSet.d_orig_ttl, static_cast<int64_t>(recordSet.d_ttd - now), sig->getZoneRepresentation().c_str(), recordSet.d_netmask.empty() ? "" : recordSet.d_netmask.toString().c_str());
        }
        catch (...) {
          fprintf(filePtr.get(), "; error printing '%s'\n", recordSet.d_qname.empty() ? "EMPTY" : recordSet.d_qname.toString().c_str());
        }
      }
    }
  }
  fprintf(filePtr.get(), "; main record cache size: %zu/%zu shards: %zu min/max shard size: %zu/%zu\n", size(), maxCacheEntries, d_maps.size(), min, max);
  return count;
}
891

892
void MemRecursorCache::doPrune(time_t now, size_t keep)
893
{
136✔
894
  size_t cacheSize = size();
136✔
895
  pruneMutexCollectionsVector<SequencedTag>(now, d_maps, keep, cacheSize);
136✔
896
}
136✔
897

898
// Protobuf field tags for a full record-cache dump (top-level message).
enum class PBCacheDump : protozero::pbf_tag_type
{
  required_string_version = 1, // PowerDNS version string of the producer
  required_string_identity = 2, // server identity of the producer
  required_uint64_protocolVersion = 3, // dump format version, currently 1
  required_int64_time = 4, // wall-clock time the dump was produced
  required_string_type = 5, // data-type marker, must be "PBCacheDump"
  repeated_message_cacheEntry = 6, // one message per cached record set
};
907

908
// Protobuf field tags for a single cached record set.  Note: the decoder
// (putRecordSet) relies on name (4) and qtype (13) being *written* before
// the record/sig payloads so it can deserialize them correctly.
enum class PBCacheEntry : protozero::pbf_tag_type
{
  repeated_bytes_record = 1, // serialized record content
  repeated_bytes_sig = 2, // serialized RRSIG content
  repeated_message_authRecord = 3, // accompanying authority records
  required_bytes_name = 4, // owner name
  required_bytes_authZone = 5, // zone the data came from
  required_message_from = 6, // address the data was received from
  optional_bytes_netmask = 7, // ECS netmask, raw Netmask bytes
  optional_bytes_rtag = 8, // routing tag, if any
  required_uint32_state = 9, // DNSSEC validation state (vState)
  required_int64_ttd = 10, // absolute time-to-die
  required_uint32_orig_ttl = 11, // original TTL
  required_uint32_servedStale = 12, // serve-stale counter
  required_uint32_qtype = 13, // record type
  required_bool_auth = 14, // authoritative data flag
  required_bool_submitted = 15, // refresh-task-submitted flag
  required_bool_tooBig = 16, // too-big-for-packet flag
};
927

928
// Protobuf field tags for a ComboAddress; only port and raw address bytes
// are stored, the address family follows from the byte count.
enum class PBComboAddress : protozero::pbf_tag_type
{
  required_uint32_port = 1,
  required_bytes_address = 2, // family implicit
};
933

934
// Protobuf field tags for an authority record attached to a cache entry.
// The rdata (2) is serialized before the type (3); decoders must account
// for that ordering when deserializing the rdata.
enum class PBAuthRecord : protozero::pbf_tag_type
{
  required_bytes_name = 1,
  required_bytes_rdata = 2,
  required_uint32_type = 3,
  required_uint32_class = 4,
  required_uint32_ttl = 5,
  required_uint32_place = 6,
  required_uint32_clen = 7,
};
944

945
template <typename T>
946
static void encodeComboAddress(protozero::pbf_builder<T>& writer, T type, const ComboAddress& address)
947
{
200✔
948
  protozero::pbf_builder<PBComboAddress> message(writer, type);
200✔
949

950
  // Skip all parts except address and port
951
  message.add_uint32(PBComboAddress::required_uint32_port, address.getPort());
200✔
952
  if (address.sin4.sin_family == AF_INET) {
200!
953
    message.add_bytes(PBComboAddress::required_bytes_address, reinterpret_cast<const char*>(&address.sin4.sin_addr.s_addr), sizeof(address.sin4.sin_addr.s_addr)); // NOLINT(cppcoreguidelines-pro-type-reinterpret-cast): it's the API
×
954
  }
×
955
  else if (address.sin4.sin_family == AF_INET6) {
200!
956
    message.add_bytes(PBComboAddress::required_bytes_address, reinterpret_cast<const char*>(&address.sin6.sin6_addr.s6_addr), sizeof(address.sin6.sin6_addr)); // NOLINT(cppcoreguidelines-pro-type-reinterpret-cast): it's the API
200✔
957
  }
200✔
958
}
200✔
959

960
template <typename T>
961
static void decodeComboAddress(protozero::pbf_message<T>& reader, ComboAddress& address)
962
{
200✔
963
  address.reset();
200✔
964
  protozero::pbf_message<PBComboAddress> message(reader.get_message());
200✔
965

966
  // Skip all parts except address and port
967
  if (message.next(PBComboAddress::required_uint32_port)) {
200!
968
    address.setPort(message.get_uint32());
200✔
969
  }
200✔
970
  else {
×
971
    throw std::runtime_error("expected port in protobuf data");
×
972
  }
×
973
  constexpr auto inet4size = sizeof(address.sin4.sin_addr);
200✔
974
  constexpr auto inet6size = sizeof(address.sin6.sin6_addr);
200✔
975
  if (message.next(PBComboAddress::required_bytes_address)) {
200!
976
    auto data = message.get_bytes();
200✔
977
    address.sin4.sin_family = data.size() == inet4size ? AF_INET : AF_INET6;
200!
978
    if (data.size() == inet4size) {
200!
979
      address.sin4.sin_family = AF_INET;
×
980
      memcpy(&address.sin4.sin_addr, data.data(), data.size());
×
981
    }
×
982
    else if (data.size() == inet6size) {
200!
983
      address.sin6.sin6_family = AF_INET6;
200✔
984
      memcpy(&address.sin6.sin6_addr, data.data(), data.size());
200✔
985
    }
200✔
986
    else {
×
987
      throw std::runtime_error("unexpected address family in protobuf data");
×
988
    }
×
989
  }
200✔
990
  else {
×
991
    throw std::runtime_error("expected address bytes in protobuf data");
×
992
  }
×
993
}
200✔
994

995
template <typename T>
996
static void encodeNetmask(protozero::pbf_builder<T>& writer, T type, const Netmask& subnet)
997
{
200✔
998
  if (!subnet.empty()) {
200!
999
    writer.add_bytes(type, reinterpret_cast<const char*>(&subnet), sizeof(Netmask)); // NOLINT(cppcoreguidelines-pro-type-reinterpret-cast): it's the API
×
1000
  }
×
1001
}
200✔
1002

1003
template <typename T>
1004
static void decodeNetmask(protozero::pbf_message<T>& message, Netmask& subnet)
1005
{
×
1006
  auto data = message.get_bytes();
×
1007
  memcpy(&subnet, data.data(), data.size());
×
1008
}
×
1009

1010
// Serialize one cached record set into a PBCacheEntry protobuf message.
// T is the pbf_builder type, U an iterator/pointer to the record set.
template <typename T, typename U>
void MemRecursorCache::getRecordSet(T& message, U recordSet)
{
  // Two fields below must come before the other fields: the decoder needs
  // the owner name and qtype to deserialize the record payloads.
  message.add_bytes(PBCacheEntry::required_bytes_name, recordSet->d_qname.toString());
  message.add_uint32(PBCacheEntry::required_uint32_qtype, recordSet->d_qtype);
  for (const auto& record : recordSet->d_records) {
    message.add_bytes(PBCacheEntry::repeated_bytes_record, record->serialize(recordSet->d_qname, true));
  }
  for (const auto& record : recordSet->d_signatures) {
    message.add_bytes(PBCacheEntry::repeated_bytes_sig, record->serialize(recordSet->d_qname, true));
  }
  // Authority records become nested PBAuthRecord messages.
  for (const auto& authRec : recordSet->d_authorityRecs) {
    protozero::pbf_builder<PBAuthRecord> auth(message, PBCacheEntry::repeated_message_authRecord);
    auth.add_bytes(PBAuthRecord::required_bytes_name, authRec->d_name.toString());
    auth.add_bytes(PBAuthRecord::required_bytes_rdata, authRec->getContent()->serialize(authRec->d_name, true));
    auth.add_uint32(PBAuthRecord::required_uint32_type, authRec->d_type);
    auth.add_uint32(PBAuthRecord::required_uint32_class, authRec->d_class);
    auth.add_uint32(PBAuthRecord::required_uint32_ttl, authRec->d_ttl);
    auth.add_uint32(PBAuthRecord::required_uint32_place, authRec->d_place);
    auth.add_uint32(PBAuthRecord::required_uint32_clen, authRec->d_clen);
  }
  message.add_bytes(PBCacheEntry::required_bytes_authZone, recordSet->d_authZone.toString());
  encodeComboAddress(message, PBCacheEntry::required_message_from, recordSet->d_from);
  encodeNetmask(message, PBCacheEntry::optional_bytes_netmask, recordSet->d_netmask);
  if (recordSet->d_rtag) {
    message.add_bytes(PBCacheEntry::optional_bytes_rtag, *recordSet->d_rtag);
  }
  message.add_uint32(PBCacheEntry::required_uint32_state, static_cast<uint32_t>(recordSet->d_state));
  message.add_int64(PBCacheEntry::required_int64_ttd, recordSet->d_ttd);
  message.add_uint32(PBCacheEntry::required_uint32_orig_ttl, recordSet->d_orig_ttl);
  message.add_uint32(PBCacheEntry::required_uint32_servedStale, recordSet->d_servedStale);
  message.add_bool(PBCacheEntry::required_bool_auth, recordSet->d_auth);
  message.add_bool(PBCacheEntry::required_bool_submitted, recordSet->d_submitted);
  message.add_bool(PBCacheEntry::required_bool_tooBig, recordSet->d_tooBig);
}
1046

1047
// Produce a protobuf dump of the record cache into `ret`, taking at most
// `perShard` record sets per shard (0 = unlimited) and stopping once the
// encoded size exceeds `maxSize` bytes (0 = unlimited).  Returns the
// number of record sets written.
size_t MemRecursorCache::getRecordSets(size_t perShard, size_t maxSize, std::string& ret)
{
  auto log = g_slog->withName("recordcache")->withValues("perShard", Logging::Loggable(perShard), "maxSize", Logging::Loggable(maxSize));
  log->info(Logr::Info, "Producing cache dump");

  // A size estimate is hard: size() returns the number of record *sets*. Each record set can have
  // multiple records, plus other associated records like signatures. 150 seems to works ok.
  size_t estimate = maxSize == 0 ? size() * 150 : maxSize + 4096; // We may overshoot (will be rolled back)

  // Normalize the "unlimited" sentinels after the estimate has been made.
  if (perShard == 0) {
    perShard = std::numeric_limits<size_t>::max();
  }
  if (maxSize == 0) {
    maxSize = std::numeric_limits<size_t>::max();
  }
  protozero::pbf_builder<PBCacheDump> full(ret);
  full.add_string(PBCacheDump::required_string_version, getPDNSVersion());
  full.add_string(PBCacheDump::required_string_identity, SyncRes::s_serverID);
  full.add_uint64(PBCacheDump::required_uint64_protocolVersion, 1);
  full.add_int64(PBCacheDump::required_int64_time, time(nullptr));
  full.add_string(PBCacheDump::required_string_type, "PBCacheDump");

  size_t count = 0;
  ret.reserve(estimate);

  for (auto& shard : d_maps) {
    auto lockedShard = shard.lock();
    // Walk in reverse LRU order so the most recently used entries are
    // dumped first and survive a per-shard or size cutoff.
    const auto& sidx = lockedShard->d_map.get<SequencedTag>();
    size_t thisShardCount = 0;
    for (auto recordSet = sidx.rbegin(); recordSet != sidx.rend(); ++recordSet) {
      protozero::pbf_builder<PBCacheEntry> message(full, PBCacheDump::repeated_message_cacheEntry);
      getRecordSet(message, recordSet);
      if (ret.size() > maxSize) {
        // Undo the entry that pushed us over budget and stop.
        message.rollback();
        log->info(Logr::Info, "Produced cache dump (max size reached)", "size", Logging::Loggable(ret.size()), "count", Logging::Loggable(count));
        return count;
      }
      ++count;
      ++thisShardCount;
      if (thisShardCount >= perShard) {
        break;
      }
    }
  }
  log->info(Logr::Info, "Produced cache dump", "size", Logging::Loggable(ret.size()), "count", Logging::Loggable(count));
  return count;
}
1094

1095
// Decode one nested PBAuthRecord message and append the resulting
// DNSRecord to authRecs.  The encoder (getRecordSet) writes the rdata
// field *before* the type field, so the rdata bytes are buffered and only
// deserialized once all fields have been read and the record type is
// known; parsing it in stream order would use a not-yet-set d_type.
static void putAuthRecord(protozero::pbf_message<PBCacheEntry>& message, const DNSName& qname, std::vector<std::shared_ptr<DNSRecord>>& authRecs)
{
  protozero::pbf_message<PBAuthRecord> auth = message.get_message();
  DNSRecord authRecord;
  std::string rdata;
  bool seenRdata = false;
  while (auth.next()) {
    switch (auth.tag()) {
    case PBAuthRecord::required_bytes_name:
      authRecord.d_name = DNSName(auth.get_bytes());
      break;
    case PBAuthRecord::required_bytes_rdata:
      // Defer deserialization until d_type is known (see above).
      rdata = auth.get_bytes();
      seenRdata = true;
      break;
    case PBAuthRecord::required_uint32_class:
      authRecord.d_class = auth.get_uint32();
      break;
    case PBAuthRecord::required_uint32_type:
      authRecord.d_type = auth.get_uint32();
      break;
    case PBAuthRecord::required_uint32_ttl:
      authRecord.d_ttl = auth.get_uint32();
      break;
    case PBAuthRecord::required_uint32_place:
      authRecord.d_place = static_cast<DNSResourceRecord::Place>(auth.get_uint32());
      break;
    case PBAuthRecord::required_uint32_clen:
      authRecord.d_clen = auth.get_uint32();
      break;
    default:
      break;
    }
  }
  if (seenRdata) {
    auto ptr = DNSRecordContent::deserialize(qname, authRecord.d_type, rdata);
    authRecord.setContent(ptr);
  }
  authRecs.emplace_back(std::make_shared<DNSRecord>(authRecord));
}
1130

1131
// Decode one PBCacheEntry message and insert the resulting record set
// into the cache via replace().  Returns replace()'s result (whether the
// entry was inserted).  T is the pbf_message type.
template <typename T>
bool MemRecursorCache::putRecordSet(T& message)
{
  // Placeholder key; the real name and qtype arrive first in the stream
  // (the encoder guarantees that ordering) so the record/signature
  // payloads that follow can be deserialized with the correct key.
  CacheEntry cacheEntry{{g_rootdnsname, QType::A, boost::none, Netmask()}, false};
  while (message.next()) {
    switch (message.tag()) {
    case PBCacheEntry::repeated_bytes_record: {
      auto ptr = DNSRecordContent::deserialize(cacheEntry.d_qname, cacheEntry.d_qtype, message.get_bytes());
      cacheEntry.d_records.emplace_back(ptr);
      break;
    }
    case PBCacheEntry::repeated_bytes_sig: {
      auto ptr = DNSRecordContent::deserialize(cacheEntry.d_qname, QType::RRSIG, message.get_bytes());
      cacheEntry.d_signatures.emplace_back(std::dynamic_pointer_cast<RRSIGRecordContent>(ptr));
      break;
    }
    case PBCacheEntry::repeated_message_authRecord:
      putAuthRecord(message, cacheEntry.d_qname, cacheEntry.d_authorityRecs);
      break;
    case PBCacheEntry::required_bytes_name:
      cacheEntry.d_qname = DNSName(message.get_bytes());
      break;
    case PBCacheEntry::required_bytes_authZone:
      cacheEntry.d_authZone = DNSName(message.get_bytes());
      break;
    case PBCacheEntry::required_message_from:
      decodeComboAddress(message, cacheEntry.d_from);
      break;
    case PBCacheEntry::optional_bytes_netmask:
      decodeNetmask(message, cacheEntry.d_netmask);
      break;
    case PBCacheEntry::optional_bytes_rtag:
      cacheEntry.d_rtag = message.get_bytes();
      break;
    case PBCacheEntry::required_uint32_state:
      cacheEntry.d_state = static_cast<vState>(message.get_uint32());
      break;
    case PBCacheEntry::required_int64_ttd:
      cacheEntry.d_ttd = message.get_int64();
      break;
    case PBCacheEntry::required_uint32_orig_ttl:
      cacheEntry.d_orig_ttl = message.get_uint32();
      break;
    case PBCacheEntry::required_uint32_servedStale:
      cacheEntry.d_servedStale = message.get_uint32();
      break;
    case PBCacheEntry::required_uint32_qtype:
      cacheEntry.d_qtype = message.get_uint32();
      break;
    case PBCacheEntry::required_bool_auth:
      cacheEntry.d_auth = message.get_bool();
      break;
    case PBCacheEntry::required_bool_submitted:
      // The dumped flag is consumed but deliberately discarded: a freshly
      // loaded entry has no refresh task pending in this process.
      cacheEntry.d_submitted = message.get_bool();
      cacheEntry.d_submitted = false; // actually not
      break;
    case PBCacheEntry::required_bool_tooBig:
      cacheEntry.d_tooBig = message.get_bool();
      break;
    default:
      break;
    }
  }
  return replace(std::move(cacheEntry));
}

1197
// Load a protobuf cache dump (as produced by getRecordSets) into the
// cache.  Returns the number of record sets actually inserted; 0 on any
// error (exceptions are logged, not propagated).
size_t MemRecursorCache::putRecordSets(const std::string& pbuf)
{
  auto log = g_slog->withName("recordcache")->withValues("size", Logging::Loggable(pbuf.size()));
  log->info(Logr::Debug, "Processing cache dump");

  protozero::pbf_message<PBCacheDump> full(pbuf);
  size_t count = 0;
  size_t inserted = 0;
  try {
    // Entries may only be processed once protocol version and data type
    // have been seen and validated.
    bool protocolVersionSeen = false;
    bool typeSeen = false;
    while (full.next()) {
      switch (full.tag()) {
      case PBCacheDump::required_string_version: {
        auto version = full.get_string();
        log = log->withValues("version", Logging::Loggable(version));
        break;
      }
      case PBCacheDump::required_string_identity: {
        auto identity = full.get_string();
        log = log->withValues("identity", Logging::Loggable(identity));
        break;
      }
      case PBCacheDump::required_uint64_protocolVersion: {
        auto protocolVersion = full.get_uint64();
        log = log->withValues("protocolVersion", Logging::Loggable(protocolVersion));
        if (protocolVersion != 1) {
          throw std::runtime_error("Protocol version mismatch");
        }
        protocolVersionSeen = true;
        break;
      }
      case PBCacheDump::required_int64_time: {
        auto time = full.get_int64();
        log = log->withValues("time", Logging::Loggable(time));
        break;
      }
      case PBCacheDump::required_string_type: {
        auto type = full.get_string();
        if (type != "PBCacheDump") {
          throw std::runtime_error("Data type mismatch");
        }
        typeSeen = true;
        break;
      }
      case PBCacheDump::repeated_message_cacheEntry: {
        if (!protocolVersionSeen || !typeSeen) {
          throw std::runtime_error("Required field missing");
        }
        protozero::pbf_message<PBCacheEntry> message = full.get_message();
        if (putRecordSet(message)) {
          ++inserted;
        }
        ++count;
        break;
      }
      }
    }
    log->info(Logr::Info, "Processed cache dump", "processed", Logging::Loggable(count), "inserted", Logging::Loggable(inserted));
    return inserted;
  }
  catch (const std::runtime_error& e) {
    log->error(Logr::Error, e.what(), "Runtime exception processing cache dump");
  }
  catch (const std::exception& e) {
    log->error(Logr::Error, e.what(), "Exception processing cache dump");
  }
  catch (...) {
    log->error(Logr::Error, "Other exception processing cache dump");
  }
  return 0;
}
1269

1270
namespace boost
1271
{
1272
size_t hash_value(const MemRecursorCache::OptTag& rtag)
1273
{
2,554,190✔
1274
  return rtag ? hash_value(rtag.get()) : 0xcafebaaf;
2,554,190✔
1275
}
2,554,190✔
1276
}
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2025 Coveralls, Inc