• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

llnl / dftracer-utils / 24057299873

07 Apr 2026 12:01AM UTC coverage: 52.076% (+0.8%) from 51.228%
24057299873

push

github

rayandrew
feat(rocksdb): migrate SQLite indexing to RocksDB

Replace SQLite-backed indexing and provenance storage with RocksDB-backed stores.

  Key changes:
  - add RocksDB async/database/db-manager/filesystem/key-codec layers
  - migrate index and provenance databases from SQLite to RocksDB
  - update index builder, trace reader, reorganize, view, stats, and comparator paths for
  RocksDB
  - harden transaction atomicity and rollback behavior with TransactionScope
  - add iterator status checking for prefix scans
  - harden gzip/tar indexer cache state and metadata handling
  - capture executor context in RocksDB awaitables
  - clean up failed RocksDB open paths and manager lifecycle behavior
  - vendor CPM 0.42.1 and update CI/build integration
  - refresh docs, Python bindings, and C++/Python test coverage for the new backend

  Validation:
  - full test suite passed
  - Ubuntu 22.04 Docker run passed
  - focused RocksDB/indexer regression tests passed

24097 of 59624 branches covered (40.41%)

Branch coverage included in aggregate %.

2516 of 3144 new or added lines in 75 files covered. (80.03%)

72 existing lines in 15 files now uncovered.

20858 of 26701 relevant lines covered (78.12%)

14113.43 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

60.15
/src/dftracer/utils/binaries/dftracer_split.cpp
1
#include <dftracer/utils/core/common/config.h>
2
#include <dftracer/utils/core/common/filesystem.h>
3
#include <dftracer/utils/core/common/platform_compat.h>
4
#include <dftracer/utils/core/pipeline/pipeline.h>
5
#include <dftracer/utils/core/pipeline/pipeline_config.h>
6
#include <dftracer/utils/core/task_graph/task_graph.h>
7
#include <dftracer/utils/core/tasks/coro_scope.h>
8
#include <dftracer/utils/core/tasks/task.h>
9
#include <dftracer/utils/core/utilities/utility_adapter.h>
10
#include <dftracer/utils/utilities/composites/composites.h>
11
#include <dftracer/utils/utilities/composites/dft/chunk_extractor_utility.h>
12
#include <dftracer/utils/utilities/fileio/types/types.h>
13
#include <dftracer/utils/utilities/indexer/index_builder_utility.h>
14
#include <dftracer/utils/utilities/indexer/internal/indexer.h>
15
#include <unistd.h>
16

17
#include <argparse/argparse.hpp>
18
#include <chrono>
19
#include <cinttypes>
20

21
using namespace dftracer::utils;
22
using namespace dftracer::utils::task_graph;
23
using Metadata = utilities::composites::dft::MetadataCollectorUtilityOutput;
24
using ChunkManifest =
25
    utilities::composites::dft::internal::DFTracerChunkManifest;
26
using ExtractInput = utilities::composites::dft::ChunkExtractorUtilityInput;
27
using ExtractResult = utilities::composites::dft::ChunkExtractorUtilityOutput;
28

29
// Entry point for dftracer_split: splits DFTracer traces (.pfw / .pfw.gz)
// found in an input directory into roughly equal-sized chunks using an
// explicit TaskGraph pipeline. Phases: (1) discover inputs, (2) build
// per-file indexes + collect metadata in parallel, reduce metadata into
// chunk manifests, extract chunks concurrently, (3) optionally verify
// event hashes, (4) execute the pipeline and report results.
// Returns 0 on success; 1 on argument errors, no inputs found, failed
// chunks, or verification mismatch.
int main(int argc, char** argv) {
    DFTRACER_UTILS_LOGGER_INIT();

    // Human-readable default checkpoint size, e.g. "33554432 B (32 MB)",
    // used only in the --checkpoint-size help text.
    auto default_checkpoint_size_str =
        std::to_string(dftracer::utils::utilities::indexer::internal::Indexer::
                           DEFAULT_CHECKPOINT_SIZE) +
        " B (" +
        std::to_string(dftracer::utils::utilities::indexer::internal::Indexer::
                           DEFAULT_CHECKPOINT_SIZE /
                       (1024 * 1024)) +
        " MB)";

    argparse::ArgumentParser program("dftracer_split",
                                     DFTRACER_UTILS_PACKAGE_VERSION);
    program.add_description(
        "Split DFTracer traces into equal-sized chunks using explicit pipeline "
        "with maximum parallelism");

    program.add_argument("-n", "--app-name")
        .help("Application name for output files")
        .default_value<std::string>("app");

    program.add_argument("-d", "--directory")
        .help("Input directory containing .pfw or .pfw.gz files")
        .default_value<std::string>(".");

    program.add_argument("-o", "--output")
        .help("Output directory for split files")
        .default_value<std::string>("./split");

    program.add_argument("-s", "--chunk-size")
        .help("Chunk size in MB")
        .scan<'d', int>()
        .default_value(4);

    program.add_argument("-f", "--force")
        .help("Override existing files and force index recreation")
        .flag();

    // NOTE(review): a flag() whose default_value is true cannot be turned
    // off from the command line (-c only sets it to true, which it already
    // is) — confirm whether a --no-compress counterpart was intended.
    program.add_argument("-c", "--compress")
        .help("Compress output files with gzip")
        .flag()
        .default_value(true);

    // NOTE(review): --verbose is declared but never read back via
    // program.get below — presumably consumed by the logger elsewhere, or
    // dead; verify.
    program.add_argument("-v", "--verbose").help("Enable verbose mode").flag();

    program.add_argument("--checkpoint-size")
        .help("Checkpoint size for indexing in bytes (default: " +
              default_checkpoint_size_str + ")")
        .scan<'d', std::size_t>()
        .default_value(static_cast<std::size_t>(
            dftracer::utils::utilities::indexer::internal::Indexer::
                DEFAULT_CHECKPOINT_SIZE));

    program.add_argument("--executor-threads")
        .help(
            "Number of executor threads for parallel processing (default: "
            "number "
            "of CPU cores)")
        .scan<'d', std::size_t>()
        .default_value(
            static_cast<std::size_t>(dftracer_utils_hardware_concurrency()));

    program.add_argument("--index-dir")
        .help("Directory to store index files (default: system temp directory)")
        .default_value<std::string>("");

    program.add_argument("--verify")
        .help("Verify output chunks match input by comparing event IDs")
        .flag();

    program.add_argument("--disable-watchdog")
        .help("Disable watchdog for hang detection")
        .flag();

    program.add_argument("--watchdog-global-timeout")
        .help(
            "Watchdog global timeout for pipeline execution in seconds (0 = no "
            "timeout)")
        .scan<'d', int>()
        .default_value(0);

    program.add_argument("--watchdog-task-timeout")
        .help("Watchdog default task timeout in seconds (0 = no timeout)")
        .scan<'d', int>()
        .default_value(0);

    program.add_argument("--watchdog-interval")
        .help("Watchdog check interval in seconds")
        .scan<'d', int>()
        .default_value(1);

    program.add_argument("--watchdog-warning-threshold")
        .help("Watchdog long-running task warning threshold in seconds")
        .scan<'d', int>()
        .default_value(300);

    program.add_argument("--watchdog-idle-timeout")
        .help("Watchdog idle timeout in seconds (0 = use default)")
        .scan<'d', int>()
        .default_value(300);

    program.add_argument("--watchdog-deadlock-timeout")
        .help("Watchdog deadlock timeout in seconds (0 = use default)")
        .scan<'d', int>()
        .default_value(600);

    // Parse failure prints the error plus full usage and exits non-zero.
    try {
        program.parse_args(argc, argv);
    } catch (const std::exception& err) {
        DFTRACER_UTILS_LOG_ERROR("Error occurred: %s", err.what());
        std::cerr << program << std::endl;
        return 1;
    }

    // Parse arguments
    std::string app_name = program.get<std::string>("--app-name");
    std::string log_dir = program.get<std::string>("--directory");
    std::string output_dir = program.get<std::string>("--output");
    int chunk_size_mb = program.get<int>("--chunk-size");
    bool force = program.get<bool>("--force");
    bool compress = program.get<bool>("--compress");
    bool verify = program.get<bool>("--verify");
    std::size_t checkpoint_size = program.get<std::size_t>("--checkpoint-size");
    std::size_t executor_threads =
        program.get<std::size_t>("--executor-threads");
    std::string index_dir = program.get<std::string>("--index-dir");
    bool disable_watchdog = program.get<bool>("--disable-watchdog");
    int global_timeout = program.get<int>("--watchdog-global-timeout");
    int task_timeout = program.get<int>("--watchdog-task-timeout");
    int watchdog_interval = program.get<int>("--watchdog-interval");
    int warning_threshold = program.get<int>("--watchdog-warning-threshold");
    int idle_timeout = program.get<int>("--watchdog-idle-timeout");
    int deadlock_timeout = program.get<int>("--watchdog-deadlock-timeout");

    // Setup temp index directory. When --index-dir is not given, a
    // process-unique directory (timestamp + pid) is created under the system
    // temp path and removed again at the end of main (see cleanup below).
    std::string temp_index_dir;
    if (index_dir.empty()) {
        temp_index_dir = fs::temp_directory_path() /
                         ("dftracer_idx_" + std::to_string(std::time(nullptr)) +
                          "_" + std::to_string(getpid()));
        fs::create_directories(temp_index_dir);
        index_dir = temp_index_dir;
        DFTRACER_UTILS_LOG_INFO("Created temporary index directory: %s",
                                index_dir.c_str());
    }

    // Normalize to absolute paths before any directory iteration/creation.
    log_dir = fs::absolute(log_dir).string();
    output_dir = fs::absolute(output_dir).string();

    std::printf("==========================================\n");
    std::printf("DFTracer Split (Explicit Pipeline)\n");
    std::printf("==========================================\n");
    std::printf("Arguments:\n");
    std::printf("  App name: %s\n", app_name.c_str());
    std::printf("  Override: %s\n", force ? "true" : "false");
    std::printf("  Compress: %s\n", compress ? "true" : "false");
    std::printf("  Data dir: %s\n", log_dir.c_str());
    std::printf("  Output dir: %s\n", output_dir.c_str());
    std::printf("  Chunk size: %d MB\n", chunk_size_mb);
    std::printf("  Executor threads: %zu\n", executor_threads);
    std::printf("==========================================\n\n");

    if (!fs::exists(output_dir)) {
        fs::create_directories(output_dir);
    }

    // Create pipeline with configuration. Watchdog timeouts of 0 mean
    // "no timeout" or "use default" per the corresponding help strings.
    auto pipeline_config =
        PipelineConfig()
            .with_name("DFTracer Split")
            .with_compute_threads(executor_threads)
            .with_watchdog(!disable_watchdog)
            .with_global_timeout(std::chrono::seconds(global_timeout))
            .with_task_timeout(std::chrono::seconds(task_timeout))
            .with_watchdog_interval(std::chrono::seconds(watchdog_interval))
            .with_warning_threshold(std::chrono::seconds(warning_threshold))
            .with_executor_idle_timeout(std::chrono::seconds(idle_timeout))
            .with_executor_deadlock_timeout(
                std::chrono::seconds(deadlock_timeout));

    Pipeline pipeline(pipeline_config);

    auto start_time = std::chrono::high_resolution_clock::now();

    // Phase 1: Discover input files (non-recursive scan of log_dir for
    // regular files ending in .pfw or .pfw.gz).
    DFTRACER_UTILS_LOG_INFO("%s", "Discovering input files...");

    std::vector<std::string> input_files;
    for (const auto& entry : fs::directory_iterator(log_dir)) {
        if (entry.is_regular_file()) {
            std::string path = entry.path().string();
            if (path.ends_with(".pfw.gz") || path.ends_with(".pfw")) {
                input_files.push_back(path);
            }
        }
    }

    if (input_files.empty()) {
        DFTRACER_UTILS_LOG_ERROR("No .pfw or .pfw.gz files found in %s",
                                 log_dir.c_str());
        return 1;
    }

    DFTRACER_UTILS_LOG_INFO("Found %zu input files", input_files.size());

    // --force: wipe the shared index store once up front so per-file index
    // builds below start clean (they themselves run with force_rebuild=false).
    // The path is derived from the first input file; presumably all inputs
    // map to the same shared store — confirm against determine_index_path.
    if (force) {
        const std::string shared_index_path =
            utilities::composites::dft::internal::determine_index_path(
                input_files.front(), index_dir);
        if (fs::exists(shared_index_path)) {
            DFTRACER_UTILS_LOG_INFO("Clearing shared index store: %s",
                                    shared_index_path.c_str());
            fs::remove_all(shared_index_path);
        }
    }

    // Phase 2: Build TaskGraph for file processing
    auto graph = TaskGraph::builder(
        {.name = "DFTracerSplit", .max_concurrency = executor_threads});

    DFTRACER_UTILS_LOG_INFO("%s", "Creating file processing tasks...");

    // Captured by pointer: input_files outlives pipeline.execute(), so the
    // pointer stays valid for the lifetime of the tasks.
    auto* input_files_ptr = &input_files;
    // One parallel task per input file: build its index, then collect its
    // metadata (event hash only computed when --verify is set).
    // NOTE(review): `force` is captured but not used inside this lambda
    // (both builders pass with_force_rebuild(false)) — the upfront
    // remove_all above appears to stand in for it; confirm the capture is
    // intentionally redundant.
    auto file_metadata = graph.parallel<Metadata>(
        input_files.size(),
        [input_files_ptr, checkpoint_size, force, index_dir, verify](
            CoroScope&, std::size_t idx) -> coro::CoroTask<Metadata> {
            const auto& file_path = (*input_files_ptr)[idx];

            // Determine index path
            std::string index_path =
                utilities::composites::dft::internal::determine_index_path(
                    file_path, index_dir);

            // Build index
            auto idx_input =
                utilities::indexer::IndexBuildConfig::for_file(file_path)
                    .with_checkpoint_size(checkpoint_size)
                    .with_force_rebuild(false)
                    .with_index_dir(index_dir);
            co_await utilities::indexer::IndexBuilderUtility{}.process(
                idx_input);

            // Collect metadata
            auto meta_input =
                utilities::composites::dft::MetadataCollectorUtilityInput::
                    from_file(file_path)
                        .with_checkpoint_size(checkpoint_size)
                        .with_force_rebuild(false)
                        .with_index(index_path)
                        .with_compute_hash(verify);

            co_return co_await utilities::composites::dft::
                MetadataCollectorUtility{}
                    .process(meta_input);
        },
        {.name = "ProcessFile"});

    DFTRACER_UTILS_LOG_INFO("%s", "Creating chunk mapping task...");

    // Reduce all per-file metadata into a single list of chunk manifests
    // sized by --chunk-size (split_every with the full file count makes
    // this a single all-at-once reduction).
    auto manifests_group = graph.reduce<std::vector<ChunkManifest>>(
        file_metadata, split_every{input_files.size()},
        [chunk_size_mb](CoroScope&, std::vector<Metadata> all_metadata)
            -> coro::CoroTask<std::vector<ChunkManifest>> {
            DFTRACER_UTILS_LOG_INFO("Creating chunk mappings from %zu files...",
                                    all_metadata.size());

            utilities::composites::dft::ChunkManifestMapperUtility mapper;
            auto mapper_input =
                utilities::composites::dft::ChunkManifestMapperUtilityInput::
                    from_metadata(all_metadata)
                        .with_target_size(static_cast<double>(chunk_size_mb));

            auto manifests = co_await mapper.process(mapper_input);
            DFTRACER_UTILS_LOG_INFO("Created %zu chunks", manifests.size());
            co_return manifests;
        },
        {.name = "CreateManifests"});

    DFTRACER_UTILS_LOG_INFO("%s", "Creating extraction task...");

    using ExtractChunksOutput = std::vector<ExtractResult>;

    // Captured by pointer for the same lifetime reason as input_files_ptr.
    auto* app_name_ptr = &app_name;
    auto* output_dir_ptr = &output_dir;

    // Single task that fans out one spawned coroutine per chunk, throttled
    // by a channel of permits so at most 2 * executor_threads extractions
    // are in flight at once.
    auto task_extract_chunks = make_task(
        [app_name_ptr, output_dir_ptr, compress, verify, executor_threads](
            CoroScope& scope, std::vector<ChunkManifest> manifests)
            -> coro::CoroTask<ExtractChunksOutput> {
            DFTRACER_UTILS_LOG_INFO("Extracting %zu chunks in parallel...",
                                    manifests.size());

            // Pre-fill the permit channel; each worker takes one permit
            // before extracting and returns it when done (even on throw).
            auto permits = coro::make_channel<bool>(executor_threads * 2);
            for (std::size_t i = 0; i < executor_threads * 2; ++i) {
                permits->try_send(true);
            }

            std::vector<coro::SpawnFuture<ExtractResult>> futures;
            futures.reserve(manifests.size());

            for (std::size_t i = 0; i < manifests.size(); ++i) {
                // Chunk indices are 1-based in the output naming.
                auto input = ExtractInput::from_manifest(
                                 static_cast<int>(i + 1), manifests[i])
                                 .with_output_dir(*output_dir_ptr)
                                 .with_app_name(*app_name_ptr)
                                 .with_compression(compress)
                                 .with_compute_hash(verify);

                futures.push_back(scope.spawn(
                    [input = std::move(input),
                     permits](CoroScope& s) -> coro::CoroTask<ExtractResult> {
                        co_await s.receive(permits);
                        try {
                            utilities::composites::dft::ChunkExtractorUtility
                                extractor;
                            auto result = co_await extractor.process(input);
                            permits->try_send(true);
                            co_return result;
                        } catch (...) {
                            // Return the permit before propagating so the
                            // remaining workers are not starved.
                            permits->try_send(true);
                            throw;
                        }
                    }));
            }

            ExtractChunksOutput results;
            results.reserve(futures.size());
            for (auto& future : futures) {
                results.push_back(co_await future);
            }

            // Sort by chunk index
            std::sort(results.begin(), results.end(),
                      [](const ExtractResult& a, const ExtractResult& b) {
                          return a.chunk_index < b.chunk_index;
                      });

            co_return results;
        },
        "ExtractChunks");

    task_extract_chunks->depends_on(manifests_group.task());
    graph.add(task_extract_chunks);

    // Phase 3: Optional verification
    std::shared_ptr<Task> final_task = task_extract_chunks;
    std::shared_ptr<Task> task_verify_chunks = nullptr;

    if (verify) {
        DFTRACER_UTILS_LOG_INFO("%s", "Configuring verification...");

        // Typed bundle the combiner below assembles from parent outputs.
        struct VerifyInput {
            ExtractChunksOutput chunks;
            std::vector<Metadata> all_metadata;
        };

        // Verification task receives both extraction results and metadata.
        // Both are passed via combiner so the scheduler keeps parent
        // results alive until this task consumes them.
        task_verify_chunks = make_task(
            [](CoroScope&, const VerifyInput& input)
                -> coro::CoroTask<
                    utilities::composites::ChunkVerificationUtilityOutput> {
                // Sum output hashes from extraction results
                std::size_t output_hash = 0;
                for (const auto& chunk : input.chunks) {
                    output_hash += chunk.event_hash;
                }

                // Sum input hashes from metadata (computed during collection)
                std::size_t input_hash = 0;
                for (const auto& meta : input.all_metadata) {
                    if (!meta.success) continue;
                    input_hash += meta.event_hash;
                }

                co_return utilities::composites::
                    ChunkVerificationUtilityOutput::success(
                        static_cast<std::uint64_t>(input_hash),
                        static_cast<std::uint64_t>(output_hash));
            },
            "VerifyChunks");

        // Depend on both extract results and metadata tasks.
        // The combiner collects parent outputs into the typed struct.
        task_verify_chunks->depends_on(task_extract_chunks);
        for (const auto& meta_task : file_metadata.tasks()) {
            task_verify_chunks->depends_on(meta_task);
        }

        // Combiner input order matches the depends_on order above:
        // extract task first, then one Metadata per file task.
        task_verify_chunks->with_combiner(
            [](const std::vector<std::any>& inputs) -> std::any {
                // inputs[0] = ExtractChunksOutput (from extract task)
                // inputs[1..N] = Metadata (from each metadata task)
                auto chunks = std::any_cast<ExtractChunksOutput>(inputs[0]);

                std::vector<Metadata> all_metadata;
                all_metadata.reserve(inputs.size() - 1);
                for (std::size_t i = 1; i < inputs.size(); ++i) {
                    all_metadata.push_back(std::any_cast<Metadata>(inputs[i]));
                }

                VerifyInput vi{std::move(chunks), std::move(all_metadata)};
                return std::make_any<VerifyInput>(std::move(vi));
            });

        graph.add(task_verify_chunks);
        final_task = task_verify_chunks;
    }

    // Phase 4: Execute Pipeline
    DFTRACER_UTILS_LOG_INFO("%s", "Executing pipeline...");

    pipeline.set_source(file_metadata.tasks());
    pipeline.set_destination(final_task);
    pipeline.execute();

    // Get results from the destination task only (intermediate task values
    // are released after pipeline execution)
    auto end_time = std::chrono::high_resolution_clock::now();
    std::chrono::duration<double, std::milli> duration = end_time - start_time;

    std::printf("\n");
    std::printf("==========================================\n");
    std::printf("Split Results\n");
    std::printf("==========================================\n");
    std::printf("  Execution time: %.2f seconds\n", duration.count() / 1000.0);
    std::printf("  Input: %zu files\n", input_files.size());

    int exit_code = 0;

    if (verify && task_verify_chunks) {
        // Verification passed iff the summed input and output event hashes
        // agree. Hash sums are order-insensitive by construction (addition).
        auto verify_result =
            task_verify_chunks
                ->get<utilities::composites::ChunkVerificationUtilityOutput>();

        if (verify_result.input_hash == verify_result.output_hash) {
            std::printf(
                "  Verification: PASSED - all events present in output\n");
        } else {
            std::printf("  Verification: FAILED - event mismatch detected\n");
            exit_code = 1;
        }
        std::printf("    Input hash:  0x%016" PRIx64 "\n",
                    verify_result.input_hash);
        std::printf("    Output hash: 0x%016" PRIx64 "\n",
                    verify_result.output_hash);
    } else {
        // Without verification: extract task IS the destination, safe to read
        auto extraction_results =
            task_extract_chunks->get<ExtractChunksOutput>();

        std::size_t successful_chunks = 0;
        std::size_t total_events = 0;

        for (const auto& result : extraction_results) {
            if (result.success) {
                successful_chunks++;
                total_events += result.events;
            } else {
                DFTRACER_UTILS_LOG_ERROR("Failed to create chunk %d",
                                         result.chunk_index);
            }
        }

        std::printf("  Output: %zu/%zu chunks, %zu events\n", successful_chunks,
                    extraction_results.size(), total_events);

        // Any failed chunk makes the whole run fail.
        if (successful_chunks != extraction_results.size()) {
            exit_code = 1;
        }
    }

    std::printf("==========================================\n");

    // Cleanup temporary index directory if created
    if (!temp_index_dir.empty() && fs::exists(temp_index_dir)) {
        DFTRACER_UTILS_LOG_INFO("Cleaning up temporary index directory: %s",
                                temp_index_dir.c_str());
        fs::remove_all(temp_index_dir);
    }

    return exit_code;
}
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc