
STEllAR-GROUP / hpx / build #853 (push, StellarBot)
19 Dec 2022 01:01AM UTC. Coverage: 86.287% (+0.4%), up from 85.912%.

Merge #6109: Modernize serialization module r=hkaiser a=hkaiser

- flyby: separate serialization of Boost types

Working towards https://github.com/STEllAR-GROUP/hpx/issues/5497

Co-authored-by: Hartmut Kaiser <hartmut.kaiser@gmail.com>

53 of 53 new or added lines in 6 files covered (100.0%).
173939 of 201582 relevant lines covered (86.29%).
1931657.12 hits per line.

Source file: /libs/core/threadmanager/src/threadmanager.cpp (86.09% covered)
//  Copyright (c) 2007-2017 Hartmut Kaiser
//  Copyright (c)      2011 Bryce Lelbach, Katelyn Kufahl
//  Copyright (c) 2008-2009 Chirag Dekate, Anshul Tandon
//  Copyright (c) 2015 Patricia Grubel
//  Copyright (c) 2017 Shoshana Jakobovits
//
//  SPDX-License-Identifier: BSL-1.0
//  Distributed under the Boost Software License, Version 1.0. (See accompanying
//  file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)

#include <hpx/config.hpp>
#include <hpx/assert.hpp>
#include <hpx/async_combinators/wait_all.hpp>
#include <hpx/execution_base/this_thread.hpp>
#include <hpx/futures/future.hpp>
#include <hpx/hardware/timestamp.hpp>
#include <hpx/modules/errors.hpp>
#include <hpx/modules/logging.hpp>
#include <hpx/modules/schedulers.hpp>
#include <hpx/modules/threadmanager.hpp>
#include <hpx/resource_partitioner/detail/partitioner.hpp>
#include <hpx/runtime_configuration/runtime_configuration.hpp>
#include <hpx/thread_pool_util/thread_pool_suspension_helpers.hpp>
#include <hpx/thread_pools/scheduled_thread_pool.hpp>
#include <hpx/threading_base/set_thread_state.hpp>
#include <hpx/threading_base/thread_data.hpp>
#include <hpx/threading_base/thread_helpers.hpp>
#include <hpx/threading_base/thread_init_data.hpp>
#include <hpx/threading_base/thread_queue_init_parameters.hpp>
#include <hpx/topology/topology.hpp>
#include <hpx/type_support/unused.hpp>
#include <hpx/util/get_entry_as.hpp>

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <functional>
#include <iosfwd>
#include <limits>
#include <memory>
#include <mutex>
#include <numeric>
#include <string>
#include <utility>
#include <vector>

namespace hpx { namespace threads {
    namespace detail {
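        // Helper used by the priority schedulers in create_pools() below: a
        // pool cannot be configured with more high-priority queues than it
        // has worker threads.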
        void check_num_high_priority_queues(
            std::size_t num_threads, std::size_t num_high_priority_queues)
        {
            if (num_high_priority_queues > num_threads)
            {
                throw hpx::detail::command_line_error(
                    "Invalid command line option: "
                    "number of high priority threads ("
                    "--hpx:high-priority-threads), should not be larger "
                    "than number of threads (--hpx:threads)");
            }
        }
    }    // namespace detail

    ///////////////////////////////////////////////////////////////////////////
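    // The constructor wires up thread-local storage and processing-unit
    // assignment: each worker thread registers/unregisters itself with the
    // resource partitioner through the notifier callbacks installed here.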
    threadmanager::threadmanager(hpx::util::runtime_configuration& rtcfg,
#ifdef HPX_HAVE_TIMER_POOL
        util::io_service_pool& timer_pool,
#endif
        notification_policy_type& notifier,
        detail::network_background_callback_type network_background_callback)
      : rtcfg_(rtcfg)
#ifdef HPX_HAVE_TIMER_POOL
      , timer_pool_(timer_pool)
#endif
      , notifier_(notifier)
      , network_background_callback_(network_background_callback)
    {
        using placeholders::_1;
        using placeholders::_3;

        // Add callbacks local to threadmanager.
        notifier.add_on_start_thread_callback(
            hpx::bind(&threadmanager::init_tss, this, _1));
        notifier.add_on_stop_thread_callback(
            hpx::bind(&threadmanager::deinit_tss, this));

        auto& rp = hpx::resource::get_partitioner();
        notifier.add_on_start_thread_callback(hpx::bind(
            &resource::detail::partitioner::assign_pu, std::ref(rp), _3, _1));
        notifier.add_on_stop_thread_callback(hpx::bind(
            &resource::detail::partitioner::unassign_pu, std::ref(rp), _3, _1));
    }

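    // create_pools() instantiates one thread pool per pool configured in the
    // resource partitioner: it first translates the runtime configuration
    // into thread-queue parameters, then builds a scheduler and a
    // scheduled_thread_pool for each configured scheduling policy.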
    void threadmanager::create_pools()
    {
        auto& rp = hpx::resource::get_partitioner();
        size_t num_pools = rp.get_num_pools();
        std::size_t thread_offset = 0;

        std::size_t max_background_threads =
            hpx::util::get_entry_as<std::size_t>(rtcfg_,
                "hpx.max_background_threads",
                (std::numeric_limits<std::size_t>::max)());
        std::size_t const max_idle_loop_count =
            hpx::util::get_entry_as<std::int64_t>(
                rtcfg_, "hpx.max_idle_loop_count", HPX_IDLE_LOOP_COUNT_MAX);
        std::size_t const max_busy_loop_count =
            hpx::util::get_entry_as<std::int64_t>(
                rtcfg_, "hpx.max_busy_loop_count", HPX_BUSY_LOOP_COUNT_MAX);

        std::int64_t const max_thread_count =
            hpx::util::get_entry_as<std::int64_t>(rtcfg_,
                "hpx.thread_queue.max_thread_count",
                HPX_THREAD_QUEUE_MAX_THREAD_COUNT);
        std::int64_t const min_tasks_to_steal_pending =
            hpx::util::get_entry_as<std::int64_t>(rtcfg_,
                "hpx.thread_queue.min_tasks_to_steal_pending",
                HPX_THREAD_QUEUE_MIN_TASKS_TO_STEAL_PENDING);
        std::int64_t const min_tasks_to_steal_staged =
            hpx::util::get_entry_as<std::int64_t>(rtcfg_,
                "hpx.thread_queue.min_tasks_to_steal_staged",
                HPX_THREAD_QUEUE_MIN_TASKS_TO_STEAL_STAGED);
        std::int64_t const min_add_new_count =
            hpx::util::get_entry_as<std::int64_t>(rtcfg_,
                "hpx.thread_queue.min_add_new_count",
                HPX_THREAD_QUEUE_MIN_ADD_NEW_COUNT);
        std::int64_t const max_add_new_count =
            hpx::util::get_entry_as<std::int64_t>(rtcfg_,
                "hpx.thread_queue.max_add_new_count",
                HPX_THREAD_QUEUE_MAX_ADD_NEW_COUNT);
        std::int64_t const min_delete_count =
            hpx::util::get_entry_as<std::int64_t>(rtcfg_,
                "hpx.thread_queue.min_delete_count",
                HPX_THREAD_QUEUE_MIN_DELETE_COUNT);
        std::int64_t const max_delete_count =
            hpx::util::get_entry_as<std::int64_t>(rtcfg_,
                "hpx.thread_queue.max_delete_count",
                HPX_THREAD_QUEUE_MAX_DELETE_COUNT);
        std::int64_t const max_terminated_threads =
            hpx::util::get_entry_as<std::int64_t>(rtcfg_,
                "hpx.thread_queue.max_terminated_threads",
                HPX_THREAD_QUEUE_MAX_TERMINATED_THREADS);
        std::int64_t const init_threads_count =
            hpx::util::get_entry_as<std::int64_t>(rtcfg_,
                "hpx.thread_queue.init_threads_count",
                HPX_THREAD_QUEUE_INIT_THREADS_COUNT);
        double const max_idle_backoff_time = hpx::util::get_entry_as<double>(
            rtcfg_, "hpx.max_idle_backoff_time", HPX_IDLE_BACKOFF_TIME_MAX);

        std::ptrdiff_t small_stacksize =
            rtcfg_.get_stack_size(thread_stacksize::small_);
        std::ptrdiff_t medium_stacksize =
            rtcfg_.get_stack_size(thread_stacksize::medium);
        std::ptrdiff_t large_stacksize =
            rtcfg_.get_stack_size(thread_stacksize::large);
        std::ptrdiff_t huge_stacksize =
            rtcfg_.get_stack_size(thread_stacksize::huge);

        policies::thread_queue_init_parameters thread_queue_init(
            max_thread_count, min_tasks_to_steal_pending,
            min_tasks_to_steal_staged, min_add_new_count, max_add_new_count,
            min_delete_count, max_delete_count, max_terminated_threads,
            init_threads_count, max_idle_backoff_time, small_stacksize,
            medium_stacksize, large_stacksize, huge_stacksize);
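
        // All tunables above are read from the runtime configuration and fall
        // back to compile-time defaults; they can be overridden at startup,
        // e.g. (illustrative values only):
        //   ./app --hpx:ini=hpx.thread_queue.max_thread_count=2000
        //         --hpx:ini=hpx.max_idle_backoff_time=100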

        if (!rtcfg_.enable_networking())
        {
            max_background_threads = 0;
        }

        // instantiate the pools
        for (size_t i = 0; i != num_pools; i++)
        {
171
            std::string name = rp.get_pool_name(i);
1,558✔
172
            resource::scheduling_policy sched_type = rp.which_scheduler(name);
1,558✔
173
            std::size_t num_threads_in_pool = rp.get_num_threads(i);
1,558✔
174
            policies::scheduler_mode scheduler_mode = rp.get_scheduler_mode(i);
1,558✔
175

176
            // make sure the first thread-pool that gets instantiated is the default one
177
            if (i == 0)
1,558✔
178
            {
179
                if (name != rp.get_default_pool_name())
1,216✔
180
                {
181
                    throw std::invalid_argument("Trying to instantiate pool " +
×
182
                        name +
×
183
                        " as first thread pool, but first thread pool must "
184
                        "be named " +
×
185
                        rp.get_default_pool_name());
×
186
                }
187
            }
1,216✔

            thread_pool_init_parameters thread_pool_init(name, i,
                scheduler_mode, num_threads_in_pool, thread_offset, notifier_,
                rp.get_affinity_data(), network_background_callback_,
                max_background_threads, max_idle_loop_count,
                max_busy_loop_count);

            std::size_t numa_sensitive = hpx::util::get_entry_as<std::size_t>(
                rtcfg_, "hpx.numa_sensitive", 0);

            switch (sched_type)
            {
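            // One case per resource::scheduling_policy; each case builds a
            // scheduler, applies the configured scheduler mode, wraps the
            // scheduler in a scheduled_thread_pool, and appends the pool to
            // pools_.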
            case resource::user_defined:
            {
                auto pool_func = rp.get_pool_creator(i);
                std::unique_ptr<thread_pool_base> pool(
                    pool_func(thread_pool_init, thread_queue_init));
                pools_.push_back(HPX_MOVE(pool));
                break;
            }
            case resource::unspecified:
            {
                throw std::invalid_argument(
                    "cannot instantiate a thread-manager if the thread-pool " +
                    name + " has an unspecified scheduler type");
            }
            case resource::local:
            {
                // instantiate the scheduler
                using local_sched_type =
                    hpx::threads::policies::local_queue_scheduler<>;

                local_sched_type::init_parameter_type init(
                    thread_pool_init.num_threads_,
                    thread_pool_init.affinity_data_, thread_queue_init,
                    "core-local_queue_scheduler");

                std::unique_ptr<local_sched_type> sched(
                    new local_sched_type(init));

                // set the default scheduler flags
                sched->set_scheduler_mode(thread_pool_init.mode_);
                // conditionally set/unset this flag
                sched->update_scheduler_mode(
                    policies::scheduler_mode::enable_stealing_numa,
                    !numa_sensitive);

                // instantiate the pool
                std::unique_ptr<thread_pool_base> pool(
                    new hpx::threads::detail::scheduled_thread_pool<
                        local_sched_type>(HPX_MOVE(sched), thread_pool_init));
                pools_.push_back(HPX_MOVE(pool));
                break;
            }
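
            // The priority-scheduler cases below additionally read
            // "hpx.thread_queue.high_priority_queues" (defaulting to the
            // number of worker threads in the pool) and validate it with
            // detail::check_num_high_priority_queues().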
            case resource::local_priority_fifo:
            {
                // set parameters for scheduler and pool instantiation and
                // perform compatibility checks
                std::size_t num_high_priority_queues =
                    hpx::util::get_entry_as<std::size_t>(rtcfg_,
                        "hpx.thread_queue.high_priority_queues",
                        thread_pool_init.num_threads_);
                detail::check_num_high_priority_queues(
                    thread_pool_init.num_threads_, num_high_priority_queues);

                // instantiate the scheduler
                using local_sched_type =
                    hpx::threads::policies::local_priority_queue_scheduler<
                        std::mutex, hpx::threads::policies::lockfree_fifo>;

                local_sched_type::init_parameter_type init(
                    thread_pool_init.num_threads_,
                    thread_pool_init.affinity_data_, num_high_priority_queues,
                    thread_queue_init, "core-local_priority_queue_scheduler");

                std::unique_ptr<local_sched_type> sched(
                    new local_sched_type(init));

                // set the default scheduler flags
                sched->set_scheduler_mode(thread_pool_init.mode_);
                // conditionally set/unset this flag
                sched->update_scheduler_mode(
                    policies::scheduler_mode::enable_stealing_numa,
                    !numa_sensitive);

                // instantiate the pool
                std::unique_ptr<thread_pool_base> pool(
                    new hpx::threads::detail::scheduled_thread_pool<
                        local_sched_type>(HPX_MOVE(sched), thread_pool_init));
                pools_.push_back(HPX_MOVE(pool));

                break;
            }

            case resource::local_priority_lifo:
            {
#if defined(HPX_HAVE_CXX11_STD_ATOMIC_128BIT)
                // set parameters for scheduler and pool instantiation and
                // perform compatibility checks
                std::size_t num_high_priority_queues =
                    hpx::util::get_entry_as<std::size_t>(rtcfg_,
                        "hpx.thread_queue.high_priority_queues",
                        thread_pool_init.num_threads_);
                detail::check_num_high_priority_queues(
                    thread_pool_init.num_threads_, num_high_priority_queues);

                // instantiate the scheduler
                using local_sched_type =
                    hpx::threads::policies::local_priority_queue_scheduler<
                        std::mutex, hpx::threads::policies::lockfree_lifo>;

                local_sched_type::init_parameter_type init(
                    thread_pool_init.num_threads_,
                    thread_pool_init.affinity_data_, num_high_priority_queues,
                    thread_queue_init, "core-local_priority_queue_scheduler");

                std::unique_ptr<local_sched_type> sched(
                    new local_sched_type(init));

                // set the default scheduler flags
                sched->set_scheduler_mode(thread_pool_init.mode_);
                // conditionally set/unset this flag
                sched->update_scheduler_mode(
                    policies::scheduler_mode::enable_stealing_numa,
                    !numa_sensitive);

                // instantiate the pool
                std::unique_ptr<thread_pool_base> pool(
                    new hpx::threads::detail::scheduled_thread_pool<
                        local_sched_type>(HPX_MOVE(sched), thread_pool_init));
                pools_.push_back(HPX_MOVE(pool));
#else
                throw hpx::detail::command_line_error(
                    "Command line option --hpx:queuing=local-priority-lifo "
                    "is not configured in this build. Please make sure 128bit "
                    "atomics are available.");
#endif
                break;
            }

            case resource::static_:
            {
                // instantiate the scheduler
                using local_sched_type =
                    hpx::threads::policies::static_queue_scheduler<>;

                local_sched_type::init_parameter_type init(
                    thread_pool_init.num_threads_,
                    thread_pool_init.affinity_data_, thread_queue_init,
                    "core-static_queue_scheduler");

                std::unique_ptr<local_sched_type> sched(
                    new local_sched_type(init));

                // set the default scheduler flags
                sched->set_scheduler_mode(thread_pool_init.mode_);
                // conditionally set/unset this flag
                sched->update_scheduler_mode(
                    policies::scheduler_mode::enable_stealing_numa,
                    !numa_sensitive);

                // instantiate the pool
                std::unique_ptr<thread_pool_base> pool(
                    new hpx::threads::detail::scheduled_thread_pool<
                        local_sched_type>(HPX_MOVE(sched), thread_pool_init));
                pools_.push_back(HPX_MOVE(pool));
                break;
            }

            case resource::static_priority:
            {
                // set parameters for scheduler and pool instantiation and
                // perform compatibility checks
                std::size_t num_high_priority_queues =
                    hpx::util::get_entry_as<std::size_t>(rtcfg_,
                        "hpx.thread_queue.high_priority_queues",
                        thread_pool_init.num_threads_);
                detail::check_num_high_priority_queues(
                    thread_pool_init.num_threads_, num_high_priority_queues);

                // instantiate the scheduler
                using local_sched_type =
                    hpx::threads::policies::static_priority_queue_scheduler<>;

                local_sched_type::init_parameter_type init(
                    thread_pool_init.num_threads_,
                    thread_pool_init.affinity_data_, num_high_priority_queues,
                    thread_queue_init, "core-static_priority_queue_scheduler");

                std::unique_ptr<local_sched_type> sched(
                    new local_sched_type(init));

                // set the default scheduler flags
                sched->set_scheduler_mode(thread_pool_init.mode_);
                // conditionally set/unset this flag
                sched->update_scheduler_mode(
                    policies::scheduler_mode::enable_stealing_numa,
                    !numa_sensitive);

                // instantiate the pool
                std::unique_ptr<thread_pool_base> pool(
                    new hpx::threads::detail::scheduled_thread_pool<
                        local_sched_type>(HPX_MOVE(sched), thread_pool_init));
                pools_.push_back(HPX_MOVE(pool));
                break;
            }

            case resource::abp_priority_fifo:
            {
#if defined(HPX_HAVE_CXX11_STD_ATOMIC_128BIT)
                // set parameters for scheduler and pool instantiation and
                // perform compatibility checks
                std::size_t num_high_priority_queues =
                    hpx::util::get_entry_as<std::size_t>(rtcfg_,
                        "hpx.thread_queue.high_priority_queues",
                        thread_pool_init.num_threads_);
                detail::check_num_high_priority_queues(
                    thread_pool_init.num_threads_, num_high_priority_queues);

                // instantiate the scheduler
                using local_sched_type =
                    hpx::threads::policies::local_priority_queue_scheduler<
                        std::mutex, hpx::threads::policies::lockfree_fifo>;

                local_sched_type::init_parameter_type init(
                    thread_pool_init.num_threads_,
                    thread_pool_init.affinity_data_, num_high_priority_queues,
                    thread_queue_init,
                    "core-abp_fifo_priority_queue_scheduler");

                std::unique_ptr<local_sched_type> sched(
                    new local_sched_type(init));

                // set the default scheduler flags
                sched->set_scheduler_mode(thread_pool_init.mode_);
                // conditionally set/unset this flag
                sched->update_scheduler_mode(
                    policies::scheduler_mode::enable_stealing_numa,
                    !numa_sensitive);

                // instantiate the pool
                std::unique_ptr<thread_pool_base> pool(
                    new hpx::threads::detail::scheduled_thread_pool<
                        local_sched_type>(HPX_MOVE(sched), thread_pool_init));
                pools_.push_back(HPX_MOVE(pool));
#else
                throw hpx::detail::command_line_error(
                    "Command line option --hpx:queuing=abp-priority-fifo "
                    "is not configured in this build. Please make sure 128bit "
                    "atomics are available.");
#endif
                break;
            }

            case resource::abp_priority_lifo:
            {
#if defined(HPX_HAVE_CXX11_STD_ATOMIC_128BIT)
                // set parameters for scheduler and pool instantiation and
                // perform compatibility checks
                std::size_t num_high_priority_queues =
                    hpx::util::get_entry_as<std::size_t>(rtcfg_,
                        "hpx.thread_queue.high_priority_queues",
                        thread_pool_init.num_threads_);
                detail::check_num_high_priority_queues(
                    thread_pool_init.num_threads_, num_high_priority_queues);

                // instantiate the scheduler
                using local_sched_type =
                    hpx::threads::policies::local_priority_queue_scheduler<
                        std::mutex, hpx::threads::policies::lockfree_lifo>;

                local_sched_type::init_parameter_type init(
                    thread_pool_init.num_threads_,
                    thread_pool_init.affinity_data_, num_high_priority_queues,
                    thread_queue_init,
                    "core-abp_fifo_priority_queue_scheduler");

                std::unique_ptr<local_sched_type> sched(
                    new local_sched_type(init));

                // set the default scheduler flags
                sched->set_scheduler_mode(thread_pool_init.mode_);
                // conditionally set/unset this flag
                sched->update_scheduler_mode(
                    policies::scheduler_mode::enable_stealing_numa,
                    !numa_sensitive);

                // instantiate the pool
                std::unique_ptr<thread_pool_base> pool(
                    new hpx::threads::detail::scheduled_thread_pool<
                        local_sched_type>(HPX_MOVE(sched), thread_pool_init));
                pools_.push_back(HPX_MOVE(pool));
#else
                throw hpx::detail::command_line_error(
                    "Command line option --hpx:queuing=abp-priority-lifo "
                    "is not configured in this build. Please make sure 128bit "
                    "atomics are available.");
#endif
                break;
            }

            case resource::shared_priority:
            {
                // instantiate the scheduler
                using local_sched_type =
                    hpx::threads::policies::shared_priority_queue_scheduler<>;

                local_sched_type::init_parameter_type init(
                    thread_pool_init.num_threads_, {1, 1, 1},
                    thread_pool_init.affinity_data_, thread_queue_init,
                    "core-shared_priority_queue_scheduler");

                std::unique_ptr<local_sched_type> sched(
                    new local_sched_type(init));

                // set the default scheduler flags
                sched->set_scheduler_mode(thread_pool_init.mode_);
                // conditionally set/unset this flag
                sched->update_scheduler_mode(
                    policies::scheduler_mode::enable_stealing_numa,
                    !numa_sensitive);

                // instantiate the pool
                std::unique_ptr<thread_pool_base> pool(
                    new hpx::threads::detail::scheduled_thread_pool<
                        local_sched_type>(HPX_MOVE(sched), thread_pool_init));
                pools_.push_back(HPX_MOVE(pool));
                break;
            }
            }

            // update the thread_offset for the next pool
            thread_offset += num_threads_in_pool;
        }

        // fill the thread-lookup table
        for (auto& pool_iter : pools_)
        {
            std::size_t nt = rp.get_num_threads(pool_iter->get_pool_index());
            for (std::size_t i = 0; i < nt; i++)
            {
                threads_lookup_.push_back(pool_iter->get_pool_id());
            }
        }
    }
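
    // threads_lookup_ maps a global worker-thread number to the id of the
    // pool that owns it; the get_pool(std::size_t) overload below relies on
    // this table.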

    threadmanager::~threadmanager() {}

    void threadmanager::init()
    {
        auto& rp = hpx::resource::get_partitioner();
        std::size_t threads_offset = 0;

        // initialize all pools
        for (auto&& pool_iter : pools_)
        {
            std::size_t num_threads_in_pool =
                rp.get_num_threads(pool_iter->get_pool_index());
            pool_iter->init(num_threads_in_pool, threads_offset);
            threads_offset += num_threads_in_pool;
        }
    }

    void threadmanager::print_pools(std::ostream& os)
    {
        os << "The thread-manager owns " << pools_.size()    //  -V128
           << " pool(s) : \n";

        for (auto&& pool_iter : pools_)
        {
            pool_iter->print_pool(os);
        }
    }

    thread_pool_base& threadmanager::default_pool() const
    {
        HPX_ASSERT(!pools_.empty());
        return *pools_[0];
    }

    thread_pool_base& threadmanager::get_pool(
        std::string const& pool_name) const
    {
        // if the given pool_name is "default", we don't need to look for it:
        // we must always return pool 0
        if (pool_name == "default" ||
            pool_name == resource::get_partitioner().get_default_pool_name())
        {
            return default_pool();
        }

        // now check the other pools - no need to check pool 0 again, so ++begin
        auto pool = std::find_if(++pools_.begin(), pools_.end(),
            [&pool_name](pool_type const& itp) -> bool {
                return (itp->get_pool_name() == pool_name);
            });

        if (pool != pools_.end())
        {
            return **pool;
        }

        //! FIXME Add names of available pools?
        HPX_THROW_EXCEPTION(hpx::error::bad_parameter,
            "threadmanager::get_pool",
            "the resource partitioner does not own a thread pool named '{}'.\n",
            pool_name);
    }
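
    // Illustrative lookup (assumes a pool named "io" was added through the
    // resource partitioner at startup):
    //   hpx::threads::thread_pool_base& io_pool =
    //       hpx::threads::get_thread_manager().get_pool("io");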

    thread_pool_base& threadmanager::get_pool(pool_id_type const& pool_id) const
    {
        return get_pool(pool_id.name());
    }

    thread_pool_base& threadmanager::get_pool(std::size_t thread_index) const
    {
        return get_pool(threads_lookup_[thread_index]);
    }

    bool threadmanager::pool_exists(std::string const& pool_name) const
    {
        // if the given pool_name is "default", we don't need to look for it:
        // we must always return pool 0
        if (pool_name == "default" ||
            pool_name == resource::get_partitioner().get_default_pool_name())
        {
            return true;
        }

        // now check the other pools - no need to check pool 0 again, so ++begin
        auto pool = std::find_if(++pools_.begin(), pools_.end(),
            [&pool_name](pool_type const& itp) -> bool {
                return (itp->get_pool_name() == pool_name);
            });

        if (pool != pools_.end())
        {
            return true;
        }

        return false;
    }

    bool threadmanager::pool_exists(std::size_t pool_index) const
    {
        return pool_index < pools_.size();
    }

    ///////////////////////////////////////////////////////////////////////////
    std::int64_t threadmanager::get_thread_count(thread_schedule_state state,
        thread_priority priority, std::size_t num_thread, bool reset)
    {
        std::int64_t total_count = 0;
        std::lock_guard<mutex_type> lk(mtx_);

        for (auto& pool_iter : pools_)
        {
            total_count +=
                pool_iter->get_thread_count(state, priority, num_thread, reset);
        }

        return total_count;
    }
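
    // The counter getters below all follow the same pattern: take the
    // thread-manager lock, fold the per-pool value over all pools, and
    // return the sum.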

    std::int64_t threadmanager::get_idle_core_count()
    {
        std::int64_t total_count = 0;
        std::lock_guard<mutex_type> lk(mtx_);

        for (auto& pool_iter : pools_)
        {
            total_count += pool_iter->get_idle_core_count();
        }

        return total_count;
    }

    mask_type threadmanager::get_idle_core_mask()
    {
        mask_type mask = mask_type();
        resize(mask, hardware_concurrency());

        std::lock_guard<mutex_type> lk(mtx_);

        for (auto& pool_iter : pools_)
        {
            pool_iter->get_idle_core_mask(mask);
        }

        return mask;
    }

    std::int64_t threadmanager::get_background_thread_count()
    {
        std::int64_t total_count = 0;
        std::lock_guard<mutex_type> lk(mtx_);

        for (auto& pool_iter : pools_)
        {
            total_count += pool_iter->get_background_thread_count();
        }

        return total_count;
    }

    ///////////////////////////////////////////////////////////////////////////
    // Enumerate all matching threads
    bool threadmanager::enumerate_threads(
        hpx::function<bool(thread_id_type)> const& f,
        thread_schedule_state state) const
    {
        std::lock_guard<mutex_type> lk(mtx_);
        bool result = true;

        for (auto& pool_iter : pools_)
        {
            result = result && pool_iter->enumerate_threads(f, state);
        }

        return result;
    }

    ///////////////////////////////////////////////////////////////////////////
    // Abort all threads which are in suspended state. This will set
    // the state of all suspended threads to \a pending while
    // supplying the wait_abort extended state flag
    void threadmanager::abort_all_suspended_threads()
    {
        std::lock_guard<mutex_type> lk(mtx_);
        for (auto& pool_iter : pools_)
        {
            pool_iter->abort_all_suspended_threads();
        }
    }

    ///////////////////////////////////////////////////////////////////////////
    // Clean up terminated threads. This deletes all threads which
    // have been terminated but which are still held in the queue
    // of terminated threads. Some schedulers might not do anything
    // here.
    bool threadmanager::cleanup_terminated(bool delete_all)
    {
        std::lock_guard<mutex_type> lk(mtx_);
        bool result = true;

        for (auto& pool_iter : pools_)
        {
            result = pool_iter->cleanup_terminated(delete_all) && result;
        }

        return result;
    }
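
    // register_thread()/register_work() create new HPX threads on the pool
    // that owns the calling HPX thread, if any; calls arriving from outside
    // the runtime fall back to the default pool.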

    ///////////////////////////////////////////////////////////////////////////
    void threadmanager::register_thread(
        thread_init_data& data, thread_id_ref_type& id, error_code& ec)
    {
        thread_pool_base* pool = nullptr;
        auto thrd_data = get_self_id_data();
        if (thrd_data)
        {
            pool = thrd_data->get_scheduler_base()->get_parent_pool();
        }
        else
        {
            pool = &default_pool();
        }
        pool->create_thread(data, id, ec);
    }

    ///////////////////////////////////////////////////////////////////////////
    thread_id_ref_type threadmanager::register_work(
        thread_init_data& data, error_code& ec)
    {
        thread_pool_base* pool = nullptr;
        auto thrd_data = get_self_id_data();
        if (thrd_data)
        {
            pool = thrd_data->get_scheduler_base()->get_parent_pool();
        }
        else
        {
            pool = &default_pool();
        }
        return pool->create_work(data, ec);
    }

    ///////////////////////////////////////////////////////////////////////////
    constexpr std::size_t all_threads = std::size_t(-1);

    std::int64_t threadmanager::get_queue_length(bool reset)
    {
        std::int64_t result = 0;
        for (auto const& pool_iter : pools_)
            result += pool_iter->get_queue_length(all_threads, reset);
        return result;
    }
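
    // Most of the remaining counters are only compiled in when the
    // corresponding HPX_HAVE_* feature flags are enabled at build time.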

#ifdef HPX_HAVE_THREAD_QUEUE_WAITTIME
    std::int64_t threadmanager::get_average_thread_wait_time(bool reset)
    {
        std::int64_t result = 0;
        for (auto const& pool_iter : pools_)
            result +=
                pool_iter->get_average_thread_wait_time(all_threads, reset);
        return result;
    }

    std::int64_t threadmanager::get_average_task_wait_time(bool reset)
    {
        std::int64_t result = 0;
        for (auto const& pool_iter : pools_)
            result += pool_iter->get_average_task_wait_time(all_threads, reset);
        return result;
    }
#endif

    std::int64_t threadmanager::get_cumulative_duration(bool reset)
    {
        std::int64_t result = 0;
        for (auto const& pool_iter : pools_)
            result += pool_iter->get_cumulative_duration(all_threads, reset);
        return result;
    }

#if defined(HPX_HAVE_BACKGROUND_THREAD_COUNTERS) &&                            \
    defined(HPX_HAVE_THREAD_IDLE_RATES)
    std::int64_t threadmanager::get_background_work_duration(bool reset)
    {
        std::int64_t result = 0;
        for (auto const& pool_iter : pools_)
            result +=
                pool_iter->get_background_work_duration(all_threads, reset);
        return result;
    }

    std::int64_t threadmanager::get_background_overhead(bool reset)
    {
        std::int64_t result = 0;
        for (auto const& pool_iter : pools_)
            result += pool_iter->get_background_overhead(all_threads, reset);
        return result;
    }

    std::int64_t threadmanager::get_background_send_duration(bool reset)
    {
        std::int64_t result = 0;
        for (auto const& pool_iter : pools_)
            result +=
                pool_iter->get_background_send_duration(all_threads, reset);
        return result;
    }

    std::int64_t threadmanager::get_background_send_overhead(bool reset)
    {
        std::int64_t result = 0;
        for (auto const& pool_iter : pools_)
            result +=
                pool_iter->get_background_send_overhead(all_threads, reset);
        return result;
    }

    std::int64_t threadmanager::get_background_receive_duration(bool reset)
    {
        std::int64_t result = 0;
        for (auto const& pool_iter : pools_)
            result +=
                pool_iter->get_background_receive_duration(all_threads, reset);
        return result;
    }

    std::int64_t threadmanager::get_background_receive_overhead(bool reset)
    {
        std::int64_t result = 0;
        for (auto const& pool_iter : pools_)
            result +=
                pool_iter->get_background_receive_overhead(all_threads, reset);
        return result;
    }
#endif    // HPX_HAVE_BACKGROUND_THREAD_COUNTERS

#ifdef HPX_HAVE_THREAD_IDLE_RATES
    std::int64_t threadmanager::avg_idle_rate(bool reset)
    {
        std::int64_t result = 0;
        for (auto const& pool_iter : pools_)
            result += pool_iter->avg_idle_rate(all_threads, reset);
        return result;
    }

#ifdef HPX_HAVE_THREAD_CREATION_AND_CLEANUP_RATES
    std::int64_t threadmanager::avg_creation_idle_rate(bool reset)
    {
        std::int64_t result = 0;
        for (auto const& pool_iter : pools_)
            result += pool_iter->avg_creation_idle_rate(all_threads, reset);
        return result;
    }

    std::int64_t threadmanager::avg_cleanup_idle_rate(bool reset)
    {
        std::int64_t result = 0;
        for (auto const& pool_iter : pools_)
            result += pool_iter->avg_cleanup_idle_rate(all_threads, reset);
        return result;
    }
#endif
#endif

#ifdef HPX_HAVE_THREAD_CUMULATIVE_COUNTS
    std::int64_t threadmanager::get_executed_threads(bool reset)
    {
        std::int64_t result = 0;
        for (auto const& pool_iter : pools_)
            result += pool_iter->get_executed_threads(all_threads, reset);
        return result;
    }

    std::int64_t threadmanager::get_executed_thread_phases(bool reset)
    {
        std::int64_t result = 0;
        for (auto const& pool_iter : pools_)
            result += pool_iter->get_executed_thread_phases(all_threads, reset);
        return result;
    }

#ifdef HPX_HAVE_THREAD_IDLE_RATES
    std::int64_t threadmanager::get_thread_duration(bool reset)
    {
        std::int64_t result = 0;
        for (auto const& pool_iter : pools_)
            result += pool_iter->get_thread_duration(all_threads, reset);
        return result;
    }

    std::int64_t threadmanager::get_thread_phase_duration(bool reset)
    {
        std::int64_t result = 0;
        for (auto const& pool_iter : pools_)
            result += pool_iter->get_thread_phase_duration(all_threads, reset);
        return result;
    }

    std::int64_t threadmanager::get_thread_overhead(bool reset)
    {
        std::int64_t result = 0;
        for (auto const& pool_iter : pools_)
            result += pool_iter->get_thread_overhead(all_threads, reset);
        return result;
    }

    std::int64_t threadmanager::get_thread_phase_overhead(bool reset)
    {
        std::int64_t result = 0;
        for (auto const& pool_iter : pools_)
            result += pool_iter->get_thread_phase_overhead(all_threads, reset);
        return result;
    }

    std::int64_t threadmanager::get_cumulative_thread_duration(bool reset)
    {
        std::int64_t result = 0;
        for (auto const& pool_iter : pools_)
            result +=
                pool_iter->get_cumulative_thread_duration(all_threads, reset);
        return result;
    }

    std::int64_t threadmanager::get_cumulative_thread_overhead(bool reset)
    {
        std::int64_t result = 0;
        for (auto const& pool_iter : pools_)
            result +=
                pool_iter->get_cumulative_thread_overhead(all_threads, reset);
        return result;
    }
#endif
#endif

#ifdef HPX_HAVE_THREAD_STEALING_COUNTS
    std::int64_t threadmanager::get_num_pending_misses(bool reset)
    {
        std::int64_t result = 0;
        for (auto const& pool_iter : pools_)
            result += pool_iter->get_num_pending_misses(all_threads, reset);
        return result;
    }

    std::int64_t threadmanager::get_num_pending_accesses(bool reset)
    {
        std::int64_t result = 0;
        for (auto const& pool_iter : pools_)
            result += pool_iter->get_num_pending_accesses(all_threads, reset);
        return result;
    }

    std::int64_t threadmanager::get_num_stolen_from_pending(bool reset)
    {
        std::int64_t result = 0;
        for (auto const& pool_iter : pools_)
            result +=
                pool_iter->get_num_stolen_from_pending(all_threads, reset);
        return result;
    }

    std::int64_t threadmanager::get_num_stolen_from_staged(bool reset)
    {
        std::int64_t result = 0;
        for (auto const& pool_iter : pools_)
            result += pool_iter->get_num_stolen_from_staged(all_threads, reset);
        return result;
    }

    std::int64_t threadmanager::get_num_stolen_to_pending(bool reset)
    {
        std::int64_t result = 0;
        for (auto const& pool_iter : pools_)
            result += pool_iter->get_num_stolen_to_pending(all_threads, reset);
        return result;
    }

    std::int64_t threadmanager::get_num_stolen_to_staged(bool reset)
    {
        std::int64_t result = 0;
        for (auto const& pool_iter : pools_)
            result += pool_iter->get_num_stolen_to_staged(all_threads, reset);
        return result;
    }
#endif

    ///////////////////////////////////////////////////////////////////////////
    bool threadmanager::run()
    {
        std::unique_lock<mutex_type> lk(mtx_);

        // the main thread needs to have a unique thread_num
        // worker threads are numbered 0..N-1, so we can use N for this thread
        auto& rp = hpx::resource::get_partitioner();
        init_tss(rp.get_num_threads());

#ifdef HPX_HAVE_TIMER_POOL
        LTM_(info).format("run: running timer pool");
        timer_pool_.run(false);
#endif

        for (auto& pool_iter : pools_)
        {
            std::size_t num_threads_in_pool =
                rp.get_num_threads(pool_iter->get_pool_name());

            if (pool_iter->get_os_thread_count() != 0 ||
                pool_iter->has_reached_state(hpx::state::running))
            {
                return true;    // do nothing if already running
            }

            if (!pool_iter->run(lk, num_threads_in_pool))
            {
#ifdef HPX_HAVE_TIMER_POOL
                timer_pool_.stop();
#endif
                return false;
            }

            // set all states of all schedulers to "running"
            policies::scheduler_base* sched = pool_iter->get_scheduler();
            if (sched)
                sched->set_all_states(hpx::state::running);
        }

        LTM_(info).format("run: running");
        return true;
    }

    void threadmanager::stop(bool blocking)
    {
        LTM_(info).format("stop: blocking({})", blocking ? "true" : "false");

        std::unique_lock<mutex_type> lk(mtx_);
        for (auto& pool_iter : pools_)
        {
            pool_iter->stop(lk, blocking);
        }
        deinit_tss();
    }

    bool threadmanager::is_busy()
    {
        bool busy = false;
        for (auto& pool_iter : pools_)
        {
            busy = busy || pool_iter->is_busy();
        }
        return busy;
    }

1085
    bool threadmanager::is_idle()
×
1086
    {
1087
        bool idle = true;
×
1088
        for (auto& pool_iter : pools_)
×
1089
        {
1090
            idle = idle && pool_iter->is_idle();
×
1091
        }
1092
        return idle;
×
1093
    }
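
    // wait() yields the calling context until is_busy() has reported "idle"
    // for "hpx.shutdown_check_count" (default 10) consecutive polls; this is
    // used to drain remaining work from all pools during shutdown.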
    void threadmanager::wait()
    {
        std::size_t shutdown_check_count = util::get_entry_as<std::size_t>(
            rtcfg_, "hpx.shutdown_check_count", 10);
        hpx::util::detail::yield_while_count(
            [this]() { return is_busy(); }, shutdown_check_count);
    }

    void threadmanager::suspend()
    {
        wait();

        if (threads::get_self_ptr())
        {
            std::vector<hpx::future<void>> fs;

            for (auto& pool_iter : pools_)
            {
                fs.push_back(suspend_pool(*pool_iter));
            }

            hpx::wait_all(fs);
        }
        else
        {
            for (auto& pool_iter : pools_)
            {
                pool_iter->suspend_direct();
            }
        }
    }

    void threadmanager::resume()
    {
        if (threads::get_self_ptr())
        {
            std::vector<hpx::future<void>> fs;

            for (auto& pool_iter : pools_)
            {
                fs.push_back(resume_pool(*pool_iter));
            }
            hpx::wait_all(fs);
        }
        else
        {
            for (auto& pool_iter : pools_)
            {
                pool_iter->resume_direct();
            }
        }
    }
}}    // namespace hpx::threads