STEllAR-GROUP / hpx / build #862 (push by StellarBot)

10 Jan 2023 05:30PM UTC coverage: 86.582% (-0.05%) from 86.634%

Merge #6130: Remove the mutex lock in the critical path of get_partitioner. r=hkaiser a=JiakunYan

Remove the mutex lock in the critical path of hpx::resource::detail::get_partitioner. The protected
variable `partitioner_ref` is only set once during initialization, which is why the lock can be
dropped from the read path (a sketch of this pattern follows below).

Co-authored-by: Jiakun Yan <jiakunyan1998@gmail.com>

6 of 6 new or added lines in 1 file covered (100.0%)
174767 of 201851 relevant lines covered (86.58%)
2069816.07 hits per line
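The change summarized above follows a common pattern: an accessor whose target is assigned exactly
once during single-threaded startup does not need to take a mutex on every later read. The snippet
below is a minimal illustrative sketch of that pattern only, not the actual HPX implementation; the
example namespace, the set_partitioner() helper, and init_mtx are invented for illustration, and
only the names partitioner_ref and get_partitioner are taken from the PR description.

#include <cassert>
#include <mutex>

namespace example {

    struct partitioner { /* stand-in for the real partitioner type */ };

    partitioner* partitioner_ref = nullptr;    // written exactly once during startup
    std::mutex init_mtx;                       // still guards the one-time write

    // Called once during initialization, before any worker threads exist.
    void set_partitioner(partitioner* p)
    {
        std::lock_guard<std::mutex> lk(init_mtx);
        assert(partitioner_ref == nullptr);
        partitioner_ref = p;
    }

    // Hot path: no lock taken. This is safe only because the pointer is set
    // before any concurrent reader is started (thread creation establishes the
    // necessary happens-before) and is never modified afterwards.
    partitioner& get_partitioner()
    {
        assert(partitioner_ref != nullptr);
        return *partitioner_ref;
    }
}    // namespace example

The write side keeps its one-time initialization guard; only the repeated reads in the critical
path stop paying for a lock, which is what the merged change does for
hpx::resource::detail::get_partitioner.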

Source File: /libs/core/threadmanager/src/threadmanager.cpp (85.32% covered)
//  Copyright (c) 2007-2017 Hartmut Kaiser
//  Copyright (c)      2011 Bryce Lelbach, Katelyn Kufahl
//  Copyright (c) 2008-2009 Chirag Dekate, Anshul Tandon
//  Copyright (c) 2015 Patricia Grubel
//  Copyright (c) 2017 Shoshana Jakobovits
//
//  SPDX-License-Identifier: BSL-1.0
//  Distributed under the Boost Software License, Version 1.0. (See accompanying
//  file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)

#include <hpx/config.hpp>
#include <hpx/assert.hpp>
#include <hpx/async_combinators/wait_all.hpp>
#include <hpx/execution_base/this_thread.hpp>
#include <hpx/futures/future.hpp>
#include <hpx/hardware/timestamp.hpp>
#include <hpx/modules/errors.hpp>
#include <hpx/modules/logging.hpp>
#include <hpx/modules/schedulers.hpp>
#include <hpx/modules/threadmanager.hpp>
#include <hpx/resource_partitioner/detail/partitioner.hpp>
#include <hpx/runtime_configuration/runtime_configuration.hpp>
#include <hpx/thread_pool_util/thread_pool_suspension_helpers.hpp>
#include <hpx/thread_pools/scheduled_thread_pool.hpp>
#include <hpx/threading_base/set_thread_state.hpp>
#include <hpx/threading_base/thread_data.hpp>
#include <hpx/threading_base/thread_helpers.hpp>
#include <hpx/threading_base/thread_init_data.hpp>
#include <hpx/threading_base/thread_queue_init_parameters.hpp>
#include <hpx/topology/topology.hpp>
#include <hpx/type_support/unused.hpp>
#include <hpx/util/get_entry_as.hpp>

#include <cstddef>
#include <cstdint>
#include <functional>
#include <iosfwd>
#include <memory>
#include <mutex>
#include <numeric>
#include <string>
#include <utility>
#include <vector>

namespace hpx { namespace threads {
    namespace detail {
        void check_num_high_priority_queues(
            std::size_t num_threads, std::size_t num_high_priority_queues)
        {
            if (num_high_priority_queues > num_threads)
            {
                throw hpx::detail::command_line_error(
                    "Invalid command line option: "
                    "number of high priority threads ("
                    "--hpx:high-priority-threads), should not be larger "
                    "than number of threads (--hpx:threads)");
            }
        }
    }    // namespace detail

    ///////////////////////////////////////////////////////////////////////////
    threadmanager::threadmanager(hpx::util::runtime_configuration& rtcfg,
#ifdef HPX_HAVE_TIMER_POOL
        util::io_service_pool& timer_pool,
#endif
        notification_policy_type& notifier,
        detail::network_background_callback_type network_background_callback)
      : rtcfg_(rtcfg)
#ifdef HPX_HAVE_TIMER_POOL
      , timer_pool_(timer_pool)
#endif
      , notifier_(notifier)
      , network_background_callback_(network_background_callback)
    {
        using placeholders::_1;
        using placeholders::_3;

        // Add callbacks local to threadmanager.
        notifier.add_on_start_thread_callback(
            hpx::bind(&threadmanager::init_tss, this, _1));
        notifier.add_on_stop_thread_callback(
            hpx::bind(&threadmanager::deinit_tss, this));

        auto& rp = hpx::resource::get_partitioner();
        notifier.add_on_start_thread_callback(hpx::bind(
            &resource::detail::partitioner::assign_pu, std::ref(rp), _3, _1));
        notifier.add_on_stop_thread_callback(hpx::bind(
            &resource::detail::partitioner::unassign_pu, std::ref(rp), _3, _1));
    }

    void threadmanager::create_pools()
    {
        auto& rp = hpx::resource::get_partitioner();
        size_t num_pools = rp.get_num_pools();
        std::size_t thread_offset = 0;

        std::size_t max_background_threads =
            hpx::util::get_entry_as<std::size_t>(rtcfg_,
                "hpx.max_background_threads",
                (std::numeric_limits<std::size_t>::max)());
        std::size_t const max_idle_loop_count =
            hpx::util::get_entry_as<std::int64_t>(
                rtcfg_, "hpx.max_idle_loop_count", HPX_IDLE_LOOP_COUNT_MAX);
        std::size_t const max_busy_loop_count =
            hpx::util::get_entry_as<std::int64_t>(
                rtcfg_, "hpx.max_busy_loop_count", HPX_BUSY_LOOP_COUNT_MAX);

        std::int64_t const max_thread_count =
            hpx::util::get_entry_as<std::int64_t>(rtcfg_,
                "hpx.thread_queue.max_thread_count",
                HPX_THREAD_QUEUE_MAX_THREAD_COUNT);
        std::int64_t const min_tasks_to_steal_pending =
            hpx::util::get_entry_as<std::int64_t>(rtcfg_,
                "hpx.thread_queue.min_tasks_to_steal_pending",
                HPX_THREAD_QUEUE_MIN_TASKS_TO_STEAL_PENDING);
        std::int64_t const min_tasks_to_steal_staged =
            hpx::util::get_entry_as<std::int64_t>(rtcfg_,
                "hpx.thread_queue.min_tasks_to_steal_staged",
                HPX_THREAD_QUEUE_MIN_TASKS_TO_STEAL_STAGED);
        std::int64_t const min_add_new_count =
            hpx::util::get_entry_as<std::int64_t>(rtcfg_,
                "hpx.thread_queue.min_add_new_count",
                HPX_THREAD_QUEUE_MIN_ADD_NEW_COUNT);
        std::int64_t const max_add_new_count =
            hpx::util::get_entry_as<std::int64_t>(rtcfg_,
                "hpx.thread_queue.max_add_new_count",
                HPX_THREAD_QUEUE_MAX_ADD_NEW_COUNT);
        std::int64_t const min_delete_count =
            hpx::util::get_entry_as<std::int64_t>(rtcfg_,
                "hpx.thread_queue.min_delete_count",
                HPX_THREAD_QUEUE_MIN_DELETE_COUNT);
        std::int64_t const max_delete_count =
            hpx::util::get_entry_as<std::int64_t>(rtcfg_,
                "hpx.thread_queue.max_delete_count",
                HPX_THREAD_QUEUE_MAX_DELETE_COUNT);
        std::int64_t const max_terminated_threads =
            hpx::util::get_entry_as<std::int64_t>(rtcfg_,
                "hpx.thread_queue.max_terminated_threads",
                HPX_THREAD_QUEUE_MAX_TERMINATED_THREADS);
        std::int64_t const init_threads_count =
            hpx::util::get_entry_as<std::int64_t>(rtcfg_,
                "hpx.thread_queue.init_threads_count",
                HPX_THREAD_QUEUE_INIT_THREADS_COUNT);
        double const max_idle_backoff_time = hpx::util::get_entry_as<double>(
            rtcfg_, "hpx.max_idle_backoff_time", HPX_IDLE_BACKOFF_TIME_MAX);

        std::ptrdiff_t small_stacksize =
            rtcfg_.get_stack_size(thread_stacksize::small_);
        std::ptrdiff_t medium_stacksize =
            rtcfg_.get_stack_size(thread_stacksize::medium);
        std::ptrdiff_t large_stacksize =
            rtcfg_.get_stack_size(thread_stacksize::large);
        std::ptrdiff_t huge_stacksize =
            rtcfg_.get_stack_size(thread_stacksize::huge);

        policies::thread_queue_init_parameters thread_queue_init(
            max_thread_count, min_tasks_to_steal_pending,
            min_tasks_to_steal_staged, min_add_new_count, max_add_new_count,
            min_delete_count, max_delete_count, max_terminated_threads,
            init_threads_count, max_idle_backoff_time, small_stacksize,
            medium_stacksize, large_stacksize, huge_stacksize);

        if (!rtcfg_.enable_networking())
        {
            max_background_threads = 0;
        }

        // instantiate the pools
        for (size_t i = 0; i != num_pools; i++)
        {
            std::string name = rp.get_pool_name(i);
            resource::scheduling_policy sched_type = rp.which_scheduler(name);
            std::size_t num_threads_in_pool = rp.get_num_threads(i);
            policies::scheduler_mode scheduler_mode = rp.get_scheduler_mode(i);

            // make sure the first thread-pool that gets instantiated is the default one
            if (i == 0)
            {
                if (name != rp.get_default_pool_name())
                {
                    throw std::invalid_argument("Trying to instantiate pool " +
                        name +
                        " as first thread pool, but first thread pool must "
                        "be named " +
                        rp.get_default_pool_name());
                }
            }

            thread_pool_init_parameters thread_pool_init(name, i,
                scheduler_mode, num_threads_in_pool, thread_offset, notifier_,
                rp.get_affinity_data(), network_background_callback_,
                max_background_threads, max_idle_loop_count,
                max_busy_loop_count);

            std::size_t numa_sensitive = hpx::util::get_entry_as<std::size_t>(
                rtcfg_, "hpx.numa_sensitive", 0);

            switch (sched_type)
            {
            case resource::scheduling_policy::user_defined:
            {
                auto pool_func = rp.get_pool_creator(i);
                std::unique_ptr<thread_pool_base> pool(
                    pool_func(thread_pool_init, thread_queue_init));
                pools_.push_back(HPX_MOVE(pool));
                break;
            }
            case resource::scheduling_policy::unspecified:
            {
                throw std::invalid_argument(
                    "cannot instantiate a thread-manager if the thread-pool" +
                    name + " has an unspecified scheduler type");
            }
            case resource::scheduling_policy::local:
            {
                // instantiate the scheduler
                using local_sched_type =
                    hpx::threads::policies::local_queue_scheduler<>;

                local_sched_type::init_parameter_type init(
                    thread_pool_init.num_threads_,
                    thread_pool_init.affinity_data_, thread_queue_init,
                    "core-local_queue_scheduler");

                std::unique_ptr<local_sched_type> sched =
                    std::make_unique<local_sched_type>(init);

                // set the default scheduler flags
                sched->set_scheduler_mode(thread_pool_init.mode_);
                // conditionally set/unset this flag
                sched->update_scheduler_mode(
                    policies::scheduler_mode::enable_stealing_numa,
                    !numa_sensitive);

                // instantiate the pool
                std::unique_ptr<thread_pool_base> pool =
                    std::make_unique<hpx::threads::detail::
                            scheduled_thread_pool<local_sched_type>>(
                        HPX_MOVE(sched), thread_pool_init);
                pools_.push_back(HPX_MOVE(pool));
                break;
            }

            case resource::scheduling_policy::local_priority_fifo:
            {
                // set parameters for scheduler and pool instantiation and
                // perform compatibility checks
                std::size_t num_high_priority_queues =
                    hpx::util::get_entry_as<std::size_t>(rtcfg_,
                        "hpx.thread_queue.high_priority_queues",
                        thread_pool_init.num_threads_);
                detail::check_num_high_priority_queues(
                    thread_pool_init.num_threads_, num_high_priority_queues);

                // instantiate the scheduler
                using local_sched_type =
                    hpx::threads::policies::local_priority_queue_scheduler<
                        std::mutex, hpx::threads::policies::lockfree_fifo>;

                local_sched_type::init_parameter_type init(
                    thread_pool_init.num_threads_,
                    thread_pool_init.affinity_data_, num_high_priority_queues,
                    thread_queue_init, "core-local_priority_queue_scheduler");

                std::unique_ptr<local_sched_type> sched =
                    std::make_unique<local_sched_type>(init);

                // set the default scheduler flags
                sched->set_scheduler_mode(thread_pool_init.mode_);
                // conditionally set/unset this flag
                sched->update_scheduler_mode(
                    policies::scheduler_mode::enable_stealing_numa,
                    !numa_sensitive);

                // instantiate the pool
                std::unique_ptr<thread_pool_base> pool =
                    std::make_unique<hpx::threads::detail::
                            scheduled_thread_pool<local_sched_type>>(
                        HPX_MOVE(sched), thread_pool_init);
                pools_.push_back(HPX_MOVE(pool));

                break;
            }

            case resource::scheduling_policy::local_priority_lifo:
            {
#if defined(HPX_HAVE_CXX11_STD_ATOMIC_128BIT)
                // set parameters for scheduler and pool instantiation and
                // perform compatibility checks
                std::size_t num_high_priority_queues =
                    hpx::util::get_entry_as<std::size_t>(rtcfg_,
                        "hpx.thread_queue.high_priority_queues",
                        thread_pool_init.num_threads_);
                detail::check_num_high_priority_queues(
                    thread_pool_init.num_threads_, num_high_priority_queues);

                // instantiate the scheduler
                using local_sched_type =
                    hpx::threads::policies::local_priority_queue_scheduler<
                        std::mutex, hpx::threads::policies::lockfree_lifo>;

                local_sched_type::init_parameter_type init(
                    thread_pool_init.num_threads_,
                    thread_pool_init.affinity_data_, num_high_priority_queues,
                    thread_queue_init, "core-local_priority_queue_scheduler");

                std::unique_ptr<local_sched_type> sched =
                    std::make_unique<local_sched_type>(init);

                // set the default scheduler flags
                sched->set_scheduler_mode(thread_pool_init.mode_);
                // conditionally set/unset this flag
                sched->update_scheduler_mode(
                    policies::scheduler_mode::enable_stealing_numa,
                    !numa_sensitive);

                // instantiate the pool
                std::unique_ptr<thread_pool_base> pool =
                    std::make_unique<hpx::threads::detail::
                            scheduled_thread_pool<local_sched_type>>(
                        HPX_MOVE(sched), thread_pool_init);
                pools_.push_back(HPX_MOVE(pool));
#else
                throw hpx::detail::command_line_error(
                    "Command line option --hpx:queuing=local-priority-lifo "
                    "is not configured in this build. Please make sure 128bit "
                    "atomics are available.");
#endif
                break;
            }

            case resource::scheduling_policy::static_:
            {
                // instantiate the scheduler
                using local_sched_type =
                    hpx::threads::policies::static_queue_scheduler<>;

                local_sched_type::init_parameter_type init(
                    thread_pool_init.num_threads_,
                    thread_pool_init.affinity_data_, thread_queue_init,
                    "core-static_queue_scheduler");

                std::unique_ptr<local_sched_type> sched =
                    std::make_unique<local_sched_type>(init);

                // set the default scheduler flags
                sched->set_scheduler_mode(thread_pool_init.mode_);
                // conditionally set/unset this flag
                sched->update_scheduler_mode(
                    policies::scheduler_mode::enable_stealing_numa,
                    !numa_sensitive);

                // instantiate the pool
                std::unique_ptr<thread_pool_base> pool =
                    std::make_unique<hpx::threads::detail::
                            scheduled_thread_pool<local_sched_type>>(
                        HPX_MOVE(sched), thread_pool_init);
                pools_.push_back(HPX_MOVE(pool));
                break;
            }

            case resource::scheduling_policy::static_priority:
            {
                // set parameters for scheduler and pool instantiation and
                // perform compatibility checks
                std::size_t num_high_priority_queues =
                    hpx::util::get_entry_as<std::size_t>(rtcfg_,
                        "hpx.thread_queue.high_priority_queues",
                        thread_pool_init.num_threads_);
                detail::check_num_high_priority_queues(
                    thread_pool_init.num_threads_, num_high_priority_queues);

                // instantiate the scheduler
                using local_sched_type =
                    hpx::threads::policies::static_priority_queue_scheduler<>;

                local_sched_type::init_parameter_type init(
                    thread_pool_init.num_threads_,
                    thread_pool_init.affinity_data_, num_high_priority_queues,
                    thread_queue_init, "core-static_priority_queue_scheduler");

                std::unique_ptr<local_sched_type> sched =
                    std::make_unique<local_sched_type>(init);

                // set the default scheduler flags
                sched->set_scheduler_mode(thread_pool_init.mode_);
                // conditionally set/unset this flag
                sched->update_scheduler_mode(
                    policies::scheduler_mode::enable_stealing_numa,
                    !numa_sensitive);

                // instantiate the pool
                std::unique_ptr<thread_pool_base> pool =
                    std::make_unique<hpx::threads::detail::
                            scheduled_thread_pool<local_sched_type>>(
                        HPX_MOVE(sched), thread_pool_init);
                pools_.push_back(HPX_MOVE(pool));
                break;
            }

            case resource::scheduling_policy::abp_priority_fifo:
            {
#if defined(HPX_HAVE_CXX11_STD_ATOMIC_128BIT)
                // set parameters for scheduler and pool instantiation and
                // perform compatibility checks
                std::size_t num_high_priority_queues =
                    hpx::util::get_entry_as<std::size_t>(rtcfg_,
                        "hpx.thread_queue.high_priority_queues",
                        thread_pool_init.num_threads_);
                detail::check_num_high_priority_queues(
                    thread_pool_init.num_threads_, num_high_priority_queues);

                // instantiate the scheduler
                using local_sched_type =
                    hpx::threads::policies::local_priority_queue_scheduler<
                        std::mutex, hpx::threads::policies::lockfree_fifo>;

                local_sched_type::init_parameter_type init(
                    thread_pool_init.num_threads_,
                    thread_pool_init.affinity_data_, num_high_priority_queues,
                    thread_queue_init,
                    "core-abp_fifo_priority_queue_scheduler");

                std::unique_ptr<local_sched_type> sched =
                    std::make_unique<local_sched_type>(init);

                // set the default scheduler flags
                sched->set_scheduler_mode(thread_pool_init.mode_);
                // conditionally set/unset this flag
                sched->update_scheduler_mode(
                    policies::scheduler_mode::enable_stealing_numa,
                    !numa_sensitive);

                // instantiate the pool
                std::unique_ptr<thread_pool_base> pool =
                    std::make_unique<hpx::threads::detail::
                            scheduled_thread_pool<local_sched_type>>(
                        HPX_MOVE(sched), thread_pool_init);
                pools_.push_back(HPX_MOVE(pool));
#else
                throw hpx::detail::command_line_error(
                    "Command line option --hpx:queuing=abp-priority-fifo "
                    "is not configured in this build. Please make sure 128bit "
                    "atomics are available.");
#endif
                break;
            }

            case resource::scheduling_policy::abp_priority_lifo:
            {
#if defined(HPX_HAVE_CXX11_STD_ATOMIC_128BIT)
                // set parameters for scheduler and pool instantiation and
                // perform compatibility checks
                std::size_t num_high_priority_queues =
                    hpx::util::get_entry_as<std::size_t>(rtcfg_,
                        "hpx.thread_queue.high_priority_queues",
                        thread_pool_init.num_threads_);
                detail::check_num_high_priority_queues(
                    thread_pool_init.num_threads_, num_high_priority_queues);

                // instantiate the scheduler
                using local_sched_type =
                    hpx::threads::policies::local_priority_queue_scheduler<
                        std::mutex, hpx::threads::policies::lockfree_lifo>;

                local_sched_type::init_parameter_type init(
                    thread_pool_init.num_threads_,
                    thread_pool_init.affinity_data_, num_high_priority_queues,
                    thread_queue_init,
                    "core-abp_fifo_priority_queue_scheduler");

                std::unique_ptr<local_sched_type> sched =
                    std::make_unique<local_sched_type>(init);

                // set the default scheduler flags
                sched->set_scheduler_mode(thread_pool_init.mode_);
                // conditionally set/unset this flag
                sched->update_scheduler_mode(
                    policies::scheduler_mode::enable_stealing_numa,
                    !numa_sensitive);

                // instantiate the pool
                std::unique_ptr<thread_pool_base> pool =
                    std::make_unique<hpx::threads::detail::
                            scheduled_thread_pool<local_sched_type>>(
                        HPX_MOVE(sched), thread_pool_init);
                pools_.push_back(HPX_MOVE(pool));
#else
                throw hpx::detail::command_line_error(
                    "Command line option --hpx:queuing=abp-priority-lifo "
                    "is not configured in this build. Please make sure 128bit "
                    "atomics are available.");
#endif
                break;
            }

            case resource::scheduling_policy::shared_priority:
            {
                // instantiate the scheduler
                typedef hpx::threads::policies::
                    shared_priority_queue_scheduler<>
                        local_sched_type;
                local_sched_type::init_parameter_type init(
                    thread_pool_init.num_threads_, {1, 1, 1},
                    thread_pool_init.affinity_data_, thread_queue_init,
                    "core-shared_priority_queue_scheduler");

                std::unique_ptr<local_sched_type> sched =
                    std::make_unique<local_sched_type>(init);

                // set the default scheduler flags
                sched->set_scheduler_mode(thread_pool_init.mode_);
                // conditionally set/unset this flag
                sched->update_scheduler_mode(
                    policies::scheduler_mode::enable_stealing_numa,
                    !numa_sensitive);

                // instantiate the pool
                std::unique_ptr<thread_pool_base> pool =
                    std::make_unique<hpx::threads::detail::
                            scheduled_thread_pool<local_sched_type>>(
                        HPX_MOVE(sched), thread_pool_init);
                pools_.push_back(HPX_MOVE(pool));
                break;
            }
            }

            // update the thread_offset for the next pool
            thread_offset += num_threads_in_pool;
        }

        // fill the thread-lookup table
        for (auto& pool_iter : pools_)
        {
            std::size_t nt = rp.get_num_threads(pool_iter->get_pool_index());
            for (std::size_t i = 0; i < nt; i++)
            {
                threads_lookup_.emplace_back(pool_iter->get_pool_id());
            }
        }
    }

    threadmanager::~threadmanager() = default;

    void threadmanager::init()
    {
        auto& rp = hpx::resource::get_partitioner();
        std::size_t threads_offset = 0;

        // initialize all pools
        for (auto&& pool_iter : pools_)
        {
            std::size_t num_threads_in_pool =
                rp.get_num_threads(pool_iter->get_pool_index());
            pool_iter->init(num_threads_in_pool, threads_offset);
            threads_offset += num_threads_in_pool;
        }
    }

    void threadmanager::print_pools(std::ostream& os)
    {
        os << "The thread-manager owns "
           << static_cast<std::uint64_t>(pools_.size()) << " pool(s) : \n";

        for (auto&& pool_iter : pools_)
        {
            pool_iter->print_pool(os);
        }
    }

    thread_pool_base& threadmanager::default_pool() const
    {
        HPX_ASSERT(!pools_.empty());
        return *pools_[0];
    }

    thread_pool_base& threadmanager::get_pool(
        std::string const& pool_name) const
    {
        // if the given pool_name is default, we don't need to look for it
        // we must always return pool 0
        if (pool_name == "default" ||
            pool_name == resource::get_partitioner().get_default_pool_name())
        {
            return default_pool();
        }

        // now check the other pools - no need to check pool 0 again, so ++begin
        auto pool = std::find_if(++pools_.begin(), pools_.end(),
            [&pool_name](pool_type const& itp) -> bool {
                return (itp->get_pool_name() == pool_name);
            });

        if (pool != pools_.end())
        {
            return **pool;
        }

        //! FIXME Add names of available pools?
        HPX_THROW_EXCEPTION(hpx::error::bad_parameter,
            "threadmanager::get_pool",
            "the resource partitioner does not own a thread pool named '{}'.\n",
            pool_name);
    }

    thread_pool_base& threadmanager::get_pool(pool_id_type const& pool_id) const
    {
        return get_pool(pool_id.name());
    }

    thread_pool_base& threadmanager::get_pool(std::size_t thread_index) const
    {
        return get_pool(threads_lookup_[thread_index]);
    }

    bool threadmanager::pool_exists(std::string const& pool_name) const
    {
        // if the given pool_name is default, we don't need to look for it
        // we must always return pool 0
        if (pool_name == "default" ||
            pool_name == resource::get_partitioner().get_default_pool_name())
        {
            return true;
        }

        // now check the other pools - no need to check pool 0 again, so ++begin
        auto pool = std::find_if(++pools_.begin(), pools_.end(),
            [&pool_name](pool_type const& itp) -> bool {
                return (itp->get_pool_name() == pool_name);
            });

        if (pool != pools_.end())
        {
            return true;
        }

        return false;
    }

    bool threadmanager::pool_exists(std::size_t pool_index) const
    {
        return pool_index < pools_.size();
    }

    ///////////////////////////////////////////////////////////////////////////
    std::int64_t threadmanager::get_thread_count(thread_schedule_state state,
        thread_priority priority, std::size_t num_thread, bool reset)
    {
        std::int64_t total_count = 0;
        std::lock_guard<mutex_type> lk(mtx_);

        for (auto& pool_iter : pools_)
        {
            total_count +=
                pool_iter->get_thread_count(state, priority, num_thread, reset);
        }

        return total_count;
    }

    std::int64_t threadmanager::get_idle_core_count()
    {
        std::int64_t total_count = 0;
        std::lock_guard<mutex_type> lk(mtx_);

        for (auto& pool_iter : pools_)
        {
            total_count += pool_iter->get_idle_core_count();
        }

        return total_count;
    }

    mask_type threadmanager::get_idle_core_mask()
    {
        mask_type mask = mask_type();
        resize(mask, static_cast<std::size_t>(hardware_concurrency()));

        std::lock_guard<mutex_type> lk(mtx_);

        for (auto& pool_iter : pools_)
        {
            pool_iter->get_idle_core_mask(mask);
        }

        return mask;
    }

    std::int64_t threadmanager::get_background_thread_count() const
    {
        std::int64_t total_count = 0;
        std::lock_guard<mutex_type> lk(mtx_);

        for (auto& pool_iter : pools_)
        {
            total_count += pool_iter->get_background_thread_count();
        }

        return total_count;
    }

    ///////////////////////////////////////////////////////////////////////////
    // Enumerate all matching threads
    bool threadmanager::enumerate_threads(
        hpx::function<bool(thread_id_type)> const& f,
        thread_schedule_state state) const
    {
        std::lock_guard<mutex_type> lk(mtx_);
        bool result = true;

        for (auto& pool_iter : pools_)
        {
            result = result && pool_iter->enumerate_threads(f, state);
        }

        return result;
    }

    ///////////////////////////////////////////////////////////////////////////
    // Abort all threads which are in suspended state. This will set
    // the state of all suspended threads to \a pending while
    // supplying the wait_abort extended state flag
    void threadmanager::abort_all_suspended_threads()
    {
        std::lock_guard<mutex_type> lk(mtx_);
        for (auto& pool_iter : pools_)
        {
            pool_iter->abort_all_suspended_threads();
        }
    }

    ///////////////////////////////////////////////////////////////////////////
    // Clean up terminated threads. This deletes all threads which
    // have been terminated but which are still held in the queue
    // of terminated threads. Some schedulers might not do anything
    // here.
    bool threadmanager::cleanup_terminated(bool delete_all)
    {
        std::lock_guard<mutex_type> lk(mtx_);
        bool result = true;

        for (auto& pool_iter : pools_)
        {
            result = pool_iter->cleanup_terminated(delete_all) && result;
        }

        return result;
    }

    ///////////////////////////////////////////////////////////////////////////
    void threadmanager::register_thread(
        thread_init_data& data, thread_id_ref_type& id, error_code& ec)
    {
        thread_pool_base* pool = nullptr;
        auto thrd_data = get_self_id_data();
        if (thrd_data)
        {
            pool = thrd_data->get_scheduler_base()->get_parent_pool();
        }
        else
        {
            pool = &default_pool();
        }
        pool->create_thread(data, id, ec);
    }

    ///////////////////////////////////////////////////////////////////////////
    thread_id_ref_type threadmanager::register_work(
        thread_init_data& data, error_code& ec)
    {
        thread_pool_base* pool = nullptr;
        auto thrd_data = get_self_id_data();
        if (thrd_data)
        {
            pool = thrd_data->get_scheduler_base()->get_parent_pool();
        }
        else
        {
            pool = &default_pool();
        }
        return pool->create_work(data, ec);
    }

    ///////////////////////////////////////////////////////////////////////////
    constexpr std::size_t all_threads = std::size_t(-1);

    std::int64_t threadmanager::get_queue_length(bool reset)
    {
        std::int64_t result = 0;
        for (auto const& pool_iter : pools_)
            result += pool_iter->get_queue_length(all_threads, reset);
        return result;
    }

#ifdef HPX_HAVE_THREAD_QUEUE_WAITTIME
    std::int64_t threadmanager::get_average_thread_wait_time(bool reset)
    {
        std::int64_t result = 0;
        for (auto const& pool_iter : pools_)
            result +=
                pool_iter->get_average_thread_wait_time(all_threads, reset);
        return result;
    }

    std::int64_t threadmanager::get_average_task_wait_time(bool reset)
    {
        std::int64_t result = 0;
        for (auto const& pool_iter : pools_)
            result += pool_iter->get_average_task_wait_time(all_threads, reset);
        return result;
    }
#endif

    std::int64_t threadmanager::get_cumulative_duration(bool reset)
    {
        std::int64_t result = 0;
        for (auto const& pool_iter : pools_)
            result += pool_iter->get_cumulative_duration(all_threads, reset);
        return result;
    }

#if defined(HPX_HAVE_BACKGROUND_THREAD_COUNTERS) &&                            \
    defined(HPX_HAVE_THREAD_IDLE_RATES)
    std::int64_t threadmanager::get_background_work_duration(bool reset)
    {
        std::int64_t result = 0;
        for (auto const& pool_iter : pools_)
            result +=
                pool_iter->get_background_work_duration(all_threads, reset);
        return result;
    }

    std::int64_t threadmanager::get_background_overhead(bool reset)
    {
        std::int64_t result = 0;
        for (auto const& pool_iter : pools_)
            result += pool_iter->get_background_overhead(all_threads, reset);
        return result;
    }

    std::int64_t threadmanager::get_background_send_duration(bool reset)
    {
        std::int64_t result = 0;
        for (auto const& pool_iter : pools_)
            result +=
                pool_iter->get_background_send_duration(all_threads, reset);
        return result;
    }

    std::int64_t threadmanager::get_background_send_overhead(bool reset)
    {
        std::int64_t result = 0;
        for (auto const& pool_iter : pools_)
            result +=
                pool_iter->get_background_send_overhead(all_threads, reset);
        return result;
    }

    std::int64_t threadmanager::get_background_receive_duration(bool reset)
    {
        std::int64_t result = 0;
        for (auto const& pool_iter : pools_)
            result +=
                pool_iter->get_background_receive_duration(all_threads, reset);
        return result;
    }

    std::int64_t threadmanager::get_background_receive_overhead(bool reset)
    {
        std::int64_t result = 0;
        for (auto const& pool_iter : pools_)
            result +=
                pool_iter->get_background_receive_overhead(all_threads, reset);
        return result;
    }
#endif    // HPX_HAVE_BACKGROUND_THREAD_COUNTERS

#ifdef HPX_HAVE_THREAD_IDLE_RATES
    std::int64_t threadmanager::avg_idle_rate(bool reset)
    {
        std::int64_t result = 0;
        for (auto const& pool_iter : pools_)
            result += pool_iter->avg_idle_rate(all_threads, reset);
        return result;
    }

#ifdef HPX_HAVE_THREAD_CREATION_AND_CLEANUP_RATES
    std::int64_t threadmanager::avg_creation_idle_rate(bool reset)
    {
        std::int64_t result = 0;
        for (auto const& pool_iter : pools_)
            result += pool_iter->avg_creation_idle_rate(all_threads, reset);
        return result;
    }

    std::int64_t threadmanager::avg_cleanup_idle_rate(bool reset)
    {
        std::int64_t result = 0;
        for (auto const& pool_iter : pools_)
            result += pool_iter->avg_cleanup_idle_rate(all_threads, reset);
        return result;
    }
#endif
#endif

#ifdef HPX_HAVE_THREAD_CUMULATIVE_COUNTS
    std::int64_t threadmanager::get_executed_threads(bool reset)
    {
        std::int64_t result = 0;
        for (auto const& pool_iter : pools_)
            result += pool_iter->get_executed_threads(all_threads, reset);
        return result;
    }

    std::int64_t threadmanager::get_executed_thread_phases(bool reset)
    {
        std::int64_t result = 0;
        for (auto const& pool_iter : pools_)
            result += pool_iter->get_executed_thread_phases(all_threads, reset);
        return result;
    }

#ifdef HPX_HAVE_THREAD_IDLE_RATES
    std::int64_t threadmanager::get_thread_duration(bool reset)
    {
        std::int64_t result = 0;
        for (auto const& pool_iter : pools_)
            result += pool_iter->get_thread_duration(all_threads, reset);
        return result;
    }

    std::int64_t threadmanager::get_thread_phase_duration(bool reset)
    {
        std::int64_t result = 0;
        for (auto const& pool_iter : pools_)
            result += pool_iter->get_thread_phase_duration(all_threads, reset);
        return result;
    }

    std::int64_t threadmanager::get_thread_overhead(bool reset)
    {
        std::int64_t result = 0;
        for (auto const& pool_iter : pools_)
            result += pool_iter->get_thread_overhead(all_threads, reset);
        return result;
    }

    std::int64_t threadmanager::get_thread_phase_overhead(bool reset)
    {
        std::int64_t result = 0;
        for (auto const& pool_iter : pools_)
            result += pool_iter->get_thread_phase_overhead(all_threads, reset);
        return result;
    }

    std::int64_t threadmanager::get_cumulative_thread_duration(bool reset)
    {
        std::int64_t result = 0;
        for (auto const& pool_iter : pools_)
            result +=
                pool_iter->get_cumulative_thread_duration(all_threads, reset);
        return result;
    }

    std::int64_t threadmanager::get_cumulative_thread_overhead(bool reset)
    {
        std::int64_t result = 0;
        for (auto const& pool_iter : pools_)
            result +=
                pool_iter->get_cumulative_thread_overhead(all_threads, reset);
        return result;
    }
#endif
#endif

#ifdef HPX_HAVE_THREAD_STEALING_COUNTS
    std::int64_t threadmanager::get_num_pending_misses(bool reset)
    {
        std::int64_t result = 0;
        for (auto const& pool_iter : pools_)
            result += pool_iter->get_num_pending_misses(all_threads, reset);
        return result;
    }

    std::int64_t threadmanager::get_num_pending_accesses(bool reset)
    {
        std::int64_t result = 0;
        for (auto const& pool_iter : pools_)
            result += pool_iter->get_num_pending_accesses(all_threads, reset);
        return result;
    }

    std::int64_t threadmanager::get_num_stolen_from_pending(bool reset)
    {
        std::int64_t result = 0;
        for (auto const& pool_iter : pools_)
            result +=
                pool_iter->get_num_stolen_from_pending(all_threads, reset);
        return result;
    }

    std::int64_t threadmanager::get_num_stolen_from_staged(bool reset)
    {
        std::int64_t result = 0;
        for (auto const& pool_iter : pools_)
            result += pool_iter->get_num_stolen_from_staged(all_threads, reset);
        return result;
    }

    std::int64_t threadmanager::get_num_stolen_to_pending(bool reset)
    {
        std::int64_t result = 0;
        for (auto const& pool_iter : pools_)
            result += pool_iter->get_num_stolen_to_pending(all_threads, reset);
        return result;
    }

    std::int64_t threadmanager::get_num_stolen_to_staged(bool reset)
    {
        std::int64_t result = 0;
        for (auto const& pool_iter : pools_)
            result += pool_iter->get_num_stolen_to_staged(all_threads, reset);
        return result;
    }
#endif

    ///////////////////////////////////////////////////////////////////////////
    bool threadmanager::run()
    {
        std::unique_lock<mutex_type> lk(mtx_);

        // the main thread needs to have a unique thread_num
        // worker threads are numbered 0..N-1, so we can use N for this thread
        auto& rp = hpx::resource::get_partitioner();
        init_tss(rp.get_num_threads());

#ifdef HPX_HAVE_TIMER_POOL
        LTM_(info).format("run: running timer pool");
        timer_pool_.run(false);
#endif

        for (auto& pool_iter : pools_)
        {
            std::size_t num_threads_in_pool =
                rp.get_num_threads(pool_iter->get_pool_name());

            if (pool_iter->get_os_thread_count() != 0 ||
                pool_iter->has_reached_state(hpx::state::running))
            {
                return true;    // do nothing if already running
            }

            if (!pool_iter->run(lk, num_threads_in_pool))
            {
#ifdef HPX_HAVE_TIMER_POOL
                timer_pool_.stop();
#endif
                return false;
            }

            // set all states of all schedulers to "running"
            policies::scheduler_base* sched = pool_iter->get_scheduler();
            if (sched)
                sched->set_all_states(hpx::state::running);
        }

        LTM_(info).format("run: running");
        return true;
    }

    void threadmanager::stop(bool blocking)
    {
        LTM_(info).format("stop: blocking({})", blocking ? "true" : "false");

        std::unique_lock<mutex_type> lk(mtx_);
        for (auto& pool_iter : pools_)
        {
            pool_iter->stop(lk, blocking);
        }
        deinit_tss();
    }

    bool threadmanager::is_busy()
    {
        bool busy = false;
        for (auto& pool_iter : pools_)
        {
            busy = busy || pool_iter->is_busy();
        }
        return busy;
    }

    bool threadmanager::is_idle()
    {
        bool idle = true;
        for (auto& pool_iter : pools_)
        {
            idle = idle && pool_iter->is_idle();
        }
        return idle;
    }

    void threadmanager::wait()
    {
        std::size_t shutdown_check_count = util::get_entry_as<std::size_t>(
            rtcfg_, "hpx.shutdown_check_count", 10);
        hpx::util::detail::yield_while_count(
            [this]() { return is_busy(); }, shutdown_check_count);
    }

    void threadmanager::suspend()
    {
        wait();

        if (threads::get_self_ptr())
        {
            std::vector<hpx::future<void>> fs;

            for (auto& pool_iter : pools_)
            {
                fs.emplace_back(suspend_pool(*pool_iter));
            }

            hpx::wait_all(fs);
        }
        else
        {
            for (auto& pool_iter : pools_)
            {
                pool_iter->suspend_direct();
            }
        }
    }

    void threadmanager::resume()
    {
        if (threads::get_self_ptr())
        {
            std::vector<hpx::future<void>> fs;

            for (auto& pool_iter : pools_)
            {
                fs.emplace_back(resume_pool(*pool_iter));
            }
            hpx::wait_all(fs);
        }
        else
        {
            for (auto& pool_iter : pools_)
            {
                pool_iter->resume_direct();
            }
        }
    }
}}    // namespace hpx::threads