• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

STEllAR-GROUP / hpx / #871

22 Jan 2023 11:22PM UTC coverage: 86.624% (+0.7%) from 85.97%
#871

push

StellarBot
Merge #6144

6144: General improvements to scheduling and related fixes r=hkaiser a=hkaiser

This is a collection of unrelated improvements applied to different parts of the code

Co-authored-by: Hartmut Kaiser <hartmut.kaiser@gmail.com>

152 of 152 new or added lines in 23 files covered. (100.0%)

174953 of 201969 relevant lines covered (86.62%)

1838882.76 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

80.26
/libs/core/thread_pools/include/hpx/thread_pools/scheduling_loop.hpp
1
//  Copyright (c) 2007-2023 Hartmut Kaiser
2
//
3
//  SPDX-License-Identifier: BSL-1.0
4
//  Distributed under the Boost Software License, Version 1.0. (See accompanying
5
//  file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
6

7
#pragma once
8

9
#include <hpx/config.hpp>
10
#include <hpx/assert.hpp>
11
#include <hpx/execution_base/this_thread.hpp>
12
#include <hpx/hardware/timestamp.hpp>
13
#include <hpx/modules/itt_notify.hpp>
14
#include <hpx/thread_pools/detail/background_thread.hpp>
15
#include <hpx/thread_pools/detail/scheduling_callbacks.hpp>
16
#include <hpx/thread_pools/detail/scheduling_counters.hpp>
17
#include <hpx/thread_pools/detail/scheduling_log.hpp>
18
#include <hpx/threading_base/detail/switch_status.hpp>
19
#include <hpx/threading_base/scheduler_base.hpp>
20
#include <hpx/threading_base/scheduler_state.hpp>
21
#include <hpx/threading_base/thread_data.hpp>
22

23
#if defined(HPX_HAVE_APEX)
24
#include <hpx/threading_base/external_timer.hpp>
25
#endif
26

27
#include <atomic>
28
#include <cstddef>
29
#include <cstdint>
30
#include <limits>
31
#include <memory>
32
#include <utility>
33

34
namespace hpx::threads::detail {
35

36
    ///////////////////////////////////////////////////////////////////////
37
#ifdef HPX_HAVE_THREAD_IDLE_RATES
38
    struct idle_collect_rate
39
    {
40
        idle_collect_rate(
41
            std::int64_t& tfunc_time, std::int64_t& exec_time) noexcept
42
          : start_timestamp_(util::hardware::timestamp())
43
          , tfunc_time_(tfunc_time)
44
          , exec_time_(exec_time)
45
        {
46
        }
47

48
        void collect_exec_time(std::int64_t timestamp) noexcept
49
        {
50
            exec_time_ += util::hardware::timestamp() - timestamp;
51
        }
52

53
        void take_snapshot() noexcept
54
        {
55
            if (tfunc_time_ == std::int64_t(-1))
56
            {
57
                start_timestamp_ = util::hardware::timestamp();
58
                tfunc_time_ = 0;
59
                exec_time_ = 0;
60
            }
61
            else
62
            {
63
                tfunc_time_ = util::hardware::timestamp() - start_timestamp_;
64
            }
65
        }
66

67
        std::int64_t start_timestamp_;
68

69
        std::int64_t& tfunc_time_;
70
        std::int64_t& exec_time_;
71
    };
72

73
    struct exec_time_wrapper
74
    {
75
        explicit exec_time_wrapper(idle_collect_rate& idle_rate) noexcept
76
          : timestamp_(util::hardware::timestamp())
77
          , idle_rate_(idle_rate)
78
        {
79
        }
80
        ~exec_time_wrapper()
81
        {
82
            idle_rate_.collect_exec_time(timestamp_);
83
        }
84

85
        std::int64_t timestamp_;
86
        idle_collect_rate& idle_rate_;
87
    };
88

89
    struct tfunc_time_wrapper
90
    {
91
        explicit constexpr tfunc_time_wrapper(
92
            idle_collect_rate& idle_rate) noexcept
93
          : idle_rate_(idle_rate)
94
        {
95
        }
96
        ~tfunc_time_wrapper()
97
        {
98
            idle_rate_.take_snapshot();
99
        }
100

101
        idle_collect_rate& idle_rate_;
102
    };
103
#else
104
    // No-op stand-in used when idle-rate collection is compiled out: accepts
    // the same counter references as the instrumented variant but records
    // nothing.
    struct idle_collect_rate
    {
        explicit constexpr idle_collect_rate(
            std::int64_t& /*tfunc_time*/, std::int64_t& /*exec_time*/) noexcept
        {
        }
    };
111

112
    struct exec_time_wrapper
113
    {
114
        explicit constexpr exec_time_wrapper(idle_collect_rate&) noexcept {}
9,552,336✔
115
    };
116

117
    struct tfunc_time_wrapper
118
    {
119
        explicit constexpr tfunc_time_wrapper(idle_collect_rate&) noexcept {}
19,103,429✔
120
    };
121
#endif
122

123
    ///////////////////////////////////////////////////////////////////////////
124
    ///////////////////////////////////////////////////////////////////////////
    // RAII guard marking a worker as actively executing a thread: sets the
    // referenced flag to true on construction and back to false on
    // destruction. Copying/moving is deleted: a copy's destructor would
    // clear the flag while the original guard is still alive.
    struct is_active_wrapper
    {
        explicit is_active_wrapper(bool& is_active) noexcept
          : is_active_(is_active)
        {
            is_active = true;
        }

        is_active_wrapper(is_active_wrapper const&) = delete;
        is_active_wrapper(is_active_wrapper&&) = delete;
        is_active_wrapper& operator=(is_active_wrapper const&) = delete;
        is_active_wrapper& operator=(is_active_wrapper&&) = delete;

        ~is_active_wrapper()
        {
            is_active_ = false;
        }

        bool& is_active_;
    };
138

139
    /// @brief Main worker-thread scheduling loop.
    ///
    /// Repeatedly pulls HPX threads from the scheduler, executes them, and
    /// re-schedules them according to the state they return. Also drives
    /// optional background work, idle/busy accounting, inner/outer callback
    /// invocation, and the cooperative shutdown protocol for this worker.
    ///
    /// @param num_thread index of this worker thread within the pool
    /// @param scheduler  scheduling policy instance supplying work
    /// @param counters   per-worker counters (idle/busy loop counts, timing,
    ///                   executed-thread statistics) updated in place
    /// @param params     callbacks and tuning limits (max idle/busy loop
    ///                   counts, background work, inner/outer callbacks)
    ///
    /// Returns only when this worker transitions to a stopped/terminating
    /// state; the loop itself never yields a value.
    template <typename SchedulingPolicy>
    void scheduling_loop(std::size_t num_thread, SchedulingPolicy& scheduler,
        scheduling_counters& counters, scheduling_callbacks& params)
    {
        // per-worker state flag owned by the scheduler; observed with relaxed
        // ordering throughout the loop
        std::atomic<hpx::state>& this_state = scheduler.get_state(num_thread);

#if HPX_HAVE_ITTNOTIFY != 0 && !defined(HPX_HAVE_APEX)
        util::itt::stack_context ctx;    // helper for itt support
        util::itt::thread_domain thread_domain;
        util::itt::id threadid(thread_domain, &scheduler);
        util::itt::string_handle task_id("task_id");
        util::itt::string_handle task_phase("task_phase");
        // util::itt::frame_context fctx(thread_domain);
#endif

        std::int64_t& idle_loop_count = counters.idle_loop_count_;
        std::int64_t& busy_loop_count = counters.busy_loop_count_;

        background_work_exec_time bg_work_exec_time_init(counters);

        // idle-rate accounting for this scope; tfunc_time_collector snapshots
        // the total loop time when this function exits
        idle_collect_rate idle_rate(counters.tfunc_time_, counters.exec_time_);
        [[maybe_unused]] tfunc_time_wrapper tfunc_time_collector(idle_rate);

        // spin for some time after queues have become empty
        bool may_exit = false;

        // background_running is shared with the background thread so it can
        // be asked to stop; background_thread is created lazily below
        std::shared_ptr<bool> background_running;
        thread_id_ref_type background_thread;

        if (scheduler.has_scheduler_mode(
                policies::scheduler_mode::do_background_work) &&
            num_thread < params.max_background_threads_ &&
            !params.background_.empty())
        {
            background_thread = create_background_thread(scheduler, num_thread,
                params, background_running, idle_loop_count);
        }

        hpx::execution_base::this_thread::detail::agent_storage*
            context_storage =
                hpx::execution_base::this_thread::detail::get_agent_storage();

        // added counts threads added by wait_or_add_new; size_t(-1) marks
        // "no information yet"
        std::size_t added = std::size_t(-1);
        thread_id_ref_type next_thrd;
        while (true)
        {
            // a thread handed over by the previous iteration (direct switch)
            // takes precedence over asking the scheduler
            thread_id_ref_type thrd = HPX_MOVE(next_thrd);
            next_thrd = thread_id_ref_type();

            // Get the next HPX thread from the queue
            bool running = this_state.load(std::memory_order_relaxed) <
                hpx::state::pre_sleep;

            // extract the stealing mode once per loop iteration
            bool enable_stealing = scheduler.has_scheduler_mode(
                policies::scheduler_mode::enable_stealing);

            // stealing staged threads is enabled if:
            // - fast idle mode is on: same as normal stealing
            // - fast idle mode off: only after normal stealing has failed for
            //                       a while
            bool enable_stealing_staged = enable_stealing;
            if (enable_stealing_staged &&
                !scheduler.has_scheduler_mode(
                    policies::scheduler_mode::fast_idle_mode))
            {
                enable_stealing_staged =
                    idle_loop_count > params.max_idle_loop_count_ / 2;
            }

            if (HPX_LIKELY(thrd ||
                    scheduler.get_next_thread(
                        num_thread, running, thrd, enable_stealing)))
            {
                // we found work: refresh tfunc accounting for this iteration
                [[maybe_unused]] tfunc_time_wrapper tfunc_time_collector(
                    idle_rate);
                HPX_ASSERT(get_thread_id_data(thrd)->get_scheduler_base() ==
                    &scheduler);

                idle_loop_count = 0;
                ++busy_loop_count;

                may_exit = false;

                // Only pending HPX threads will be executed. Any non-pending
                // HPX threads are leftovers from a set_state() call for a
                // previously pending HPX thread (see comments above).
                auto* thrdptr = get_thread_id_data(thrd);
                thread_state state = thrdptr->get_state();
                thread_schedule_state state_val = state.state();

                if (HPX_LIKELY(thread_schedule_state::pending == state_val))
                {
                    // switch the state of the thread to active and back to what
                    // the thread reports as its return value

                    {
                        // tries to set state to active (only if state is still
                        // the same as 'state')
                        detail::switch_status thrd_stat(thrd, state);
                        if (HPX_LIKELY(thrd_stat.is_valid() &&
                                thrd_stat.get_previous() ==
                                    thread_schedule_state::pending))
                        {
                            detail::write_state_log(scheduler, num_thread, thrd,
                                thrd_stat.get_previous(),
                                thread_schedule_state::active);

                            [[maybe_unused]] tfunc_time_wrapper
                                tfunc_time_collector(idle_rate);

                            // thread returns new required state store the
                            // returned state in the thread
                            {
                                // mark this worker as busy for the duration of
                                // the user-thread invocation
                                is_active_wrapper utilization(
                                    counters.is_active_);
#if HPX_HAVE_ITTNOTIFY != 0 && !defined(HPX_HAVE_APEX)
                                util::itt::caller_context cctx(ctx);
                                // util::itt::undo_frame_context undoframe(fctx);
                                util::itt::task task =
                                    thrdptr->get_description().get_task_itt(
                                        thread_domain);
                                task.add_metadata(task_id, thrdptr);
                                task.add_metadata(
                                    task_phase, thrdptr->get_thread_phase());
#endif
                                // Record time elapsed in thread changing state
                                // and add to aggregate execution time.
                                [[maybe_unused]] exec_time_wrapper
                                    exec_time_collector(idle_rate);

#if defined(HPX_HAVE_APEX)
                                // get the APEX data pointer, in case we are
                                // resuming the thread and have to restore any
                                // leaf timers from direct actions, etc.

                                // the address of tmp_data is getting stored by
                                // APEX during this call
                                util::external_timer::scoped_timer profiler(
                                    thrdptr->get_timer_data());

                                thrd_stat = (*thrdptr)(context_storage);

                                if (thrd_stat.get_previous() ==
                                    thread_schedule_state::terminated)
                                {
                                    profiler.stop();
                                    // just in case, clean up the now dead pointer.
                                    thrdptr->set_timer_data(nullptr);
                                }
                                else
                                {
                                    profiler.yield();
                                }
#else
                                // invoke the HPX thread; the call returns the
                                // thread's requested follow-up state
                                thrd_stat = (*thrdptr)(context_storage);
#endif
                            }

                            detail::write_state_log(scheduler, num_thread, thrd,
                                thread_schedule_state::active,
                                thrd_stat.get_previous());

#ifdef HPX_HAVE_THREAD_CUMULATIVE_COUNTS
                            ++counters.executed_thread_phases_;
#endif
                        }
                        else
                        {
                            // some other worker-thread got in between and
                            // started executing this HPX-thread, we just
                            // continue with the next one
                            thrd_stat.disable_restore();
                            detail::write_state_log_warning(scheduler,
                                num_thread, thrd, state_val, "no execution");
                            continue;
                        }

                        // store and retrieve the new state in the thread
                        if (HPX_UNLIKELY(!thrd_stat.store_state(state)))
                        {
                            // some other worker-thread got in between and
                            // changed the state of this thread, we just
                            // continue with the next one
                            detail::write_state_log_warning(scheduler,
                                num_thread, thrd, state_val, "no state change");
                            continue;
                        }

                        state_val = state.state();

                        // any exception thrown from the thread will reset its
                        // state at this point

                        // handle next thread id if given (switch directly to
                        // this thread)
                        next_thrd = thrd_stat.move_next_thread();
                    }

                    // Re-add this work item to our list of work items if the
                    // HPX thread should be re-scheduled. If the HPX thread is
                    // suspended now we just keep it in the map of threads.
                    if (HPX_UNLIKELY(
                            state_val == thread_schedule_state::pending))
                    {
                        if (HPX_LIKELY(next_thrd == nullptr))
                        {
                            // schedule other work
                            scheduler.wait_or_add_new(num_thread, running,
                                idle_loop_count, enable_stealing_staged, added);
                        }

                        // schedule this thread again, make sure it ends up at
                        // the end of the queue
                        scheduler.SchedulingPolicy::schedule_thread_last(
                            HPX_MOVE(thrd),
                            threads::thread_schedule_hint(
                                static_cast<std::int16_t>(num_thread)),
                            true);
                        scheduler.SchedulingPolicy::do_some_work(num_thread);
                    }
                    else if (HPX_UNLIKELY(state_val ==
                                 thread_schedule_state::pending_boost))
                    {
                        thrdptr->set_state(thread_schedule_state::pending);

                        if (HPX_LIKELY(next_thrd == nullptr))
                        {
                            // reschedule this thread right away if the
                            // background work will be triggered
                            if (HPX_UNLIKELY(busy_loop_count >
                                    params.max_busy_loop_count_))
                            {
                                next_thrd = HPX_MOVE(thrd);
                            }
                            else
                            {
                                // schedule other work
                                scheduler.wait_or_add_new(num_thread, running,
                                    idle_loop_count, enable_stealing_staged,
                                    added);

                                // schedule this thread again immediately with
                                // boosted priority
                                scheduler.SchedulingPolicy::schedule_thread(
                                    HPX_MOVE(thrd),
                                    threads::thread_schedule_hint(
                                        static_cast<std::int16_t>(num_thread)),
                                    true, thread_priority::boost);
                                scheduler.SchedulingPolicy::do_some_work(
                                    num_thread);
                            }
                        }
                        else if (HPX_LIKELY(next_thrd != thrd))
                        {
                            // schedule this thread again immediately with
                            // boosted priority
                            scheduler.SchedulingPolicy::schedule_thread(
                                HPX_MOVE(thrd),
                                threads::thread_schedule_hint(
                                    static_cast<std::int16_t>(num_thread)),
                                true, thread_priority::boost);
                            scheduler.SchedulingPolicy::do_some_work(
                                num_thread);
                        }
                    }
                }
                else if (HPX_UNLIKELY(
                             thread_schedule_state::active == state_val))
                {
                    write_rescheduling_log_warning(scheduler, num_thread, thrd);

                    // re-schedule thread, if it is still marked as active this
                    // might happen, if some thread has been added to the
                    // scheduler queue already but the state has not been reset
                    // yet
                    auto priority = thrdptr->get_priority();
                    scheduler.SchedulingPolicy::schedule_thread(HPX_MOVE(thrd),
                        threads::thread_schedule_hint(
                            static_cast<std::int16_t>(num_thread)),
                        true, priority);
                    scheduler.SchedulingPolicy::do_some_work(num_thread);
                }

                // Remove the mapping from thread_map_ if HPX thread is depleted
                // or terminated, this will delete the HPX thread. REVIEW: what
                // has to be done with depleted HPX threads?
                if (HPX_LIKELY(state_val == thread_schedule_state::depleted ||
                        state_val == thread_schedule_state::terminated))
                {
#ifdef HPX_HAVE_THREAD_CUMULATIVE_COUNTS
                    ++counters.executed_threads_;
#endif
                    // dropping the last reference here releases the thread
                    // object
                    thrd = thread_id_type();
                }
            }

            // if nothing else has to be done either wait or terminate
            else
            {
                ++idle_loop_count;

                if (scheduler.wait_or_add_new(num_thread, running,
                        idle_loop_count, enable_stealing_staged, added,
                        &next_thrd))
                {
                    // Clean up terminated threads before trying to exit
                    bool can_exit = !running &&
                        scheduler.SchedulingPolicy::cleanup_terminated(
                            num_thread, true) &&
                        scheduler.SchedulingPolicy::get_queue_length(
                            num_thread) == 0;

                    if (this_state.load(std::memory_order_relaxed) ==
                        hpx::state::pre_sleep)
                    {
                        if (can_exit)
                        {
                            scheduler.SchedulingPolicy::suspend(num_thread);
                        }
                    }
                    else
                    {
                        // additionally require that no suspended threads are
                        // left before allowing an exit
                        can_exit = can_exit &&
                            scheduler.SchedulingPolicy::get_thread_count(
                                thread_schedule_state::suspended,
                                thread_priority::default_, num_thread) == 0;

                        if (can_exit)
                        {
                            if (!scheduler.has_scheduler_mode(
                                    policies::scheduler_mode::delay_exit))
                            {
                                // If this is an inner scheduler, try to exit
                                // immediately
                                if (background_thread != nullptr)
                                {
                                    HPX_ASSERT(background_running);
                                    *background_running = false;    //-V522

                                    // do background work in parcel layer and in agas
                                    [[maybe_unused]] bool has_exited =
                                        call_background_thread(
                                            background_thread, next_thrd,
                                            scheduler, num_thread,
                                            bg_work_exec_time_init,
                                            context_storage);

                                    // the background thread should have exited
                                    HPX_ASSERT(has_exited);

                                    background_thread = thread_id_type();
                                    background_running.reset();
                                }
                                else
                                {
                                    this_state.store(hpx::state::stopped);
                                    break;
                                }
                            }
                            else
                            {
                                // Otherwise, keep idling for some time
                                if (!may_exit)
                                    idle_loop_count = 0;
                                may_exit = true;
                            }
                        }
                    }
                }
                else if (!may_exit && added == 0 &&
                    (scheduler.has_scheduler_mode(
                        policies::scheduler_mode::fast_idle_mode)))
                {
                    // speed up idle suspend if no work was stolen
                    idle_loop_count += params.max_idle_loop_count_ / 1024;
                    added = std::size_t(-1);
                }

                // if stealing yielded a new task, run it first
                if (next_thrd != nullptr)
                {
                    continue;
                }

                // do background work in parcel layer and in agas
                call_and_create_background_thread(background_thread, next_thrd,
                    scheduler, num_thread, bg_work_exec_time_init,
                    context_storage, params, background_running,
                    idle_loop_count);

                // call back into invoking context
                if (!params.inner_.empty())
                {
                    params.inner_();
                    // the callback may have switched contexts; re-acquire the
                    // agent storage afterwards
                    context_storage = hpx::execution_base::this_thread::detail::
                        get_agent_storage();
                }
            }

            // a busy polling function resets idle accounting so the worker
            // keeps spinning
            if (scheduler.custom_polling_function() ==
                policies::detail::polling_status::busy)
            {
                idle_loop_count = 0;
            }

            // something went badly wrong, give up
            if (HPX_UNLIKELY(this_state.load(std::memory_order_relaxed) ==
                    hpx::state::terminating))
            {
                break;
            }

            if (busy_loop_count > params.max_busy_loop_count_)
            {
                busy_loop_count = 0;

                // do background work in parcel layer and in agas
                call_and_create_background_thread(background_thread, next_thrd,
                    scheduler, num_thread, bg_work_exec_time_init,
                    context_storage, params, background_running,
                    idle_loop_count);
            }
            else if (idle_loop_count > params.max_idle_loop_count_ || may_exit)
            {
                if (idle_loop_count > params.max_idle_loop_count_)
                    idle_loop_count = 0;

                // call back into invoking context
                if (!params.outer_.empty())
                {
                    params.outer_();
                    context_storage = hpx::execution_base::this_thread::detail::
                        get_agent_storage();
                }

                // break if we were idling after 'may_exit'
                if (may_exit)
                {
                    HPX_ASSERT(this_state.load(std::memory_order_relaxed) !=
                        hpx::state::pre_sleep);

                    if (background_thread)
                    {
                        // ask the background thread to wind down before this
                        // worker may exit
                        HPX_ASSERT(background_running);
                        *background_running = false;

                        // do background work in parcel layer and in agas
                        [[maybe_unused]] bool has_exited =
                            call_background_thread(background_thread, next_thrd,
                                scheduler, num_thread, bg_work_exec_time_init,
                                context_storage);

                        // the background thread should have exited
                        HPX_ASSERT(has_exited);

                        background_thread = thread_id_type();
                        background_running.reset();
                    }
                    else
                    {
                        // final exit check: not running, nothing terminated
                        // left to clean, no suspended threads, empty queue
                        bool can_exit = !running &&
                            scheduler.SchedulingPolicy::cleanup_terminated(
                                true) &&
                            scheduler.SchedulingPolicy::get_thread_count(
                                thread_schedule_state::suspended,
                                thread_priority::default_, num_thread) == 0 &&
                            scheduler.SchedulingPolicy::get_queue_length(
                                num_thread) == 0;

                        if (can_exit)
                        {
                            this_state.store(hpx::state::stopped);
                            break;
                        }
                    }

                    may_exit = false;
                }
                else
                {
                    scheduler.SchedulingPolicy::cleanup_terminated(true);
                }
            }
        }
    }
625
}    // namespace hpx::threads::detail
626

627
// NOTE: This line only exists to please doxygen. Without the line doxygen
628
// generates incomplete xml output.
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc