STEllAR-GROUP / hpx · build #882 (push)
31 Aug 2023 07:44PM UTC · coverage: 41.798% (-44.7% from 86.546%)
19442 of 46514 relevant lines covered (41.8%) · 126375.38 hits per line

Source File
/libs/core/threading_base/src/scheduler_base.cpp (62.67% of lines covered)
//  Copyright (c) 2007-2025 Hartmut Kaiser
//
//  SPDX-License-Identifier: BSL-1.0
//  Distributed under the Boost Software License, Version 1.0. (See accompanying
//  file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)

#include <hpx/config.hpp>
#include <hpx/assert.hpp>
#include <hpx/modules/execution_base.hpp>
#include <hpx/modules/itt_notify.hpp>
#include <hpx/threading_base/scheduler_base.hpp>
#include <hpx/threading_base/scheduler_mode.hpp>
#include <hpx/threading_base/scheduler_state.hpp>
#include <hpx/threading_base/thread_init_data.hpp>
#include <hpx/threading_base/thread_pool_base.hpp>

#if defined(HPX_HAVE_SCHEDULER_LOCAL_STORAGE)
#include <hpx/modules/coroutines.hpp>
#endif

#include <algorithm>
#include <atomic>
#include <chrono>
#include <cmath>
#include <condition_variable>
#include <cstddef>
#include <cstdint>
#include <limits>
#include <memory>
#include <mutex>
#include <ostream>
#include <set>
#include <string>
#include <utility>
#include <vector>

///////////////////////////////////////////////////////////////////////////////
namespace hpx::threads::policies {

    scheduler_base::scheduler_base(std::size_t num_threads,
        char const* description,
        thread_queue_init_parameters const& thread_queue_init,
        scheduler_mode mode)
      : modes_(num_threads)
#if defined(HPX_HAVE_THREAD_MANAGER_IDLE_BACKOFF)
      , max_idle_backoff_time_(thread_queue_init.max_idle_backoff_time_)
      , wait_count_data_(num_threads)
#endif
      , suspend_mtxs_(num_threads)
      , suspend_conds_(num_threads)
      , pu_mtxs_(num_threads)
      , states_(num_threads)
      , description_(description)
      , thread_queue_init_(thread_queue_init)
      , parent_pool_(nullptr)
      , background_thread_count_(0)
      , polling_function_mpi_(&null_polling_function)
      , polling_function_cuda_(&null_polling_function)
      , polling_function_sycl_(&null_polling_function)
      , polling_work_count_function_mpi_(&null_polling_work_count_function)
      , polling_work_count_function_cuda_(&null_polling_work_count_function)
      , polling_work_count_function_sycl_(&null_polling_work_count_function)
    {
        for (std::size_t i = 0; i != num_threads; ++i)
        {
            modes_[i].data_.store(mode, std::memory_order::relaxed);
            states_[i].data_.store(
                hpx::state::initialized, std::memory_order::relaxed);
        }
    }

    bool scheduler_base::idle_callback([[maybe_unused]] std::size_t num_thread)
    {
#if defined(HPX_HAVE_THREAD_MANAGER_IDLE_BACKOFF)
        if (modes_[num_thread].data_.load(std::memory_order_relaxed) &
            policies::scheduler_mode::enable_idle_backoff)
        {
#if HPX_HAVE_ITTNOTIFY != 0 && !defined(HPX_HAVE_APEX)
            static hpx::util::itt::event notify_event("idle_callback");
            hpx::util::itt::mark_event e(notify_event);
#endif
            // Put this thread to sleep for some time, additionally it gets
            // woken up on new work.

            auto& data = wait_count_data_[num_thread].data_;

            std::unique_lock<pu_mutex_type> l(data.wait_mtx, std::try_to_lock);
            if (!l)
            {
                return false;
            }

            // Exponential back-off with a maximum sleep time.
            constexpr double max_exponent =
                std::numeric_limits<double>::max_exponent - 1;
            double const exponent =
                (std::min) (static_cast<double>(data.wait_count), max_exponent);

            std::chrono::microseconds const period(std::lround(
                (std::min) (max_idle_backoff_time_, std::pow(2.0, exponent))));

            ++data.wait_count;

            if (data.wait_cond.wait_for(l, period) ==    //-V1089
                std::cv_status::no_timeout)
            {
                // reset counter if thread was woken up
                data.wait_count = 0;
            }
            return true;
        }
#endif
        return false;
    }
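
    // Editorial example (not part of scheduler_base.cpp): the back-off above
    // sleeps for min(max_idle_backoff_time, 2^wait_count) microseconds, i.e.
    // roughly 1, 2, 4, 8, ... microseconds on successive idle rounds until the
    // configured ceiling is reached. The hypothetical helper below restates
    // that computation in isolation.
    inline std::chrono::microseconds example_backoff_period(
        std::int64_t wait_count, double max_idle_backoff_time_us)
    {
        // cap the exponent so std::pow cannot overflow a double
        constexpr double max_exponent =
            std::numeric_limits<double>::max_exponent - 1;
        double const exponent =
            (std::min) (static_cast<double>(wait_count), max_exponent);

        // wait_count 0 -> 1us, 1 -> 2us, 2 -> 4us, ... capped at the ceiling
        return std::chrono::microseconds(std::lround(
            (std::min) (max_idle_backoff_time_us, std::pow(2.0, exponent))));
    }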

    /// This function gets called by the thread-manager whenever new work
    /// has been added, allowing the scheduler to reactivate one or more of
    /// possibly idling OS threads
    void scheduler_base::do_some_work([[maybe_unused]] std::size_t num_thread)
    {
#if defined(HPX_HAVE_THREAD_MANAGER_IDLE_BACKOFF)
#if HPX_HAVE_ITTNOTIFY != 0 && !defined(HPX_HAVE_APEX)
        static hpx::util::itt::event notify_event("do_some_work");
        hpx::util::itt::mark_event e(notify_event);
#endif

        if (static_cast<std::size_t>(-1) == num_thread)
        {
            auto const size = wait_count_data_.size();
            for (std::size_t i = 0; i != size; ++i)
            {
                if (modes_[i].data_.load(std::memory_order_relaxed) &
                    policies::scheduler_mode::enable_idle_backoff)
                {
                    wait_count_data_[i].data_.wait_count = 0;
                    wait_count_data_[i].data_.wait_cond.notify_one();
                }
            }
        }
        else if (modes_[num_thread].data_.load(std::memory_order_relaxed) &
            policies::scheduler_mode::enable_idle_backoff)
        {
            auto const size = wait_count_data_.size();
            for (std::size_t i = 0; i != size; ++i)
            {
                wait_count_data_[i].data_.wait_count = 0;
                wait_count_data_[i].data_.wait_cond.notify_one();
            }
        }
#endif
    }
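
    // Editorial example (not part of scheduler_base.cpp): a producer that has
    // just added new work would wake idling workers through this hook; passing
    // std::size_t(-1) targets all workers, any other value targets the worker
    // associated with that number. The helper name below is hypothetical.
    inline void example_wake_after_enqueue(scheduler_base& sched,
        std::size_t num_thread = static_cast<std::size_t>(-1))
    {
        // reactivates workers currently sleeping in idle_callback()
        sched.do_some_work(num_thread);
    }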

    void scheduler_base::suspend(std::size_t num_thread)
    {
        HPX_ASSERT(num_thread < suspend_conds_.size());

        states_[num_thread].data_.store(hpx::state::sleeping);
        std::unique_lock<pu_mutex_type> l(suspend_mtxs_[num_thread]);
        suspend_conds_[num_thread].wait(l);    //-V1089

        // Only set running if still in hpx::state::sleeping. Can be set with
        // non-blocking/locking functions to stopping or terminating, in which
        // case the state is left untouched.
        hpx::state expected = hpx::state::sleeping;
        states_[num_thread].data_.compare_exchange_strong(
            expected, hpx::state::running);

        HPX_ASSERT(expected == hpx::state::sleeping ||
            expected == hpx::state::stopping ||
            expected == hpx::state::terminating);
    }

    void scheduler_base::resume(std::size_t num_thread)
    {
        if (num_thread == static_cast<std::size_t>(-1))
        {
            for (std::condition_variable& c : suspend_conds_)
            {
                c.notify_one();
            }
        }
        else
        {
            HPX_ASSERT(num_thread < suspend_conds_.size());
            suspend_conds_[num_thread].notify_one();
        }
    }

    std::size_t scheduler_base::select_active_pu(
        std::size_t num_thread, bool allow_fallback)
    {
        if (modes_[num_thread].data_.load(std::memory_order_relaxed) &
            threads::policies::scheduler_mode::enable_elasticity)
        {
            std::size_t states_size = states_.size();

            if (!allow_fallback)
            {
                // Try indefinitely as long as at least one thread is available
                // for scheduling. Increase allowed state if no threads are
                // available for scheduling.
                auto max_allowed_state = hpx::state::suspended;

                hpx::util::yield_while([this, states_size, &num_thread,
                                           &max_allowed_state]() {
                    std::size_t num_allowed_threads = 0;

                    for (std::size_t offset = 0; offset < states_size; ++offset)
                    {
                        std::size_t const num_thread_local =
                            (num_thread + offset) % states_size;

                        {
                            std::unique_lock<pu_mutex_type> l(
                                pu_mtxs_[num_thread_local], std::try_to_lock);

                            if (l.owns_lock())
                            {
                                if (states_[num_thread_local].data_.load(
                                        std::memory_order_relaxed) <=
                                    max_allowed_state)
                                {
                                    num_thread = num_thread_local;
                                    return false;
                                }
                            }
                        }

                        if (states_[num_thread_local].data_.load(
                                std::memory_order_relaxed) <= max_allowed_state)
                        {
                            ++num_allowed_threads;
                        }
                    }

                    if (0 == num_allowed_threads)
                    {
                        if (max_allowed_state <= hpx::state::suspended)
                        {
                            max_allowed_state = hpx::state::sleeping;
                        }
                        else if (max_allowed_state <= hpx::state::sleeping)
                        {
                            max_allowed_state = hpx::state::stopping;
                        }
                        else
                        {
                            // All threads are terminating or stopped. Just
                            // return num_thread to avoid infinite loop.
                            return false;
                        }
                    }

                    // Yield after trying all pus, then try again
                    return true;
                });

                return num_thread;
            }

            // Try all pus only once if fallback is allowed
            HPX_ASSERT(num_thread != static_cast<std::size_t>(-1));
            for (std::size_t offset = 0; offset < states_size; ++offset)
            {
                std::size_t const num_thread_local =
                    (num_thread + offset) % states_size;

                std::unique_lock<pu_mutex_type> l(
                    pu_mtxs_[num_thread_local], std::try_to_lock);

                if (l.owns_lock() &&
                    states_[num_thread_local].data_.load(
                        std::memory_order_relaxed) <= hpx::state::suspended)
                {
                    return num_thread_local;
                }
            }
        }

        return num_thread;
    }
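
    // Editorial example (not part of scheduler_base.cpp): when no PU is
    // currently schedulable, the lambda above widens the set of acceptable
    // states one step at a time before giving up. The hypothetical helper
    // below spells out that escalation ladder:
    // suspended -> sleeping -> stopping.
    inline hpx::state example_escalate_allowed_state(
        hpx::state max_allowed_state) noexcept
    {
        if (max_allowed_state <= hpx::state::suspended)
            return hpx::state::sleeping;
        if (max_allowed_state <= hpx::state::sleeping)
            return hpx::state::stopping;

        // already at stopping or beyond: the caller stops retrying
        return max_allowed_state;
    }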

    // allow to access/manipulate states
    std::atomic<hpx::state>& scheduler_base::get_state(std::size_t num_thread)
    {
        HPX_ASSERT(num_thread < states_.size());
        return states_[num_thread].data_;
    }

    std::atomic<hpx::state> const& scheduler_base::get_state(
        std::size_t num_thread) const
    {
        HPX_ASSERT(num_thread < states_.size());
        return states_[num_thread].data_;
    }

    void scheduler_base::set_all_states(hpx::state s)
    {
        for (auto& state : states_)
        {
            state.data_.store(s);
        }
    }

    void scheduler_base::set_all_states_at_least(hpx::state s)
    {
        for (auto& state : states_)
        {
            if (state.data_.load(std::memory_order_relaxed) < s)
            {
                state.data_.store(s, std::memory_order_release);
            }
        }
    }

    // return whether all states are at least at the given one
    bool scheduler_base::has_reached_state(hpx::state s) const
    {
        for (auto const& state : states_)
        {
            if (state.data_.load(std::memory_order_relaxed) < s)
                return false;
        }
        return true;
    }

    bool scheduler_base::is_state(hpx::state s) const
    {
        for (auto const& state : states_)
        {
            if (state.data_.load(std::memory_order_relaxed) != s)
                return false;
        }
        return true;
    }

    std::pair<hpx::state, hpx::state> scheduler_base::get_minmax_state() const
    {
        std::pair<hpx::state, hpx::state> result(
            hpx::state::last_valid_runtime_state,
            hpx::state::first_valid_runtime_state);

        for (auto const& state_iter : states_)
        {
            hpx::state s = state_iter.data_.load(std::memory_order_relaxed);
            result.first = (std::min) (result.first, s);
            result.second = (std::max) (result.second, s);
        }

        return result;
    }

    // get/set scheduler mode
    void scheduler_base::set_scheduler_mode(
        scheduler_mode mode, hpx::threads::mask_cref_type pu_mask) noexcept
    {
        HPX_ASSERT(hpx::threads::count(pu_mask) <= modes_.size());

        // distribute the same value across all cores
        std::size_t const size = hpx::threads::mask_size(pu_mask);
        for (std::size_t i = 0, j = 0; i != size; ++i)
        {
            if (hpx::threads::test(pu_mask, i))
            {
                modes_[j++].data_.store(mode, std::memory_order_release);
            }
        }
        do_some_work(static_cast<std::size_t>(-1));
    }

    void scheduler_base::add_scheduler_mode(
        scheduler_mode mode, hpx::threads::mask_cref_type pu_mask) noexcept
    {
        HPX_ASSERT(hpx::threads::count(pu_mask) <= modes_.size());

        // distribute the same value across all cores
        std::size_t const size = hpx::threads::mask_size(pu_mask);
        for (std::size_t i = 0, j = 0; i != size; ++i)
        {
            if (hpx::threads::test(pu_mask, i))
            {
                auto const old_mode =
                    modes_[j].data_.load(std::memory_order::relaxed);
                modes_[j++].data_.store(
                    old_mode | mode, std::memory_order_release);
            }
        }
    }

    void scheduler_base::remove_scheduler_mode(
        scheduler_mode mode, hpx::threads::mask_cref_type pu_mask) noexcept
    {
        HPX_ASSERT(hpx::threads::count(pu_mask) <= modes_.size());

        std::size_t const size = hpx::threads::mask_size(pu_mask);
        for (std::size_t i = 0, j = 0; i != size; ++i)
        {
            if (hpx::threads::test(pu_mask, i))
            {
                auto const old_mode =
                    modes_[j].data_.load(std::memory_order::relaxed);
                modes_[j++].data_.store(
                    static_cast<scheduler_mode>(old_mode & ~mode),
                    std::memory_order_release);
            }
        }
    }

    void scheduler_base::add_remove_scheduler_mode(scheduler_mode to_add_mode,
        scheduler_mode to_remove_mode,
        hpx::threads::mask_cref_type pu_mask) noexcept
    {
        HPX_ASSERT(hpx::threads::count(pu_mask) <= modes_.size());

        std::size_t const size = hpx::threads::mask_size(pu_mask);
        for (std::size_t i = 0, j = 0; i != size; ++i)
        {
            if (hpx::threads::test(pu_mask, i))
            {
                auto const old_mode =
                    modes_[j].data_.load(std::memory_order::relaxed);
                modes_[j++].data_.store(
                    static_cast<scheduler_mode>(
                        (old_mode | to_add_mode) & ~to_remove_mode),
                    std::memory_order_release);
            }
        }
    }

    void scheduler_base::update_scheduler_mode(scheduler_mode mode, bool set,
        hpx::threads::mask_cref_type pu_mask) noexcept
    {
        if (set)
        {
            add_scheduler_mode(mode, pu_mask);
        }
        else
        {
            remove_scheduler_mode(mode, pu_mask);
        }
    }
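
    // Editorial example (not part of scheduler_base.cpp): the four functions
    // above all reduce to the same per-core bit manipulation,
    // new_mode = (old_mode | to_add) & ~to_remove. The hypothetical helper
    // below shows that combination on plain values, using the same cast the
    // file applies when masking bits out.
    inline scheduler_mode example_combine_mode(scheduler_mode old_mode,
        scheduler_mode to_add, scheduler_mode to_remove) noexcept
    {
        return static_cast<scheduler_mode>((old_mode | to_add) & ~to_remove);
    }

    // usage sketch: enable elasticity and disable idle back-off in one step
    //   example_combine_mode(old_mode, scheduler_mode::enable_elasticity,
    //       scheduler_mode::enable_idle_backoff);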

    ///////////////////////////////////////////////////////////////////////////
    std::int64_t scheduler_base::get_background_thread_count() const noexcept
    {
        return background_thread_count_;
    }

    void scheduler_base::increment_background_thread_count() noexcept
    {
        ++background_thread_count_;
    }

    void scheduler_base::decrement_background_thread_count() noexcept
    {
        --background_thread_count_;
    }

#if defined(HPX_HAVE_SCHEDULER_LOCAL_STORAGE)
    coroutines::detail::tss_data_node* scheduler_base::find_tss_data(
        void const* key)
    {
        if (!thread_data_)
            return nullptr;
        return thread_data_->find(key);
    }

    void scheduler_base::add_new_tss_node(void const* key,
        std::shared_ptr<coroutines::detail::tss_cleanup_function> const& func,
        void* tss_data)
    {
        if (!thread_data_)
        {
            thread_data_ = std::make_shared<coroutines::detail::tss_storage>();
        }
        thread_data_->insert(key, func, tss_data);
    }

    void scheduler_base::erase_tss_node(void const* key, bool cleanup_existing)
    {
        if (thread_data_)
            thread_data_->erase(key, cleanup_existing);
    }

    void* scheduler_base::get_tss_data(void const* key)
    {
        if (coroutines::detail::tss_data_node* const current_node =
                find_tss_data(key))
        {
            return current_node->get_value();
        }
        return nullptr;
    }

    void scheduler_base::set_tss_data(void const* key,
        std::shared_ptr<coroutines::detail::tss_cleanup_function> const& func,
        void* tss_data, bool cleanup_existing)
    {
        if (coroutines::detail::tss_data_node* const current_node =
                find_tss_data(key))
        {
            if (func || (tss_data != 0))
                current_node->reinit(func, tss_data, cleanup_existing);
            else
                erase_tss_node(key, cleanup_existing);
        }
        else if (func || (tss_data != 0))
        {
            add_new_tss_node(key, func, tss_data);
        }
    }
#endif
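
#if defined(HPX_HAVE_SCHEDULER_LOCAL_STORAGE)
    // Editorial example (not part of scheduler_base.cpp): scheduler-local
    // storage is keyed by an opaque address; the address of a tag object is a
    // common choice. All names below are hypothetical.
    namespace example {

        inline int tss_tag = 0;    // its address serves as the TSS key

        inline void store_and_read(scheduler_base& sched, void* value)
        {
            // no cleanup function, do not touch a pre-existing entry
            sched.set_tss_data(&tss_tag, nullptr, value, false);
            HPX_ASSERT(sched.get_tss_data(&tss_tag) == value);
        }
    }    // namespace example
#endif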

    std::ptrdiff_t scheduler_base::get_stack_size(
        threads::thread_stacksize stacksize) const noexcept
    {
        if (stacksize == thread_stacksize::current)
        {
            stacksize = get_self_stacksize_enum();
        }

        HPX_ASSERT(stacksize != thread_stacksize::current);

        switch (stacksize)
        {
        case thread_stacksize::small_:
            return thread_queue_init_.small_stacksize_;

        case thread_stacksize::medium:
            return thread_queue_init_.medium_stacksize_;

        case thread_stacksize::large:
            return thread_queue_init_.large_stacksize_;

        case thread_stacksize::huge:
            return thread_queue_init_.huge_stacksize_;

        case thread_stacksize::nostack:
            return (std::numeric_limits<std::ptrdiff_t>::max)();

        default:
            HPX_ASSERT_MSG(
                false, util::format("Invalid stack size {1}", stacksize));
            break;
        }

        return thread_queue_init_.small_stacksize_;
    }
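
    // Editorial example (not part of scheduler_base.cpp): get_stack_size()
    // resolves a symbolic stack-size category to the byte count configured in
    // thread_queue_init_parameters; thread_stacksize::nostack deliberately
    // maps to the largest representable value so that stack checks never fire
    // for stackless threads. A hypothetical check built on that contract:
    inline bool example_fits_on_stack(scheduler_base const& sched,
        thread_stacksize size, std::ptrdiff_t needed)
    {
        // nostack resolves to PTRDIFF_MAX and therefore accepts any request
        return needed <= sched.get_stack_size(size);
    }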

    void scheduler_base::set_mpi_polling_functions(
        polling_function_ptr mpi_func,
        polling_work_count_function_ptr mpi_work_count_func)
    {
        polling_function_mpi_.store(mpi_func, std::memory_order_relaxed);
        polling_work_count_function_mpi_.store(
            mpi_work_count_func, std::memory_order_relaxed);
    }

    void scheduler_base::clear_mpi_polling_function()
    {
        polling_function_mpi_.store(
            &null_polling_function, std::memory_order_relaxed);
        polling_work_count_function_mpi_.store(
            &null_polling_work_count_function, std::memory_order_relaxed);
    }

    void scheduler_base::set_cuda_polling_functions(
        polling_function_ptr cuda_func,
        polling_work_count_function_ptr cuda_work_count_func)
    {
        polling_function_cuda_.store(cuda_func, std::memory_order_relaxed);
        polling_work_count_function_cuda_.store(
            cuda_work_count_func, std::memory_order_relaxed);
    }

    void scheduler_base::clear_cuda_polling_function()
    {
        polling_function_cuda_.store(
            &null_polling_function, std::memory_order_relaxed);
        polling_work_count_function_cuda_.store(
            &null_polling_work_count_function, std::memory_order_relaxed);
    }

    void scheduler_base::set_sycl_polling_functions(
        polling_function_ptr sycl_func,
        polling_work_count_function_ptr sycl_work_count_func)
    {
        polling_function_sycl_.store(sycl_func, std::memory_order_relaxed);
        polling_work_count_function_sycl_.store(
            sycl_work_count_func, std::memory_order_relaxed);
    }

    void scheduler_base::clear_sycl_polling_function()
    {
        polling_function_sycl_.store(
            &null_polling_function, std::memory_order_relaxed);
        polling_work_count_function_sycl_.store(
            &null_polling_work_count_function, std::memory_order_relaxed);
    }

    detail::polling_status scheduler_base::custom_polling_function() const
    {
        detail::polling_status status = detail::polling_status::idle;
#if defined(HPX_HAVE_MODULE_ASYNC_MPI)
        if ((*polling_function_mpi_.load(std::memory_order_relaxed))() ==
            detail::polling_status::busy)
        {
            status = detail::polling_status::busy;
        }
#endif
#if defined(HPX_HAVE_MODULE_ASYNC_CUDA)
        if ((*polling_function_cuda_.load(std::memory_order_relaxed))() ==
            detail::polling_status::busy)
        {
            status = detail::polling_status::busy;
        }
#endif
#if defined(HPX_HAVE_MODULE_ASYNC_SYCL)
        if ((*polling_function_sycl_.load(std::memory_order_relaxed))() ==
            detail::polling_status::busy)
        {
            status = detail::polling_status::busy;
        }
#endif
        return status;
    }

    std::size_t scheduler_base::get_polling_work_count() const
    {
        std::size_t work_count = 0;
#if defined(HPX_HAVE_MODULE_ASYNC_MPI)
        work_count +=
            polling_work_count_function_mpi_.load(std::memory_order_relaxed)();
#endif
#if defined(HPX_HAVE_MODULE_ASYNC_CUDA)
        work_count +=
            polling_work_count_function_cuda_.load(std::memory_order_relaxed)();
#endif
#if defined(HPX_HAVE_MODULE_ASYNC_SYCL)
        work_count +=
            polling_work_count_function_sycl_.load(std::memory_order_relaxed)();
#endif
        return work_count;
    }
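
    // Editorial example (not part of scheduler_base.cpp): an integration such
    // as the MPI, CUDA, or SYCL module registers two plain function pointers,
    // one that makes progress and reports whether it was busy, and one that
    // reports how many operations are still in flight. The functions below are
    // hypothetical stand-ins for what such a module would install.
    inline detail::polling_status example_poll()
    {
        // drive the external runtime's completion queue here; report idle
        // when nothing was processed
        return detail::polling_status::idle;
    }

    inline std::size_t example_poll_work_count()
    {
        return 0;    // number of operations still pending
    }

    // usage sketch:
    //   sched.set_mpi_polling_functions(&example_poll, &example_poll_work_count);
    //   ...
    //   sched.clear_mpi_polling_function();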

    std::ostream& operator<<(std::ostream& os, scheduler_base const& scheduler)
    {
        os << scheduler.get_description() << "(" << &scheduler << ")";

        return os;
    }
}    // namespace hpx::threads::policies