STEllAR-GROUP / hpx / build #882 (push)

31 Aug 2023 07:44PM UTC. Coverage: 41.798% (down 44.7% from 86.546%).
19442 of 46514 relevant lines covered (41.8%), 126375.38 hits per line.
Source file: /libs/core/threading_base/src/thread_pool_base.cpp (file coverage: 62.75%)

//  Copyright (c) 2007-2024 Hartmut Kaiser
//
//  SPDX-License-Identifier: BSL-1.0
//  Distributed under the Boost Software License, Version 1.0. (See accompanying
//  file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)

#include <hpx/modules/affinity.hpp>
#include <hpx/modules/hardware.hpp>
#include <hpx/modules/timing.hpp>
#include <hpx/modules/topology.hpp>
#include <hpx/threading_base/scheduler_base.hpp>
#include <hpx/threading_base/scheduler_state.hpp>
#include <hpx/threading_base/thread_pool_base.hpp>

#include <cstddef>
#include <cstdint>
#include <memory>
#include <ostream>

namespace hpx::threads {

    ///////////////////////////////////////////////////////////////////////////
    thread_pool_base::thread_pool_base(thread_pool_init_parameters const& init)
      : id_(init.index_, init.name_)
      , thread_offset_(init.thread_offset_)
      , affinity_data_(init.affinity_data_)
      , timestamp_scale_(1.0)
      , notifier_(init.notifier_)
    {
    }

    ///////////////////////////////////////////////////////////////////////////
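    // Build a bitmask of the processing units (PUs) this pool currently
    // uses. For up to num_cores worker threads whose state has not
    // progressed past hpx::state::suspended (i.e. that are not shutting
    // down), the thread's PU mask (or, when full_cores is set, the affinity
    // mask of its whole core) is OR-ed into the result.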
    mask_type thread_pool_base::get_used_processing_units(
        std::size_t num_cores, bool full_cores) const
    {
        auto const& topo = create_topology();
        auto const sched = get_scheduler();

        auto used_processing_units = mask_type();
        threads::resize(used_processing_units,
            static_cast<std::size_t>(hardware_concurrency()));

        std::size_t const max_cores = get_os_thread_count();
        for (std::size_t thread_num = 0;
            thread_num != max_cores && num_cores != 0; ++thread_num)
        {
            if (sched->get_state(thread_num).load() <= hpx::state::suspended)
            {
                if (!full_cores)
                {
                    used_processing_units |= affinity_data_.get_pu_mask(
                        topo, thread_num + get_thread_offset());
                }
                else
                {
                    used_processing_units |= topo.get_core_affinity_mask(
                        thread_num + get_thread_offset());
                }
                --num_cores;
            }
        }

        return used_processing_units;
    }

    mask_type thread_pool_base::get_used_processing_units(bool full_cores) const
    {
        return get_used_processing_units(get_os_thread_count(), full_cores);
    }
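
    // Return the affinity mask of a single worker thread: its PU mask by
    // default, or the mask of the whole core it is bound to when full_cores
    // is set.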
    mask_type thread_pool_base::get_used_processing_unit(
        std::size_t thread_num, bool full_cores) const
    {
        auto const& topo = create_topology();
        if (!full_cores)
        {
            return affinity_data_.get_pu_mask(
                topo, thread_num + get_thread_offset());
        }
        return topo.get_core_affinity_mask(thread_num + get_thread_offset());
    }
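
    // Translate the pool's used-PU mask into the set of NUMA nodes those
    // PUs belong to, returned as an hwloc bitmap.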
    hwloc_bitmap_ptr thread_pool_base::get_numa_domain_bitmap() const
    {
        auto const& topo = create_topology();
        mask_type const used_processing_units = get_used_processing_units();
        return topo.cpuset_to_nodeset(used_processing_units);
    }
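
    // Convenience wrappers around get_thread_count(): each forwards to it
    // for a single thread_schedule_state (unknown, active, pending,
    // suspended, terminated, or staged) at default priority.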
    std::int64_t thread_pool_base::get_thread_count_unknown(
        std::size_t num_thread, bool reset)
    {
        return get_thread_count(thread_schedule_state::unknown,
            thread_priority::default_, num_thread, reset);
    }

    std::int64_t thread_pool_base::get_thread_count_active(
        std::size_t num_thread, bool reset)
    {
        return get_thread_count(thread_schedule_state::active,
            thread_priority::default_, num_thread, reset);
    }

    std::int64_t thread_pool_base::get_thread_count_pending(
        std::size_t num_thread, bool reset)
    {
        return get_thread_count(thread_schedule_state::pending,
            thread_priority::default_, num_thread, reset);
    }

    std::int64_t thread_pool_base::get_thread_count_suspended(
        std::size_t num_thread, bool reset)
    {
        return get_thread_count(thread_schedule_state::suspended,
            thread_priority::default_, num_thread, reset);
    }

    std::int64_t thread_pool_base::get_thread_count_terminated(
        std::size_t num_thread, bool reset)
    {
        return get_thread_count(thread_schedule_state::terminated,
            thread_priority::default_, num_thread, reset);
    }

    std::int64_t thread_pool_base::get_thread_count_staged(
        std::size_t num_thread, bool reset)
    {
        return get_thread_count(thread_schedule_state::staged,
            thread_priority::default_, num_thread, reset);
    }
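
    // Count the OS worker threads of this pool whose scheduler state is at
    // most hpx::state::suspended, i.e. threads that have not begun shutting
    // down.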
    std::size_t thread_pool_base::get_active_os_thread_count() const
    {
        std::size_t active_os_thread_count = 0;

        for (std::size_t thread_num = 0; thread_num < get_os_thread_count();
            ++thread_num)
        {
            if (get_scheduler()->get_state(thread_num).load() <=
                hpx::state::suspended)
            {
                ++active_os_thread_count;
            }
        }

        return active_os_thread_count;
    }

    ///////////////////////////////////////////////////////////////////////////
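    // Calibrate timestamp_scale_, the factor converting raw hardware
    // timestamp ticks into nanoseconds: sample the timestamp counter and the
    // high resolution clock, busy-wait for roughly 100 microseconds, and
    // take the ratio of elapsed wall-clock time to elapsed timestamp ticks.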
    void thread_pool_base::init_pool_time_scale()
    {
        // scale timestamps to nanoseconds
        std::uint64_t const base_timestamp = util::hardware::timestamp();
        std::uint64_t const base_time =
            hpx::chrono::high_resolution_clock::now();
        std::uint64_t curr_timestamp = util::hardware::timestamp();
        std::uint64_t curr_time = hpx::chrono::high_resolution_clock::now();

        while ((curr_time - base_time) <= 100000)
        {
            curr_timestamp = util::hardware::timestamp();
            curr_time = hpx::chrono::high_resolution_clock::now();
        }

        if (curr_timestamp - base_timestamp != 0)
        {
            timestamp_scale_ = static_cast<double>(curr_time - base_time) /
                static_cast<double>(curr_timestamp - base_timestamp);
        }
    }
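
    // The base-class implementation only records the given thread offset;
    // the pool_threads argument is intentionally unused at this level.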
    void thread_pool_base::init(
        std::size_t /* pool_threads */, std::size_t threads_offset)
    {
        thread_offset_ = threads_offset;
    }
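
    // Stream a short textual representation of a pool: its name followed by
    // its index in parentheses.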
    std::ostream& operator<<(
        std::ostream& os, thread_pool_base const& thread_pool)
    {
        auto const id = thread_pool.get_pool_id();
        os << id.name() << "(" << static_cast<std::uint64_t>(id.index()) << ")";

        return os;
    }
}    // namespace hpx::threads