
STEllAR-GROUP / hpx / #882

31 Aug 2023 07:44PM UTC coverage: 41.798% (-44.7%) from 86.546%
Build #882 (push)

19442 of 46514 relevant lines covered (41.8%)

126375.38 hits per line

Source File:
/libs/core/synchronization/src/local_barrier.cpp
Per-line coverage markers in the report show the hpx::detail reference-counting functions as covered (hit counts up to 195), while every line of the hpx::lcos::local::barrier implementation is marked uncovered. Sketches of the two techniques used here follow the listing.

//  Copyright (c) 2007-2023 Hartmut Kaiser
//  Copyright (c) 2016 Thomas Heller
//
//  SPDX-License-Identifier: BSL-1.0
//  Distributed under the Boost Software License, Version 1.0. (See accompanying
//  file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)

#include <hpx/synchronization/barrier.hpp>

#include <atomic>
#include <cstddef>
#include <mutex>
#include <utility>

///////////////////////////////////////////////////////////////////////////////
namespace hpx::detail {

    void intrusive_ptr_add_ref(barrier_data* p) noexcept
    {
        p->count_.increment();
    }

    void intrusive_ptr_release(barrier_data* p) noexcept
    {
        if (0 == p->count_.decrement())
        {
            // The thread that decrements the reference count to zero must
            // perform an acquire to ensure that it doesn't start destructing
            // the object until all previous writes have drained.
            std::atomic_thread_fence(std::memory_order_acquire);

            delete p;
        }
    }
}    // namespace hpx::detail

///////////////////////////////////////////////////////////////////////////////
namespace hpx::lcos::local {

    barrier::barrier(std::size_t expected)
      : number_of_threads_(expected)
      , total_(barrier_flag)
    {
    }

    barrier::~barrier()
    {
        std::unique_lock<mutex_type> l(mtx_);

        while (total_ > barrier_flag)    //-V776
        {
            // Wait until everyone exits the barrier
            cond_.wait(l, "barrier::~barrier");
        }
    }

    void barrier::wait()
    {
        std::unique_lock<mutex_type> l(mtx_);

        while (total_ > barrier_flag)    //-V776
        {
            // wait until everyone exits the barrier
            cond_.wait(l, "barrier::wait");
        }

        // Are we the first to enter?
        if (total_ == barrier_flag)
            total_ = 0;

        ++total_;

        if (total_ == number_of_threads_)
        {
            total_ += barrier_flag - 1;
            cond_.notify_all(HPX_MOVE(l));
        }
        else
        {
            while (total_ < barrier_flag)    //-V776
            {
                // wait until enough threads enter the barrier
                cond_.wait(l, "barrier::wait");
            }
            --total_;

            // get entering threads to wake up
            if (total_ == barrier_flag)    //-V547
            {
                cond_.notify_all(HPX_MOVE(l));
            }
        }
    }

    void barrier::count_up()
    {
        std::unique_lock<mutex_type> l(mtx_);
        ++number_of_threads_;
    }

    void barrier::reset(std::size_t number_of_threads)
    {
        std::unique_lock<mutex_type> l(mtx_);
        this->number_of_threads_ = number_of_threads;
    }

}    // namespace hpx::lcos::local
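The hpx::detail functions above are the standard intrusive reference-counting hooks used by intrusive_ptr: add_ref bumps an atomic counter, and release decrements it and, on the thread that brings the count to zero, issues an acquire fence before deleting the object so that writes made through other references are visible to the destructor. Below is a minimal, self-contained sketch of that pattern using plain std::atomic; the ref_counted type and its members are illustrative stand-ins, not HPX's barrier_data or its counter.

#include <atomic>
#include <cstddef>

// Illustrative type; HPX's barrier_data and its count_ member are
// analogous but not identical.
struct ref_counted
{
    std::atomic<std::size_t> count_{0};
    // ... payload ...
};

void intrusive_ptr_add_ref(ref_counted* p) noexcept
{
    // Acquiring an additional reference needs no ordering: relaxed suffices.
    p->count_.fetch_add(1, std::memory_order_relaxed);
}

void intrusive_ptr_release(ref_counted* p) noexcept
{
    // The release ordering publishes this owner's writes to whichever
    // thread ends up performing the delete.
    if (p->count_.fetch_sub(1, std::memory_order_release) == 1)
    {
        // Pairs with the release decrements above: the destructor must not
        // run until all writes made through other references are visible.
        std::atomic_thread_fence(std::memory_order_acquire);
        delete p;
    }
}

int main()
{
    ref_counted* p = new ref_counted;
    intrusive_ptr_add_ref(p);    // e.g. two intrusive_ptrs share ownership
    intrusive_ptr_add_ref(p);
    intrusive_ptr_release(p);
    intrusive_ptr_release(p);    // the last release deletes the object
}

The relaxed-increment / release-decrement / acquire-fence combination is the usual memory-ordering recipe for intrusive counts; only the final decrement needs to synchronize with destruction.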
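barrier::wait() above implements a reusable counting barrier with a single counter and a sentinel: total_ starts at barrier_flag, the first arrival of a round resets it to zero, arrivals count up, the last arrival adds barrier_flag - 1 and wakes everyone, and departing threads count back down until the sentinel is restored, which lets the next round (or the destructor) proceed. The sketch below re-creates that scheme with standard primitives only; it is a simplified illustration, not the HPX implementation (the barrier_flag value, the member names, and std::condition_variable stand in for HPX's internal types and condition variable).

#include <condition_variable>
#include <cstddef>
#include <iostream>
#include <mutex>
#include <thread>
#include <vector>

class local_barrier
{
    // Sentinel marking "no round in progress"; the actual HPX constant may
    // differ -- the top bit of std::size_t is used here for illustration.
    static constexpr std::size_t barrier_flag =
        static_cast<std::size_t>(1) << (8 * sizeof(std::size_t) - 1);

    std::mutex mtx_;
    std::condition_variable cond_;
    std::size_t number_of_threads_;
    std::size_t total_ = barrier_flag;

public:
    explicit local_barrier(std::size_t expected)
      : number_of_threads_(expected)
    {
    }

    void wait()
    {
        std::unique_lock<std::mutex> l(mtx_);

        // Wait until every thread from the previous round has left.
        cond_.wait(l, [this] { return total_ <= barrier_flag; });

        // First thread of a new round resets the counter.
        if (total_ == barrier_flag)
            total_ = 0;

        ++total_;

        if (total_ == number_of_threads_)
        {
            // Last arrival: mark the round as draining and wake everyone.
            total_ += barrier_flag - 1;
            cond_.notify_all();
        }
        else
        {
            // Wait until the last arrival pushes total_ past the sentinel.
            cond_.wait(l, [this] { return total_ >= barrier_flag; });
            --total_;

            // The last thread to leave restores the sentinel so the next
            // round can start.
            if (total_ == barrier_flag)
                cond_.notify_all();
        }
    }
};

int main()
{
    constexpr std::size_t n = 4;
    local_barrier b(n);

    std::vector<std::thread> threads;
    for (std::size_t i = 0; i != n; ++i)
    {
        threads.emplace_back([&b, i] {
            // ... per-thread phase-1 work would go here ...
            b.wait();    // no thread proceeds until all n have arrived
            std::cout << "thread " << i << " passed the barrier\n";
        });
    }
    for (auto& t : threads)
        t.join();
}

With n worker threads each calling wait(), none proceeds past the call until all n have arrived, and the sentinel bookkeeping makes the same object safe to reuse for subsequent rounds.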