jbaldwin / libcoro / build 15456186781

05 Jun 2025 01:19AM UTC coverage: 87.778%. First build.

Pull Request #336: coro::ring_buffer use coro::mutex instead of std::mutex
Merge 9c9afad2c into f0cccaaf4 (github / web-flow)

68 of 69 new or added lines in 3 files covered (98.55%)
1580 of 1800 relevant lines covered (87.78%)
5561276.86 hits per line

Source File: /src/mutex.cpp (97.44% of lines covered; the only line not hit is the already-unlocked throw in mutex::unlock())
#include "coro/detail/awaiter_list.hpp"
#include "coro/mutex.hpp"

namespace coro
{
namespace detail
{
auto lock_operation_base::await_ready() const noexcept -> bool
{
    return m_mutex.try_lock();
}

auto lock_operation_base::await_suspend(std::coroutine_handle<> awaiting_coroutine) noexcept -> bool
{
    m_awaiting_coroutine = awaiting_coroutine;
    auto& state = m_mutex.m_state;
    void* current = state.load(std::memory_order::acquire);
    const void* unlocked_value = m_mutex.unlocked_value();
    do
    {
        // While trying to suspend the lock can become available, if so attempt to grab it and then don't suspend.
        // If the lock never becomes available then we place ourself at the head of the waiter list and suspend.

        if (current == unlocked_value)
        {
            // The lock has become available, try and lock.
            if (state.compare_exchange_weak(current, nullptr, std::memory_order::acq_rel, std::memory_order::acquire))
            {
                // We've acquired the lock, don't suspend.
                m_awaiting_coroutine = nullptr;
                return false;
            }
        }
        else // if (current == nullptr || current is of type lock_operation_base*)
        {
            // The lock is still owned, attempt to add ourself as a waiter.
            m_next = static_cast<lock_operation_base*>(current);
            if (state.compare_exchange_weak(current, static_cast<void*>(this), std::memory_order::acq_rel, std::memory_order::acquire))
            {
                // We've successfully added ourself to the waiter queue.
                return true;
            }
        }
    } while (true);
}

} // namespace detail

scoped_lock::~scoped_lock()
{
    unlock();
}

auto scoped_lock::unlock() -> void
{
    if (m_mutex != nullptr)
    {
        std::atomic_thread_fence(std::memory_order::acq_rel);
        m_mutex->unlock();
        m_mutex = nullptr;
    }
}

auto mutex::try_lock() -> bool
{
    void* expected = const_cast<void*>(unlocked_value());
    return m_state.compare_exchange_strong(expected, nullptr, std::memory_order::acq_rel, std::memory_order::relaxed);
}

auto mutex::unlock() -> void
{
    void* current = m_state.load(std::memory_order::acquire);
    do
    {
        // Sanity check that the mutex isn't already unlocked.
        if (current == const_cast<void*>(unlocked_value()))
        {
            throw std::runtime_error{"coro::mutex is already unlocked"};
        }

        // There are no current waiters, attempt to set the mutex as unlocked.
        if (current == nullptr)
        {
            if (m_state.compare_exchange_weak(
                current,
                const_cast<void*>(unlocked_value()),
                std::memory_order::acq_rel,
                std::memory_order::acquire))
            {
                // We've successfully unlocked the mutex, return since there are no current waiters.
                std::atomic_thread_fence(std::memory_order::acq_rel);
                return;
            }
            else
            {
                // This means someone has added themselves as a waiter, we need to try again with our updated current state.
                // assert(m_state now holds a lock_operation_base*)
                continue;
            }
        }
        else
        {
            // There are waiters, lets wake the first one up. This will set the state to the next waiter, or nullptr (no waiters but locked).
            std::atomic<detail::lock_operation_base*>* casted = reinterpret_cast<std::atomic<detail::lock_operation_base*>*>(&m_state);
            auto* waiter = detail::awaiter_list_pop<detail::lock_operation_base>(*casted);
            // assert waiter != nullptr, nobody else should be unlocking this mutex.
            // Directly transfer control to the waiter, they are now responsible for unlocking the mutex.
            std::atomic_thread_fence(std::memory_order::acq_rel);
            waiter->m_awaiting_coroutine.resume();
            return;
        }
    } while (true);
}

} // namespace coro
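
For context on the code exercised above: coro::mutex is awaited from coroutines rather than blocking a thread. A contended lock suspends the awaiting coroutine onto the intrusive waiter list built in lock_operation_base::await_suspend, and mutex::unlock() resumes the next waiter directly. Below is a minimal usage sketch, not part of this file; it assumes the awaitable mutex::lock() returning a coro::scoped_lock plus the thread_pool, sync_wait, and when_all helpers as documented in the libcoro README.

#include <coro/coro.hpp>

#include <cstdint>
#include <vector>

int main()
{
    coro::thread_pool tp{coro::thread_pool::options{.thread_count = 4}};
    coro::mutex m;
    std::vector<uint64_t> output;

    auto make_task = [&](uint64_t i) -> coro::task<void>
    {
        co_await tp.schedule();
        // Awaiting lock() suspends this coroutine (via lock_operation_base::await_suspend above)
        // instead of blocking the thread; the returned scoped_lock unlocks in its destructor.
        auto lock = co_await m.lock();
        output.emplace_back(i);
        co_return;
    };

    std::vector<coro::task<void>> tasks;
    for (uint64_t i = 0; i < 8; ++i)
    {
        tasks.emplace_back(make_task(i));
    }
    coro::sync_wait(coro::when_all(std::move(tasks)));
}

Note the design choice visible in mutex::unlock(): the releasing coroutine resumes the next waiter inline ("directly transfer control to the waiter" in the source comments), so ownership is handed straight to that waiter rather than going back through a scheduler.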