jbaldwin / libcoro / 15456224751

05 Jun 2025 01:22AM UTC coverage: 87.987%. First build 15456224751

Pull Request #336: coro::ring_buffer use coro::mutex instead of std::mutex
Merge 8d844c6ee into f0cccaaf4 (github / web-flow)

68 of 69 new or added lines in 3 files covered. (98.55%)

1582 of 1798 relevant lines covered (87.99%)

5564162.17 hits per line
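
For context on what the PR is swapping in: a coro::mutex waiter suspends the awaiting coroutine rather than blocking the underlying thread, which is the usual reason to prefer it over std::mutex inside coroutine code such as coro::ring_buffer. Below is a minimal usage sketch, not taken from the PR; it assumes the coro::task, coro::mutex::lock(), coro::scoped_lock, coro::when_all, and coro::sync_wait APIs behave as in libcoro's documented examples, and the task bodies are hypothetical.

#include <coro/coro.hpp>

#include <cstdint>
#include <vector>

int main()
{
    // Hypothetical example: a few tasks contending on a coro::mutex.
    // co_await m.lock() suspends the awaiting coroutine when the lock is held
    // instead of blocking the thread, and the returned coro::scoped_lock
    // releases the mutex when it goes out of scope.
    coro::mutex            m;
    std::vector<uint64_t>  output;

    auto make_task = [&](uint64_t i) -> coro::task<void>
    {
        auto lock = co_await m.lock();
        output.emplace_back(i);
        co_return;
    };

    coro::sync_wait(coro::when_all(make_task(1), make_task(2), make_task(3)));

    return output.size() == 3 ? 0 : 1;
}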

Source File: /src/mutex.cpp (97.44% covered)

#include "coro/detail/awaiter_list.hpp"
#include "coro/mutex.hpp"

namespace coro
{
namespace detail
{
auto lock_operation_base::await_ready() const noexcept -> bool
{
    return m_mutex.try_lock();
}

auto lock_operation_base::await_suspend(std::coroutine_handle<> awaiting_coroutine) noexcept -> bool
{
    m_awaiting_coroutine = awaiting_coroutine;
    auto& state = m_mutex.m_state;
    void* current = state.load(std::memory_order::acquire);
    const void* unlocked_value = m_mutex.unlocked_value();
    do
    {
        // While trying to suspend, the lock can become available; if so, attempt to grab it and then don't suspend.
        // If the lock never becomes available then we place ourselves at the head of the waiter list and suspend.

        if (current == unlocked_value)
        {
            // The lock has become available, try to lock it.
            if (state.compare_exchange_weak(current, nullptr, std::memory_order::acq_rel, std::memory_order::acquire))
            {
                // We've acquired the lock, don't suspend.
                m_awaiting_coroutine = nullptr;
                return false;
            }
        }
        else // if (current == nullptr || current is of type lock_operation_base*)
        {
            // The lock is still owned, attempt to add ourselves as a waiter.
            m_next = static_cast<lock_operation_base*>(current);
            if (state.compare_exchange_weak(current, static_cast<void*>(this), std::memory_order::acq_rel, std::memory_order::acquire))
            {
                // We've successfully added ourselves to the waiter queue.
                return true;
            }
        }
    } while (true);
}

} // namespace detail

scoped_lock::~scoped_lock()
{
    unlock();
}

auto scoped_lock::unlock() -> void
{
    if (m_mutex != nullptr)
    {
        std::atomic_thread_fence(std::memory_order::acq_rel);
        m_mutex->unlock();
        m_mutex = nullptr;
    }
}

auto mutex::try_lock() -> bool
{
    void* expected = const_cast<void*>(unlocked_value());
    return m_state.compare_exchange_strong(expected, nullptr, std::memory_order::acq_rel, std::memory_order::relaxed);
}

auto mutex::unlock() -> void
{
    void* current = m_state.load(std::memory_order::acquire);
    do
    {
        // Sanity check that the mutex isn't already unlocked.
        if (current == const_cast<void*>(unlocked_value()))
        {
            throw std::runtime_error{"coro::mutex is already unlocked"}; // NEW in this PR: not covered by tests (×)
        }

        // There are no current waiters, attempt to set the mutex as unlocked.
        if (current == nullptr)
        {
            if (m_state.compare_exchange_weak(
                    current,
                    const_cast<void*>(unlocked_value()),
                    std::memory_order::acq_rel,
                    std::memory_order::acquire))
            {
                // We've successfully unlocked the mutex, return since there are no current waiters.
                std::atomic_thread_fence(std::memory_order::acq_rel);
                return;
            }
            else
            {
                // This means someone has added themselves as a waiter; we need to try again with our updated current state.
                // assert(m_state now holds a lock_operation_base*)
                continue;
            }
        }
        else
        {
            // There are waiters, let's wake the first one up. This will set the state to the next waiter, or nullptr (no waiters but locked).
            std::atomic<detail::lock_operation_base*>* casted = reinterpret_cast<std::atomic<detail::lock_operation_base*>*>(&m_state);
            auto* waiter = detail::awaiter_list_pop<detail::lock_operation_base>(*casted);
            // assert waiter != nullptr, nobody else should be unlocking this mutex.
            // Directly transfer control to the waiter, they are now responsible for unlocking the mutex.
            std::atomic_thread_fence(std::memory_order::acq_rel);
            waiter->m_awaiting_coroutine.resume();
            return;
        }
    } while (true);
}

} // namespace coro
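
Reading the listing, m_state does triple duty: it holds the unlocked sentinel (unlocked_value()) when the mutex is free, nullptr when it is locked with no waiters, and otherwise the head of an intrusive list of lock_operation_base waiters, with await_suspend() pushing new waiters at the head and unlock() popping one to resume. The standalone sketch below illustrates that CAS-managed intrusive list in isolation; the names node and waiter_stack are invented for the example, and it does not reproduce libcoro's detail::awaiter_list helpers, whose exact semantics (such as wake-up order) are not visible in this file.

#include <atomic>
#include <cassert>

// Standalone illustration (not libcoro code) of the intrusive, lock-free
// waiter list used above: each waiter embeds its own `next` pointer and a
// single atomic head is updated with compare_exchange loops.
struct node
{
    node* next{nullptr};
};

struct waiter_stack
{
    std::atomic<node*> head{nullptr};

    // Same shape as the waiter registration in await_suspend(): link the new
    // node in front of the current head, then CAS the head; retry if another
    // thread won the race (compare_exchange_weak reloads `current` on failure).
    auto push(node* n) -> void
    {
        node* current = head.load(std::memory_order::acquire);
        do
        {
            n->next = current;
        } while (!head.compare_exchange_weak(
            current, n, std::memory_order::acq_rel, std::memory_order::acquire));
    }

    // Detach one waiter, loosely mirroring the role detail::awaiter_list_pop
    // plays in mutex::unlock(); this simple version always pops the most
    // recently pushed node and makes no fairness guarantees.
    auto pop() -> node*
    {
        node* current = head.load(std::memory_order::acquire);
        while (current != nullptr &&
               !head.compare_exchange_weak(
                   current, current->next, std::memory_order::acq_rel, std::memory_order::acquire))
        {
        }
        return current;
    }
};

int main()
{
    waiter_stack stack;
    node a;
    node b;
    stack.push(&a);
    stack.push(&b);
    assert(stack.pop() == &b); // LIFO: the most recent waiter comes off first
    assert(stack.pop() == &a);
    assert(stack.pop() == nullptr);
    return 0;
}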