• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

jbaldwin / libcoro / 16708247588

03 Aug 2025 06:44PM UTC coverage: 88.09%. First build
16708247588

Pull #365

github

web-flow
Merge c76976ebb into 10ca7de0a
Pull Request #365: coro::shared_mutex use coro::mutex instead of std::mutex

55 of 66 new or added lines in 2 files covered. (83.33%)

1642 of 1864 relevant lines covered (88.09%)

13668233.66 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

84.62
/include/coro/shared_mutex.hpp
#pragma once

#include "coro/concepts/executor.hpp"
#include "coro/mutex.hpp"
#include "coro/task.hpp"

#include <atomic>
#include <coroutine>
#include <cstdint>
#include <memory>
#include <stdexcept>
#include <utility>

10
namespace coro
11
{
12
template<concepts::executor executor_type>
13
class shared_mutex;
14

15
namespace detail
16
{
/**
 * @brief Awaiter used by coro::shared_mutex to acquire the lock in shared or exclusive mode.
 *
 * Pre-condition: the owning shared_mutex's internal coro::mutex (m_mutex) must already be
 * held by the awaiting coroutine when this operation is co_await'ed (see lock(),
 * lock_shared() and the scoped variants in coro::shared_mutex).  Every path through
 * await_ready()/await_suspend() releases that mutex exactly once.
 */
template<concepts::executor executor_type>
struct shared_lock_operation
{
    /// @param shared_mutex The shared mutex this operation will attempt to acquire.
    /// @param exclusive True to acquire exclusive ownership, false for shared ownership.
    explicit shared_lock_operation(coro::shared_mutex<executor_type>& shared_mutex, const bool exclusive)
        : m_shared_mutex(shared_mutex),
          m_exclusive(exclusive)
    {}
    ~shared_lock_operation() = default;

    shared_lock_operation(const shared_lock_operation&) = delete;
    shared_lock_operation(shared_lock_operation&&) = delete;
    auto operator=(const shared_lock_operation&) -> shared_lock_operation& = delete;
    auto operator=(shared_lock_operation&&) -> shared_lock_operation& = delete;

    /// @return True (skip suspension) if the requested mode was acquired immediately,
    ///         in which case the internal mutex is released here; false to suspend.
    auto await_ready() const noexcept -> bool
    {
        // If either mode can be acquired, unlock the internal mutex and resume.

        if (m_exclusive)
        {
            if (m_shared_mutex.try_lock_locked())
            {
                m_shared_mutex.m_mutex.unlock();
                return true;
            }
        }
        else if (m_shared_mutex.try_lock_shared_locked())
        {
            m_shared_mutex.m_mutex.unlock();
            return true;
        }

        return false;
    }

    /// Appends this waiter to the end of the FIFO waiter list and suspends.
    /// @return Always true (suspend); the coroutine is resumed later by
    ///         coro::shared_mutex::wake_waiters() once the lock is granted.
    auto await_suspend(std::coroutine_handle<> awaiting_coroutine) noexcept -> bool
    {
        // For sure the lock is currently held in a manner that it cannot be acquired, suspend ourself
        // at the end of the waiter list.

        auto* tail_waiter = m_shared_mutex.m_tail_waiter.load(std::memory_order::acquire);

        if (tail_waiter == nullptr)
        {
            // Empty waiter list: this waiter becomes both head and tail.
            m_shared_mutex.m_head_waiter = this;
            m_shared_mutex.m_tail_waiter = this;
        }
        else
        {
            tail_waiter->m_next = this;
            m_shared_mutex.m_tail_waiter         = this;
        }

        // If this is an exclusive lock acquire then mark it as so so that shared locks after this
        // exclusive one will also suspend so this exclusive lock doesn't get starved.
        if (m_exclusive)
        {
            ++m_shared_mutex.m_exclusive_waiters;
        }

        m_awaiting_coroutine = awaiting_coroutine;
        // Only release the internal mutex after the waiter list is fully updated.
        m_shared_mutex.m_mutex.unlock();
        return true;
    }

    auto await_resume() noexcept -> void { }

protected:
    friend class coro::shared_mutex<executor_type>;

    /// The coroutine to resume once the lock has been granted to this waiter.
    std::coroutine_handle<> m_awaiting_coroutine;
    /// Intrusive singly-linked list link to the next waiter; written/read only
    /// while the shared mutex's internal m_mutex is held.
    shared_lock_operation* m_next{nullptr};
    /// The shared mutex being acquired.
    coro::shared_mutex<executor_type>& m_shared_mutex;
    /// True if this waiter requested exclusive ownership, false for shared.
    bool m_exclusive{false};
};
92

93
} // namespace detail
94

/**
 * @brief A coroutine-aware shared (reader/writer) mutex.
 *
 * All state mutation is serialized through an internal coro::mutex.  Once an exclusive
 * waiter is queued, new shared lock attempts also suspend so the writer is not starved.
 * When multiple shared waiters can be woken at once they are resumed in parallel on the
 * executor supplied at construction.
 */
template<concepts::executor executor_type>
class shared_mutex
{
public:
    /**
     * @param e The executor for when multiple shared waiters can be woken up at the same time,
     *          each shared waiter will be scheduled to immediately run on this executor in
     *          parallel.
     * @throws std::runtime_error If the given executor is nullptr.
     */
    explicit shared_mutex(std::shared_ptr<executor_type> e) : m_executor(std::move(e))
    {
        if (m_executor == nullptr)
        {
            throw std::runtime_error{"coro::shared_mutex cannot have a nullptr executor"};
        }
    }
    ~shared_mutex() = default;

    shared_mutex(const shared_mutex&)                    = delete;
    shared_mutex(shared_mutex&&)                         = delete;
    auto operator=(const shared_mutex&) -> shared_mutex& = delete;
    auto operator=(shared_mutex&&) -> shared_mutex&      = delete;

    /**
     * Acquires the lock in a shared state, executes the scoped task, and then unlocks the shared lock.
     * Because unlocking a coro::shared_mutex is a task this scoped version cannot be returned as a RAII
     * object due to destructors not being able to be co_await'ed.
     * @param scoped_task The user's scoped task to execute after acquiring the shared lock.
     */
    [[nodiscard]] auto scoped_lock_shared(coro::task<void> scoped_task) -> coro::task<void>
    {
        co_await m_mutex.lock();
        co_await detail::shared_lock_operation<executor_type>{*this, false};
        co_await scoped_task;
        co_await unlock_shared();
        co_return;
    }

    /**
     * Acquires the lock in an exclusive state, executes the scoped task, and then unlocks the exclusive lock.
     * Because unlocking a coro::shared_mutex is a task this scoped version cannot be returned as a RAII
     * object due to destructors not being able to be co_await'ed.
     * @param scoped_task The user's scoped task to execute after acquiring the exclusive lock.
     */
    [[nodiscard]] auto scoped_lock(coro::task<void> scoped_task) -> coro::task<void>
    {
        co_await m_mutex.lock();
        co_await detail::shared_lock_operation<executor_type>{*this, true};
        co_await scoped_task;
        co_await unlock();
        co_return;
    }

    /**
     * Acquires the lock in a shared state. The shared_mutex must be unlock_shared() to release.
     * @return task
     */
    [[nodiscard]] auto lock_shared() -> coro::task<void>
    {
        // The internal mutex is acquired here; the lock operation releases it on every path.
        co_await m_mutex.lock();
        co_await detail::shared_lock_operation<executor_type>{*this, false};
        co_return;
    }

    /**
     * Acquires the lock in an exclusive state. The shared_mutex must be unlock()'ed to release.
     * @return task
     */
    [[nodiscard]] auto lock() -> coro::task<void>
    {
        // The internal mutex is acquired here; the lock operation releases it on every path.
        co_await m_mutex.lock();
        co_await detail::shared_lock_operation<executor_type>{*this, true};
        co_return;
    }

    /**
     * @return True if the lock could immediately be acquired in a shared state.
     */
    [[nodiscard]] auto try_lock_shared() -> bool
    {
        // To acquire the shared lock the state must be one of two states:
        //   1) unlocked
        //   2) shared locked with zero exclusive waiters
        //          Zero exclusive waiters prevents exclusive starvation if shared locks are
        //          always continuously happening.

        if (m_mutex.try_lock())
        {
            // scoped_lock releases the internal mutex when it goes out of scope.
            coro::scoped_lock lk{m_mutex};
            return try_lock_shared_locked();
        }
        // The internal mutex is contended; do not block, just report failure.
        return false;
    }

    /**
     * @return True if the lock could immediately be acquired in an exclusive state.
     */
    [[nodiscard]] auto try_lock() -> bool
    {
        // To acquire the exclusive lock the state must be unlocked.
        if (m_mutex.try_lock())
        {
            // scoped_lock releases the internal mutex when it goes out of scope.
            coro::scoped_lock lk{m_mutex};
            return try_lock_locked();
        }
        // The internal mutex is contended; do not block, just report failure.
        return false;
    }

    /**
     * Unlocks a single shared state user. *REQUIRES* that the lock was first acquired exactly once
     * via `lock_shared()` or `try_lock_shared() -> True` before being called, otherwise undefined
     * behavior.
     *
     * If the shared user count drops to zero and this lock has an exclusive waiter then the exclusive
     * waiter acquires the lock.
     */
    [[nodiscard]] auto unlock_shared() -> coro::task<void>
    {
        auto lk = co_await m_mutex.scoped_lock();
        --m_shared_users;

        // Only wake waiters from shared state if all shared users have completed.
        if (m_shared_users == 0)
        {
            if (m_head_waiter != nullptr)
            {
                wake_waiters(lk);
            }
            else
            {
                m_state = state::unlocked;
            }
        }

        co_return;
    }

    /**
     * Unlocks the mutex from its exclusive state. If there is a following exclusive waiter then
     * that exclusive waiter acquires the lock.  If there are 1 or more shared waiters then all the
     * shared waiters acquire the lock in a shared state in parallel and are resumed on the original
     * executor this shared mutex was created with.
     */
    [[nodiscard]] auto unlock() -> coro::task<void>
    {
        auto lk = co_await m_mutex.scoped_lock();
        if (m_head_waiter != nullptr)
        {
            // Hand the lock directly to the next waiter(s) instead of going through unlocked.
            wake_waiters(lk);
        }
        else
        {
            m_state = state::unlocked;
        }

        co_return;
    }

    /**
     * @brief Gets the executor that drives the shared mutex.
     *
     * @return std::shared_ptr<executor_type>
     */
    [[nodiscard]] auto executor() -> std::shared_ptr<executor_type>
    {
        return m_executor;
    }

private:
    friend struct detail::shared_lock_operation<executor_type>;

    enum class state
    {
        /// @brief The shared mutex is unlocked.
        unlocked,
        /// @brief The shared mutex is locked in shared mode.
        locked_shared,
        /// @brief The shared mutex is locked in exclusive mode.
        locked_exclusive
    };

    /// @brief This executor is for resuming multiple shared waiters.
    std::shared_ptr<executor_type> m_executor{nullptr};
    /// @brief Exclusive access for mutating the shared mutex's state.
    coro::mutex m_mutex;
    /// @brief The current state of the shared mutex.
    std::atomic<state> m_state{state::unlocked};

    /// @brief The current number of shared users that have acquired the lock.
    std::atomic<uint64_t> m_shared_users{0};
    /// @brief The current number of exclusive waiters waiting to acquire the lock.  This is used to block
    ///        new incoming shared lock attempts so the exclusive waiter is not starved.
    std::atomic<uint64_t> m_exclusive_waiters{0};

    /// @brief Head of the FIFO intrusive waiter list; mutated only while m_mutex is held.
    std::atomic<detail::shared_lock_operation<executor_type>*> m_head_waiter{nullptr};
    /// @brief Tail of the FIFO intrusive waiter list; mutated only while m_mutex is held.
    std::atomic<detail::shared_lock_operation<executor_type>*> m_tail_waiter{nullptr};

    /// @brief Attempts to acquire the lock in shared mode.  *REQUIRES* m_mutex to be held.
    /// @return True if shared ownership was taken (m_shared_users incremented).
    auto try_lock_shared_locked() -> bool
    {
        if (m_state == state::unlocked)
        {
            // If the shared mutex is unlocked put it into shared mode and add ourself as using the lock.
            m_state = state::locked_shared;
            ++m_shared_users;
            return true;
        }
        else if (m_state == state::locked_shared && m_exclusive_waiters == 0)
        {
            // If the shared mutex is in a shared locked state and there are no exclusive waiters
            // the add ourself as using the lock.
            ++m_shared_users;
            return true;
        }

        // If the lock is in shared mode but there are exclusive waiters then we will also wait so
        // the writers are not starved.

        // If the lock is in exclusive mode already then we need to wait.

        return false;
    }

    /// @brief Attempts to acquire the lock in exclusive mode.  *REQUIRES* m_mutex to be held.
    /// @return True if exclusive ownership was taken.
    auto try_lock_locked() -> bool
    {
        if (m_state == state::unlocked)
        {
            m_state = state::locked_exclusive;
            return true;
        }
        return false;
    }

    /// @brief Hands the lock to the next waiter(s) in FIFO order.
    ///        *REQUIRES* lk to hold m_mutex and m_head_waiter != nullptr.  Releases lk before
    ///        returning: for an exclusive waiter the unlock happens before the inline resume so
    ///        the resumed coroutine can re-acquire m_mutex; for shared waiters the unlock happens
    ///        after the whole batch has been scheduled onto the executor.
    auto wake_waiters(coro::scoped_lock& lk) -> void
    {
        // First determine what the next lock state will be based on the first waiter.
        if (m_head_waiter.load()->m_exclusive)
        {
            // If its exclusive then only this waiter can be woken up.
            m_state                   = state::locked_exclusive;
            detail::shared_lock_operation<executor_type>* to_resume = m_head_waiter.load();
            m_head_waiter             = m_head_waiter.load()->m_next;
            --m_exclusive_waiters;
            if (m_head_waiter == nullptr)
            {
                m_tail_waiter = nullptr;
            }

            // Since this is an exclusive lock waiting we can resume it directly.
            lk.unlock();
            to_resume->m_awaiting_coroutine.resume();
        }
        else
        {
            // If its shared then we will scan forward and awake all shared waiters onto the given
            // thread pool so they can run in parallel.
            m_state = state::locked_shared;
            do
            {
                detail::shared_lock_operation<executor_type>* to_resume = m_head_waiter.load();
                m_head_waiter             = m_head_waiter.load()->m_next;
                if (m_head_waiter == nullptr)
                {
                    m_tail_waiter = nullptr;
                }
                ++m_shared_users;

                m_executor->resume(to_resume->m_awaiting_coroutine);
            } while (m_head_waiter != nullptr && !m_head_waiter.load()->m_exclusive);

            // Cannot unlock until the entire set of shared waiters has been traversed.  I think this
            // makes more sense than allocating space for all the shared waiters, unlocking, and then
            // resuming in a batch?
            lk.unlock();
        }
    }
};
371

372
} // namespace coro
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc