• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

CrowCpp / Crow / 687

16 Jun 2025 05:17PM UTC coverage: 87.332% (-0.3%) from 87.617%
687

push

gh-actions

gittiver
Merge branch 'unix_socket' into master_merge_unix_socket

* unix_socket:
  add unix_socket test
  fix unittesterror
  add example for unix socket
  set reuse addr false
  add handle_upgrade for unix socket
  add unix domain socket

90 of 114 new or added lines in 7 files covered. (78.95%)

2 existing lines in 2 files now uncovered.

4088 of 4681 relevant lines covered (87.33%)

400.81 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

78.36
/include/crow/http_server.h
1
#pragma once
2

3
#ifdef CROW_USE_BOOST
4
#include <boost/asio.hpp>
5
#ifdef CROW_ENABLE_SSL
6
#include <boost/asio/ssl.hpp>
7
#endif
8
#else
9
#ifndef ASIO_STANDALONE
10
#define ASIO_STANDALONE
11
#endif
12
#include <asio.hpp>
13
#ifdef CROW_ENABLE_SSL
14
#include <asio/ssl.hpp>
15
#endif
16
#endif
17

18
#include <atomic>
19
#include <chrono>
20
#include <cstdint>
21
#include <future>
22
#include <memory>
23
#include <vector>
24

25
#include "crow/version.h"
26
#include "crow/http_connection.h"
27
#include "crow/logging.h"
28
#include "crow/task_timer.h"
29
#include "crow/socket_acceptors.h"
30

31

32
namespace crow // NOTE: Already documented in "crow/app.h"
33
{
34
#ifdef CROW_USE_BOOST
35
    namespace asio = boost::asio;
36
    using error_code = boost::system::error_code;
37
#else
38
    using error_code = asio::error_code;
39
#endif
40
    using tcp = asio::ip::tcp;
41
    using stream_protocol = asio::local::stream_protocol;
42

43
    /// The core HTTP(S) server: owns the acceptor, the io_context pool, and the
    /// worker threads, and hands each accepted socket to a `Connection`.
    ///
    /// Template parameters:
    /// - Handler:     the application type (e.g. `Crow<...>`); must provide
    ///                `port(uint16_t)` and `ssl_used()` (both called in `run()`).
    /// - Acceptor:    socket acceptor wrapper (default `TCPAcceptor`); must expose
    ///                `endpoint`, `reuse_address_option`, `raw_acceptor()`,
    ///                `address()`, `port()`, `local_endpoint()`, `url_display()`.
    /// - Adaptor:     socket adaptor (plain or SSL); `Adaptor::context` is the
    ///                SSL context type (or equivalent) passed through to connections.
    /// - Middlewares: middleware pack forwarded to each `Connection`.
    template<typename Handler, typename Acceptor = TCPAcceptor, typename Adaptor = SocketAdaptor, typename... Middlewares>
    class Server
    {
    public:
      /// Construct the server and immediately open/bind/listen the acceptor.
      ///
      /// Any failure in open/set_option/bind/listen is logged and recorded in
      /// `startup_failed_`; the constructor returns early and `run()` later
      /// refuses to start. No exception is thrown (error-code asio overloads).
      ///
      /// \param handler      application handler; stored, not owned.
      /// \param endpoint     endpoint to bind (TCP or, via Acceptor, unix socket).
      /// \param server_name  value reported in logs / Server header.
      /// \param middlewares  middleware tuple; stored, not owned; may be nullptr.
      /// \param concurrency  total thread count; `concurrency - 1` worker threads
      ///                     are created in `run()` (the main thread runs
      ///                     `io_context_`). NOTE(review): `concurrency == 0`
      ///                     would underflow `concurrency_ - 1` below — presumably
      ///                     the caller guarantees >= 1; confirm against app.h.
      /// \param timeout      default connection timeout (seconds) for task timers.
      /// \param adaptor_ctx  adaptor (SSL) context; may be nullptr.
      Server(Handler* handler,
             typename Acceptor::endpoint endpoint, 
             std::string server_name = std::string("Crow/") + VERSION,
             std::tuple<Middlewares...>* middlewares = nullptr,
             uint16_t concurrency = 1,
             uint8_t timeout = 5,
             typename Adaptor::context* adaptor_ctx = nullptr):
          concurrency_(concurrency),
          // Sized from concurrency_ (already initialized: declared first in the
          // member list, and members initialize in declaration order).
          task_queue_length_pool_(concurrency_ - 1),
          acceptor_(io_context_),
          signals_(io_context_),
          tick_timer_(io_context_),
          handler_(handler),
          timeout_(timeout),
          server_name_(server_name),
          middlewares_(middlewares),
          adaptor_ctx_(adaptor_ctx)
        {
            // NOTE(review): startup_failed_ is initialized to false and nothing
            // runs before this check, so this branch appears unreachable here —
            // confirm whether it guards against a future refactor.
            if (startup_failed_) {
                CROW_LOG_ERROR << "Startup failed; not running server.";
                return;
            }

            error_code ec;

            // Acceptor setup: open -> reuse-address -> bind -> listen.
            // Each step logs and flags startup_failed_ on error so run() aborts.
            acceptor_.raw_acceptor().open(endpoint.protocol(), ec);
            if (ec) {
                CROW_LOG_ERROR << "Failed to open acceptor: " << ec.message();
                startup_failed_ = true;
                return;
            }

            acceptor_.raw_acceptor().set_option(Acceptor::reuse_address_option(), ec);
            if (ec) {
                CROW_LOG_ERROR << "Failed to set socket option: " << ec.message();
                startup_failed_ = true;
                return;
            }

            acceptor_.raw_acceptor().bind(endpoint, ec);
            if (ec) {
                CROW_LOG_ERROR << "Failed to bind to " << acceptor_.address()
                            << ":" << acceptor_.port() << " - " << ec.message();
                startup_failed_ = true;
                return;
            }

            acceptor_.raw_acceptor().listen(tcp::acceptor::max_listen_connections, ec);
            if (ec) {
                CROW_LOG_ERROR << "Failed to listen on port: " << ec.message();
                startup_failed_ = true;
                return;
            }


        }

        /// Register a callback fired every `d` milliseconds once `run()` starts
        /// (only armed when d > 0; see run()). Runs on the main io_context thread.
        void set_tick_function(std::chrono::milliseconds d, std::function<void()> f)
        {
            tick_interval_ = d;
            tick_function_ = f;
        }

        /// Invoke the tick callback and re-arm the timer; stops rescheduling
        /// when the wait is cancelled/errored (e.g. during shutdown).
        void on_tick()
        {
            tick_function_();
            tick_timer_.expires_after(std::chrono::milliseconds(tick_interval_.count()));
            tick_timer_.async_wait([this](const error_code& ec) {
                if (ec)
                    return;
                on_tick();
            });
        }

        /// Start the server. Spawns `concurrency_ - 1` worker threads (each
        /// running its own io_context), arms the tick timer and signal handler,
        /// begins accepting, then blocks running the main io_context until
        /// stop() is called. Returns immediately if construction failed.
        void run()
        {

            if (startup_failed_) {
                CROW_LOG_ERROR << "Server startup failed. Aborting run().";
                return;
            }

            uint16_t worker_thread_count = concurrency_ - 1;
            for (int i = 0; i < worker_thread_count; i++)
                io_context_pool_.emplace_back(new asio::io_context());
            get_cached_date_str_pool_.resize(worker_thread_count);
            task_timer_pool_.resize(worker_thread_count);

            std::vector<std::future<void>> v;
            std::atomic<int> init_count(0);
            for (uint16_t i = 0; i < worker_thread_count; i++)
                v.push_back(
                  std::async(
                    std::launch::async, [this, i, &init_count] {
                        // thread local date string get function
                        // Each worker caches an RFC-1123-style date string and
                        // refreshes it at most once per second (see below).
                        auto last = std::chrono::steady_clock::now();

                        std::string date_str;
                        auto update_date_str = [&] {
                            auto last_time_t = time(0);
                            tm my_tm;

#if defined(_MSC_VER) || defined(__MINGW32__)
                            gmtime_s(&my_tm, &last_time_t);
#else
                            gmtime_r(&last_time_t, &my_tm);
#endif
                            date_str.resize(100);
                            size_t date_str_sz = strftime(&date_str[0], 99, "%a, %d %b %Y %H:%M:%S GMT", &my_tm);
                            date_str.resize(date_str_sz);
                        };
                        update_date_str();
                        // Published getter; captures this thread's locals by
                        // reference — valid because this lambda only runs from
                        // handlers on io_context_pool_[i], inside this thread's
                        // stack frame (the frame outlives the run() loop below).
                        get_cached_date_str_pool_[i] = [&]() -> std::string {
                            if (std::chrono::steady_clock::now() - last >= std::chrono::seconds(1))
                            {
                                last = std::chrono::steady_clock::now();
                                update_date_str();
                            }
                            return date_str;
                        };

                        // initializing task timers
                        // Stack-allocated; its address is published to
                        // task_timer_pool_[i] and stays valid for the lifetime
                        // of the while-loop below.
                        detail::task_timer task_timer(*io_context_pool_[i]);
                        task_timer.set_default_timeout(timeout_);
                        task_timer_pool_[i] = &task_timer;
                        task_queue_length_pool_[i] = 0;

                        // Signal readiness only after the pools are populated;
                        // the main thread spin-waits on init_count before
                        // accepting connections.
                        init_count++;
                        while (1)
                        {
                            try
                            {
                                if (io_context_pool_[i]->run() == 0)
                                {
                                    // when io_service.run returns 0, there are no more works to do.
                                    break;
                                }
                            }
                            catch (std::exception& e)
                            {
                                // Keep the worker alive: log and re-enter run().
                                CROW_LOG_ERROR << "Worker Crash: An uncaught exception occurred: " << e.what();
                            }
                        }
                    }));

            if (tick_function_ && tick_interval_.count() > 0)
            {
                tick_timer_.expires_after(std::chrono::milliseconds(tick_interval_.count()));
                tick_timer_.async_wait(
                  [this](const error_code& ec) {
                      if (ec)
                          return;
                      on_tick();
                  });
            }
            // Report the actually-bound port back to the app (relevant when
            // binding port 0 for an OS-assigned port).
            handler_->port(acceptor_.port());
            CROW_LOG_INFO << server_name_ 
                          << " server is running at " << acceptor_.url_display(handler_->ssl_used()) 
                          << " using " << concurrency_ << " threads";
            CROW_LOG_INFO << "Call `app.loglevel(crow::LogLevel::Warning)` to hide Info level logs.";

            // Any registered signal (see signal_add) triggers a clean stop().
            signals_.async_wait(
              [&](const error_code& /*error*/, int /*signal_number*/) {
                  stop();
              });

            // Busy-wait until every worker has published its timer/date-getter.
            while (worker_thread_count != init_count)
                std::this_thread::yield();

            do_accept();

            // Run the main io_context (acceptor, signals, tick timer) on a
            // dedicated thread and join it; notify_start() unblocks
            // wait_for_start() callers once the thread is up.
            std::thread(
              [this] {
                  notify_start();
                  io_context_.run();
                  CROW_LOG_INFO << "Exiting.";
              })
              .join();
        }

        /// Stop accepting, close the acceptor, and stop all io_contexts
        /// (workers first, then the main one). Safe to call from a signal
        /// handler completion on the main io_context.
        void stop()
        {
            shutting_down_ = true; // Prevent the acceptor from taking new connections

            // Explicitly close the acceptor
            // else asio will throw an exception (linux only), when trying to start server again:
            // what():  bind: Address already in use
            if (acceptor_.raw_acceptor().is_open())
            {
                CROW_LOG_INFO << "Closing acceptor. " << &acceptor_;
                error_code ec;
                acceptor_.raw_acceptor().close(ec);
                if (ec)
                {
                    CROW_LOG_WARNING << "Failed to close acceptor: " << ec.message();
                }
            }

            for (auto& io_context : io_context_pool_)
            {
                if (io_context != nullptr)
                {
                    CROW_LOG_INFO << "Closing IO service " << &io_context;
                    io_context->stop(); // Close all io_services (and HTTP connections)
                }
            }

            CROW_LOG_INFO << "Closing main IO service (" << &io_context_ << ')';
            io_context_.stop(); // Close main io_service
        }

        
        /// Port the acceptor is actually bound to (OS-assigned when bound to 0).
        uint16_t port() const {
            return acceptor_.local_endpoint().port();
        }

        /// Wait until the server has properly started or until timeout
        /// Returns std::cv_status::timeout if `wait_until` passed before
        /// notify_start() (or startup failure) released the wait.
        std::cv_status wait_for_start(std::chrono::steady_clock::time_point wait_until)
        {
            std::unique_lock<std::mutex> lock(start_mutex_);

            std::cv_status status = std::cv_status::no_timeout;
            while (!server_started_ && !startup_failed_ && status == std::cv_status::no_timeout)
                status = cv_started_.wait_until(lock, wait_until);
            return status;
        }


        /// Remove all signals registered for graceful shutdown.
        void signal_clear()
        {
            signals_.clear();
        }

        /// Register a signal (e.g. SIGINT) whose delivery triggers stop().
        void signal_add(int signal_number)
        {
            signals_.add(signal_number);
        }

    private:
        /// Pick the worker io_context with the shortest task queue.
        /// Short-circuits: if the first candidate's queue is empty it wins
        /// without scanning the rest.
        uint16_t pick_io_context_idx()
        {
            uint16_t min_queue_idx = 0;

            // TODO improve load balancing
            // size_t is used here to avoid the security issue https://codeql.github.com/codeql-query-help/cpp/cpp-comparison-with-wider-type/
            // even though the max value of this can be only uint16_t as concurrency is uint16_t.
            for (size_t i = 1; i < task_queue_length_pool_.size() && task_queue_length_pool_[min_queue_idx] > 0; i++)
            // No need to check other io_services if the current one has no tasks
            {
                if (task_queue_length_pool_[i] < task_queue_length_pool_[min_queue_idx])
                    min_queue_idx = i;
            }
            return min_queue_idx;
        }

        /// Create a Connection on the least-loaded worker context and arm an
        /// async accept for it; the completion handler starts the connection
        /// (on success) and re-arms itself, forming the accept loop. The loop
        /// ends when shutting_down_ is set by stop().
        void do_accept()
        {
            if (!shutting_down_)
            {
                uint16_t context_idx = pick_io_context_idx();
                asio::io_context& ic = *io_context_pool_[context_idx];
                // shared_ptr keeps the Connection alive through the async
                // accept and the posted start() handler.
                auto p = std::make_shared<Connection<Adaptor, Handler, Middlewares...>>(
                    ic, handler_, server_name_, middlewares_,
                    get_cached_date_str_pool_[context_idx], *task_timer_pool_[context_idx], adaptor_ctx_, task_queue_length_pool_[context_idx]);
                    
                CROW_LOG_DEBUG << &ic << " {" << context_idx << "} queue length: " << task_queue_length_pool_[context_idx];

                acceptor_.raw_acceptor().async_accept(
                  p->socket(),
                  [this, p, &ic, context_idx](error_code ec) {
                      if (!ec)
                      {
                          // Hand the connection to its worker io_context.
                          asio::post(ic,
                            [p] {
                                p->start();
                            });
                      }
                      do_accept();
                  });
            }
        }

        /// Notify anything using `wait_for_start()` to proceed
        void notify_start()
        {
            std::unique_lock<std::mutex> lock(start_mutex_);
            server_started_ = true;
            cv_started_.notify_all();
        }

    private:
        uint16_t concurrency_{2};                                      // total threads (workers + main); overwritten by ctor
        std::vector<std::atomic<unsigned int>> task_queue_length_pool_; // per-worker pending-task counters, sized concurrency_-1
        std::vector<std::unique_ptr<asio::io_context>> io_context_pool_; // one io_context per worker thread
        asio::io_context io_context_;                                  // main context: acceptor, signals, tick timer
        std::vector<detail::task_timer*> task_timer_pool_;             // non-owning; points at worker-stack timers (see run())
        std::vector<std::function<std::string()>> get_cached_date_str_pool_; // per-worker cached HTTP date getters
        Acceptor acceptor_;
        bool shutting_down_ = false;  // set by stop(); checked by do_accept()
        bool server_started_{false};  // guarded by start_mutex_
        bool startup_failed_ = false; // set by ctor on acceptor setup failure
        std::condition_variable cv_started_;
        std::mutex start_mutex_;
        asio::signal_set signals_;

        asio::basic_waitable_timer<std::chrono::high_resolution_clock> tick_timer_;

        Handler* handler_;       // not owned
        std::uint8_t timeout_;   // default connection timeout, seconds
        std::string server_name_;
        bool use_unix_;          // NOTE(review): never read or written in this header — presumably vestigial from the unix_socket merge; confirm before removing

        std::chrono::milliseconds tick_interval_;
        std::function<void()> tick_function_;

        std::tuple<Middlewares...>* middlewares_; // not owned; may be nullptr

        typename Adaptor::context* adaptor_ctx_;  // not owned; may be nullptr
    };
366
} // namespace crow
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc