• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

NeonGE / geEngineSDK / ac7673f3-08dc-4326-87b7-3c14ad7938bf

05 Mar 2026 04:29PM UTC coverage: 57.84% (-0.02%) from 57.862%
ac7673f3-08dc-4326-87b7-3c14ad7938bf

push

circleci

NeonGE
Refactor input mapping, improve exception safety & tasks

- Replaced SFML-to-ImGui key mapping switch with static map for clarity.
- Updated input event lambdas to avoid capturing `this`.
- Marked `swap` methods as `_NOEXCEPT` in Matrix4, Path, Vector2.
- Added assertions and removed redundant checks in frame allocator.
- Refactored platform termination logic for clarity and portability.
- Modernized task scheduler waits using condition variable predicates.
- Updated debug.css font stack for better cross-platform support.

6 of 11 new or added lines in 4 files covered. (54.55%)

8 existing lines in 3 files now uncovered.

5603 of 9687 relevant lines covered (57.84%)

9107.71 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

95.48
/sdk/geUtilities/src/geTaskScheduler.cpp
1
/*****************************************************************************/
2
/**
3
 * @file    geTaskScheduler.cpp
4
 * @author  Samuel Prince (samuel.prince.quezada@gmail.com)
5
 * @date    2017/10/14
6
 * @brief   Represents a task scheduler running on multiple threads.
7
 *
8
 * Represents a task scheduler running on multiple threads. You may queue tasks
9
 * on it from any thread and they will be executed in user specified order on
10
 * any available thread.
11
 *
12
 * @bug     No known bugs.
13
 */
14
/*****************************************************************************/
15

16
/*****************************************************************************/
17
/**
18
 * Includes
19
 */
20
/*****************************************************************************/
21
#include "geTaskScheduler.h"
22
#include "geThreadPool.h"
23

24
namespace geEngineSDK {
25
  using std::bind;
26
  using std::find;
27
  using std::move;
28

29
  /**
   * @brief Constructs a task. The PrivatelyConstruct tag restricts direct
   *        construction to Task::create(). m_state starts at 0 (inactive);
   *        m_parent and m_taskId are assigned later, when the task is
   *        queued via TaskScheduler::addTask() / addTaskGroup().
   */
  Task::Task(const PrivatelyConstruct&,
             const String& name,
             function<void()> taskWorker,
             TASKPRIORITY::E priority,
             SPtr<Task> dependency)
    : m_name(name),
      m_priority(priority),
      m_taskId(0),  //Real id assigned by the scheduler when queued
      m_taskWorker(std::move(taskWorker)),
      m_taskDependency(std::move(dependency)),
      m_state(0),   //0 = inactive; 1 = running, 2 = complete, 3 = canceled
      m_parent(nullptr) {}
41

42
  /**
   * @brief Creates a new task with the given worker, priority and optional
   *        dependency. The task does not run until handed to a scheduler
   *        via TaskScheduler::addTask().
   */
  SPtr<Task>
  Task::create(const String& name,
               function<void()> taskWorker,
               TASKPRIORITY::E priority,
               SPtr<Task> dependency) {
    //Tag-dispatch into the private constructor; worker and dependency are
    //moved in to avoid redundant copies.
    SPtr<Task> newTask = ge_shared_ptr_new<Task>(PrivatelyConstruct(),
                                                 name,
                                                 std::move(taskWorker),
                                                 priority,
                                                 std::move(dependency));
    return newTask;
  }
53

54
  bool
55
  Task::isComplete() const {
17✔
56
    return m_state == 2;
17✔
57
  }
58

59
  bool
60
  Task::isCanceled() const {
250✔
61
    return m_state == 3;
250✔
62
  }
63

64
  void
65
  Task::wait() {
5✔
66
    if (nullptr != m_parent) {
5✔
67
      m_parent->waitUntilComplete(this);
4✔
68
    }
69
  }
5✔
70

71
  void
72
  Task::cancel() {
1✔
73
    m_state = 3;
1✔
74
  }
1✔
75

76
  /**
   * @brief Constructs a task group of `count` tasks sharing one indexed
   *        worker. The PrivatelyConstruct tag restricts direct construction
   *        to TaskGroup::create(); m_parent is assigned when the group is
   *        queued via TaskScheduler::addTaskGroup().
   */
  TaskGroup::TaskGroup(const PrivatelyConstruct& /*dummy*/,
                       String name,
                       function<void(uint32)> taskWorker,
                       uint32 count,
                       TASKPRIORITY::E priority,
                       SPtr<Task> dependency)
    : m_name(std::move(name)),
      m_count(count),
      m_priority(priority),
      m_taskWorker(std::move(taskWorker)),
      m_taskDependency(std::move(dependency)),
      m_numRemainingTasks(count) //Decremented by each sub-task on completion
  {}
89

90
  /**
   * @brief Creates a group of `count` tasks that all run `taskWorker`,
   *        each receiving its own index. Not scheduled until handed to
   *        TaskScheduler::addTaskGroup().
   */
  SPtr<TaskGroup>
  TaskGroup::create(String name,
                    function<void(uint32)> taskWorker,
                    uint32 count,
                    TASKPRIORITY::E priority,
                    SPtr<Task> dependency) {
    SPtr<TaskGroup> newGroup = ge_shared_ptr_new<TaskGroup>(PrivatelyConstruct(),
                                                            std::move(name),
                                                            std::move(taskWorker),
                                                            count, priority,
                                                            std::move(dependency));
    return newGroup;
  }
102

103
  bool
104
  TaskGroup::isComplete() const {
1✔
105
    return 0 == m_numRemainingTasks;
1✔
106
  }
107

108
  void
109
  TaskGroup::wait() {
1✔
110
    if (nullptr != m_parent) {
1✔
111
      m_parent->waitUntilComplete(this);
1✔
112
    }
113
  }
1✔
114

115
  /**
   * @brief Sets up the scheduler: the priority-ordered task queue, a
   *        worker cap equal to the hardware thread count, and the
   *        dedicated scheduler thread that drains the queue (runMain).
   */
  TaskScheduler::TaskScheduler()
    : m_taskQueue(&TaskScheduler::taskCompare),
      m_maxActiveTasks(GE_THREAD_HARDWARE_CONCURRENCY),
      m_nextTaskId(0),
      m_shutdown(false),
      m_checkTasks(false) {
    //Launch the main scheduling loop on its own pooled thread
    m_taskSchedulerThread = ThreadPool::instance().run("TaskScheduler",
                                                       [this] { runMain(); });
  }
124

125
  /**
   * @brief Shuts the scheduler down: blocks until every active task has
   *        completed, then signals the scheduler thread to exit and waits
   *        for it to terminate.
   */
  TaskScheduler::~TaskScheduler() {
    //Wait until all tasks complete
    {
      Lock activeTaskLock(m_readyMutex);

      while (!m_activeTasks.empty()) {
        //Grab a shared reference, then release the lock before waiting:
        //Task::wait() re-enters the scheduler (waitUntilComplete calls
        //addWorker, which takes m_readyMutex) and runTask also needs
        //m_readyMutex to remove the task from m_activeTasks.
        SPtr<Task> task = m_activeTasks[0];
        activeTaskLock.unlock();

        task->wait();
        activeTaskLock.lock();
      }
    }

    //Start shutdown of the main queue worker and wait until it exits
    {
      Lock lock(m_readyMutex);
      m_shutdown = true;
    }

    m_taskReadyCond.notify_one();
    m_taskSchedulerThread.blockUntilComplete();
  }
148

149
  /**
   * @brief Queues a task for execution. Canceled tasks are silently
   *        dropped. The task is adopted (m_parent = this) and given a
   *        monotonically increasing id, which taskCompare uses to keep
   *        FIFO order among tasks of equal priority.
   */
  void
  TaskScheduler::addTask(SPtr<Task> task) {
    Lock lock(m_readyMutex);

    if (task->isCanceled()) {
      return;
    }

    //State 1 means the task is currently executing on a worker thread
    GE_ASSERT(1 != task->m_state &&
              "Task is already executing, it cannot be executed again until "
              "it finishes.");

    task->m_parent = this;
    task->m_taskId = m_nextTaskId++;
    task->m_state.store(0); //Reset state in case the task is getting re-queued

    m_checkTasks = true;
    m_taskQueue.insert(std::move(task));

    //Wake main scheduler thread
    m_taskReadyCond.notify_one();
  }
171

172
  /**
   * @brief Queues a task group: spawns m_count individual tasks, each
   *        invoking the group worker with its own index and decrementing
   *        the group's remaining-task counter when it finishes. All
   *        sub-tasks share the group's name, priority and dependency.
   */
  void
  TaskScheduler::addTaskGroup(const SPtr<TaskGroup>& taskGroup) {
    Lock lock(m_readyMutex);

    //Re-arm the counter so a group can be queued again after completing
    taskGroup->m_numRemainingTasks.store(taskGroup->m_count, std::memory_order_relaxed);

    for (uint32 i = 0; i < taskGroup->m_count; ++i) {
      //Capture the group by value (shared pointer) so it stays alive for
      //as long as any of its sub-tasks might still run
      const auto worker = [i, taskGroup]
      {
        taskGroup->m_taskWorker(i);
        taskGroup->m_numRemainingTasks.fetch_sub(1, std::memory_order_acq_rel);
      };

      SPtr<Task> task = Task::create(taskGroup->m_name,
                                     worker,
                                     taskGroup->m_priority,
                                     taskGroup->m_taskDependency);
      task->m_parent = this;
      task->m_taskId = m_nextTaskId++;
      task->m_state.store(0); //Reset state in case the task is getting re-queued

      m_checkTasks = true;
      m_taskQueue.insert(std::move(task));
    }

    taskGroup->m_parent = this;

    //Wake main scheduler thread
    m_taskReadyCond.notify_one();
  }
202

203
  void
204
  TaskScheduler::addWorker() {
4✔
205
    Lock lock(m_readyMutex);
4✔
206
    ++m_maxActiveTasks;
4✔
207

208
    //A spot freed up, queue new tasks on main scheduler thread if they exist
209
    m_taskReadyCond.notify_one();
4✔
210
  }
4✔
211

212
  void
213
  TaskScheduler::removeWorker() {
4✔
214
    Lock lock(m_readyMutex);
4✔
215

216
    if (m_maxActiveTasks > 0) {
4✔
217
      --m_maxActiveTasks;
4✔
218
    }
219
  }
4✔
220

221
  void
222
  TaskScheduler::runMain() {
182✔
223
    while (true) {
224
      Lock lock(m_readyMutex);
182✔
225

226
      m_taskReadyCond.wait(lock, [this]
182✔
227
      {
228
        return m_shutdown || (m_checkTasks && m_activeTasks.size() < m_maxActiveTasks);
191✔
229
      });
230

231
      m_checkTasks = false;
182✔
232

233
      if (m_shutdown) {
182✔
234
        break;
5✔
235
      }
236
      m_checkTasks = false;
177✔
237

238
      for (auto iter = m_taskQueue.begin(); iter != m_taskQueue.end();) {
247✔
239
        if (static_cast<uint32>(m_activeTasks.size()) >= m_maxActiveTasks) {
239✔
240
          break;
169✔
241
        }
242

243
        SPtr<Task> curTask = *iter;
239✔
244

245
        if (curTask->isCanceled()) {
239✔
246
          iter = m_taskQueue.erase(iter);
×
247
          continue;
×
248
        }
249

250
        if (nullptr != curTask->m_taskDependency &&
241✔
251
            !curTask->m_taskDependency->isComplete()) {
2✔
252
          ++iter;
1✔
253
          continue;
1✔
254
        }
255

256
        /**
257
         * Spin until a thread becomes available. This happens primarily
258
           because our m_acctiveTask count and ThreadPool's thread idle count
259
           aren't synced, so while the task manager thinks it's free to run new
260
           tasks, the ThreadPool might still have those threads as running,
261
           meaning their allocation will fail.
262
         * So we just spin here for a bit, in that rare case.
263
         */        
264
        if (ThreadPool::instance().getNumAvailable() == 0) {
238✔
265
          m_checkTasks = true;
169✔
266
          break;
169✔
267
        }
268

269
        iter = m_taskQueue.erase(iter);
69✔
270

271
        curTask->m_state.store(1);
69✔
272
        m_activeTasks.push_back(curTask);
69✔
273

274
        ThreadPool::instance().run(curTask->m_name,
138✔
275
                                   bind(&TaskScheduler::runTask, this, curTask));
138✔
276
      }
239✔
277
    }
359✔
278
  }
5✔
279

280
  /**
   * @brief Entry point executed on a pooled worker thread for a single
   *        task: runs the worker, removes the task from the active list,
   *        marks it complete (state 2), wakes any waiters, and finally
   *        nudges the scheduler thread so dependents can be dispatched.
   */
  void
  TaskScheduler::runTask(SPtr<Task> task) {
    task->m_taskWorker();

    {
      Lock lock(m_readyMutex);

      auto findIter = find(m_activeTasks.begin(), m_activeTasks.end(), task);
      if (findIter != m_activeTasks.end()) {
        m_activeTasks.erase(findIter);
      }
    }

    {
      //The completion store and its notification share m_completeMutex so
      //a waiter in waitUntilComplete() cannot observe the old state and
      //then miss the wake-up
      Lock lock(m_completeMutex);
      task->m_state.store(2);

      m_taskCompleteCond.notify_all();
    }

    //Wake the main scheduler thread in case there are other tasks waiting or
    //this task was someone's dependency
    {
      Lock lock(m_readyMutex);

      m_checkTasks = true;
      m_taskReadyCond.notify_one();
    }
  }
309

310
  /**
   * @brief Blocks the calling thread until the given task completes.
   *        Returns immediately if the task is already canceled. While
   *        waiting, the worker cap is temporarily raised (addWorker /
   *        removeWorker) so the blocked thread's slot can be reused and
   *        the awaited task cannot be starved of a worker.
   *
   * NOTE(review): a task canceled AFTER this initial check never reaches
   * the complete state, so the predicate below would never become true —
   * presumably callers do not cancel a task that is being waited on;
   * confirm against usage.
   */
  void
  TaskScheduler::waitUntilComplete(const Task* task) {
    if (task->isCanceled()) {
      return;
    }

    while (!task->isComplete()) {
      addWorker();
      {
        //m_completeMutex pairs with the state store in runTask, so the
        //completion signal cannot be missed between check and wait
        Lock lock(m_completeMutex);
        m_taskCompleteCond.wait(lock, [task] { return task->isComplete(); });
      }
      removeWorker();
    }
  }
325

326
  /**
   * @brief Blocks the calling thread until every task in the group has
   *        completed (remaining counter reaches zero). While waiting, the
   *        worker cap is temporarily raised (addWorker / removeWorker) so
   *        the blocked thread's slot can be reused by the group's tasks.
   */
  void
  TaskScheduler::waitUntilComplete(const TaskGroup* taskGroup) {
    while (taskGroup->m_numRemainingTasks.load(std::memory_order_acquire) > 0) {
      addWorker();
      {
        //m_completeMutex pairs with the notify_all in runTask, so the
        //final decrement's wake-up cannot be missed
        Lock lock(m_completeMutex);
        m_taskCompleteCond.wait(lock, [taskGroup]
        {
          return taskGroup->m_numRemainingTasks.load(std::memory_order_acquire) == 0;
        });
      }
      removeWorker();
    }
  }
340

341
  bool
342
  TaskScheduler::taskCompare(const SPtr<Task>& lhs, const SPtr<Task>& rhs) {
574✔
343
    //If priority is the same, sort by the order the tasks were queued
344
    if (lhs->m_priority == rhs->m_priority) {
574✔
345
      return  lhs->m_taskId < rhs->m_taskId;
572✔
346
    }
347

348
    //Otherwise we go by smaller id, as that task was queued earlier than the other
349
    return lhs->m_priority > rhs->m_priority;
2✔
350
  }
351
}
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc