• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

saitoha / libsixel / 20070897793

09 Dec 2025 04:31PM UTC coverage: 41.32% (-8.1%) from 49.384%
20070897793

push

github

saitoha
Merge branch 'perf/vpte' into develop

10821 of 40782 branches covered (26.53%)

18 of 1086 new or added lines in 6 files covered. (1.66%)

1395 existing lines in 42 files now uncovered.

14850 of 35939 relevant lines covered (41.32%)

2731560.64 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

0.0
/src/threadpool.c
1
/*
2
 * SPDX-License-Identifier: MIT
3
 *
4
 * Copyright (c) 2025 libsixel developers. See `AUTHORS`.
5
 *
6
 * Permission is hereby granted, free of charge, to any person obtaining a copy of
7
 * this software and associated documentation files (the "Software"), to deal in
8
 * the Software without restriction, including without limitation the rights to
9
 * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
10
 * of the Software, and to permit persons to whom the Software is furnished to do
11
 * so, subject to the following conditions:
12
 *
13
 * The above copyright notice and this permission notice shall be included in all
14
 * copies or substantial portions of the Software.
15
 *
16
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22
 * SOFTWARE.
23
 */
24

25
#include "config.h"
26

27
#include <errno.h>
28
#include <stdlib.h>
29
#include <string.h>
30

31
#include "threadpool.h"
32
#include "threading.h"
33

34
typedef struct threadpool_worker threadpool_worker_t;

/*
 * Per-thread bookkeeping for one pool worker. Slots are heap-allocated
 * individually so their addresses stay stable while the pointer table grows.
 */
struct threadpool_worker {
    threadpool_t *pool;     /* owning pool (not owned by the worker) */
    sixel_thread_t thread;  /* thread handle; valid only when `started` is set */
    void *workspace;        /* heap-owned per-thread scratch buffer, may be NULL */
    int started;            /* nonzero once sixel_thread_create() succeeded */
    int index;              /* slot index in pool->workers; drives CPU pinning */
    int pinned;             /* nonzero after the thread pinned itself to a CPU */
};
44

45
/*
 * Fixed-size job queue (ring buffer) served by a set of worker threads.
 * Queue and status fields are protected by `mutex`; the three condition
 * variables signal queue-not-empty, queue-not-full, and fully-drained.
 */
struct threadpool {
    int nthreads;              /* number of successfully started workers */
    int qsize;                 /* capacity of the job ring buffer */
    size_t workspace_size;     /* bytes of per-worker scratch space; 0 = none */
    tp_worker_fn worker;       /* callback executed for every job */
    void *userdata;            /* opaque pointer passed to the callback */
    tp_job_t *jobs;            /* ring buffer storage, qsize entries */
    int head;                  /* next slot to dequeue from */
    int tail;                  /* next slot to enqueue into */
    int count;                 /* jobs currently queued */
    int running;               /* jobs currently executing in workers */
    int shutting_down;         /* set once shutdown begins; workers drain then exit */
    int joined;                /* set after threadpool_finish() joined all workers */
    int error;                 /* first non-SIXEL_OK code returned by any job */
    int threads_started;       /* workers whose threads were actually created */
    int worker_capacity;       /* allocated length of the `workers` table */
    int pin_threads;           /* nonzero when workers should pin to CPUs */
    int hw_threads;            /* hardware thread count used for pinning */
    sixel_mutex_t mutex;       /* guards queue and status fields above */
    sixel_cond_t cond_not_empty;  /* signalled when a job is enqueued */
    sixel_cond_t cond_not_full;   /* signalled when a queue slot frees up */
    sixel_cond_t cond_drained;    /* broadcast when queue empties and nothing runs */
    int mutex_ready;           /* init flags: threadpool_free() destroys only */
    int cond_not_empty_ready;  /* the primitives whose initialization succeeded */
    int cond_not_full_ready;
    int cond_drained_ready;
    threadpool_worker_t **workers; /* owned worker slots (stable addresses) */
};
73

74
static void threadpool_free(threadpool_t *pool);
75
static int threadpool_worker_main(void *arg);
76
static int threadpool_spawn_worker(threadpool_t *pool,
77
                                   threadpool_worker_t *worker);
78

79
/*
80
 * Release every dynamically allocated component of the pool. Callers must
81
 * ensure that worker threads have already terminated before invoking this
82
 * helper; otherwise joining would operate on freed memory.
83
 */
84
static void
85
threadpool_free(threadpool_t *pool)
×
86
{
87
    int i;
×
88

89
    if (pool == NULL) {
×
90
        return;
91
    }
92
    if (pool->workers != NULL) {
×
93
        for (i = 0; i < pool->worker_capacity; ++i) {
×
94
            if (pool->workers[i] == NULL) {
×
95
                continue;
×
96
            }
97
            if (pool->workers[i]->workspace != NULL) {
×
98
                free(pool->workers[i]->workspace);
×
99
            }
100
            free(pool->workers[i]);
×
101
        }
102
        free(pool->workers);
×
103
    }
104
    if (pool->jobs != NULL) {
×
105
        free(pool->jobs);
×
106
    }
107
    if (pool->cond_drained_ready) {
×
108
        sixel_cond_destroy(&pool->cond_drained);
×
109
    }
110
    if (pool->cond_not_full_ready) {
×
111
        sixel_cond_destroy(&pool->cond_not_full);
×
112
    }
113
    if (pool->cond_not_empty_ready) {
×
114
        sixel_cond_destroy(&pool->cond_not_empty);
×
115
    }
116
    if (pool->mutex_ready) {
×
117
        sixel_mutex_destroy(&pool->mutex);
×
118
    }
119
    free(pool);
×
120
}
×
121

122
/*
123
 * Worker threads pull jobs from the ring buffer, execute the supplied callback
124
 * outside the critical section, and record the first failure code. All
125
 * synchronization is delegated to the mutex/condition helpers provided by the
126
 * threading abstraction.
127
 */
128
static int
129
threadpool_worker_main(void *arg)
×
130
{
131
    threadpool_worker_t *worker;
×
132
    threadpool_t *pool;
×
133
    tp_job_t job;
×
134
    int rc;
×
135

136
    worker = (threadpool_worker_t *)arg;
×
137
    pool = worker->pool;
×
138
    for (;;) {
×
139
        sixel_mutex_lock(&pool->mutex);
×
140
        while (pool->count == 0 && !pool->shutting_down) {
×
141
            sixel_cond_wait(&pool->cond_not_empty, &pool->mutex);
×
142
        }
143
        if (pool->count == 0 && pool->shutting_down) {
×
144
            sixel_mutex_unlock(&pool->mutex);
×
145
            break;
×
146
        }
147
        job = pool->jobs[pool->head];
×
148
        pool->head = (pool->head + 1) % pool->qsize;
×
149
        pool->count -= 1;
×
150
        pool->running += 1;
×
151
        sixel_cond_signal(&pool->cond_not_full);
×
152
        sixel_mutex_unlock(&pool->mutex);
×
153

NEW
154
        if (pool->pin_threads && !worker->pinned && pool->hw_threads > 0) {
×
NEW
155
            int cpu_index;
×
156

NEW
157
            cpu_index = worker->index % pool->hw_threads;
×
NEW
158
            (void)sixel_thread_pin_self(cpu_index);
×
NEW
159
            worker->pinned = 1;
×
160
        }
161

UNCOV
162
        rc = pool->worker(job, pool->userdata, worker->workspace);
×
163

164
        sixel_mutex_lock(&pool->mutex);
×
165
        pool->running -= 1;
×
166
        if (rc != SIXEL_OK && pool->error == SIXEL_OK) {
×
167
            pool->error = rc;
×
168
        }
169
        if (pool->count == 0 && pool->running == 0) {
×
170
            sixel_cond_broadcast(&pool->cond_drained);
×
171
        }
172
        sixel_mutex_unlock(&pool->mutex);
×
173
    }
174
    return SIXEL_OK;
×
175
}
176

177
/*
 * Create a pool of `nthreads` workers sharing a ring buffer of `qsize` job
 * slots. Each worker optionally receives `workspace_size` bytes of
 * zero-initialized scratch memory. Returns NULL on any failure; all partially
 * constructed state is torn down (threads joined, memory freed) first.
 */
SIXELAPI threadpool_t *
threadpool_create(int nthreads,
                  int qsize,
                  size_t workspace_size,
                  tp_worker_fn worker,
                  void *userdata)
{
    threadpool_t *pool;
    int i;
    int rc;

    if (nthreads <= 0 || qsize <= 0 || worker == NULL) {
        return NULL;
    }
    pool = (threadpool_t *)calloc(1, sizeof(threadpool_t));
    if (pool == NULL) {
        return NULL;
    }
    pool->nthreads = nthreads;
    pool->qsize = qsize;
    pool->workspace_size = workspace_size;
    pool->worker = worker;
    pool->userdata = userdata;
    pool->jobs = NULL;
    pool->head = 0;
    pool->tail = 0;
    pool->count = 0;
    pool->running = 0;
    pool->shutting_down = 0;
    pool->joined = 0;
    pool->error = SIXEL_OK;
    pool->threads_started = 0;
    pool->mutex_ready = 0;
    pool->cond_not_empty_ready = 0;
    pool->cond_not_full_ready = 0;
    pool->cond_drained_ready = 0;
    pool->pin_threads = 0;
    pool->hw_threads = 0;
    pool->workers = NULL;

    rc = sixel_mutex_init(&pool->mutex);
    if (rc != SIXEL_OK) {
        errno = EINVAL;
        threadpool_free(pool);
        return NULL;
    }
    pool->mutex_ready = 1;

    rc = sixel_cond_init(&pool->cond_not_empty);
    if (rc != SIXEL_OK) {
        errno = EINVAL;
        threadpool_free(pool);
        return NULL;
    }
    pool->cond_not_empty_ready = 1;

    rc = sixel_cond_init(&pool->cond_not_full);
    if (rc != SIXEL_OK) {
        errno = EINVAL;
        threadpool_free(pool);
        return NULL;
    }
    pool->cond_not_full_ready = 1;

    rc = sixel_cond_init(&pool->cond_drained);
    if (rc != SIXEL_OK) {
        errno = EINVAL;
        threadpool_free(pool);
        return NULL;
    }
    pool->cond_drained_ready = 1;

    pool->jobs = (tp_job_t *)malloc(sizeof(tp_job_t) * (size_t)qsize);
    if (pool->jobs == NULL) {
        threadpool_free(pool);
        return NULL;
    }

    pool->worker_capacity = nthreads;
    pool->workers = (threadpool_worker_t **)calloc((size_t)nthreads,
            sizeof(threadpool_worker_t *));
    if (pool->workers == NULL) {
        threadpool_free(pool);
        return NULL;
    }

    for (i = 0; i < nthreads; ++i) {
        pool->workers[i] = (threadpool_worker_t *)
            calloc(1, sizeof(threadpool_worker_t));
        if (pool->workers[i] == NULL) {
            /*
             * Already-started workers read `shutting_down` under the mutex,
             * so flag the shutdown and broadcast while holding it; an
             * unlocked write here would be a data race and could miss a
             * waiter between its predicate check and its wait.
             */
            sixel_mutex_lock(&pool->mutex);
            pool->shutting_down = 1;
            sixel_cond_broadcast(&pool->cond_not_empty);
            sixel_mutex_unlock(&pool->mutex);
            break;
        }
        pool->workers[i]->pool = pool;
        pool->workers[i]->workspace = NULL;
        pool->workers[i]->started = 0;
        pool->workers[i]->index = i;
        pool->workers[i]->pinned = 0;
        if (workspace_size > 0) {
            /*
             * Zero-initialize the per-thread workspace so that structures like
             * `sixel_parallel_worker_state_t` start with predictable values.
             * The worker initialization logic assumes fields such as
             * `initialized` are cleared before the first job.
             */
            pool->workers[i]->workspace = calloc(1, workspace_size);
            if (pool->workers[i]->workspace == NULL) {
                /* Same locking rationale as the failure path above. */
                sixel_mutex_lock(&pool->mutex);
                pool->shutting_down = 1;
                sixel_cond_broadcast(&pool->cond_not_empty);
                sixel_mutex_unlock(&pool->mutex);
                break;
            }
        }
        rc = threadpool_spawn_worker(pool, pool->workers[i]);
        if (rc != SIXEL_OK) {
            break;
        }
    }

    if (pool->threads_started != nthreads) {
        int started;

        /* Partial start-up: join what was spawned, then discard the pool. */
        started = pool->threads_started;
        for (i = 0; i < started; ++i) {
            sixel_cond_broadcast(&pool->cond_not_empty);
            sixel_thread_join(&pool->workers[i]->thread);
        }
        threadpool_free(pool);
        return NULL;
    }

    return pool;
}
310

311
SIXELAPI void
NEW
312
threadpool_set_affinity(threadpool_t *pool, int pin_threads)
×
313
{
NEW
314
    if (pool == NULL) {
×
315
        return;
316
    }
317

NEW
318
    sixel_mutex_lock(&pool->mutex);
×
NEW
319
    pool->pin_threads = (pin_threads != 0) ? 1 : 0;
×
NEW
320
    if (pool->pin_threads != 0) {
×
NEW
321
        pool->hw_threads = sixel_get_hw_threads();
×
NEW
322
        if (pool->hw_threads < 1) {
×
NEW
323
            pool->pin_threads = 0;
×
324
        }
325
    } else {
NEW
326
        pool->hw_threads = 0;
×
327
    }
NEW
328
    sixel_mutex_unlock(&pool->mutex);
×
329
}
330

331
static int
332
threadpool_spawn_worker(threadpool_t *pool, threadpool_worker_t *worker)
×
333
{
334
    int rc;
×
335

336
    if (pool == NULL || worker == NULL) {
×
337
        return SIXEL_BAD_ARGUMENT;
338
    }
339
    rc = sixel_thread_create(&worker->thread,
×
340
                             threadpool_worker_main,
341
                             worker);
342
    if (rc != SIXEL_OK) {
×
343
        sixel_mutex_lock(&pool->mutex);
×
344
        pool->shutting_down = 1;
×
345
        sixel_cond_broadcast(&pool->cond_not_empty);
×
346
        sixel_mutex_unlock(&pool->mutex);
×
347
        return rc;
×
348
    }
349
    worker->started = 1;
×
350
    pool->threads_started += 1;
×
351
    return SIXEL_OK;
×
352
}
353

354
/*
 * Drain the queue, join every worker, then release all pool memory.
 * Safe to call with NULL; must not be called twice on the same pool
 * (the second call would free freed memory).
 */
SIXELAPI void
threadpool_destroy(threadpool_t *pool)
{
    if (pool == NULL) {
        return;
    }
    threadpool_finish(pool);
    threadpool_free(pool);
}
363

364
/*
 * Enqueue one job by value. Blocks while the ring buffer is full; the job is
 * silently dropped once shutdown has begun (before or during the wait).
 * NULL pool is a no-op.
 */
SIXELAPI void
threadpool_push(threadpool_t *pool, tp_job_t job)
{
    if (pool == NULL) {
        return;
    }
    sixel_mutex_lock(&pool->mutex);
    if (pool->shutting_down) {
        sixel_mutex_unlock(&pool->mutex);
        return;
    }
    /* Wait for a free slot; threadpool_finish() may wake us to abort. */
    while (pool->count == pool->qsize && !pool->shutting_down) {
        sixel_cond_wait(&pool->cond_not_full, &pool->mutex);
    }
    /* Shutdown may have started while we were waiting. */
    if (pool->shutting_down) {
        sixel_mutex_unlock(&pool->mutex);
        return;
    }
    pool->jobs[pool->tail] = job;
    pool->tail = (pool->tail + 1) % pool->qsize;
    pool->count += 1;
    /* One new job: wake one idle worker. */
    sixel_cond_signal(&pool->cond_not_empty);
    sixel_mutex_unlock(&pool->mutex);
}
388

389
/*
 * Begin shutdown, wait until every queued and running job has completed,
 * then join all worker threads. Idempotent: a second call returns early via
 * the `joined` flag. NULL pool is a no-op.
 */
SIXELAPI void
threadpool_finish(threadpool_t *pool)
{
    int i;

    if (pool == NULL) {
        return;
    }
    sixel_mutex_lock(&pool->mutex);
    if (pool->joined) {
        sixel_mutex_unlock(&pool->mutex);
        return;
    }
    /* Wake idle workers (to exit) and blocked pushers (to abort). */
    pool->shutting_down = 1;
    sixel_cond_broadcast(&pool->cond_not_empty);
    sixel_cond_broadcast(&pool->cond_not_full);
    /* Wait until the queue is empty and no job is still executing. */
    while (pool->count > 0 || pool->running > 0) {
        sixel_cond_wait(&pool->cond_drained, &pool->mutex);
    }
    sixel_mutex_unlock(&pool->mutex);

    /* Join outside the mutex: exiting workers still need to acquire it. */
    for (i = 0; i < pool->threads_started; ++i) {
        if (pool->workers[i] != NULL && pool->workers[i]->started) {
            sixel_thread_join(&pool->workers[i]->thread);
            pool->workers[i]->started = 0;
        }
    }

    sixel_mutex_lock(&pool->mutex);
    pool->joined = 1;
    sixel_mutex_unlock(&pool->mutex);
}
421

422
SIXELAPI int
423
threadpool_grow(threadpool_t *pool, int additional_threads)
×
424
{
425
    threadpool_worker_t **expanded;
×
426
    int new_target;
×
427
    int started_new;
×
428
    int i;
×
429
    int rc;
×
430

431
    if (pool == NULL || additional_threads <= 0) {
×
432
        return SIXEL_OK;
433
    }
434

435
    sixel_mutex_lock(&pool->mutex);
×
436
    if (pool->shutting_down) {
×
437
        sixel_mutex_unlock(&pool->mutex);
×
438
        return SIXEL_RUNTIME_ERROR;
×
439
    }
440
    new_target = pool->nthreads + additional_threads;
×
441
    /*
442
     * Worker structs stay heap-allocated per slot so pointer-table growth
443
     * never invalidates addresses already held by running threads.
444
     */
445
    if (new_target > pool->worker_capacity) {
×
446
        expanded = (threadpool_worker_t **)realloc(
×
447
            pool->workers,
×
448
            (size_t)new_target * sizeof(threadpool_worker_t *));
×
449
        if (expanded == NULL) {
×
450
            sixel_mutex_unlock(&pool->mutex);
×
451
            return SIXEL_BAD_ALLOCATION;
×
452
        }
453
        memset(expanded + pool->worker_capacity,
×
454
               0,
UNCOV
455
               (size_t)(new_target - pool->worker_capacity)
×
456
                   * sizeof(threadpool_worker_t *));
457
        pool->workers = expanded;
×
458
        pool->worker_capacity = new_target;
×
459
    }
460
    sixel_mutex_unlock(&pool->mutex);
×
461

462
    started_new = 0;
×
463
    rc = SIXEL_OK;
×
464
    for (i = pool->nthreads; i < new_target; ++i) {
×
465
        pool->workers[i] = (threadpool_worker_t *)
×
466
            calloc(1, sizeof(threadpool_worker_t));
×
467
        if (pool->workers[i] == NULL) {
×
468
            rc = SIXEL_BAD_ALLOCATION;
469
            break;
470
        }
471
        pool->workers[i]->pool = pool;
×
472
        pool->workers[i]->workspace = NULL;
×
473
        pool->workers[i]->started = 0;
×
NEW
474
        pool->workers[i]->index = i;
×
NEW
475
        pool->workers[i]->pinned = 0;
×
476
        if (pool->workspace_size > 0) {
×
477
            pool->workers[i]->workspace =
×
478
                calloc(1, pool->workspace_size);
×
479
            if (pool->workers[i]->workspace == NULL) {
×
480
                rc = SIXEL_BAD_ALLOCATION;
481
                break;
482
            }
483
        }
484

485
        rc = threadpool_spawn_worker(pool, pool->workers[i]);
×
486
        if (rc != SIXEL_OK) {
×
487
            break;
488
        }
489
        started_new += 1;
×
490
    }
491

492
    if (rc != SIXEL_OK) {
×
493
        int j;
494

495
        for (j = i; j < new_target; ++j) {
×
496
            if (pool->workers[j] != NULL) {
×
497
                if (pool->workers[j]->workspace != NULL) {
×
498
                    free(pool->workers[j]->workspace);
×
499
                }
500
                free(pool->workers[j]);
×
501
                pool->workers[j] = NULL;
×
502
            }
503
        }
504
    }
505

506
    sixel_mutex_lock(&pool->mutex);
×
507
    pool->nthreads = pool->nthreads + started_new;
×
508
    sixel_mutex_unlock(&pool->mutex);
×
509

510
    return rc;
×
511
}
512

513
SIXELAPI int
514
threadpool_get_error(threadpool_t *pool)
×
515
{
516
    int error;
×
517

518
    if (pool == NULL) {
×
519
        return SIXEL_BAD_ARGUMENT;
520
    }
521
    sixel_mutex_lock(&pool->mutex);
×
522
    error = pool->error;
×
523
    sixel_mutex_unlock(&pool->mutex);
×
524
    return error;
×
525
}
526

527
/* emacs Local Variables:      */
528
/* emacs mode: c               */
529
/* emacs tab-width: 4          */
530
/* emacs indent-tabs-mode: nil */
531
/* emacs c-basic-offset: 4     */
532
/* emacs End:                  */
533
/* vim: set expandtab ts=4 sts=4 sw=4 : */
534
/* EOF */
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc