saitoha / libsixel / build 19339450837 (push via github)

13 Nov 2025 05:01PM UTC coverage: 43.831% (-0.3%) from 44.162%
Commit (saitoha): build: guard metadata probe on platforms without fork

8298 of 28238 branches covered (29.39%)
11836 of 27004 relevant lines covered (43.83%)
991518.58 hits per line

Source file: /src/threadpool.c (file coverage: 0.0%)

/*
 * Copyright (c) 2025 libsixel developers. See `AUTHORS`.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy of
 * this software and associated documentation files (the "Software"), to deal in
 * the Software without restriction, including without limitation the rights to
 * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
 * of the Software, and to permit persons to whom the Software is furnished to do
 * so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "config.h"

#include <errno.h>
#include <stdlib.h>
#include <string.h>

#include "threadpool.h"

typedef struct threadpool_worker threadpool_worker_t;

struct threadpool_worker {
    threadpool_t *pool;
    sixel_thread_t thread;
    void *workspace;
    int started;
};

struct threadpool {
    int nthreads;
    int qsize;
    size_t workspace_size;
    tp_worker_fn worker;
    void *userdata;
    tp_job_t *jobs;
    int head;
    int tail;
    int count;
    int running;
    int shutting_down;
    int joined;
    int error;
    int threads_started;
    sixel_mutex_t mutex;
    sixel_cond_t cond_not_empty;
    sixel_cond_t cond_not_full;
    sixel_cond_t cond_drained;
    int mutex_ready;
    int cond_not_empty_ready;
    int cond_not_full_ready;
    int cond_drained_ready;
    threadpool_worker_t *workers;
};
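
/*
 * Queue layout: `jobs` is a fixed ring of `qsize` slots. `head` indexes the
 * next job to pop, `tail` the next free slot, and `count` tracks how many
 * jobs are queued, so the ring is empty at count == 0 and full at
 * count == qsize. With qsize == 4, for example, four pushes wrap tail
 * 0 -> 1 -> 2 -> 3 -> 0 and leave count == 4; after one pop (head 0 -> 1,
 * count == 3) the next push lands back in slot 0.
 */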

static void threadpool_free(threadpool_t *pool);
static int threadpool_worker_main(void *arg);

/*
 * Release every dynamically allocated component of the pool. Callers must
 * ensure that worker threads have already terminated before invoking this
 * helper; otherwise joining would operate on freed memory.
 */
static void
threadpool_free(threadpool_t *pool)
{
    int i;

    if (pool == NULL) {
        return;
    }
    if (pool->workers != NULL) {
        for (i = 0; i < pool->nthreads; ++i) {
            if (pool->workers[i].workspace != NULL) {
                free(pool->workers[i].workspace);
            }
        }
        free(pool->workers);
    }
    if (pool->jobs != NULL) {
        free(pool->jobs);
    }
    if (pool->cond_drained_ready) {
        sixel_cond_destroy(&pool->cond_drained);
    }
    if (pool->cond_not_full_ready) {
        sixel_cond_destroy(&pool->cond_not_full);
    }
    if (pool->cond_not_empty_ready) {
        sixel_cond_destroy(&pool->cond_not_empty);
    }
    if (pool->mutex_ready) {
        sixel_mutex_destroy(&pool->mutex);
    }
    free(pool);
}

/*
 * Worker threads pull jobs from the ring buffer, execute the supplied callback
 * outside the critical section, and record the first failure code. All
 * synchronization is delegated to the mutex/condition helpers provided by the
 * threading abstraction.
 */
static int
threadpool_worker_main(void *arg)
{
    threadpool_worker_t *worker;
    threadpool_t *pool;
    tp_job_t job;
    int rc;

    worker = (threadpool_worker_t *)arg;
    pool = worker->pool;
    for (;;) {
        sixel_mutex_lock(&pool->mutex);
        while (pool->count == 0 && !pool->shutting_down) {
            sixel_cond_wait(&pool->cond_not_empty, &pool->mutex);
        }
        if (pool->count == 0 && pool->shutting_down) {
            sixel_mutex_unlock(&pool->mutex);
            break;
        }
        job = pool->jobs[pool->head];
        pool->head = (pool->head + 1) % pool->qsize;
        pool->count -= 1;
        pool->running += 1;
        sixel_cond_signal(&pool->cond_not_full);
        sixel_mutex_unlock(&pool->mutex);

        rc = pool->worker(job, pool->userdata, worker->workspace);

        sixel_mutex_lock(&pool->mutex);
        pool->running -= 1;
        if (rc != SIXEL_OK && pool->error == SIXEL_OK) {
            pool->error = rc;
        }
        if (pool->count == 0 && pool->running == 0) {
            sixel_cond_broadcast(&pool->cond_drained);
        }
        sixel_mutex_unlock(&pool->mutex);
    }
    return SIXEL_OK;
}
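
/*
 * Create a pool of `nthreads` workers sharing a ring buffer of `qsize` job
 * slots. When `workspace_size` is nonzero, each worker gets a zero-initialized
 * scratch buffer of that size which is passed to every job it executes.
 * Returns NULL on invalid arguments or resource exhaustion; when a
 * synchronization primitive cannot be initialized, errno is set to EINVAL.
 * Workers that did start are joined before a failed creation returns.
 */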
SIXELAPI threadpool_t *
threadpool_create(int nthreads,
                  int qsize,
                  size_t workspace_size,
                  tp_worker_fn worker,
                  void *userdata)
{
    threadpool_t *pool;
    int i;
    int rc;

    if (nthreads <= 0 || qsize <= 0 || worker == NULL) {
        return NULL;
    }
    pool = (threadpool_t *)calloc(1, sizeof(threadpool_t));
    if (pool == NULL) {
        return NULL;
    }
    pool->nthreads = nthreads;
    pool->qsize = qsize;
    pool->workspace_size = workspace_size;
    pool->worker = worker;
    pool->userdata = userdata;
    pool->jobs = NULL;
    pool->head = 0;
    pool->tail = 0;
    pool->count = 0;
    pool->running = 0;
    pool->shutting_down = 0;
    pool->joined = 0;
    pool->error = SIXEL_OK;
    pool->threads_started = 0;
    pool->mutex_ready = 0;
    pool->cond_not_empty_ready = 0;
    pool->cond_not_full_ready = 0;
    pool->cond_drained_ready = 0;
    pool->workers = NULL;

    rc = sixel_mutex_init(&pool->mutex);
    if (rc != SIXEL_OK) {
        errno = EINVAL;
        threadpool_free(pool);
        return NULL;
    }
    pool->mutex_ready = 1;

    rc = sixel_cond_init(&pool->cond_not_empty);
    if (rc != SIXEL_OK) {
        errno = EINVAL;
        threadpool_free(pool);
        return NULL;
    }
    pool->cond_not_empty_ready = 1;

    rc = sixel_cond_init(&pool->cond_not_full);
    if (rc != SIXEL_OK) {
        errno = EINVAL;
        threadpool_free(pool);
        return NULL;
    }
    pool->cond_not_full_ready = 1;

    rc = sixel_cond_init(&pool->cond_drained);
    if (rc != SIXEL_OK) {
        errno = EINVAL;
        threadpool_free(pool);
        return NULL;
    }
    pool->cond_drained_ready = 1;

    pool->jobs = (tp_job_t *)malloc(sizeof(tp_job_t) * (size_t)qsize);
    if (pool->jobs == NULL) {
        threadpool_free(pool);
        return NULL;
    }

    pool->workers = (threadpool_worker_t *)
        calloc((size_t)nthreads, sizeof(threadpool_worker_t));
    if (pool->workers == NULL) {
        threadpool_free(pool);
        return NULL;
    }

    for (i = 0; i < nthreads; ++i) {
        pool->workers[i].pool = pool;
        pool->workers[i].workspace = NULL;
        pool->workers[i].started = 0;
        if (workspace_size > 0) {
            /*
             * Zero-initialize the per-thread workspace so that structures like
             * `sixel_parallel_worker_state_t` start with predictable values.
             * The worker initialization logic assumes fields such as
             * `initialized` are cleared before the first job.
             */
            pool->workers[i].workspace = calloc(1, workspace_size);
            if (pool->workers[i].workspace == NULL) {
                pool->shutting_down = 1;
                sixel_cond_broadcast(&pool->cond_not_empty);
                break;
            }
        }
        rc = sixel_thread_create(&pool->workers[i].thread,
                                 threadpool_worker_main,
                                 &pool->workers[i]);
        if (rc != SIXEL_OK) {
            pool->shutting_down = 1;
            sixel_cond_broadcast(&pool->cond_not_empty);
            break;
        }
        pool->workers[i].started = 1;
        pool->threads_started += 1;
    }

    if (pool->threads_started != nthreads) {
        int started;

        started = pool->threads_started;
        for (i = 0; i < started; ++i) {
            sixel_cond_broadcast(&pool->cond_not_empty);
            sixel_thread_join(&pool->workers[i].thread);
        }
        threadpool_free(pool);
        return NULL;
    }

    return pool;
}
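
/*
 * Drain outstanding work and join the workers via threadpool_finish(), then
 * release every resource owned by the pool. NULL is accepted and ignored.
 */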
SIXELAPI void
threadpool_destroy(threadpool_t *pool)
{
    if (pool == NULL) {
        return;
    }
    threadpool_finish(pool);
    threadpool_free(pool);
}
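
/*
 * Enqueue one job, blocking while the ring buffer is full. Jobs pushed after
 * shutdown has begun are silently dropped, so callers should submit all work
 * before calling threadpool_finish().
 */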
SIXELAPI void
threadpool_push(threadpool_t *pool, tp_job_t job)
{
    if (pool == NULL) {
        return;
    }
    sixel_mutex_lock(&pool->mutex);
    if (pool->shutting_down) {
        sixel_mutex_unlock(&pool->mutex);
        return;
    }
    while (pool->count == pool->qsize && !pool->shutting_down) {
        sixel_cond_wait(&pool->cond_not_full, &pool->mutex);
    }
    if (pool->shutting_down) {
        sixel_mutex_unlock(&pool->mutex);
        return;
    }
    pool->jobs[pool->tail] = job;
    pool->tail = (pool->tail + 1) % pool->qsize;
    pool->count += 1;
    sixel_cond_signal(&pool->cond_not_empty);
    sixel_mutex_unlock(&pool->mutex);
}
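
/*
 * Wait until the queue is empty and no job is still running, then join every
 * worker thread. The `joined` flag makes repeated calls, including the
 * implicit one from threadpool_destroy(), harmless no-ops.
 */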
SIXELAPI void
threadpool_finish(threadpool_t *pool)
{
    int i;

    if (pool == NULL) {
        return;
    }
    sixel_mutex_lock(&pool->mutex);
    if (pool->joined) {
        sixel_mutex_unlock(&pool->mutex);
        return;
    }
    pool->shutting_down = 1;
    sixel_cond_broadcast(&pool->cond_not_empty);
    sixel_cond_broadcast(&pool->cond_not_full);
    while (pool->count > 0 || pool->running > 0) {
        sixel_cond_wait(&pool->cond_drained, &pool->mutex);
    }
    sixel_mutex_unlock(&pool->mutex);

    for (i = 0; i < pool->threads_started; ++i) {
        if (pool->workers[i].started) {
            sixel_thread_join(&pool->workers[i].thread);
            pool->workers[i].started = 0;
        }
    }

    sixel_mutex_lock(&pool->mutex);
    pool->joined = 1;
    sixel_mutex_unlock(&pool->mutex);
}

SIXELAPI int
threadpool_get_error(threadpool_t *pool)
{
    int error;

    if (pool == NULL) {
        return SIXEL_BAD_ARGUMENT;
    }
    sixel_mutex_lock(&pool->mutex);
    error = pool->error;
    sixel_mutex_unlock(&pool->mutex);
    return error;
}
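
/*
 * Typical lifecycle, as a minimal sketch. The layout of `tp_job_t` and the
 * SIXEL_OK status code come from threadpool.h and the sixel headers; the
 * worker body, `context`, `have_jobs()`, and `next_job()` below are
 * hypothetical placeholders.
 *
 *     static int
 *     encode_band(tp_job_t job, void *userdata, void *workspace)
 *     {
 *         (process the slice of work described by `job`)
 *         return SIXEL_OK;
 *     }
 *
 *     pool = threadpool_create(4, 16, 0, encode_band, context);
 *     if (pool == NULL) {
 *         (handle allocation or thread-creation failure)
 *     }
 *     while (have_jobs()) {
 *         threadpool_push(pool, next_job());  (blocks while the ring is full)
 *     }
 *     threadpool_finish(pool);                (drain the queue, join workers)
 *     status = threadpool_get_error(pool);    (first failure, or SIXEL_OK)
 *     threadpool_destroy(pool);
 */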

/* emacs Local Variables:      */
/* emacs mode: c               */
/* emacs tab-width: 4          */
/* emacs indent-tabs-mode: nil */
/* emacs c-basic-offset: 4     */
/* emacs End:                  */
/* vim: set expandtab ts=4 sts=4 sw=4 : */
/* EOF */