saitoha / libsixel / build 20068035931

09 Dec 2025 03:01PM UTC coverage: 41.067% (-0.2%) from 41.292%
Triggered by a push (github), committed by saitoha:
"lookup: add vpte tile size tuning via environment variables"

10771 of 40760 branches covered (26.43%)
0 of 66 new or added lines in 2 files covered (0.0%)
1228 existing lines in 4 files are now uncovered
14798 of 36034 relevant lines covered (41.07%)
2724277.3 hits per line
Source File
/src/threadpool.c (0.0% covered; every executable line in this file was uncovered in this build)
/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright (c) 2025 libsixel developers. See `AUTHORS`.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy of
 * this software and associated documentation files (the "Software"), to deal in
 * the Software without restriction, including without limitation the rights to
 * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
 * of the Software, and to permit persons to whom the Software is furnished to do
 * so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "config.h"

#include <errno.h>
#include <stdlib.h>
#include <string.h>

#include "threadpool.h"
#include "threading.h"

typedef struct threadpool_worker threadpool_worker_t;

struct threadpool_worker {
    threadpool_t *pool;
    sixel_thread_t thread;
    void *workspace;
    int started;
    int index;
    int pinned;
};

struct threadpool {
    int nthreads;
    int qsize;
    size_t workspace_size;
    tp_worker_fn worker;
    void *userdata;
    tp_job_t *jobs;
    int head;
    int tail;
    int count;
    int running;
    int shutting_down;
    int joined;
    int error;
    int threads_started;
    int worker_capacity;
    int pin_threads;
    int hw_threads;
    sixel_mutex_t mutex;
    sixel_cond_t cond_not_empty;
    sixel_cond_t cond_not_full;
    sixel_cond_t cond_drained;
    int mutex_ready;
    int cond_not_empty_ready;
    int cond_not_full_ready;
    int cond_drained_ready;
    threadpool_worker_t **workers; /* owned worker slots (stable addresses) */
};

static void threadpool_free(threadpool_t *pool);
static int threadpool_worker_main(void *arg);
static int threadpool_spawn_worker(threadpool_t *pool,
                                   threadpool_worker_t *worker);

/*
 * Release every dynamically allocated component of the pool. Callers must
 * ensure that worker threads have already terminated before invoking this
 * helper; otherwise joining would operate on freed memory.
 */
static void
threadpool_free(threadpool_t *pool)
{
    int i;

    if (pool == NULL) {
        return;
    }
    if (pool->workers != NULL) {
        for (i = 0; i < pool->worker_capacity; ++i) {
            if (pool->workers[i] == NULL) {
                continue;
            }
            if (pool->workers[i]->workspace != NULL) {
                free(pool->workers[i]->workspace);
            }
            free(pool->workers[i]);
        }
        free(pool->workers);
    }
    if (pool->jobs != NULL) {
        free(pool->jobs);
    }
    if (pool->cond_drained_ready) {
        sixel_cond_destroy(&pool->cond_drained);
    }
    if (pool->cond_not_full_ready) {
        sixel_cond_destroy(&pool->cond_not_full);
    }
    if (pool->cond_not_empty_ready) {
        sixel_cond_destroy(&pool->cond_not_empty);
    }
    if (pool->mutex_ready) {
        sixel_mutex_destroy(&pool->mutex);
    }
    free(pool);
}

/*
 * Worker threads pull jobs from the ring buffer, execute the supplied callback
 * outside the critical section, and record the first failure code. All
 * synchronization is delegated to the mutex/condition helpers provided by the
 * threading abstraction.
 */
static int
threadpool_worker_main(void *arg)
{
    threadpool_worker_t *worker;
    threadpool_t *pool;
    tp_job_t job;
    int rc;

    worker = (threadpool_worker_t *)arg;
    pool = worker->pool;
    for (;;) {
        sixel_mutex_lock(&pool->mutex);
        while (pool->count == 0 && !pool->shutting_down) {
            sixel_cond_wait(&pool->cond_not_empty, &pool->mutex);
        }
        if (pool->count == 0 && pool->shutting_down) {
            sixel_mutex_unlock(&pool->mutex);
            break;
        }
        job = pool->jobs[pool->head];
        pool->head = (pool->head + 1) % pool->qsize;
        pool->count -= 1;
        pool->running += 1;
        sixel_cond_signal(&pool->cond_not_full);
        sixel_mutex_unlock(&pool->mutex);

        if (pool->pin_threads && !worker->pinned && pool->hw_threads > 0) {
            int cpu_index;

            cpu_index = worker->index % pool->hw_threads;
            (void)sixel_thread_pin_self(cpu_index);
            worker->pinned = 1;
        }

        rc = pool->worker(job, pool->userdata, worker->workspace);

        sixel_mutex_lock(&pool->mutex);
        pool->running -= 1;
        if (rc != SIXEL_OK && pool->error == SIXEL_OK) {
            pool->error = rc;
        }
        if (pool->count == 0 && pool->running == 0) {
            sixel_cond_broadcast(&pool->cond_drained);
        }
        sixel_mutex_unlock(&pool->mutex);
    }
    return SIXEL_OK;
}
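
/*
 * Illustrative sketch, not part of the original file: a callback of the
 * shape that the loop above invokes. The assumed signature (a tp_job_t, the
 * pool-wide userdata, and the per-thread workspace, returning a SIXEL status
 * code) is inferred from the call
 * `pool->worker(job, pool->userdata, worker->workspace)`; the authoritative
 * tp_worker_fn typedef lives in threadpool.h. The hypothetical
 * example_state_t relies on the workspace arriving zero-initialized, which
 * threadpool_create() below guarantees by allocating it with calloc().
 */
#if 0
typedef struct example_state {
    int initialized;    /* cleared by calloc(); set on the first job */
    /* per-thread scratch buffers would live here */
} example_state_t;

static int
example_worker(tp_job_t job, void *userdata, void *workspace)
{
    example_state_t *state = (example_state_t *)workspace;

    (void)userdata;
    if (!state->initialized) {
        /* first job on this thread: lazily set up per-thread resources */
        state->initialized = 1;
    }
    /* process `job`; a non-SIXEL_OK return is latched into pool->error */
    (void)job;
    return SIXEL_OK;
}
#endif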

SIXELAPI threadpool_t *
threadpool_create(int nthreads,
                  int qsize,
                  size_t workspace_size,
                  tp_worker_fn worker,
                  void *userdata)
{
    threadpool_t *pool;
    int i;
    int rc;

    if (nthreads <= 0 || qsize <= 0 || worker == NULL) {
        return NULL;
    }
    pool = (threadpool_t *)calloc(1, sizeof(threadpool_t));
    if (pool == NULL) {
        return NULL;
    }
    pool->nthreads = nthreads;
    pool->qsize = qsize;
    pool->workspace_size = workspace_size;
    pool->worker = worker;
    pool->userdata = userdata;
    pool->jobs = NULL;
    pool->head = 0;
    pool->tail = 0;
    pool->count = 0;
    pool->running = 0;
    pool->shutting_down = 0;
    pool->joined = 0;
    pool->error = SIXEL_OK;
    pool->threads_started = 0;
    pool->mutex_ready = 0;
    pool->cond_not_empty_ready = 0;
    pool->cond_not_full_ready = 0;
    pool->cond_drained_ready = 0;
    pool->pin_threads = 0;
    pool->hw_threads = 0;
    pool->workers = NULL;

    rc = sixel_mutex_init(&pool->mutex);
    if (rc != SIXEL_OK) {
        errno = EINVAL;
        threadpool_free(pool);
        return NULL;
    }
    pool->mutex_ready = 1;

    rc = sixel_cond_init(&pool->cond_not_empty);
    if (rc != SIXEL_OK) {
        errno = EINVAL;
        threadpool_free(pool);
        return NULL;
    }
    pool->cond_not_empty_ready = 1;

    rc = sixel_cond_init(&pool->cond_not_full);
    if (rc != SIXEL_OK) {
        errno = EINVAL;
        threadpool_free(pool);
        return NULL;
    }
    pool->cond_not_full_ready = 1;

    rc = sixel_cond_init(&pool->cond_drained);
    if (rc != SIXEL_OK) {
        errno = EINVAL;
        threadpool_free(pool);
        return NULL;
    }
    pool->cond_drained_ready = 1;

    pool->jobs = (tp_job_t *)malloc(sizeof(tp_job_t) * (size_t)qsize);
    if (pool->jobs == NULL) {
        threadpool_free(pool);
        return NULL;
    }

    pool->worker_capacity = nthreads;
    pool->workers = (threadpool_worker_t **)calloc((size_t)nthreads,
            sizeof(threadpool_worker_t *));
    if (pool->workers == NULL) {
        threadpool_free(pool);
        return NULL;
    }

    for (i = 0; i < nthreads; ++i) {
        pool->workers[i] = (threadpool_worker_t *)
            calloc(1, sizeof(threadpool_worker_t));
        if (pool->workers[i] == NULL) {
            pool->shutting_down = 1;
            sixel_cond_broadcast(&pool->cond_not_empty);
            break;
        }
        pool->workers[i]->pool = pool;
        pool->workers[i]->workspace = NULL;
        pool->workers[i]->started = 0;
        pool->workers[i]->index = i;
        pool->workers[i]->pinned = 0;
        if (workspace_size > 0) {
            /*
             * Zero-initialize the per-thread workspace so that structures like
             * `sixel_parallel_worker_state_t` start with predictable values.
             * The worker initialization logic assumes fields such as
             * `initialized` are cleared before the first job.
             */
            pool->workers[i]->workspace = calloc(1, workspace_size);
            if (pool->workers[i]->workspace == NULL) {
                pool->shutting_down = 1;
                sixel_cond_broadcast(&pool->cond_not_empty);
                break;
            }
        }
        rc = threadpool_spawn_worker(pool, pool->workers[i]);
        if (rc != SIXEL_OK) {
            break;
        }
    }

    if (pool->threads_started != nthreads) {
        int started;

        started = pool->threads_started;
        for (i = 0; i < started; ++i) {
            sixel_cond_broadcast(&pool->cond_not_empty);
            sixel_thread_join(&pool->workers[i]->thread);
        }
        threadpool_free(pool);
        return NULL;
    }

    return pool;
}

SIXELAPI void
threadpool_set_affinity(threadpool_t *pool, int pin_threads)
{
    if (pool == NULL) {
        return;
    }

    sixel_mutex_lock(&pool->mutex);
    pool->pin_threads = (pin_threads != 0) ? 1 : 0;
    if (pool->pin_threads != 0) {
        pool->hw_threads = sixel_get_hw_threads();
        if (pool->hw_threads < 1) {
            pool->pin_threads = 0;
        }
    } else {
        pool->hw_threads = 0;
    }
    sixel_mutex_unlock(&pool->mutex);
}

static int
threadpool_spawn_worker(threadpool_t *pool, threadpool_worker_t *worker)
{
    int rc;

    if (pool == NULL || worker == NULL) {
        return SIXEL_BAD_ARGUMENT;
    }
    rc = sixel_thread_create(&worker->thread,
                             threadpool_worker_main,
                             worker);
    if (rc != SIXEL_OK) {
        sixel_mutex_lock(&pool->mutex);
        pool->shutting_down = 1;
        sixel_cond_broadcast(&pool->cond_not_empty);
        sixel_mutex_unlock(&pool->mutex);
        return rc;
    }
    worker->started = 1;
    pool->threads_started += 1;
    return SIXEL_OK;
}

SIXELAPI void
threadpool_destroy(threadpool_t *pool)
{
    if (pool == NULL) {
        return;
    }
    threadpool_finish(pool);
    threadpool_free(pool);
}

SIXELAPI void
threadpool_push(threadpool_t *pool, tp_job_t job)
{
    if (pool == NULL) {
        return;
    }
    sixel_mutex_lock(&pool->mutex);
    if (pool->shutting_down) {
        sixel_mutex_unlock(&pool->mutex);
        return;
    }
    while (pool->count == pool->qsize && !pool->shutting_down) {
        sixel_cond_wait(&pool->cond_not_full, &pool->mutex);
    }
    if (pool->shutting_down) {
        sixel_mutex_unlock(&pool->mutex);
        return;
    }
    pool->jobs[pool->tail] = job;
    pool->tail = (pool->tail + 1) % pool->qsize;
    pool->count += 1;
    sixel_cond_signal(&pool->cond_not_empty);
    sixel_mutex_unlock(&pool->mutex);
}

SIXELAPI void
threadpool_finish(threadpool_t *pool)
{
    int i;

    if (pool == NULL) {
        return;
    }
    sixel_mutex_lock(&pool->mutex);
    if (pool->joined) {
        sixel_mutex_unlock(&pool->mutex);
        return;
    }
    pool->shutting_down = 1;
    sixel_cond_broadcast(&pool->cond_not_empty);
    sixel_cond_broadcast(&pool->cond_not_full);
    while (pool->count > 0 || pool->running > 0) {
        sixel_cond_wait(&pool->cond_drained, &pool->mutex);
    }
    sixel_mutex_unlock(&pool->mutex);

    for (i = 0; i < pool->threads_started; ++i) {
        if (pool->workers[i] != NULL && pool->workers[i]->started) {
            sixel_thread_join(&pool->workers[i]->thread);
            pool->workers[i]->started = 0;
        }
    }

    sixel_mutex_lock(&pool->mutex);
    pool->joined = 1;
    sixel_mutex_unlock(&pool->mutex);
}

SIXELAPI int
threadpool_grow(threadpool_t *pool, int additional_threads)
{
    threadpool_worker_t **expanded;
    int new_target;
    int started_new;
    int i;
    int rc;

    if (pool == NULL || additional_threads <= 0) {
        return SIXEL_OK;
    }

    sixel_mutex_lock(&pool->mutex);
    if (pool->shutting_down) {
        sixel_mutex_unlock(&pool->mutex);
        return SIXEL_RUNTIME_ERROR;
    }
    new_target = pool->nthreads + additional_threads;
    /*
     * Worker structs stay heap-allocated per slot so pointer-table growth
     * never invalidates addresses already held by running threads.
     */
    if (new_target > pool->worker_capacity) {
        expanded = (threadpool_worker_t **)realloc(
            pool->workers,
            (size_t)new_target * sizeof(threadpool_worker_t *));
        if (expanded == NULL) {
            sixel_mutex_unlock(&pool->mutex);
            return SIXEL_BAD_ALLOCATION;
        }
        memset(expanded + pool->worker_capacity,
               0,
               (size_t)(new_target - pool->worker_capacity)
                   * sizeof(threadpool_worker_t *));
        pool->workers = expanded;
        pool->worker_capacity = new_target;
    }
    sixel_mutex_unlock(&pool->mutex);

    started_new = 0;
    rc = SIXEL_OK;
    for (i = pool->nthreads; i < new_target; ++i) {
        pool->workers[i] = (threadpool_worker_t *)
            calloc(1, sizeof(threadpool_worker_t));
        if (pool->workers[i] == NULL) {
            rc = SIXEL_BAD_ALLOCATION;
            break;
        }
        pool->workers[i]->pool = pool;
        pool->workers[i]->workspace = NULL;
        pool->workers[i]->started = 0;
        pool->workers[i]->index = i;
        pool->workers[i]->pinned = 0;
        if (pool->workspace_size > 0) {
            pool->workers[i]->workspace =
                calloc(1, pool->workspace_size);
            if (pool->workers[i]->workspace == NULL) {
                rc = SIXEL_BAD_ALLOCATION;
                break;
            }
        }

        rc = threadpool_spawn_worker(pool, pool->workers[i]);
        if (rc != SIXEL_OK) {
            break;
        }
        started_new += 1;
    }

    if (rc != SIXEL_OK) {
        int j;

        for (j = i; j < new_target; ++j) {
            if (pool->workers[j] != NULL) {
                if (pool->workers[j]->workspace != NULL) {
                    free(pool->workers[j]->workspace);
                }
                free(pool->workers[j]);
                pool->workers[j] = NULL;
            }
        }
    }

    sixel_mutex_lock(&pool->mutex);
    pool->nthreads = pool->nthreads + started_new;
    sixel_mutex_unlock(&pool->mutex);

    return rc;
}

SIXELAPI int
threadpool_get_error(threadpool_t *pool)
{
    int error;

    if (pool == NULL) {
        return SIXEL_BAD_ARGUMENT;
    }
    sixel_mutex_lock(&pool->mutex);
    error = pool->error;
    sixel_mutex_unlock(&pool->mutex);
    return error;
}

/* emacs Local Variables:      */
/* emacs mode: c               */
/* emacs tab-width: 4          */
/* emacs indent-tabs-mode: nil */
/* emacs c-basic-offset: 4     */
/* emacs End:                  */
/* vim: set expandtab ts=4 sts=4 sw=4 : */
/* EOF */
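
Taken together, the functions above form a small bounded-queue pool: threadpool_create() allocates the ring buffer and starts the workers, threadpool_push() blocks while the queue is full, threadpool_finish() drains the queue and joins the threads, threadpool_get_error() reports the first failing job, and threadpool_destroy() finishes and frees the pool. The driver below is a minimal sketch, not taken from the libsixel sources: the callback name count_job is hypothetical, the tp_worker_fn signature is the one inferred from the call site in threadpool_worker_main(), and the tp_job_t argument is simply zero-filled because its fields are declared in threadpool.h rather than in this file (the SIXEL status macros are assumed to reach the sketch the same way they reach threadpool.c, through its headers).

#include <stdio.h>
#include <string.h>

#include "threadpool.h"   /* threadpool_t, tp_job_t, tp_worker_fn, status codes */

/* Hypothetical callback: counts jobs through the pool-wide userdata pointer. */
static int
count_job(tp_job_t job, void *userdata, void *workspace)
{
    (void)job;
    (void)workspace;              /* workspace_size of 0 keeps this NULL */
    ++*(int *)userdata;           /* only the single worker thread writes this */
    return SIXEL_OK;
}

int
main(void)
{
    threadpool_t *pool;
    tp_job_t job;
    int processed = 0;
    int i;

    /* one worker, queue depth 4, no per-thread workspace */
    pool = threadpool_create(1, 4, 0, count_job, &processed);
    if (pool == NULL) {
        return 1;
    }
    threadpool_set_affinity(pool, 1);   /* optional: pin workers to CPUs */

    memset(&job, 0, sizeof(job));       /* field layout comes from threadpool.h */
    for (i = 0; i < 8; ++i) {
        threadpool_push(pool, job);     /* blocks while the ring buffer is full */
    }

    threadpool_finish(pool);            /* drain remaining jobs, then join */
    printf("processed=%d error=%d\n", processed, threadpool_get_error(pool));
    threadpool_destroy(pool);           /* finish() is a no-op on a joined pool */
    return 0;
}

Reading processed only after threadpool_finish() returns is what keeps the sketch race-free: the drain-and-join provides the synchronization between the worker's writes and the caller's read.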