• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

OISF / suricata / 23374838686

21 Mar 2026 07:29AM UTC coverage: 59.341% (-20.0%) from 79.315%
23374838686

Pull #15075

github

web-flow
Merge 90b4e834f into 6587e363a
Pull Request #15075: Stack 8001 v16.4

38 of 70 new or added lines in 10 files covered. (54.29%)

34165 existing lines in 563 files now uncovered.

119621 of 201584 relevant lines covered (59.34%)

650666.92 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

44.22
/src/tmqh-packetpool.c
1
/* Copyright (C) 2007-2022 Open Information Security Foundation
2
 *
3
 * You can copy, redistribute or modify this Program under the terms of
4
 * the GNU General Public License version 2 as published by the Free
5
 * Software Foundation.
6
 *
7
 * This program is distributed in the hope that it will be useful,
8
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
10
 * GNU General Public License for more details.
11
 *
12
 * You should have received a copy of the GNU General Public License
13
 * version 2 along with this program; if not, write to the Free Software
14
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
15
 * 02110-1301, USA.
16
 */
17

18
/**
19
 * \file
20
 *
21
 * \author Victor Julien <victor@inliniac.net>
22
 *
23
 * Packetpool queue handlers. Packet pool is implemented as a stack.
24
 */
25

26
#include "suricata-common.h"
27
#include "tmqh-packetpool.h"
28
#include "tm-queuehandlers.h"
29
#include "tm-threads.h"
30
#include "threads.h"
31
#include "decode.h"
32
#include "tm-modules.h"
33
#include "packet.h"
34
#include "util-profiling.h"
35
#include "util-validate.h"
36
#include "action-globals.h"
37

38
extern uint32_t max_pending_packets;
39

40
/* Number of freed packet to save for one pool before freeing them. */
41
#define MAX_PENDING_RETURN_PACKETS 32
42
static uint32_t max_pending_return_packets = MAX_PENDING_RETURN_PACKETS;
43

44
thread_local PktPool thread_pkt_pool;
45

46
static inline PktPool *GetThreadPacketPool(void)
47
{
1,743,566✔
48
    return &thread_pkt_pool;
1,743,566✔
49
}
1,743,566✔
50

51
/**
 * \brief TmqhPacketpoolRegister
 * \initonly
 *
 * Registers the "packetpool" queue handler pair in tmqh_table:
 * TmqhInputPacketpool hands threads packets from the pool and
 * TmqhOutputPacketpool returns packets to it.
 */
void TmqhPacketpoolRegister (void)
{
    tmqh_table[TMQH_PACKETPOOL].name = "packetpool";
    tmqh_table[TMQH_PACKETPOOL].InHandler = TmqhInputPacketpool;
    tmqh_table[TMQH_PACKETPOOL].OutHandler = TmqhOutputPacketpool;
}
61

62
/** \brief Recompute the batch-return threshold for \a pool.
 *
 *  Scales max_pending_return_packets by the fraction of max_pending_packets
 *  currently sitting on the pool's local stack, and publishes the result to
 *  the return stack's atomic threshold. The atomic is only written when the
 *  value actually changed, to avoid needless cross-thread traffic.
 */
static void UpdateReturnThreshold(PktPool *pool)
{
    const float fill_ratio = (float)pool->cnt / (float)max_pending_packets;
    const uint32_t new_threshold = (uint32_t)(fill_ratio * (float)max_pending_return_packets);
    const uint32_t cur_threshold = SC_ATOMIC_GET(pool->return_stack.return_threshold);
    if (new_threshold != cur_threshold) {
        SC_ATOMIC_SET(pool->return_stack.return_threshold, new_threshold);
    }
}
70

71
void PacketPoolWait(void)
72
{
35,676✔
73
    PktPool *my_pool = GetThreadPacketPool();
35,676✔
74

75
    if (my_pool->head == NULL) {
35,676✔
UNCOV
76
        SC_ATOMIC_SET(my_pool->return_stack.return_threshold, 1);
×
77

UNCOV
78
        SCMutexLock(&my_pool->return_stack.mutex);
×
UNCOV
79
        int rc = 0;
×
UNCOV
80
        while (my_pool->return_stack.cnt == 0 && rc == 0) {
×
UNCOV
81
            rc = SCCondWait(&my_pool->return_stack.cond, &my_pool->return_stack.mutex);
×
UNCOV
82
        }
×
UNCOV
83
        SCMutexUnlock(&my_pool->return_stack.mutex);
×
84

UNCOV
85
        UpdateReturnThreshold(my_pool);
×
UNCOV
86
    }
×
87
}
35,676✔
88

89
/** \brief store an initialized packet in the thread's pool
 *
 *  Sets the packet's owning pool and release callback, then pushes it onto
 *  the pool via PacketPoolReturnPacket().
 *
 *  \warning Use *only* at init, not at packet runtime
 */
static void PacketPoolStorePacket(Packet *p)
{
    p->pool = GetThreadPacketPool();
    p->ReleasePacket = PacketPoolReturnPacket;
    PacketPoolReturnPacket(p);
}
99

100
/** \brief Drain the locked return stack into the local stack.
 *
 *  Takes the return stack's mutex, moves the whole list of returned packets
 *  onto the pool's lock-free local stack in one operation, and resets the
 *  return stack's count.
 *
 *  \note only call this when the local stack (pool->head) is empty, as the
 *        head pointer is overwritten rather than appended to.
 */
static void PacketPoolGetReturnedPackets(PktPool *pool)
{
    SCMutexLock(&pool->return_stack.mutex);
    pool->head = pool->return_stack.head;
    pool->cnt += pool->return_stack.cnt;
    pool->return_stack.head = NULL;
    pool->return_stack.cnt = 0;
    SCMutexUnlock(&pool->return_stack.mutex);
}
110

111
/** \brief Get a new packet from the packet pool
112
 *
113
 * Only allocates from the thread's local stack, or mallocs new packets.
114
 * If the local stack is empty, first move all the return stack packets to
115
 * the local stack.
116
 *  \retval Packet pointer, or NULL on failure.
117
 */
118
Packet *PacketPoolGetPacket(void)
119
{
853,688✔
120
    PktPool *pool = GetThreadPacketPool();
853,688✔
121
    DEBUG_VALIDATE_BUG_ON(pool->initialized == 0);
853,688✔
122
    DEBUG_VALIDATE_BUG_ON(pool->destroyed == 1);
853,688✔
123
    if (pool->head) {
853,688✔
124
        /* Stack is not empty. */
125
        Packet *p = pool->head;
853,688✔
126
        pool->head = p->next;
853,688✔
127
        pool->cnt--;
853,688✔
128
        p->pool = pool;
853,688✔
129
        PacketReinit(p);
853,688✔
130

131
        UpdateReturnThreshold(pool);
853,688✔
132
        SCLogDebug("pp: %0.2f cnt:%u max:%d threshold:%u",
853,688✔
133
                ((float)pool->cnt / (float)max_pending_packets) * (float)100, pool->cnt,
853,688✔
134
                max_pending_packets, SC_ATOMIC_GET(pool->return_stack.return_threshold));
853,688✔
135
        return p;
853,688✔
136
    }
853,688✔
137

138
    /* Local Stack is empty, so check the return stack, which requires
139
     * locking. */
UNCOV
140
    PacketPoolGetReturnedPackets(pool);
×
141

142
    /* Try to allocate again. Need to check for not empty again, since the
143
     * return stack might have been empty too.
144
     */
UNCOV
145
    if (pool->head) {
×
146
        /* Stack is not empty. */
UNCOV
147
        Packet *p = pool->head;
×
UNCOV
148
        pool->head = p->next;
×
UNCOV
149
        pool->cnt--;
×
UNCOV
150
        p->pool = pool;
×
UNCOV
151
        PacketReinit(p);
×
152

UNCOV
153
        UpdateReturnThreshold(pool);
×
UNCOV
154
        SCLogDebug("pp: %0.2f cnt:%u max:%d threshold:%u",
×
UNCOV
155
                ((float)pool->cnt / (float)max_pending_packets) * (float)100, pool->cnt,
×
UNCOV
156
                max_pending_packets, SC_ATOMIC_GET(pool->return_stack.return_threshold));
×
UNCOV
157
        return p;
×
UNCOV
158
    }
×
159

160
    /* Failed to allocate a packet, so return NULL. */
161
    /* Optionally, could allocate a new packet here. */
UNCOV
162
    return NULL;
×
UNCOV
163
}
×
164

165
/** \brief Return packet to Packet pool
166
 *
167
 */
168
void PacketPoolReturnPacket(Packet *p)
169
{
853,944✔
170
    PktPool *my_pool = GetThreadPacketPool();
853,944✔
171
    PktPool *pool = p->pool;
853,944✔
172
    if (pool == NULL) {
853,944✔
173
        PacketFree(p);
×
174
        return;
×
175
    }
×
176

177
    PacketReleaseRefs(p);
853,944✔
178

179
#ifdef DEBUG_VALIDATION
180
    BUG_ON(pool->initialized == 0);
181
    BUG_ON(pool->destroyed == 1);
182
    BUG_ON(my_pool->initialized == 0);
183
    BUG_ON(my_pool->destroyed == 1);
184
#endif /* DEBUG_VALIDATION */
185

186
    if (pool == my_pool) {
853,944✔
187
        /* Push back onto this thread's own stack, so no locking. */
188
        p->next = my_pool->head;
853,944✔
189
        my_pool->head = p;
853,944✔
190
        my_pool->cnt++;
853,944✔
191
    } else {
853,944✔
UNCOV
192
        PktPool *pending_pool = my_pool->pending_pool;
×
UNCOV
193
        if (pending_pool == NULL || pending_pool == pool) {
×
UNCOV
194
            if (pending_pool == NULL) {
×
195
                /* No pending packet, so store the current packet. */
UNCOV
196
                p->next = NULL;
×
UNCOV
197
                my_pool->pending_pool = pool;
×
UNCOV
198
                my_pool->pending_head = p;
×
UNCOV
199
                my_pool->pending_tail = p;
×
UNCOV
200
                my_pool->pending_count = 1;
×
UNCOV
201
            } else if (pending_pool == pool) {
×
202
                /* Another packet for the pending pool list. */
UNCOV
203
                p->next = my_pool->pending_head;
×
UNCOV
204
                my_pool->pending_head = p;
×
UNCOV
205
                my_pool->pending_count++;
×
UNCOV
206
            }
×
207

UNCOV
208
            const uint32_t threshold = SC_ATOMIC_GET(pool->return_stack.return_threshold);
×
UNCOV
209
            if (my_pool->pending_count >= threshold) {
×
210
                /* Return the entire list of pending packets. */
UNCOV
211
                SCMutexLock(&pool->return_stack.mutex);
×
UNCOV
212
                my_pool->pending_tail->next = pool->return_stack.head;
×
UNCOV
213
                pool->return_stack.head = my_pool->pending_head;
×
UNCOV
214
                pool->return_stack.cnt += my_pool->pending_count;
×
UNCOV
215
                SCCondSignal(&pool->return_stack.cond);
×
UNCOV
216
                SCMutexUnlock(&pool->return_stack.mutex);
×
217
                /* Clear the list of pending packets to return. */
UNCOV
218
                my_pool->pending_pool = NULL;
×
UNCOV
219
                my_pool->pending_head = NULL;
×
UNCOV
220
                my_pool->pending_tail = NULL;
×
UNCOV
221
                my_pool->pending_count = 0;
×
UNCOV
222
            }
×
UNCOV
223
        } else {
×
224
            /* Push onto return stack for this pool */
UNCOV
225
            SCMutexLock(&pool->return_stack.mutex);
×
UNCOV
226
            p->next = pool->return_stack.head;
×
UNCOV
227
            pool->return_stack.head = p;
×
UNCOV
228
            pool->return_stack.cnt++;
×
UNCOV
229
            SCCondSignal(&pool->return_stack.cond);
×
UNCOV
230
            SCMutexUnlock(&pool->return_stack.mutex);
×
UNCOV
231
        }
×
UNCOV
232
    }
×
233
}
853,944✔
234

235
void PacketPoolInit(void)
236
{
2✔
237
    PktPool *my_pool = GetThreadPacketPool();
2✔
238

239
#ifdef DEBUG_VALIDATION
240
    BUG_ON(my_pool->initialized);
241
    my_pool->initialized = 1;
242
    my_pool->destroyed = 0;
243
#endif /* DEBUG_VALIDATION */
244

245
    SCMutexInit(&my_pool->return_stack.mutex, NULL);
2✔
246
    SCCondInit(&my_pool->return_stack.cond, NULL);
2✔
247
    SC_ATOMIC_INIT(my_pool->return_stack.return_threshold);
2✔
248
    SC_ATOMIC_SET(my_pool->return_stack.return_threshold, 32);
2✔
249

250
    /* pre allocate packets */
251
    SCLogDebug("preallocating packets... packet size %" PRIuMAX "",
2✔
252
               (uintmax_t)SIZE_OF_PACKET);
2✔
253
    for (uint32_t i = 0; i < max_pending_packets; i++) {
258✔
254
        Packet *p = PacketGetFromAlloc();
256✔
255
        if (unlikely(p == NULL)) {
256✔
256
            FatalError("Fatal error encountered while allocating a packet. Exiting...");
×
257
        }
×
258
        PacketPoolStorePacket(p);
256✔
259
    }
256✔
260

261
    //SCLogInfo("preallocated %"PRIiMAX" packets. Total memory %"PRIuMAX"",
262
    //        max_pending_packets, (uintmax_t)(max_pending_packets*SIZE_OF_PACKET));
263
}
2✔
264

265
void PacketPoolDestroy(void)
UNCOV
266
{
×
UNCOV
267
    Packet *p = NULL;
×
UNCOV
268
    PktPool *my_pool = GetThreadPacketPool();
×
269

270
#ifdef DEBUG_VALIDATION
271
    BUG_ON(my_pool && my_pool->destroyed);
272
#endif /* DEBUG_VALIDATION */
273

UNCOV
274
    if (my_pool && my_pool->pending_pool != NULL) {
×
UNCOV
275
        p = my_pool->pending_head;
×
UNCOV
276
        while (p) {
×
UNCOV
277
            Packet *next_p = p->next;
×
UNCOV
278
            PacketFree(p);
×
UNCOV
279
            p = next_p;
×
UNCOV
280
            my_pool->pending_count--;
×
UNCOV
281
        }
×
282
#ifdef DEBUG_VALIDATION
283
        BUG_ON(my_pool->pending_count);
284
#endif /* DEBUG_VALIDATION */
UNCOV
285
        my_pool->pending_pool = NULL;
×
UNCOV
286
        my_pool->pending_head = NULL;
×
UNCOV
287
        my_pool->pending_tail = NULL;
×
UNCOV
288
    }
×
289

UNCOV
290
    while ((p = PacketPoolGetPacket()) != NULL) {
×
UNCOV
291
        PacketFree(p);
×
UNCOV
292
    }
×
293

294
#ifdef DEBUG_VALIDATION
295
    my_pool->initialized = 0;
296
    my_pool->destroyed = 1;
297
#endif /* DEBUG_VALIDATION */
UNCOV
298
}
×
299

300
/** \brief 'in' queue handler: hand the calling thread a packet from its pool
 *
 *  \param tv thread vars (unused)
 *  \retval Packet pointer, or NULL if both the local and return stacks
 *          are empty
 */
Packet *TmqhInputPacketpool(ThreadVars *tv)
{
    return PacketPoolGetPacket();
}
304

305
/** \brief 'out' queue handler: return a packet to its packet pool.
 *
 *  For plain packets this updates capture stats and releases the packet.
 *  For tunnel packets the root/leaf bookkeeping is done under the root's
 *  spinlock: a root with outstanding leaves is only marked verdicted and
 *  kept alive; the last leaf of a verdicted root releases the root too.
 *
 *  \param t thread vars (may be NULL, see TmqhReleasePacketsToPacketPool)
 *  \param p packet to return; released via p->ReleasePacket
 */
void TmqhOutputPacketpool(ThreadVars *t, Packet *p)
{
    /* set if we must also release p->root at the end */
    bool proot = false;

    SCEnter();
    SCLogDebug("Packet %p, p->root %p, alloced %s", p, p->root, BOOL2STR(p->pool == NULL));

    if (PacketIsTunnel(p)) {
        SCLogDebug("Packet %p is a tunnel packet: %s",
            p,p->root ? "upper layer" : "tunnel root");

        /* get a lock to access root packet fields */
        SCSpinlock *lock = p->root ? &p->root->persistent.tunnel_lock : &p->persistent.tunnel_lock;
        SCSpinLock(lock);

        if (PacketIsTunnelRoot(p)) {
            SCLogDebug("IS_TUNNEL_ROOT_PKT == TRUE");
            CaptureStatsUpdate(t, p); // TODO move out of lock

            /* leaves created (TPR) minus leaves already returned (RTV) */
            const uint16_t outstanding = TUNNEL_PKT_TPR(p) - TUNNEL_PKT_RTV(p);
            SCLogDebug("root pkt: outstanding %u", outstanding);
            if (outstanding == 0) {
                SCLogDebug("no tunnel packets outstanding, no more tunnel "
                        "packet(s) depending on this root");
                /* if this packet is the root and there are no
                 * more tunnel packets to consider
                 *
                 * return it to the pool */
            } else {
                SCLogDebug("tunnel root Packet %p: outstanding > 0, so "
                        "packets are still depending on this root, setting "
                        "SET_TUNNEL_PKT_VERDICTED", p);
                /* if this is the root and there are more tunnel
                 * packets, return this to the pool. It's still referenced
                 * by the tunnel packets, and we will return it
                 * when we handle them */
                PacketTunnelSetVerdicted(p);

                PACKET_PROFILING_END(p);
                SCSpinUnlock(lock);
                /* root stays alive for its leaves; do not release it here */
                SCReturn;
            }
        } else {
            SCLogDebug("NOT IS_TUNNEL_ROOT_PKT, so tunnel pkt");

            /* count this leaf as returned-to-verdict */
            TUNNEL_INCR_PKT_RTV_NOLOCK(p);
            const uint16_t outstanding = TUNNEL_PKT_TPR(p) - TUNNEL_PKT_RTV(p);
            SCLogDebug("tunnel pkt: outstanding %u", outstanding);
            /* all tunnel packets are processed except us. Root already
             * processed. So return tunnel pkt and root packet to the
             * pool. */
            if (outstanding == 0 && p->root && PacketTunnelIsVerdicted(p->root)) {
                SCLogDebug("root verdicted == true && no outstanding");

                /* handle freeing the root as well*/
                SCLogDebug("setting proot = 1 for root pkt, p->root %p "
                        "(tunnel packet %p)", p->root, p);
                proot = true;

                /* fall through */

            } else {
                /* root not ready yet, or not the last tunnel packet,
                 * so get rid of the tunnel pkt only */

                SCLogDebug("NOT IS_TUNNEL_PKT_VERDICTED (%s) || "
                           "outstanding > 0 (%u)",
                        (p->root && PacketTunnelIsVerdicted(p->root)) ? "true" : "false",
                        outstanding);

                /* fall through */
            }
        }
        SCSpinUnlock(lock);

        SCLogDebug("tunnel stuff done, move on (proot %d)", proot);

    } else {
        CaptureStatsUpdate(t, p);
    }

    SCLogDebug("[packet %p][%s] %s", p,
            PacketIsTunnel(p) ? PacketIsTunnelRoot(p) ? "tunnel::root" : "tunnel::leaf"
                              : "no tunnel",
            (p->action & ACTION_DROP) ? "DROP" : "no drop");

    /* we're done with the tunnel root now as well */
    if (proot) {
        SCLogDebug("getting rid of root pkt... alloc'd %s", BOOL2STR(p->root->pool == NULL));

        PacketReleaseRefs(p->root);
        p->root->ReleasePacket(p->root);
        p->root = NULL;
    }

    PACKET_PROFILING_END(p);

    PacketReleaseRefs(p);
    p->ReleasePacket(p);

    SCReturn;
}
407

408
/**
409
 *  \brief Release all the packets in the queue back to the packetpool.  Mainly
410
 *         used by threads that have failed, and wants to return the packets back
411
 *         to the packetpool.
412
 *
413
 *  \param pq Pointer to the packetqueue from which the packets have to be
414
 *            returned back to the packetpool
415
 *
416
 *  \warning this function assumes that the pq does not use locking
417
 */
418
void TmqhReleasePacketsToPacketPool(PacketQueue *pq)
419
{
×
420
    Packet *p = NULL;
×
421

422
    if (pq == NULL)
×
423
        return;
×
424

425
    while ((p = PacketDequeue(pq)) != NULL) {
×
426
        DEBUG_VALIDATE_BUG_ON(p->flow != NULL);
×
427
        TmqhOutputPacketpool(NULL, p);
×
428
    }
×
429
}
×
430

431
/** number of packets to keep reserved when calculating the pending
432
 *  return packets count. This assumes we need at max 10 packets in one
433
 *  PacketPoolWaitForN call. The actual number is 9 now, so this has a
434
 *  bit of margin. */
UNCOV
435
#define RESERVED_PACKETS 10
×
436

437
/**
438
 *  \brief Set the max_pending_return_packets value
439
 *
440
 *  Set it to the max pending packets value, divided by the number
441
 *  of lister threads. Normally, in autofp these are the stream/detect/log
442
 *  worker threads.
443
 *
444
 *  The max_pending_return_packets value needs to stay below the packet
445
 *  pool size of the 'producers' (normally pkt capture threads but also
446
 *  flow timeout injection ) to avoid a deadlock where all the 'workers'
447
 *  keep packets in their return pools, while the capture thread can't
448
 *  continue because its pool is empty.
449
 */
450
void PacketPoolPostRunmodes(void)
UNCOV
451
{
×
UNCOV
452
    extern uint32_t max_pending_packets;
×
UNCOV
453
    uint32_t pending_packets = max_pending_packets;
×
UNCOV
454
    if (pending_packets < RESERVED_PACKETS) {
×
455
        FatalError("'max-pending-packets' setting "
×
456
                   "must be at least %d",
×
457
                RESERVED_PACKETS);
×
458
    }
×
UNCOV
459
    uint32_t threads = TmThreadCountThreadsByTmmFlags(TM_FLAG_FLOWWORKER_TM);
×
UNCOV
460
    if (threads == 0)
×
UNCOV
461
        return;
×
462

UNCOV
463
    uint32_t packets = (pending_packets / threads) - 1;
×
UNCOV
464
    if (packets < max_pending_return_packets)
×
465
        max_pending_return_packets = packets;
×
466

467
    /* make sure to have a margin in the return logic */
UNCOV
468
    if (max_pending_return_packets >= RESERVED_PACKETS)
×
UNCOV
469
        max_pending_return_packets -= RESERVED_PACKETS;
×
470

UNCOV
471
    SCLogDebug("detect threads %u, max packets %u, max_pending_return_packets %u",
×
UNCOV
472
            threads, packets, max_pending_return_packets);
×
UNCOV
473
}
×
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc