• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

OISF / suricata / 22618661228

02 Mar 2026 09:33PM UTC coverage: 42.258% (-34.4%) from 76.611%
22618661228

push

github

victorjulien
github-actions: bump actions/download-artifact from 7.0.0 to 8.0.0

Bumps [actions/download-artifact](https://github.com/actions/download-artifact) from 7.0.0 to 8.0.0.
- [Release notes](https://github.com/actions/download-artifact/releases)
- [Commits](https://github.com/actions/download-artifact/compare/37930b1c2...70fc10c6e)

---
updated-dependencies:
- dependency-name: actions/download-artifact
  dependency-version: 8.0.0
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>

91511 of 216553 relevant lines covered (42.26%)

3416852.41 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

92.92
/src/flow-timeout.c
1
/* Copyright (C) 2007-2024 Open Information Security Foundation
2
 *
3
 * You can copy, redistribute or modify this Program under the terms of
4
 * the GNU General Public License version 2 as published by the Free
5
 * Software Foundation.
6
 *
7
 * This program is distributed in the hope that it will be useful,
8
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
10
 * GNU General Public License for more details.
11
 *
12
 * You should have received a copy of the GNU General Public License
13
 * version 2 along with this program; if not, write to the Free Software
14
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
15
 * 02110-1301, USA.
16
 */
17

18
/**
19
 * \file
20
 *
21
 * \author Anoop Saldanha <anoopsaldanha@gmail.com>
22
 */
23

24
#include "suricata-common.h"
25
#include "suricata.h"
26
#include "decode.h"
27
#include "conf.h"
28
#include "threadvars.h"
29
#include "tm-threads.h"
30
#include "runmodes.h"
31

32
#include "util-random.h"
33
#include "util-time.h"
34

35
#include "flow.h"
36
#include "flow-queue.h"
37
#include "flow-hash.h"
38
#include "flow-util.h"
39
#include "flow-var.h"
40
#include "flow-private.h"
41
#include "flow-manager.h"
42
#include "flow-timeout.h"
43
#include "pkt-var.h"
44
#include "host.h"
45

46
#include "stream-tcp-private.h"
47
#include "stream-tcp-reassemble.h"
48
#include "stream-tcp.h"
49

50
#include "util-unittest.h"
51
#include "util-unittest-helper.h"
52
#include "util-byte.h"
53

54
#include "util-debug.h"
55
#include "util-privs.h"
56
#include "util-datalink.h"
57

58
#include "detect.h"
59
#include "detect-engine-state.h"
60
#include "stream.h"
61

62
#include "rust.h"
63
#include "app-layer-frames.h"
64
#include "app-layer-parser.h"
65
#include "app-layer.h"
66

67
#include "util-profiling.h"
68

69
/**
70
 * \internal
71
 * \brief Pseudo packet setup to finish a flow when needed.
72
 *
73
 * \param p         a dummy pseudo packet from packet pool.  Not all pseudo
74
 *                  packets need to force reassembly, in which case we just
75
 *                  set dummy ack/seq values.
76
 * \param direction Direction of the packet.  0 indicates toserver and 1
77
 *                  indicates toclient.
78
 * \param f         Pointer to the flow.
79
 * \param ssn       Pointer to the tcp session.
80
 * \retval          pseudo packet with everything set up
81
 */
82
static inline Packet *FlowPseudoPacketSetup(
        Packet *p, int direction, Flow *f, const TcpSession *ssn)
{
    /* keep the caller-requested direction: `direction` itself is flipped
     * below for reversed flows, but the TCP port/seq setup at the end must
     * use the original orientation */
    const int orig_dir = direction;
    p->tenant_id = f->tenant_id;
    p->datalink = DatalinkGetGlobalType();
    p->proto = IPPROTO_TCP;
    /* pseudo packet takes a reference on the flow */
    FlowReference(&p->flow, f);
    p->flags |= PKT_STREAM_EST;
    p->flags |= PKT_HAS_FLOW;
    p->flags |= PKT_PSEUDO_STREAM_END;
    /* inherit vlan layers and capture device from the flow */
    memcpy(&p->vlan_id[0], &f->vlan_id[0], sizeof(p->vlan_id));
    p->vlan_idx = f->vlan_idx;
    p->livedev = (struct LiveDevice_ *)f->livedev;

    if (f->flags & FLOW_NOPAYLOAD_INSPECTION) {
        DecodeSetNoPayloadInspectionFlag(p);
    }

    /* flow flags reflect the direction the caller asked for */
    if (direction == 0)
        p->flowflags |= FLOW_PKT_TOSERVER;
    else
        p->flowflags |= FLOW_PKT_TOCLIENT;
    p->flowflags |= FLOW_PKT_ESTABLISHED;
    p->payload = NULL;
    p->payload_len = 0;

    /* apply reversed flow logic after setting direction to the packet:
     * for a reversed flow the address tuple below must be swapped */
    direction ^= ((f->flags & FLOW_DIR_REVERSED) != 0);

    if (FLOW_IS_IPV4(f)) {
        if (direction == 0) {
            FLOW_COPY_IPV4_ADDR_TO_PACKET(&f->src, &p->src);
            FLOW_COPY_IPV4_ADDR_TO_PACKET(&f->dst, &p->dst);
            p->sp = f->sp;
            p->dp = f->dp;
        } else {
            FLOW_COPY_IPV4_ADDR_TO_PACKET(&f->src, &p->dst);
            FLOW_COPY_IPV4_ADDR_TO_PACKET(&f->dst, &p->src);
            p->sp = f->dp;
            p->dp = f->sp;
        }

        /* Check if we have enough room in direct data. We need ipv4 hdr + tcp hdr.
         * Force an allocation if it is not the case.
         */
        if (GET_PKT_DIRECT_MAX_SIZE(p) <  40) {
            if (PacketCallocExtPkt(p, 40) == -1) {
                goto error;
            }
        }
        /* set the ip header */
        IPV4Hdr *ip4h = PacketSetIPV4(p, GET_PKT_DATA(p));
        /* version 4 and length 20 bytes for the tcp header */
        ip4h->ip_verhl = 0x45;
        ip4h->ip_tos = 0;
        ip4h->ip_len = htons(40);
        ip4h->ip_id = 0;
        ip4h->ip_off = 0;
        ip4h->ip_ttl = 64;
        ip4h->ip_proto = IPPROTO_TCP;
        /* ip_csum is filled in at the end, once the header is complete */
        if (direction == 0) {
            ip4h->s_ip_src.s_addr = f->src.addr_data32[0];
            ip4h->s_ip_dst.s_addr = f->dst.addr_data32[0];
        } else {
            ip4h->s_ip_src.s_addr = f->dst.addr_data32[0];
            ip4h->s_ip_dst.s_addr = f->src.addr_data32[0];
        }

        /* set the tcp header */
        PacketSetTCP(p, GET_PKT_DATA(p) + 20);

        SET_PKT_LEN(p, 40); /* ipv4 hdr + tcp hdr */

    } else if (FLOW_IS_IPV6(f)) {
        if (direction == 0) {
            FLOW_COPY_IPV6_ADDR_TO_PACKET(&f->src, &p->src);
            FLOW_COPY_IPV6_ADDR_TO_PACKET(&f->dst, &p->dst);
            p->sp = f->sp;
            p->dp = f->dp;
        } else {
            FLOW_COPY_IPV6_ADDR_TO_PACKET(&f->src, &p->dst);
            FLOW_COPY_IPV6_ADDR_TO_PACKET(&f->dst, &p->src);
            p->sp = f->dp;
            p->dp = f->sp;
        }

        /* Check if we have enough room in direct data. We need ipv6 hdr + tcp hdr.
         * Force an allocation if it is not the case.
         */
        if (GET_PKT_DIRECT_MAX_SIZE(p) <  60) {
            if (PacketCallocExtPkt(p, 60) == -1) {
                goto error;
            }
        }
        /* set the ip header */
        IPV6Hdr *ip6h = PacketSetIPV6(p, GET_PKT_DATA(p));
        /* version 6 */
        ip6h->s_ip6_vfc = 0x60;
        ip6h->s_ip6_flow = 0;
        ip6h->s_ip6_nxt = IPPROTO_TCP;
        ip6h->s_ip6_plen = htons(20);
        ip6h->s_ip6_hlim = 64;
        if (direction == 0) {
            ip6h->s_ip6_src[0] = f->src.addr_data32[0];
            ip6h->s_ip6_src[1] = f->src.addr_data32[1];
            ip6h->s_ip6_src[2] = f->src.addr_data32[2];
            ip6h->s_ip6_src[3] = f->src.addr_data32[3];
            ip6h->s_ip6_dst[0] = f->dst.addr_data32[0];
            ip6h->s_ip6_dst[1] = f->dst.addr_data32[1];
            ip6h->s_ip6_dst[2] = f->dst.addr_data32[2];
            ip6h->s_ip6_dst[3] = f->dst.addr_data32[3];
        } else {
            ip6h->s_ip6_src[0] = f->dst.addr_data32[0];
            ip6h->s_ip6_src[1] = f->dst.addr_data32[1];
            ip6h->s_ip6_src[2] = f->dst.addr_data32[2];
            ip6h->s_ip6_src[3] = f->dst.addr_data32[3];
            ip6h->s_ip6_dst[0] = f->src.addr_data32[0];
            ip6h->s_ip6_dst[1] = f->src.addr_data32[1];
            ip6h->s_ip6_dst[2] = f->src.addr_data32[2];
            ip6h->s_ip6_dst[3] = f->src.addr_data32[3];
        }

        /* set the tcp header */
        PacketSetTCP(p, GET_PKT_DATA(p) + 40);

        SET_PKT_LEN(p, 60); /* ipv6 hdr + tcp hdr */
    }

    /* TCP header: data offset 5 (20 bytes), no flags, small window.
     * NOTE(review): tcph is only set in the IPv4/IPv6 branches above;
     * flows are presumably always one of the two — confirm */
    p->l4.hdrs.tcph->th_offx2 = 0x50;
    p->l4.hdrs.tcph->th_flags = 0;
    p->l4.hdrs.tcph->th_win = 10;
    p->l4.hdrs.tcph->th_urp = 0;

    /* ports and seq use the caller's original direction (orig_dir),
     * not the reversed-adjusted one */
    /* to server */
    if (orig_dir == 0) {
        p->l4.hdrs.tcph->th_sport = htons(f->sp);
        p->l4.hdrs.tcph->th_dport = htons(f->dp);

        p->l4.hdrs.tcph->th_seq = htonl(ssn->client.next_seq);
        p->l4.hdrs.tcph->th_ack = 0;

        /* to client */
    } else {
        p->l4.hdrs.tcph->th_sport = htons(f->dp);
        p->l4.hdrs.tcph->th_dport = htons(f->sp);

        p->l4.hdrs.tcph->th_seq = htonl(ssn->server.next_seq);
        p->l4.hdrs.tcph->th_ack = 0;
    }

    if (FLOW_IS_IPV4(f)) {
        IPV4Hdr *ip4h = p->l3.hdrs.ip4h;
        p->l4.hdrs.tcph->th_sum = TCPChecksum(ip4h->s_ip_addrs, (uint16_t *)p->l4.hdrs.tcph, 20, 0);
        /* calc ipv4 csum as we may log it and barnyard might reject
         * a wrong checksum */
        ip4h->ip_csum = IPV4Checksum((uint16_t *)ip4h, IPV4_GET_RAW_HLEN(ip4h), 0);
    } else if (FLOW_IS_IPV6(f)) {
        const IPV6Hdr *ip6h = PacketGetIPv6(p);
        p->l4.hdrs.tcph->th_sum =
                TCPChecksum(ip6h->s_ip6_addrs, (uint16_t *)p->l4.hdrs.tcph, 20, 0);
    }

    p->ts = TimeGet();

    /* if the stream in this (possibly reversed) direction never carried
     * data, flag EOF to the app layer parser so it can finalize */
    if (direction == 0) {
        if (f->alparser && !STREAM_HAS_SEEN_DATA(&ssn->client)) {
            SCAppLayerParserStateSetFlag(f->alparser, APP_LAYER_PARSER_EOF_TS);
        }
    } else {
        if (f->alparser && !STREAM_HAS_SEEN_DATA(&ssn->server)) {
            SCAppLayerParserStateSetFlag(f->alparser, APP_LAYER_PARSER_EOF_TC);
        }
    }

    return p;

error:
    /* drop the flow reference taken above before bailing */
    FlowDeReference(&p->flow);
    return NULL;
}
264

265
/**
 * \brief Grab a packet from the pool and turn it into a flow-end pseudo packet.
 *
 * \param direction 0 for toserver, 1 for toclient.
 * \param f         Flow the pseudo packet belongs to.
 * \param ssn       TCP session of the flow.
 *
 * \retval Fully set up pseudo packet, or NULL on failure.
 */
Packet *FlowPseudoPacketGet(int direction, Flow *f, const TcpSession *ssn)
{
    Packet *p;

    /* block until the pool can hand out a packet */
    PacketPoolWait();
    if ((p = PacketPoolGetPacket()) == NULL) {
        return NULL;
    }

    PACKET_PROFILING_START(p);

    return FlowPseudoPacketSetup(p, direction, f, ssn);
}
277

278
/**
279
 *  \brief Check if a flow needs forced reassembly, or any other processing
280
 *
281
 *  \param f *LOCKED* flow
282
 *
283
 *  \retval false no
284
 *  \retval true yes
285
 */
286
bool FlowNeedsReassembly(Flow *f)
287
{
4,084✔
288
    if (f == NULL || f->protoctx == NULL) {
4,084✔
289
        return false;
4✔
290
    }
4✔
291

292
    TcpSession *ssn = (TcpSession *)f->protoctx;
4,080✔
293
    uint8_t client = StreamNeedsReassembly(ssn, STREAM_TOSERVER);
4,080✔
294
    uint8_t server = StreamNeedsReassembly(ssn, STREAM_TOCLIENT);
4,080✔
295

296
    /* if state is not fully closed we assume that we haven't fully
297
     * inspected the app layer state yet */
298
    if (ssn->state >= TCP_ESTABLISHED && ssn->state != TCP_CLOSED)
4,080✔
299
    {
1,106✔
300
        client = STREAM_HAS_UNPROCESSED_SEGMENTS_NEED_ONLY_DETECTION;
1,106✔
301
        server = STREAM_HAS_UNPROCESSED_SEGMENTS_NEED_ONLY_DETECTION;
1,106✔
302
    }
1,106✔
303

304
    /* if app layer still needs some love, push through */
305
    if (f->alproto != ALPROTO_UNKNOWN && f->alstate != NULL) {
4,080✔
306
        const uint64_t total_txs = AppLayerParserGetTxCnt(f, f->alstate);
3,235✔
307

308
        if (AppLayerParserGetTransactionActive(f, f->alparser, STREAM_TOCLIENT) < total_txs)
3,235✔
309
        {
1,634✔
310
            server = STREAM_HAS_UNPROCESSED_SEGMENTS_NEED_ONLY_DETECTION;
1,634✔
311
        }
1,634✔
312
        if (AppLayerParserGetTransactionActive(f, f->alparser, STREAM_TOSERVER) < total_txs)
3,235✔
313
        {
1,170✔
314
            client = STREAM_HAS_UNPROCESSED_SEGMENTS_NEED_ONLY_DETECTION;
1,170✔
315
        }
1,170✔
316
    }
3,235✔
317

318
    /* if any frame is present we assume it still needs work */
319
    FramesContainer *frames_container = AppLayerFramesGetContainer(f);
4,080✔
320
    if (frames_container) {
4,080✔
321
        if (frames_container->toserver.cnt)
×
322
            client = STREAM_HAS_UNPROCESSED_SEGMENTS_NEED_ONLY_DETECTION;
×
323
        if (frames_container->toclient.cnt)
×
324
            server = STREAM_HAS_UNPROCESSED_SEGMENTS_NEED_ONLY_DETECTION;
×
325
    }
×
326

327
    /* nothing to do */
328
    if (client == STREAM_HAS_UNPROCESSED_SEGMENTS_NONE &&
4,080✔
329
        server == STREAM_HAS_UNPROCESSED_SEGMENTS_NONE) {
4,080✔
330
        return false;
1,306✔
331
    }
1,306✔
332

333
    f->ffr_ts = client;
2,774✔
334
    f->ffr_tc = server;
2,774✔
335
    return true;
2,774✔
336
}
4,080✔
337

338
/**
339
 * \internal
340
 * \brief Sends the flow to its respective thread's flow queue.
341
 *
342
 *        The function requires flow to be locked beforehand.
343
 *
344
 * Normally, the first thread_id value should be used. This is when the flow is
345
 * created on seeing the first packet to the server; when the flow's reversed
346
 * flag is set, choose the second thread_id (to client/source).
347
 *
348
 * \param f Pointer to the flow.
349
 */
350
void FlowSendToLocalThread(Flow *f)
351
{
1,385✔
352
    // Choose the thread_id based on whether the flow has been
353
    // reversed.
354
    int idx = f->flags & FLOW_DIR_REVERSED ? 1 : 0;
1,385✔
355
    TmThreadsInjectFlowById(f, (const int)f->thread_id[idx]);
1,385✔
356
}
1,385✔
357

358
/**
 * \internal
 * \brief Remove flows from the hash bucket as they have more work to be done in
 *        in the detection engine.
 *
 * When this function is called we're running in virtually dead engine,
 * so locking the flows is not strictly required. The reasons it is still
 * done are:
 * - code consistency
 * - silence complaining profilers
 * - allow us to aggressively check using debug validation assertions
 * - be robust in case of future changes
 * - locking overhead is negligible when no other thread fights us
 */
static inline void FlowRemoveHash(void)
{
    /* walk every bucket of the flow hash */
    for (uint32_t idx = 0; idx < flow_config.hash_size; idx++) {
        FlowBucket *fb = &flow_hash[idx];
        FBLOCK_LOCK(fb);

        Flow *f = fb->head;
        Flow *prev_f = NULL;

        /* we need to loop through all the flows in the queue */
        while (f != NULL) {
            /* save the successor up front: f may be pulled out of the
             * list below, after which f->next is no longer ours to read */
            Flow *next_f = f->next;

            FLOWLOCK_WRLOCK(f);

            /* Get the tcp session for the flow */
            TcpSession *ssn = (TcpSession *)f->protoctx;
            /* \todo Also skip flows that shouldn't be inspected */
            if (ssn == NULL) {
                /* non-TCP (no session): leave it in the hash */
                FLOWLOCK_UNLOCK(f);
                prev_f = f;
                f = next_f;
                continue;
            }

            /* in case of additional work, we pull the flow out of the
             * hash and xfer ownership to the injected packet(s) */
            if (FlowNeedsReassembly(f)) {
                RemoveFromHash(f, prev_f);
                f->flow_end_flags |= FLOW_END_FLAG_SHUTDOWN;
                FlowSendToLocalThread(f);
                FLOWLOCK_UNLOCK(f);
                /* prev_f deliberately unchanged: f was unlinked, so the
                 * predecessor of next_f is still prev_f */
                f = next_f;
                continue;
            }

            FLOWLOCK_UNLOCK(f);

            /* next flow in the queue */
            prev_f = f;
            f = f->next;
        }
        FBLOCK_UNLOCK(fb);
    }
}
417

418
/**
 * \brief At shutdown, flush out every flow that still has unprocessed
 *        segments or pending detection-engine work.
 */
void FlowWorkToDoCleanup(void)
{
    /* walk the full flow hash and hand off flows that need more work */
    FlowRemoveHash();
}
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc