• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

OISF / suricata / 23374838686

21 Mar 2026 07:29AM UTC coverage: 59.341% (-20.0%) from 79.315%
23374838686

Pull #15075

github

web-flow
Merge 90b4e834f into 6587e363a
Pull Request #15075: Stack 8001 v16.4

38 of 70 new or added lines in 10 files covered. (54.29%)

34165 existing lines in 563 files now uncovered.

119621 of 201584 relevant lines covered (59.34%)

650666.92 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

77.43
/src/flow-timeout.c
1
/* Copyright (C) 2007-2024 Open Information Security Foundation
2
 *
3
 * You can copy, redistribute or modify this Program under the terms of
4
 * the GNU General Public License version 2 as published by the Free
5
 * Software Foundation.
6
 *
7
 * This program is distributed in the hope that it will be useful,
8
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
10
 * GNU General Public License for more details.
11
 *
12
 * You should have received a copy of the GNU General Public License
13
 * version 2 along with this program; if not, write to the Free Software
14
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
15
 * 02110-1301, USA.
16
 */
17

18
/**
19
 * \file
20
 *
21
 * \author Anoop Saldanha <anoopsaldanha@gmail.com>
22
 */
23

24
#include "suricata-common.h"
25
#include "suricata.h"
26
#include "decode.h"
27
#include "conf.h"
28
#include "threadvars.h"
29
#include "tm-threads.h"
30
#include "runmodes.h"
31

32
#include "util-random.h"
33
#include "util-time.h"
34

35
#include "flow.h"
36
#include "flow-queue.h"
37
#include "flow-hash.h"
38
#include "flow-util.h"
39
#include "flow-var.h"
40
#include "flow-private.h"
41
#include "flow-manager.h"
42
#include "flow-timeout.h"
43
#include "pkt-var.h"
44
#include "host.h"
45

46
#include "stream-tcp-private.h"
47
#include "stream-tcp-reassemble.h"
48
#include "stream-tcp.h"
49

50
#include "util-unittest.h"
51
#include "util-unittest-helper.h"
52
#include "util-byte.h"
53

54
#include "util-debug.h"
55
#include "util-privs.h"
56
#include "util-datalink.h"
57

58
#include "detect.h"
59
#include "detect-engine-state.h"
60
#include "stream.h"
61

62
#include "rust.h"
63
#include "app-layer-frames.h"
64
#include "app-layer-parser.h"
65
#include "app-layer.h"
66

67
#include "util-profiling.h"
68

69
/**
70
 * \internal
71
 * \brief Pseudo packet setup to finish a flow when needed.
72
 *
73
 * \param p         a dummy pseudo packet from packet pool.  Not all pseudo
74
 *                  packets need to force reassembly, in which case we just
75
 *                  set dummy ack/seq values.
76
 * \param direction Direction of the packet.  0 indicates toserver and 1
77
 *                  indicates toclient.
78
 * \param f         Pointer to the flow.
79
 * \param ssn       Pointer to the tcp session.
80
 * \retval          pseudo packet with everything set up
81
 */
82
static inline Packet *FlowPseudoPacketSetup(
        Packet *p, int direction, Flow *f, const TcpSession *ssn)
{
    /* keep the caller-requested direction: TCP ports/seq below use it,
     * while the IP-address direction may be flipped for reversed flows */
    const int orig_dir = direction;
    p->tenant_id = f->tenant_id;
    p->datalink = DatalinkGetGlobalType();
    p->proto = IPPROTO_TCP;
    /* take a flow reference; released via FlowDeReference on error */
    FlowReference(&p->flow, f);
    p->flags |= PKT_STREAM_EST;
    p->flags |= PKT_HAS_FLOW;
    p->flags |= PKT_PSEUDO_STREAM_END;
    memcpy(&p->vlan_id[0], &f->vlan_id[0], sizeof(p->vlan_id));
    p->vlan_idx = f->vlan_idx;
    p->livedev = (struct LiveDevice_ *)f->livedev;

    if (f->flags & FLOW_NOPAYLOAD_INSPECTION) {
        DecodeSetNoPayloadInspectionFlag(p);
    }

    /* direction flags are set from the caller's view, not the
     * (possibly reversed) flow storage direction */
    if (direction == 0)
        p->flowflags |= FLOW_PKT_TOSERVER;
    else
        p->flowflags |= FLOW_PKT_TOCLIENT;
    p->flowflags |= FLOW_PKT_ESTABLISHED;
    /* pseudo packet carries no payload */
    p->payload = NULL;
    p->payload_len = 0;

    /* apply reversed flow logic after setting direction to the packet */
    direction ^= ((f->flags & FLOW_DIR_REVERSED) != 0);

    if (FLOW_IS_IPV4(f)) {
        if (direction == 0) {
            FLOW_COPY_IPV4_ADDR_TO_PACKET(&f->src, &p->src);
            FLOW_COPY_IPV4_ADDR_TO_PACKET(&f->dst, &p->dst);
            p->sp = f->sp;
            p->dp = f->dp;
        } else {
            /* toclient: swap flow src/dst into the packet */
            FLOW_COPY_IPV4_ADDR_TO_PACKET(&f->src, &p->dst);
            FLOW_COPY_IPV4_ADDR_TO_PACKET(&f->dst, &p->src);
            p->sp = f->dp;
            p->dp = f->sp;
        }

        /* Check if we have enough room in direct data. We need ipv4 hdr + tcp hdr.
         * Force an allocation if it is not the case.
         */
        if (GET_PKT_DIRECT_MAX_SIZE(p) <  40) {
            if (PacketCallocExtPkt(p, 40) == -1) {
                goto error;
            }
        }
        /* set the ip header */
        IPV4Hdr *ip4h = PacketSetIPV4(p, GET_PKT_DATA(p));
        /* version 4 and length 20 bytes for the tcp header */
        ip4h->ip_verhl = 0x45;
        ip4h->ip_tos = 0;
        ip4h->ip_len = htons(40);
        ip4h->ip_id = 0;
        ip4h->ip_off = 0;
        ip4h->ip_ttl = 64;
        ip4h->ip_proto = IPPROTO_TCP;
        //p->ip4h->ip_csum =
        if (direction == 0) {
            ip4h->s_ip_src.s_addr = f->src.addr_data32[0];
            ip4h->s_ip_dst.s_addr = f->dst.addr_data32[0];
        } else {
            ip4h->s_ip_src.s_addr = f->dst.addr_data32[0];
            ip4h->s_ip_dst.s_addr = f->src.addr_data32[0];
        }

        /* set the tcp header */
        PacketSetTCP(p, GET_PKT_DATA(p) + 20);

        SET_PKT_LEN(p, 40); /* ipv4 hdr + tcp hdr */

    } else if (FLOW_IS_IPV6(f)) {
        if (direction == 0) {
            FLOW_COPY_IPV6_ADDR_TO_PACKET(&f->src, &p->src);
            FLOW_COPY_IPV6_ADDR_TO_PACKET(&f->dst, &p->dst);
            p->sp = f->sp;
            p->dp = f->dp;
        } else {
            FLOW_COPY_IPV6_ADDR_TO_PACKET(&f->src, &p->dst);
            FLOW_COPY_IPV6_ADDR_TO_PACKET(&f->dst, &p->src);
            p->sp = f->dp;
            p->dp = f->sp;
        }

        /* Check if we have enough room in direct data. We need ipv6 hdr + tcp hdr.
         * Force an allocation if it is not the case.
         */
        if (GET_PKT_DIRECT_MAX_SIZE(p) <  60) {
            if (PacketCallocExtPkt(p, 60) == -1) {
                goto error;
            }
        }
        /* set the ip header */
        IPV6Hdr *ip6h = PacketSetIPV6(p, GET_PKT_DATA(p));
        /* version 6 */
        ip6h->s_ip6_vfc = 0x60;
        ip6h->s_ip6_flow = 0;
        ip6h->s_ip6_nxt = IPPROTO_TCP;
        ip6h->s_ip6_plen = htons(20);
        ip6h->s_ip6_hlim = 64;
        if (direction == 0) {
            ip6h->s_ip6_src[0] = f->src.addr_data32[0];
            ip6h->s_ip6_src[1] = f->src.addr_data32[1];
            ip6h->s_ip6_src[2] = f->src.addr_data32[2];
            ip6h->s_ip6_src[3] = f->src.addr_data32[3];
            ip6h->s_ip6_dst[0] = f->dst.addr_data32[0];
            ip6h->s_ip6_dst[1] = f->dst.addr_data32[1];
            ip6h->s_ip6_dst[2] = f->dst.addr_data32[2];
            ip6h->s_ip6_dst[3] = f->dst.addr_data32[3];
        } else {
            ip6h->s_ip6_src[0] = f->dst.addr_data32[0];
            ip6h->s_ip6_src[1] = f->dst.addr_data32[1];
            ip6h->s_ip6_src[2] = f->dst.addr_data32[2];
            ip6h->s_ip6_src[3] = f->dst.addr_data32[3];
            ip6h->s_ip6_dst[0] = f->src.addr_data32[0];
            ip6h->s_ip6_dst[1] = f->src.addr_data32[1];
            ip6h->s_ip6_dst[2] = f->src.addr_data32[2];
            ip6h->s_ip6_dst[3] = f->src.addr_data32[3];
        }

        /* set the tcp header */
        PacketSetTCP(p, GET_PKT_DATA(p) + 40);

        SET_PKT_LEN(p, 60); /* ipv6 hdr + tcp hdr */
    }

    /* minimal TCP header: 20 byte data offset, no flags, dummy window */
    p->l4.hdrs.tcph->th_offx2 = 0x50;
    p->l4.hdrs.tcph->th_flags = 0;
    p->l4.hdrs.tcph->th_win = 10;
    p->l4.hdrs.tcph->th_urp = 0;

    /* to server */
    if (orig_dir == 0) {
        p->l4.hdrs.tcph->th_sport = htons(f->sp);
        p->l4.hdrs.tcph->th_dport = htons(f->dp);

        p->l4.hdrs.tcph->th_seq = htonl(ssn->client.next_seq);
        p->l4.hdrs.tcph->th_ack = 0;

        /* to client */
    } else {
        p->l4.hdrs.tcph->th_sport = htons(f->dp);
        p->l4.hdrs.tcph->th_dport = htons(f->sp);

        p->l4.hdrs.tcph->th_seq = htonl(ssn->server.next_seq);
        p->l4.hdrs.tcph->th_ack = 0;
    }

    if (FLOW_IS_IPV4(f)) {
        IPV4Hdr *ip4h = p->l3.hdrs.ip4h;
        p->l4.hdrs.tcph->th_sum = TCPChecksum(ip4h->s_ip_addrs, (uint16_t *)p->l4.hdrs.tcph, 20, 0);
        /* calc ipv4 csum as we may log it and barnyard might reject
         * a wrong checksum */
        ip4h->ip_csum = IPV4Checksum((uint16_t *)ip4h, IPV4_GET_RAW_HLEN(ip4h), 0);
    } else if (FLOW_IS_IPV6(f)) {
        const IPV6Hdr *ip6h = PacketGetIPv6(p);
        p->l4.hdrs.tcph->th_sum =
                TCPChecksum(ip6h->s_ip6_addrs, (uint16_t *)p->l4.hdrs.tcph, 20, 0);
    }

    p->ts = TimeGet();

    /* if no data was ever seen in this direction, flag app-layer EOF
     * so the parser can finalize its state */
    if (direction == 0) {
        if (f->alparser && !STREAM_HAS_SEEN_DATA(&ssn->client)) {
            SCAppLayerParserStateSetFlag(f->alparser, APP_LAYER_PARSER_EOF_TS);
        }
    } else {
        if (f->alparser && !STREAM_HAS_SEEN_DATA(&ssn->server)) {
            SCAppLayerParserStateSetFlag(f->alparser, APP_LAYER_PARSER_EOF_TC);
        }
    }

    return p;

error:
    /* drop the flow reference taken above before handing back failure */
    FlowDeReference(&p->flow);
    return NULL;
}
264

265
/**
 * \brief Grab a packet from the pool and set it up as a flow-end
 *        pseudo packet.
 *
 * \param direction 0 for toserver, 1 for toclient.
 * \param f         Flow the pseudo packet belongs to.
 * \param ssn       TCP session of the flow.
 *
 * \retval pseudo packet on success, NULL on pool exhaustion or setup failure.
 */
Packet *FlowPseudoPacketGet(int direction, Flow *f, const TcpSession *ssn)
{
    /* block until the packet pool can serve us */
    PacketPoolWait();

    Packet *pkt = PacketPoolGetPacket();
    if (pkt != NULL) {
        PACKET_PROFILING_START(pkt);
        /* returns NULL itself if header setup fails */
        pkt = FlowPseudoPacketSetup(pkt, direction, f, ssn);
    }
    return pkt;
}
277

278
/**
279
 *  \brief Check if a flow needs forced reassembly, or any other processing
280
 *
281
 *  \param f *LOCKED* flow
282
 *
283
 *  \retval false no
284
 *  \retval true yes
285
 */
286
/**
 * \brief Decide whether a flow still has stream/app-layer work pending.
 *
 * Records the per-direction "work to do" flags in f->ffr_ts / f->ffr_tc
 * when returning true. Requires the flow to be locked by the caller.
 *
 * \param f flow to inspect (may be NULL)
 *
 * \retval true  flow needs forced reassembly / further processing
 * \retval false nothing left to do
 */
bool FlowNeedsReassembly(Flow *f)
{
    if (f == NULL || f->protoctx == NULL)
        return false;

    TcpSession *ssn = (TcpSession *)f->protoctx;
    uint8_t ts_flag = StreamNeedsReassembly(ssn, STREAM_TOSERVER);
    uint8_t tc_flag = StreamNeedsReassembly(ssn, STREAM_TOCLIENT);

    /* a session that is established but not fully closed hasn't had its
     * app layer state fully inspected yet: force detection both ways */
    if (ssn->state >= TCP_ESTABLISHED && ssn->state != TCP_CLOSED) {
        ts_flag = STREAM_HAS_UNPROCESSED_SEGMENTS_NEED_ONLY_DETECTION;
        tc_flag = STREAM_HAS_UNPROCESSED_SEGMENTS_NEED_ONLY_DETECTION;
    }

    /* unfinished app-layer transactions also require a final pass */
    if (f->alproto != ALPROTO_UNKNOWN && f->alstate != NULL) {
        const uint64_t tx_cnt = AppLayerParserGetTxCnt(f, f->alstate);

        if (AppLayerParserGetTransactionActive(f, f->alparser, STREAM_TOCLIENT) < tx_cnt) {
            tc_flag = STREAM_HAS_UNPROCESSED_SEGMENTS_NEED_ONLY_DETECTION;
        }
        if (AppLayerParserGetTransactionActive(f, f->alparser, STREAM_TOSERVER) < tx_cnt) {
            ts_flag = STREAM_HAS_UNPROCESSED_SEGMENTS_NEED_ONLY_DETECTION;
        }
    }

    /* any pending frame means the flow still needs work */
    FramesContainer *fc = AppLayerFramesGetContainer(f);
    if (fc != NULL) {
        if (fc->toserver.cnt != 0)
            ts_flag = STREAM_HAS_UNPROCESSED_SEGMENTS_NEED_ONLY_DETECTION;
        if (fc->toclient.cnt != 0)
            tc_flag = STREAM_HAS_UNPROCESSED_SEGMENTS_NEED_ONLY_DETECTION;
    }

    if (ts_flag != STREAM_HAS_UNPROCESSED_SEGMENTS_NONE ||
            tc_flag != STREAM_HAS_UNPROCESSED_SEGMENTS_NONE) {
        /* publish the per-direction flags only when there is work */
        f->ffr_ts = ts_flag;
        f->ffr_tc = tc_flag;
        return true;
    }

    /* nothing to do */
    return false;
}
337

338
/**
339
 * \internal
340
 * \brief Sends the flow to its respective thread's flow queue.
341
 *
342
 *        The function requires flow to be locked beforehand.
343
 *
344
 * Normally, the first thread_id value should be used. This is when the flow is
345
 * created on seeing the first packet to the server; when the flow's reversed
346
 * flag is set, choose the second thread_id (to client/source).
347
 *
348
 * \param f Pointer to the flow.
349
 */
350
void FlowSendToLocalThread(Flow *f)
{
    /* normal flows use thread_id[0]; a flow whose direction was
     * reversed after creation belongs to thread_id[1] */
    int tid_idx = 0;
    if (f->flags & FLOW_DIR_REVERSED) {
        tid_idx = 1;
    }
    TmThreadsInjectFlowById(f, (const int)f->thread_id[tid_idx]);
}
357

358
/**
359
 * \internal
360
 * \brief Remove flows from the hash bucket as they have more work to be done in
361
 *        in the detection engine.
362
 *
363
 * When this function is called we're running in virtually dead engine,
364
 * so locking the flows is not strictly required. The reasons it is still
365
 * done are:
366
 * - code consistency
367
 * - silence complaining profilers
368
 * - allow us to aggressively check using debug validation assertions
369
 * - be robust in case of future changes
370
 * - locking overhead is negligible when no other thread fights us
371
 */
372
static inline void FlowRemoveHash(void)
{
    /* walk every bucket of the global flow hash */
    for (uint32_t idx = 0; idx < flow_config.hash_size; idx++) {
        FlowBucket *fb = &flow_hash[idx];
        FBLOCK_LOCK(fb);

        Flow *f = fb->head;
        Flow *prev_f = NULL;

        /* we need to loop through all the flows in the queue */
        while (f != NULL) {
            /* save the successor before we possibly unlink f */
            Flow *next_f = f->next;

            FLOWLOCK_WRLOCK(f);

            /* Get the tcp session for the flow */
            TcpSession *ssn = (TcpSession *)f->protoctx;
            /* \todo Also skip flows that shouldn't be inspected */
            if (ssn == NULL) {
                /* no TCP session: leave the flow in place, advance */
                FLOWLOCK_UNLOCK(f);
                prev_f = f;
                f = next_f;
                continue;
            }

            /* in case of additional work, we pull the flow out of the
             * hash and xfer ownership to the injected packet(s) */
            if (FlowNeedsReassembly(f)) {
                RemoveFromHash(f, prev_f);
                f->flow_end_flags |= FLOW_END_FLAG_SHUTDOWN;
                FlowSendToLocalThread(f);
                FLOWLOCK_UNLOCK(f);
                /* note: prev_f is deliberately NOT advanced here --
                 * after unlinking f, prev_f is still the predecessor
                 * of next_f in the bucket list */
                f = next_f;
                continue;
            }

            FLOWLOCK_UNLOCK(f);

            /* next flow in the queue */
            prev_f = f;
            f = f->next;
        }
        FBLOCK_UNLOCK(fb);
    }
}
417

418
/**
419
 * \brief Clean up all the flows that have unprocessed segments and have
420
 *        some work to do in the detection engine.
421
 */
422
/**
 * \brief Flush flows that still need stream/detect processing at shutdown.
 *
 * Walks the flow hash and hands any flow with outstanding work over to
 * its owning thread; flows with nothing pending are left alone.
 */
void FlowWorkToDoCleanup(void)
{
    /* Carry out cleanup of unattended flows */
    FlowRemoveHash();
}
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc