• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

jasonish / suricata / 23268535754

18 Mar 2026 09:42PM UTC coverage: 79.084% (-0.2%) from 79.297%
23268535754

push

github

jasonish
github-ci/builds: update ubuntu builds to rust 1.89

264897 of 334955 relevant lines covered (79.08%)

3068071.33 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

94.25
/src/flow-timeout.c
1
/* Copyright (C) 2007-2024 Open Information Security Foundation
2
 *
3
 * You can copy, redistribute or modify this Program under the terms of
4
 * the GNU General Public License version 2 as published by the Free
5
 * Software Foundation.
6
 *
7
 * This program is distributed in the hope that it will be useful,
8
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
10
 * GNU General Public License for more details.
11
 *
12
 * You should have received a copy of the GNU General Public License
13
 * version 2 along with this program; if not, write to the Free Software
14
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
15
 * 02110-1301, USA.
16
 */
17

18
/**
19
 * \file
20
 *
21
 * \author Anoop Saldanha <anoopsaldanha@gmail.com>
22
 */
23

24
#include "suricata-common.h"
25
#include "suricata.h"
26
#include "decode.h"
27
#include "conf.h"
28
#include "threadvars.h"
29
#include "tm-threads.h"
30
#include "runmodes.h"
31

32
#include "util-random.h"
33
#include "util-time.h"
34

35
#include "flow.h"
36
#include "flow-queue.h"
37
#include "flow-hash.h"
38
#include "flow-util.h"
39
#include "flow-var.h"
40
#include "flow-private.h"
41
#include "flow-manager.h"
42
#include "flow-timeout.h"
43
#include "pkt-var.h"
44
#include "host.h"
45

46
#include "stream-tcp-private.h"
47
#include "stream-tcp-reassemble.h"
48
#include "stream-tcp.h"
49

50
#include "util-unittest.h"
51
#include "util-unittest-helper.h"
52
#include "util-byte.h"
53

54
#include "util-debug.h"
55
#include "util-privs.h"
56
#include "util-datalink.h"
57

58
#include "detect.h"
59
#include "detect-engine-state.h"
60
#include "stream.h"
61

62
#include "rust.h"
63
#include "app-layer-frames.h"
64
#include "app-layer-parser.h"
65
#include "app-layer.h"
66

67
#include "util-profiling.h"
68

69
/**
70
 * \internal
71
 * \brief Pseudo packet setup to finish a flow when needed.
72
 *
73
 * \param p         a dummy pseudo packet from packet pool.  Not all pseudo
74
 *                  packets need to force reassembly, in which case we just
75
 *                  set dummy ack/seq values.
76
 * \param direction Direction of the packet.  0 indicates toserver and 1
77
 *                  indicates toclient.
78
 * \param f         Pointer to the flow.
79
 * \param ssn       Pointer to the tcp session.
80
 * \retval          pseudo packet with everything set up
81
 */
82
static inline Packet *FlowPseudoPacketSetup(
83
        Packet *p, int direction, Flow *f, const TcpSession *ssn)
84
{
16,640✔
85
    const int orig_dir = direction;
16,640✔
86
    p->tenant_id = f->tenant_id;
16,640✔
87
    p->datalink = DatalinkGetGlobalType();
16,640✔
88
    p->proto = IPPROTO_TCP;
16,640✔
89
    FlowReference(&p->flow, f);
16,640✔
90
    p->flags |= PKT_STREAM_EST;
16,640✔
91
    p->flags |= PKT_HAS_FLOW;
16,640✔
92
    p->flags |= PKT_PSEUDO_STREAM_END;
16,640✔
93
    memcpy(&p->vlan_id[0], &f->vlan_id[0], sizeof(p->vlan_id));
16,640✔
94
    p->vlan_idx = f->vlan_idx;
16,640✔
95
    p->livedev = (struct LiveDevice_ *)f->livedev;
16,640✔
96

97
    if (f->flags & FLOW_NOPAYLOAD_INSPECTION) {
16,640✔
98
        DecodeSetNoPayloadInspectionFlag(p);
2,908✔
99
    }
2,908✔
100

101
    if (direction == 0)
16,640✔
102
        p->flowflags |= FLOW_PKT_TOSERVER;
8,461✔
103
    else
8,179✔
104
        p->flowflags |= FLOW_PKT_TOCLIENT;
8,179✔
105
    p->flowflags |= FLOW_PKT_ESTABLISHED;
16,640✔
106
    p->payload = NULL;
16,640✔
107
    p->payload_len = 0;
16,640✔
108

109
    /* apply reversed flow logic after setting direction to the packet */
110
    direction ^= ((f->flags & FLOW_DIR_REVERSED) != 0);
16,640✔
111

112
    if (FLOW_IS_IPV4(f)) {
16,640✔
113
        if (direction == 0) {
16,234✔
114
            FLOW_COPY_IPV4_ADDR_TO_PACKET(&f->src, &p->src);
8,316✔
115
            FLOW_COPY_IPV4_ADDR_TO_PACKET(&f->dst, &p->dst);
8,316✔
116
            p->sp = f->sp;
8,316✔
117
            p->dp = f->dp;
8,316✔
118
        } else {
8,481✔
119
            FLOW_COPY_IPV4_ADDR_TO_PACKET(&f->src, &p->dst);
7,918✔
120
            FLOW_COPY_IPV4_ADDR_TO_PACKET(&f->dst, &p->src);
7,918✔
121
            p->sp = f->dp;
7,918✔
122
            p->dp = f->sp;
7,918✔
123
        }
7,918✔
124

125
        /* Check if we have enough room in direct data. We need ipv4 hdr + tcp hdr.
126
         * Force an allocation if it is not the case.
127
         */
128
        if (GET_PKT_DIRECT_MAX_SIZE(p) <  40) {
16,234✔
129
            if (PacketCallocExtPkt(p, 40) == -1) {
×
130
                goto error;
×
131
            }
×
132
        }
×
133
        /* set the ip header */
134
        IPV4Hdr *ip4h = PacketSetIPV4(p, GET_PKT_DATA(p));
16,234✔
135
        /* version 4 and length 20 bytes for the tcp header */
136
        ip4h->ip_verhl = 0x45;
16,234✔
137
        ip4h->ip_tos = 0;
16,234✔
138
        ip4h->ip_len = htons(40);
16,234✔
139
        ip4h->ip_id = 0;
16,234✔
140
        ip4h->ip_off = 0;
16,234✔
141
        ip4h->ip_ttl = 64;
16,234✔
142
        ip4h->ip_proto = IPPROTO_TCP;
16,234✔
143
        //p->ip4h->ip_csum =
144
        if (direction == 0) {
16,234✔
145
            ip4h->s_ip_src.s_addr = f->src.addr_data32[0];
8,316✔
146
            ip4h->s_ip_dst.s_addr = f->dst.addr_data32[0];
8,316✔
147
        } else {
8,481✔
148
            ip4h->s_ip_src.s_addr = f->dst.addr_data32[0];
7,918✔
149
            ip4h->s_ip_dst.s_addr = f->src.addr_data32[0];
7,918✔
150
        }
7,918✔
151

152
        /* set the tcp header */
153
        PacketSetTCP(p, GET_PKT_DATA(p) + 20);
16,234✔
154

155
        SET_PKT_LEN(p, 40); /* ipv4 hdr + tcp hdr */
16,234✔
156

157
    } else if (FLOW_IS_IPV6(f)) {
16,234✔
158
        if (direction == 0) {
406✔
159
            FLOW_COPY_IPV6_ADDR_TO_PACKET(&f->src, &p->src);
200✔
160
            FLOW_COPY_IPV6_ADDR_TO_PACKET(&f->dst, &p->dst);
200✔
161
            p->sp = f->sp;
200✔
162
            p->dp = f->dp;
200✔
163
        } else {
206✔
164
            FLOW_COPY_IPV6_ADDR_TO_PACKET(&f->src, &p->dst);
206✔
165
            FLOW_COPY_IPV6_ADDR_TO_PACKET(&f->dst, &p->src);
206✔
166
            p->sp = f->dp;
206✔
167
            p->dp = f->sp;
206✔
168
        }
206✔
169

170
        /* Check if we have enough room in direct data. We need ipv6 hdr + tcp hdr.
171
         * Force an allocation if it is not the case.
172
         */
173
        if (GET_PKT_DIRECT_MAX_SIZE(p) <  60) {
406✔
174
            if (PacketCallocExtPkt(p, 60) == -1) {
×
175
                goto error;
×
176
            }
×
177
        }
×
178
        /* set the ip header */
179
        IPV6Hdr *ip6h = PacketSetIPV6(p, GET_PKT_DATA(p));
406✔
180
        /* version 6 */
181
        ip6h->s_ip6_vfc = 0x60;
406✔
182
        ip6h->s_ip6_flow = 0;
406✔
183
        ip6h->s_ip6_nxt = IPPROTO_TCP;
406✔
184
        ip6h->s_ip6_plen = htons(20);
406✔
185
        ip6h->s_ip6_hlim = 64;
406✔
186
        if (direction == 0) {
406✔
187
            ip6h->s_ip6_src[0] = f->src.addr_data32[0];
200✔
188
            ip6h->s_ip6_src[1] = f->src.addr_data32[1];
200✔
189
            ip6h->s_ip6_src[2] = f->src.addr_data32[2];
200✔
190
            ip6h->s_ip6_src[3] = f->src.addr_data32[3];
200✔
191
            ip6h->s_ip6_dst[0] = f->dst.addr_data32[0];
200✔
192
            ip6h->s_ip6_dst[1] = f->dst.addr_data32[1];
200✔
193
            ip6h->s_ip6_dst[2] = f->dst.addr_data32[2];
200✔
194
            ip6h->s_ip6_dst[3] = f->dst.addr_data32[3];
200✔
195
        } else {
206✔
196
            ip6h->s_ip6_src[0] = f->dst.addr_data32[0];
206✔
197
            ip6h->s_ip6_src[1] = f->dst.addr_data32[1];
206✔
198
            ip6h->s_ip6_src[2] = f->dst.addr_data32[2];
206✔
199
            ip6h->s_ip6_src[3] = f->dst.addr_data32[3];
206✔
200
            ip6h->s_ip6_dst[0] = f->src.addr_data32[0];
206✔
201
            ip6h->s_ip6_dst[1] = f->src.addr_data32[1];
206✔
202
            ip6h->s_ip6_dst[2] = f->src.addr_data32[2];
206✔
203
            ip6h->s_ip6_dst[3] = f->src.addr_data32[3];
206✔
204
        }
206✔
205

206
        /* set the tcp header */
207
        PacketSetTCP(p, GET_PKT_DATA(p) + 40);
406✔
208

209
        SET_PKT_LEN(p, 60); /* ipv6 hdr + tcp hdr */
406✔
210
    }
406✔
211

212
    p->l4.hdrs.tcph->th_offx2 = 0x50;
16,640✔
213
    p->l4.hdrs.tcph->th_flags = 0;
16,640✔
214
    p->l4.hdrs.tcph->th_win = 10;
16,640✔
215
    p->l4.hdrs.tcph->th_urp = 0;
16,640✔
216

217
    /* to server */
218
    if (orig_dir == 0) {
16,640✔
219
        p->l4.hdrs.tcph->th_sport = htons(f->sp);
8,461✔
220
        p->l4.hdrs.tcph->th_dport = htons(f->dp);
8,461✔
221

222
        p->l4.hdrs.tcph->th_seq = htonl(ssn->client.next_seq);
8,461✔
223
        p->l4.hdrs.tcph->th_ack = 0;
8,461✔
224

225
        /* to client */
226
    } else {
8,629✔
227
        p->l4.hdrs.tcph->th_sport = htons(f->dp);
8,179✔
228
        p->l4.hdrs.tcph->th_dport = htons(f->sp);
8,179✔
229

230
        p->l4.hdrs.tcph->th_seq = htonl(ssn->server.next_seq);
8,179✔
231
        p->l4.hdrs.tcph->th_ack = 0;
8,179✔
232
    }
8,179✔
233

234
    if (FLOW_IS_IPV4(f)) {
16,640✔
235
        IPV4Hdr *ip4h = p->l3.hdrs.ip4h;
16,234✔
236
        p->l4.hdrs.tcph->th_sum = TCPChecksum(ip4h->s_ip_addrs, (uint16_t *)p->l4.hdrs.tcph, 20, 0);
16,234✔
237
        /* calc ipv4 csum as we may log it and barnyard might reject
238
         * a wrong checksum */
239
        ip4h->ip_csum = IPV4Checksum((uint16_t *)ip4h, IPV4_GET_RAW_HLEN(ip4h), 0);
16,234✔
240
    } else if (FLOW_IS_IPV6(f)) {
16,234✔
241
        const IPV6Hdr *ip6h = PacketGetIPv6(p);
406✔
242
        p->l4.hdrs.tcph->th_sum =
406✔
243
                TCPChecksum(ip6h->s_ip6_addrs, (uint16_t *)p->l4.hdrs.tcph, 20, 0);
406✔
244
    }
406✔
245

246
    p->ts = TimeGet();
16,640✔
247

248
    if (direction == 0) {
16,640✔
249
        if (f->alparser && !STREAM_HAS_SEEN_DATA(&ssn->client)) {
8,516✔
250
            SCAppLayerParserStateSetFlag(f->alparser, APP_LAYER_PARSER_EOF_TS);
309✔
251
        }
309✔
252
    } else {
8,682✔
253
        if (f->alparser && !STREAM_HAS_SEEN_DATA(&ssn->server)) {
8,124✔
254
            SCAppLayerParserStateSetFlag(f->alparser, APP_LAYER_PARSER_EOF_TC);
1,292✔
255
        }
1,292✔
256
    }
8,124✔
257

258
    return p;
16,640✔
259

260
error:
×
261
    FlowDeReference(&p->flow);
×
262
    return NULL;
×
263
}
16,640✔
264

265
/**
 * \brief Grab a packet from the packet pool and set it up as a flow
 *        timeout pseudo packet.
 *
 * \param direction 0 for toserver, 1 for toclient.
 * \param f         Flow the pseudo packet belongs to.
 * \param ssn       TCP session of the flow.
 *
 * \retval the initialized pseudo packet, or NULL if no packet could be
 *         obtained or setup failed.
 */
Packet *FlowPseudoPacketGet(int direction, Flow *f, const TcpSession *ssn)
{
    Packet *pkt;

    /* wait until the pool can hand out a packet */
    PacketPoolWait();
    pkt = PacketPoolGetPacket();
    if (pkt == NULL) {
        return NULL;
    }

    PACKET_PROFILING_START(pkt);

    return FlowPseudoPacketSetup(pkt, direction, f, ssn);
}
277

278
/**
279
 *  \brief Check if a flow needs forced reassembly, or any other processing
280
 *
281
 *  \param f *LOCKED* flow
282
 *
283
 *  \retval false no
284
 *  \retval true yes
285
 */
286
bool FlowNeedsReassembly(Flow *f)
287
{
16,524✔
288
    if (f == NULL || f->protoctx == NULL) {
16,524✔
289
        return false;
64✔
290
    }
64✔
291

292
    TcpSession *ssn = (TcpSession *)f->protoctx;
16,460✔
293
    uint8_t client = StreamNeedsReassembly(ssn, STREAM_TOSERVER);
16,460✔
294
    uint8_t server = StreamNeedsReassembly(ssn, STREAM_TOCLIENT);
16,460✔
295

296
    /* if state is not fully closed we assume that we haven't fully
297
     * inspected the app layer state yet */
298
    if (ssn->state >= TCP_ESTABLISHED && ssn->state != TCP_CLOSED)
16,460✔
299
    {
8,677✔
300
        client = STREAM_HAS_UNPROCESSED_SEGMENTS_NEED_ONLY_DETECTION;
8,677✔
301
        server = STREAM_HAS_UNPROCESSED_SEGMENTS_NEED_ONLY_DETECTION;
8,677✔
302
    }
8,677✔
303

304
    /* if app layer still needs some love, push through */
305
    if (f->alproto != ALPROTO_UNKNOWN && f->alstate != NULL) {
16,460✔
306
        const uint64_t total_txs = AppLayerParserGetTxCnt(f, f->alstate);
11,099✔
307

308
        if (AppLayerParserGetTransactionActive(f, f->alparser, STREAM_TOCLIENT) < total_txs)
11,099✔
309
        {
7,369✔
310
            server = STREAM_HAS_UNPROCESSED_SEGMENTS_NEED_ONLY_DETECTION;
7,369✔
311
        }
7,369✔
312
        if (AppLayerParserGetTransactionActive(f, f->alparser, STREAM_TOSERVER) < total_txs)
11,099✔
313
        {
6,839✔
314
            client = STREAM_HAS_UNPROCESSED_SEGMENTS_NEED_ONLY_DETECTION;
6,839✔
315
        }
6,839✔
316
    }
11,099✔
317

318
    /* if any frame is present we assume it still needs work */
319
    FramesContainer *frames_container = AppLayerFramesGetContainer(f);
16,460✔
320
    if (frames_container) {
16,460✔
321
        if (frames_container->toserver.cnt)
2,703✔
322
            client = STREAM_HAS_UNPROCESSED_SEGMENTS_NEED_ONLY_DETECTION;
1,580✔
323
        if (frames_container->toclient.cnt)
2,703✔
324
            server = STREAM_HAS_UNPROCESSED_SEGMENTS_NEED_ONLY_DETECTION;
1,436✔
325
    }
2,703✔
326

327
    /* nothing to do */
328
    if (client == STREAM_HAS_UNPROCESSED_SEGMENTS_NONE &&
16,460✔
329
        server == STREAM_HAS_UNPROCESSED_SEGMENTS_NONE) {
16,460✔
330
        return false;
4,632✔
331
    }
4,632✔
332

333
    f->ffr_ts = client;
11,828✔
334
    f->ffr_tc = server;
11,828✔
335
    return true;
11,828✔
336
}
16,460✔
337

338
/**
339
 * \internal
340
 * \brief Sends the flow to its respective thread's flow queue.
341
 *
342
 *        The function requires flow to be locked beforehand.
343
 *
344
 * Normally, the first thread_id value should be used. This is when the flow is
345
 * created on seeing the first packet to the server; when the flow's reversed
346
 * flag is set, choose the second thread_id (to client/source).
347
 *
348
 * \param f Pointer to the flow.
349
 */
350
void FlowSendToLocalThread(Flow *f)
351
{
3,082✔
352
    // Choose the thread_id based on whether the flow has been
353
    // reversed.
354
    int idx = f->flags & FLOW_DIR_REVERSED ? 1 : 0;
3,082✔
355
    TmThreadsInjectFlowById(f, (const int)f->thread_id[idx]);
3,082✔
356
}
3,082✔
357

358
/**
 * \internal
 * \brief Remove flows from the hash bucket as they have more work to be done in
 *        in the detection engine.
 *
 * When this function is called we're running in virtually dead engine,
 * so locking the flows is not strictly required. The reasons it is still
 * done are:
 * - code consistency
 * - silence complaining profilers
 * - allow us to aggressively check using debug validation assertions
 * - be robust in case of future changes
 * - locking overhead is negligible when no other thread fights us
 */
static inline void FlowRemoveHash(void)
{
    /* walk every bucket of the global flow hash */
    for (uint32_t idx = 0; idx < flow_config.hash_size; idx++) {
        FlowBucket *fb = &flow_hash[idx];
        FBLOCK_LOCK(fb);

        Flow *f = fb->head;
        /* prev_f trails f so RemoveFromHash can unlink f from the
         * bucket's singly linked list */
        Flow *prev_f = NULL;

        /* we need to loop through all the flows in the queue */
        while (f != NULL) {
            /* save the successor now: f may be pulled out of the list */
            Flow *next_f = f->next;

            FLOWLOCK_WRLOCK(f);

            /* Get the tcp session for the flow */
            TcpSession *ssn = (TcpSession *)f->protoctx;
            /* \todo Also skip flows that shouldn't be inspected */
            if (ssn == NULL) {
                /* no tcp session: nothing to reassemble, keep in hash */
                FLOWLOCK_UNLOCK(f);
                prev_f = f;
                f = next_f;
                continue;
            }

            /* in case of additional work, we pull the flow out of the
             * hash and xfer ownership to the injected packet(s) */
            if (FlowNeedsReassembly(f)) {
                RemoveFromHash(f, prev_f);
                f->flow_end_flags |= FLOW_END_FLAG_SHUTDOWN;
                FlowSendToLocalThread(f);
                FLOWLOCK_UNLOCK(f);
                /* f was unlinked, so prev_f intentionally stays put */
                f = next_f;
                continue;
            }

            FLOWLOCK_UNLOCK(f);

            /* next flow in the queue */
            prev_f = f;
            f = f->next;
        }
        FBLOCK_UNLOCK(fb);
    }
}
417

418
/**
 * \brief Clean up all the flows that have unprocessed segments and have
 *        some work to do in the detection engine.
 *
 * Walks the whole flow hash (see FlowRemoveHash) and hands flows that
 * still need reassembly/detection off to their owning worker threads.
 */
void FlowWorkToDoCleanup(void)
{
    /* Carry out cleanup of unattended flows */
    FlowRemoveHash();
}
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc