TyRoXx / NonlocalityOS / 22053886699

16 Feb 2026 07:30AM UTC coverage: 78.279% (+0.2%) from 78.043%

Pull Request #418: Fix: Sometimes the storage garbage collector appears to collect new trees that are still needed
Merge c5d3b921a into e3367509b (github / web-flow)

723 of 826 new or added lines in 29 files covered (87.53%).
54 existing lines in 5 files now uncovered.
7323 of 9355 relevant lines covered (78.28%).
26492.72 hits per line.

Source File

/fuzz/fuzz_functions/src/write_read_large_files.rs (64.49% file coverage)
use astraea::{
    in_memory_storage::InMemoryTreeStorage,
    storage::StoreTree,
    tree::{HashedTree, Tree, TreeBlob, TreeChildren, TREE_BLOB_MAX_LENGTH},
};
use dogbox_tree_editor::{OpenFileContentBuffer, OptimizedWriteBuffer};
use pretty_assertions::assert_eq;
use serde::{Deserialize, Serialize};
use std::{collections::BTreeSet, sync::Arc};
use tokio::runtime::Runtime;
use tracing::info;

struct BufferState {
    storage: Arc<InMemoryTreeStorage>,
    buffer: OpenFileContentBuffer,
}

impl BufferState {
    fn new(storage: Arc<InMemoryTreeStorage>, buffer: OpenFileContentBuffer) -> Self {
        Self { storage, buffer }
    }
}

async fn compare_buffers(buffers: &mut [BufferState]) {
    assert_eq!(
        1,
        std::collections::BTreeSet::from_iter(buffers.iter().map(|buffer| buffer.buffer.size()))
            .len()
    );
    let mut checked = 0;
    let expected_size = buffers[0].buffer.size();
    while checked < expected_size {
        let mut all_read_bytes = std::collections::BTreeSet::new();
        let position = checked;
        for read_result in buffers.iter_mut().map(|buffer| {
            buffer.buffer.read(
                position,
                (expected_size - position) as usize,
                buffer.storage.clone(),
            )
        }) {
            let read_bytes = read_result.await.unwrap();
            let is_expected_to_be_new = all_read_bytes.is_empty();
            if is_expected_to_be_new {
                checked += read_bytes.len() as u64;
            }
            let is_new = all_read_bytes.insert(read_bytes);
            assert_eq!(is_expected_to_be_new, is_new);
        }
    }
    assert_eq!(expected_size, checked);
}

#[derive(Serialize, Deserialize, Debug)]
enum FileOperation {
    Write {
        position: u32,
        data: Vec<u8>,
    },
    WriteRandomData {
        position: u32,
        size: u32,
    },
    Nothing,
    WriteWholeBlockOfRandomData {
        block_index: u16,
    },
    CopyBlock {
        from_block_index: u16,
        to_block_index: u16,
    },
    SaveToStorage,
}

#[derive(Serialize, Deserialize, Debug)]
pub struct GeneratedTest {
    operations: Vec<FileOperation>,
    write_buffer_in_blocks: u8,
}

async fn write_to_all_buffers(buffers: &mut [BufferState], position: u64, data: &bytes::Bytes) {
    for buffer in buffers {
        buffer
            .buffer
            .write(
                position,
                OptimizedWriteBuffer::from_bytes(position, data.clone()).await,
                buffer.storage.clone(),
            )
            .await
            .unwrap();
    }
}

async fn read_from_all_buffers(
    buffers: &mut [BufferState],
    position: u64,
    count: usize,
) -> Option<bytes::Bytes> {
    let mut all_data_read = BTreeSet::new();
    for buffer in buffers {
        let data_read = buffer
            .buffer
            .read(position, count, buffer.storage.clone())
            .await
            .unwrap();
        assert!(data_read.len() <= count);
        all_data_read.insert(data_read);
    }
    assert_eq!(1, all_data_read.len());
    let read = all_data_read.into_iter().next().unwrap();
    if read.len() == count {
        Some(read)
    } else {
        None
    }
}

async fn save_all_buffers(buffers: &mut [BufferState]) {
    let mut status = BTreeSet::new();
    for buffer in buffers {
        buffer
            .buffer
            .store_all(buffer.storage.clone())
            .await
            .unwrap();
        status.insert(buffer.buffer.last_known_digest());
    }
    assert_eq!(1, status.len());
}

fn run_generated_test(test: GeneratedTest) -> bool {
    let runtime = Runtime::new().unwrap();
    runtime.block_on(async move {
        let max_tested_file_size: u64 = TREE_BLOB_MAX_LENGTH as u64 * 128;
        use rand::rngs::SmallRng;
        use rand::Rng;
        use rand::SeedableRng;
        let mut small_rng = SmallRng::seed_from_u64(12345);

        let initial_content: Vec<u8> = Vec::new();
        let last_known_digest_file_size = initial_content.len();
        let mut buffers = Vec::new();
        for _ in 0..3 {
            let storage = Arc::new(InMemoryTreeStorage::empty());
            let last_known_reference = storage
                .store_tree(&HashedTree::from(Arc::new(Tree::new(
                    TreeBlob::empty(),
                    TreeChildren::empty(),
                ))))
                .await
                .unwrap();
            buffers.push(BufferState::new(
                storage,
                OpenFileContentBuffer::from_data(
                    initial_content.clone(),
                    last_known_reference,
                    last_known_digest_file_size as u64,
                    test.write_buffer_in_blocks as usize,
                )
                .unwrap(),
            ));
        }

        for operation in test.operations {
            // buffers[2] is recreated from storage before every operation.
            {
                let storage = buffers[2].storage.clone();
                buffers[2].buffer.store_all(storage).await.unwrap();
                let (_digest, size, reference) = buffers[2].buffer.last_known_digest();
                buffers[2].buffer = OpenFileContentBuffer::from_storage(
                    reference,
                    size,
                    test.write_buffer_in_blocks as usize,
                );
            }

            info!("{:?}", &operation);
            match &operation {
                FileOperation::Write { position, data } => {
                    let end_of_write = (*position as u64)
                        .checked_add(data.len() as u64)
                        .expect("Cannot overflow");
                    if end_of_write > max_tested_file_size {
                        return false;
                    }
                    let data = bytes::Bytes::copy_from_slice(&data[..]);
                    let position = *position as u64;
                    write_to_all_buffers(&mut buffers, position, &data).await;
                }
                FileOperation::WriteRandomData { position, size } => {
                    let end_of_write = (*position as u64)
                        .checked_add(*size as u64)
                        .expect("Cannot overflow");
                    if end_of_write > max_tested_file_size {
                        return false;
                    }
                    let data = bytes::Bytes::from_iter((0..*size).map(|_| small_rng.gen()));
                    let position = *position as u64;
                    write_to_all_buffers(&mut buffers, position, &data).await;
                }
                FileOperation::Nothing => {}
                FileOperation::WriteWholeBlockOfRandomData { block_index } => {
                    if ((*block_index as u64 + 1) * TREE_BLOB_MAX_LENGTH as u64)
                        > max_tested_file_size
                    {
                        return false;
                    }
                    let data =
                        bytes::Bytes::from_iter((0..TREE_BLOB_MAX_LENGTH).map(|_| small_rng.gen()));
                    let position = *block_index as u64 * TREE_BLOB_MAX_LENGTH as u64;
                    write_to_all_buffers(&mut buffers, position, &data).await;
                }
                FileOperation::CopyBlock {
                    from_block_index,
                    to_block_index,
                } => {
                    if ((*from_block_index as u64 + 1) * TREE_BLOB_MAX_LENGTH as u64)
                        > max_tested_file_size
                    {
                        return false;
                    }
                    if ((*to_block_index as u64 + 1) * TREE_BLOB_MAX_LENGTH as u64)
                        > max_tested_file_size
                    {
                        return false;
                    }
                    let read_position = *from_block_index as u64 * TREE_BLOB_MAX_LENGTH as u64;
                    let maybe_data =
                        read_from_all_buffers(&mut buffers, read_position, TREE_BLOB_MAX_LENGTH)
                            .await;
                    if let Some(data) = maybe_data {
                        let write_position = *to_block_index as u64 * TREE_BLOB_MAX_LENGTH as u64;
                        write_to_all_buffers(&mut buffers, write_position, &data).await;
                    }
                }
                FileOperation::SaveToStorage => {
                    save_all_buffers(&mut buffers).await;
                }
            }

            // nothing special happens with buffers[0].

            // buffers[1] is forced into the storage after every operation.
            {
                let storage = buffers[1].storage.clone();
                buffers[1].buffer.store_all(storage).await.unwrap();
            }

            compare_buffers(&mut buffers).await;
        }

        save_all_buffers(&mut buffers).await;
        compare_buffers(&mut buffers).await;
        true
    })
}

pub fn fuzz_function(data: &[u8]) -> bool {
    let generated_test = match postcard::take_from_bytes(data) {
        Ok((parsed, rest)) => {
            if rest.is_empty() {
                parsed
            } else {
                return false;
            }
        }
        Err(_) => return false,
    };
    info!("{:?}", &generated_test);
    run_generated_test(generated_test)
}

#[test]
fn crash_0() {
    assert!(fuzz_function(&[3, 2, 3, 16, 3, 63, 7]));
}
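
The crash_0 regression test above replays a raw postcard-encoded input that previously triggered a failure. For context only (this is not part of the coverage report), here is a minimal sketch of how such a target is typically driven with cargo-fuzz and libfuzzer-sys; the crate and module path fuzz_functions::write_read_large_files is inferred from the source file path above, and the actual harness wiring in this repository may differ.

// Hypothetical cargo-fuzz entry point, e.g. fuzz/fuzz_targets/write_read_large_files.rs.
// The module path below is an assumption based on the file location shown above.
#![no_main]
use libfuzzer_sys::fuzz_target;

fuzz_target!(|data: &[u8]| {
    // fuzz_function returns false for inputs that do not decode into a complete
    // GeneratedTest (or whose operations fall outside the tested file size);
    // such inputs are simply skipped.
    let _ = fuzz_functions::write_read_large_files::fuzz_function(data);
});

A new reproducer in the style of crash_0 can be derived from a concrete GeneratedTest by serializing it with postcard, the same format that postcard::take_from_bytes reads back. A sketch, written as if it lived in this module so the private types are in scope:

// Hypothetical helper: encode a GeneratedTest into the byte form that fuzz_function parses.
fn encode_test_sketch() -> Vec<u8> {
    let test = GeneratedTest {
        operations: vec![
            FileOperation::WriteWholeBlockOfRandomData { block_index: 0 },
            FileOperation::SaveToStorage,
        ],
        write_buffer_in_blocks: 1,
    };
    // postcard::to_allocvec produces the compact wire format expected by the fuzz target.
    postcard::to_allocvec(&test).unwrap()
}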