mattwparas / steel / 18461079395

13 Oct 2025 09:20AM UTC coverage: 42.731% (-0.9%) from 43.668%

Pull Request #536: Initial proposal for no_std support
Merge 6f55a8b56 into e378cba22 (github / web-flow)

63 of 755 new or added lines in 38 files covered. (8.34%)

73 existing lines in 15 files now uncovered.

12324 of 28841 relevant lines covered (42.73%)

3215759.81 hits per line

Source File: /crates/steel-core/src/values/closed.rs (39.02% covered)
1
use std::{cell::RefCell, collections::HashSet};
2

3
#[cfg(feature = "sync")]
4
use std::{sync::Arc, thread::JoinHandle};
5

6
#[cfg(feature = "sync")]
7
use std::sync::Mutex;
8

9
#[cfg(feature = "sync")]
10
use crate::rvals::cycles::BreadthFirstSearchSteelValReferenceVisitor2;
11

12
#[cfg(feature = "sync")]
13
use crate::rvals::SteelValPointer;
14

15
use crate::{
16
    compiler::map::SymbolMap,
17
    gc::{
18
        shared::{MutContainer, ShareableMut, StandardShared, StandardSharedMut, WeakShared},
19
        GcMut,
20
    },
21
    rvals::{
22
        AsRefSteelVal, Custom, IntoSteelVal, OpaqueIterator, RestArgsIter, SteelComplex,
23
        SteelVector,
24
    },
25
    steel_vm::vm::{Continuation, ContinuationMark, Synchronizer, VmCore},
26
    values::lists::List,
27
    SteelErr,
28
};
29

30
#[cfg(feature = "sync")]
31
use crate::steel_vm::vm::apply;
32

33
#[cfg(feature = "sync")]
34
use crossbeam_channel::{Receiver, Sender};
35

36
use num_bigint::BigInt;
37
use num_rational::{BigRational, Rational32};
38

39
#[cfg(feature = "sync")]
40
use once_cell::sync::Lazy;
41

42
use steel_gen::OpCode;
43

44
use crate::{
45
    gc::{unsafe_erased_pointers::OpaqueReference, Gc},
46
    rvals::{
47
        cycles::BreadthFirstSearchSteelValVisitor, BoxedAsyncFunctionSignature, CustomType,
48
        FunctionSignature, FutureResult, MutFunctionSignature, SteelHashMap, SteelHashSet,
49
        SteelString, Syntax,
50
    },
51
    steel_vm::vm::BuiltInSignature,
52
    values::functions::ByteCodeLambda,
53
    SteelVal,
54
};
55

56
use super::{
57
    functions::BoxedDynFunction,
58
    lazy_stream::LazyStream,
59
    port::SteelPort,
60
    structs::UserDefinedStruct,
61
    transducers::{Reducer, Transducer},
62
};
63

64
#[cfg(feature = "sync")]
65
use super::lists::Pair;
66

67
#[cfg(feature = "sync")]
68
use super::Vector;
69

70
#[derive(Default)]
71
pub struct GlobalSlotRecycler {
72
    // Use a hashset to check for free slots.
73
    // The idea here is that a collection will traverse
74
    // all active values (excluding roots with this index)
75
    // and we'll check to make sure these are reachable.
76
    //
77
    // If the values are eventually reachable, then we can keep
78
    // iterating until this is completely drained.
79
    //
80
    // If we reach the end of our iteration and this isn't
81
    // drained, whatever is left is now freeable, and we can mark
82
    // it as free in the symbol map
83
    slots: HashSet<usize>,
84

85
    queue: Vec<SteelVal>,
86
}
87

88
impl GlobalSlotRecycler {
89
    pub fn free_shadowed_rooted_values(
223✔
90
        roots: &mut [SteelVal],
91
        symbol_map: &mut SymbolMap,
92
        heap: &mut Heap,
93
    ) {
94
        let mut recycler = GlobalSlotRecycler::default();
446✔
95

96
        recycler.recycle(roots, symbol_map, heap);
1,115✔
97
    }
98

99
    // TODO:
100
    // Take the global roots, without the shadowed values, and iterate over them,
101
    // push the values back, visit, mark visited, move on.
102
    pub fn recycle(&mut self, roots: &mut [SteelVal], symbol_map: &mut SymbolMap, heap: &mut Heap) {
223✔
103
        self.slots.clear();
446✔
104

105
        // TODO: Right now, after one pass, we'll ignore it forever.
106
        // we should move it to another stage that we check later.
107
        for slot in symbol_map
33,098✔
108
            .free_list
223✔
109
            .shadowed_slots
223✔
110
            .drain(..)
446✔
111
            .chain(symbol_map.free_list.lambda_lifted.drain(..))
892✔
112
        {
113
            self.slots.insert(slot);
114
        }
115

116
        for (index, root) in roots.iter().enumerate() {
305,356✔
117
            if !self.slots.contains(&index) {
281,071✔
118
                self.push_back(root.clone());
281,071✔
119
            }
120
        }
121

122
        // Mark all unreachable for the purposes of the global
123
        // collection.
124
        heap.memory_free_list.mark_all_unreachable();
446✔
125
        heap.vector_free_list.mark_all_unreachable();
446✔
126

127
        // Actually walk the tree, looking for unreachable stuff
128
        self.visit();
446✔
129

130
        heap.memory_free_list.recount();
446✔
131
        heap.vector_free_list.recount();
446✔
132

133
        // TODO: Check this stuff!
134
        // put them back as unreachable
135

136
        // heap.memory.iter().for_each(|x| x.write().reset());
137
        // heap.vectors.iter().for_each(|x| x.write().reset());
138

139
        // Anything that is still remaining will require
140
        // getting added to the free list that is left.
141
        for index in self.slots.drain() {
33,321✔
142
            if index < roots.len() {
23,616✔
143
                symbol_map.free_list.free_list.push(index);
94,464✔
144
                roots[index] = SteelVal::Void;
23,616✔
145
            }
146
        }
147
    }
148
}
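
For illustration, here is a standalone sketch of the slot-recycling idea implemented above (hypothetical simplified types, std-only, not the crate's API): seed a set with the candidate slots, walk everything reachable from the remaining roots and remove any slot that is still referenced, and whatever survives the walk is safe to hand back to the free list.

use std::collections::HashSet;

// A toy "global value": it may reference other globals by slot index.
#[derive(Clone)]
enum ToyVal {
    Leaf,
    RefsSlots(Vec<usize>),
}

// Candidate slots start as "maybe free"; any slot touched while visiting
// reachable values is rescued. Whatever remains after the walk is freeable.
fn freeable_slots(roots: &[ToyVal], candidates: &[usize]) -> HashSet<usize> {
    let mut slots: HashSet<usize> = candidates.iter().copied().collect();
    let mut queue: Vec<ToyVal> = Vec::new();

    // Everything except the candidate slots themselves acts as a root.
    for (index, root) in roots.iter().enumerate() {
        if !slots.contains(&index) {
            queue.push(root.clone());
        }
    }

    // Worklist walk: any reference to a candidate slot keeps it alive.
    while let Some(value) = queue.pop() {
        if slots.is_empty() {
            break;
        }
        if let ToyVal::RefsSlots(refs) = value {
            for slot in refs {
                slots.remove(&slot);
            }
        }
    }

    slots
}

fn main() {
    // Slot 1 is shadowed but still referenced by slot 0; slot 2 is not.
    let roots = vec![ToyVal::RefsSlots(vec![1]), ToyVal::Leaf, ToyVal::Leaf];
    let freeable = freeable_slots(&roots, &[1, 2]);
    assert!(!freeable.contains(&1));
    assert!(freeable.contains(&2));
}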
149

150
impl BreadthFirstSearchSteelValVisitor for GlobalSlotRecycler {
151
    type Output = ();
152

153
    fn default_output(&mut self) -> Self::Output {}
×
154

155
    fn pop_front(&mut self) -> Option<SteelVal> {
28,858✔
156
        self.queue.pop()
57,716✔
157
    }
158

159
    fn visit(&mut self) -> Self::Output {
223✔
160
        use SteelVal::*;
161

162
        while let Some(value) = self.pop_front() {
57,493✔
163
            if self.slots.is_empty() {
164
                return;
×
165
            }
166

167
            match value {
168
                Closure(c) => self.visit_closure(c),
14,209✔
169
                BoolV(b) => self.visit_bool(b),
×
170
                NumV(n) => self.visit_float(n),
×
171
                IntV(i) => self.visit_int(i),
×
172
                Rational(x) => self.visit_rational(x),
×
173
                BigRational(x) => self.visit_bigrational(x),
×
174
                BigNum(b) => self.visit_bignum(b),
×
175
                Complex(x) => self.visit_complex(x),
×
176
                CharV(c) => self.visit_char(c),
×
177
                VectorV(v) => self.visit_immutable_vector(v),
×
178
                Void => self.visit_void(),
×
179
                StringV(s) => self.visit_string(s),
×
180
                FuncV(f) => self.visit_function_pointer(f),
×
181
                SymbolV(s) => self.visit_symbol(s),
×
182
                SteelVal::Custom(c) => self.visit_custom_type(c),
38,120✔
183
                HashMapV(h) => self.visit_hash_map(h),
1,768✔
184
                HashSetV(s) => self.visit_hash_set(s),
104✔
185
                CustomStruct(c) => self.visit_steel_struct(c),
6,448✔
186
                PortV(p) => self.visit_port(p),
312✔
187
                IterV(t) => self.visit_transducer(t),
×
188
                ReducerV(r) => self.visit_reducer(r),
×
189
                FutureFunc(f) => self.visit_future_function(f),
×
190
                FutureV(f) => self.visit_future(f),
×
191
                StreamV(s) => self.visit_stream(s),
1,784✔
192
                BoxedFunction(b) => self.visit_boxed_function(b),
×
193
                ContinuationFunction(c) => self.visit_continuation(c),
×
194
                ListV(l) => self.visit_list(l),
4,220✔
195
                MutFunc(m) => self.visit_mutable_function(m),
×
196
                BuiltIn(b) => self.visit_builtin_function(b),
×
197
                MutableVector(b) => self.visit_mutable_vector(b),
×
198
                BoxedIterator(b) => self.visit_boxed_iterator(b),
×
199
                SteelVal::SyntaxObject(s) => self.visit_syntax_object(s),
×
200
                Boxed(b) => self.visit_boxed_value(b),
×
201
                Reference(r) => self.visit_reference_value(r),
×
202
                HeapAllocated(b) => self.visit_heap_allocated(b),
4,948✔
203
                Pair(p) => self.visit_pair(p),
×
204
                ByteVector(b) => self.visit_bytevector(b),
×
205
            };
206
        }
207
    }
208

209
    fn push_back(&mut self, value: SteelVal) {
296,549✔
210
        // TODO: Determine if all numbers should push back.
211
        match &value {
296,549✔
212
            SteelVal::BoolV(_)
213
            | SteelVal::NumV(_)
214
            | SteelVal::IntV(_)
215
            | SteelVal::CharV(_)
216
            | SteelVal::Void
217
            | SteelVal::StringV(_)
218
            | SteelVal::FuncV(_)
219
            | SteelVal::SymbolV(_)
220
            | SteelVal::FutureFunc(_)
221
            | SteelVal::FutureV(_)
222
            | SteelVal::BoxedFunction(_)
223
            | SteelVal::MutFunc(_)
224
            | SteelVal::BuiltIn(_)
225
            | SteelVal::ByteVector(_)
226
            | SteelVal::BigNum(_) => {}
268,070✔
227
            _ => {
28,479✔
228
                self.queue.push(value);
56,958✔
229
            }
230
        }
231
    }
232

233
    fn visit_bytevector(&mut self, _bytevector: crate::rvals::SteelByteVector) -> Self::Output {}
×
234
    fn visit_bignum(&mut self, _: Gc<BigInt>) -> Self::Output {}
×
235
    fn visit_complex(&mut self, _: Gc<SteelComplex>) -> Self::Output {}
×
236
    fn visit_bool(&mut self, _boolean: bool) -> Self::Output {}
×
237
    fn visit_boxed_function(&mut self, _function: Gc<BoxedDynFunction>) -> Self::Output {}
×
238
    // TODO: Revisit this when the boxed iterator is cleaned up
239
    fn visit_boxed_iterator(&mut self, iterator: GcMut<OpaqueIterator>) -> Self::Output {
×
240
        self.push_back(iterator.read().root.clone());
×
241
    }
242
    fn visit_boxed_value(&mut self, boxed_value: GcMut<SteelVal>) -> Self::Output {
×
243
        self.push_back(boxed_value.read().clone());
×
244
    }
245

246
    fn visit_builtin_function(&mut self, _function: BuiltInSignature) -> Self::Output {}
×
247

248
    fn visit_char(&mut self, _c: char) -> Self::Output {}
×
249
    fn visit_closure(&mut self, closure: Gc<ByteCodeLambda>) -> Self::Output {
14,209✔
250
        // for heap_ref in closure.heap_allocated.borrow().iter() {
251
        // todo!()
252
        // self.mark_heap_reference(&heap_ref.strong_ptr())
253
        // }
254

255
        for capture in closure.captures() {
28,782✔
256
            self.push_back(capture.clone());
257
        }
258

259
        if let Some(contract) = closure.get_contract_information().as_ref() {
14,287✔
260
            self.push_back(contract.clone());
261
        }
262

263
        for instruction in closure.body_exp.iter() {
721,762✔
264
            match instruction.op_code {
265
                // If this instruction touches this global variable,
266
                // then we want to mark it as possibly referenced here.
267
                OpCode::CALLGLOBAL
268
                | OpCode::PUSH
269
                | OpCode::CALLGLOBALTAIL
270
                | OpCode::CALLGLOBALNOARITY
271
                | OpCode::CALLGLOBALTAILNOARITY => {
139,996✔
272
                    self.slots.remove(&(instruction.payload_size.to_usize()));
139,996✔
273
                }
274
                _ => {}
553,348✔
275
            }
276
        }
277
    }
278
    fn visit_continuation(&mut self, continuation: Continuation) -> Self::Output {
×
279
        let continuation = (*continuation.inner.read()).clone();
×
280

281
        match continuation {
×
282
            ContinuationMark::Closed(continuation) => {
×
283
                for value in &continuation.stack {
×
284
                    self.push_back(value.clone());
285
                }
286

287
                for value in &continuation.current_frame.function.captures {
×
288
                    self.push_back(value.clone());
289
                }
290

291
                for frame in &continuation.stack_frames {
×
292
                    for value in &frame.function.captures {
×
293
                        self.push_back(value.clone());
294
                    }
295

296
                    // if let Some(handler) = &frame.handler {
297
                    //     self.push_back((*handler.as_ref()).clone());
298
                    // }
299

300
                    if let Some(handler) =
×
301
                        frame.attachments.as_ref().and_then(|x| x.handler.clone())
×
302
                    {
303
                        self.push_back(handler);
304
                    }
305
                }
306
            }
307

308
            ContinuationMark::Open(continuation) => {
×
309
                for value in &continuation.current_stack_values {
×
310
                    self.push_back(value.clone());
311
                }
312

313
                for value in &continuation.current_frame.function.captures {
×
314
                    self.push_back(value.clone());
315
                }
316
            }
317
        }
318
    }
319
    // TODO: Come back to this
320
    fn visit_custom_type(&mut self, custom_type: GcMut<Box<dyn CustomType>>) -> Self::Output {
9,530✔
321
        let mut queue = MarkAndSweepContext {
322
            queue: &mut self.queue,
9,530✔
323
            stats: MarkAndSweepStats::default(),
9,530✔
324
        };
325

326
        custom_type.read().visit_children(&mut queue);
19,060✔
327
    }
328

329
    fn visit_float(&mut self, _float: f64) -> Self::Output {}
×
330

331
    fn visit_function_pointer(&mut self, _ptr: FunctionSignature) -> Self::Output {}
×
332

333
    fn visit_future(&mut self, _future: Gc<FutureResult>) -> Self::Output {}
×
334

335
    fn visit_future_function(&mut self, _function: BoxedAsyncFunctionSignature) -> Self::Output {}
×
336

337
    fn visit_hash_map(&mut self, hashmap: SteelHashMap) -> Self::Output {
442✔
338
        for (key, value) in hashmap.iter() {
5,434✔
339
            self.push_back(key.clone());
340
            self.push_back(value.clone());
341
        }
342
    }
343

344
    fn visit_hash_set(&mut self, hashset: SteelHashSet) -> Self::Output {
26✔
345
        for value in hashset.iter() {
286✔
346
            self.push_back(value.clone());
347
        }
348
    }
349

350
    fn visit_heap_allocated(&mut self, heap_ref: HeapRef<SteelVal>) -> Self::Output {
1,237✔
351
        let mut queue = MarkAndSweepContext {
352
            queue: &mut self.queue,
1,237✔
353
            stats: MarkAndSweepStats::default(),
1,237✔
354
        };
355

356
        queue.mark_heap_reference(&heap_ref.strong_ptr());
3,711✔
357
    }
358

359
    fn visit_immutable_vector(&mut self, vector: SteelVector) -> Self::Output {
×
360
        for value in vector.iter() {
×
361
            self.push_back(value.clone());
362
        }
363
    }
364
    fn visit_int(&mut self, _int: isize) -> Self::Output {}
×
365
    fn visit_rational(&mut self, _: Rational32) -> Self::Output {}
×
366
    fn visit_bigrational(&mut self, _: Gc<BigRational>) -> Self::Output {}
×
367

368
    fn visit_list(&mut self, list: List<SteelVal>) -> Self::Output {
1,055✔
369
        for value in list {
3,031✔
370
            self.push_back(value);
371
        }
372
    }
373

374
    fn visit_mutable_function(&mut self, _function: MutFunctionSignature) -> Self::Output {}
×
375

376
    fn visit_mutable_vector(&mut self, vector: HeapRef<Vec<SteelVal>>) -> Self::Output {
×
377
        let mut queue = MarkAndSweepContext {
378
            queue: &mut self.queue,
×
379
            stats: MarkAndSweepStats::default(),
×
380
        };
381

382
        queue.mark_heap_vector(&vector.strong_ptr())
×
383
    }
384

385
    fn visit_port(&mut self, _port: SteelPort) -> Self::Output {}
156✔
386

387
    fn visit_reducer(&mut self, reducer: Gc<Reducer>) -> Self::Output {
×
388
        match reducer.as_ref().clone() {
×
389
            Reducer::ForEach(f) => self.push_back(f),
×
390
            Reducer::Generic(rf) => {
×
391
                self.push_back(rf.initial_value);
×
392
                self.push_back(rf.function);
×
393
            }
394
            _ => {}
×
395
        }
396
    }
397

398
    // TODO: Revisit this
399
    fn visit_reference_value(&mut self, _reference: Gc<OpaqueReference<'static>>) -> Self::Output {}
×
400

401
    fn visit_steel_struct(&mut self, steel_struct: Gc<UserDefinedStruct>) -> Self::Output {
1,612✔
402
        for field in steel_struct.fields.iter() {
7,046✔
403
            self.push_back(field.clone());
404
        }
405
    }
406

407
    fn visit_stream(&mut self, stream: Gc<LazyStream>) -> Self::Output {
446✔
408
        self.push_back(stream.initial_value.clone());
1,784✔
409
        self.push_back(stream.stream_thunk.clone());
1,784✔
410
    }
411

412
    fn visit_string(&mut self, _string: SteelString) -> Self::Output {}
×
413

414
    fn visit_symbol(&mut self, _symbol: SteelString) -> Self::Output {}
×
415

416
    fn visit_syntax_object(&mut self, syntax_object: Gc<Syntax>) -> Self::Output {
×
417
        if let Some(raw) = syntax_object.raw.clone() {
×
418
            self.push_back(raw);
419
        }
420

421
        self.push_back(syntax_object.syntax.clone());
×
422
    }
423

424
    fn visit_transducer(&mut self, transducer: Gc<Transducer>) -> Self::Output {
×
425
        for transducer in transducer.ops.iter() {
×
426
            match transducer.clone() {
427
                crate::values::transducers::Transducers::Map(m) => self.push_back(m),
×
428
                crate::values::transducers::Transducers::Filter(v) => self.push_back(v),
×
429
                crate::values::transducers::Transducers::Take(t) => self.push_back(t),
×
430
                crate::values::transducers::Transducers::Drop(d) => self.push_back(d),
×
431
                crate::values::transducers::Transducers::FlatMap(fm) => self.push_back(fm),
×
432
                crate::values::transducers::Transducers::Flatten => {}
×
433
                crate::values::transducers::Transducers::Window(w) => self.push_back(w),
×
434
                crate::values::transducers::Transducers::TakeWhile(tw) => self.push_back(tw),
×
435
                crate::values::transducers::Transducers::DropWhile(dw) => self.push_back(dw),
×
436
                crate::values::transducers::Transducers::Extend(e) => self.push_back(e),
×
437
                crate::values::transducers::Transducers::Cycle => {}
×
438
                crate::values::transducers::Transducers::Enumerating => {}
×
439
                crate::values::transducers::Transducers::Zipping(z) => self.push_back(z),
×
440
                crate::values::transducers::Transducers::Interleaving(i) => self.push_back(i),
×
441
            }
442
        }
443
    }
444

445
    fn visit_void(&mut self) -> Self::Output {}
×
446

447
    fn visit_pair(&mut self, pair: Gc<super::lists::Pair>) -> Self::Output {
×
448
        self.push_back(pair.car());
×
449
        self.push_back(pair.cdr());
×
450
    }
451
}
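
The visitor above follows a familiar worklist shape: pop a value, dispatch on its variant, and enqueue only the children that can themselves contain references. A minimal std-only sketch of that pattern over a toy value type (hypothetical, not the crate's types), including the push_back filter that keeps atoms off the queue:

use std::collections::VecDeque;

enum Node {
    Int(i64),
    Text(String),
    List(Vec<Node>),
}

struct Visitor {
    queue: VecDeque<Node>,
    ints_seen: usize,
}

impl Visitor {
    // Atoms are counted immediately and never enqueued; only aggregates
    // go on the worklist, mirroring the push_back filter above.
    fn push_back(&mut self, node: Node) {
        match node {
            Node::Int(_) => self.ints_seen += 1,
            Node::Text(_) => {}
            Node::List(_) => self.queue.push_back(node),
        }
    }

    fn visit(&mut self) {
        while let Some(node) = self.queue.pop_front() {
            if let Node::List(children) = node {
                for child in children {
                    self.push_back(child);
                }
            }
        }
    }
}

fn main() {
    let tree = Node::List(vec![
        Node::Int(1),
        Node::List(vec![Node::Int(2), Node::Text("hi".into())]),
    ]);
    let mut visitor = Visitor { queue: VecDeque::new(), ints_seen: 0 };
    visitor.push_back(tree);
    visitor.visit();
    assert_eq!(visitor.ints_seen, 2);
}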
452

453
const GC_THRESHOLD: usize = 256 * 1000;
454
const GC_GROW_FACTOR: usize = 2;
455
const RESET_LIMIT: usize = 9;
456

457
// TODO: Do these roots need to be truly global?
458
// Replace this with a lazy static
459
thread_local! {
460
    static ROOTS: RefCell<Roots> = RefCell::new(Roots::default());
461
}
462

463
// stash roots in the global area
464
#[cfg(feature = "sync")]
465
static GLOBAL_ROOTS: Lazy<Mutex<Roots>> = Lazy::new(|| Mutex::new(Roots::default()));
2✔
466

467
#[derive(Default)]
468
pub struct Roots {
469
    generation: usize,
470
    offset: usize,
471
    roots: fxhash::FxHashMap<(usize, usize), SteelVal>,
472
}
473

474
#[derive(Debug, PartialEq, Eq, Hash)]
475
pub struct RootToken {
476
    generation: usize,
477
    offset: usize,
478
}
479

480
impl Drop for RootToken {
481
    #[cfg(not(feature = "sync"))]
482
    fn drop(&mut self) {
483
        ROOTS.with(|x| x.borrow_mut().free(self))
484
    }
485

486
    #[cfg(feature = "sync")]
487
    fn drop(&mut self) {
×
488
        GLOBAL_ROOTS.lock().unwrap().free(self)
×
489
    }
490
}
491

492
#[derive(Debug)]
493
pub struct RootedSteelVal {
494
    value: SteelVal,
495
    token: RootToken,
496
}
497

498
impl RootedSteelVal {
499
    pub fn value(&self) -> &SteelVal {
×
500
        &self.value
×
501
    }
502
}
503

504
impl Roots {
505
    fn root(&mut self, value: SteelVal) -> RootToken {
×
506
        let generation = self.generation;
×
507
        let offset = self.offset;
×
508

509
        self.offset += 1;
×
510

511
        self.roots.insert((generation, offset), value);
×
512

513
        RootToken { generation, offset }
514
    }
515

516
    fn free(&mut self, token: &RootToken) {
×
517
        self.roots.remove(&(token.generation, token.offset));
×
518
    }
519

520
    fn increment_generation(&mut self) {
29✔
521
        self.generation += 1;
29✔
522
    }
523
}
524

525
impl SteelVal {
526
    pub fn mark_rooted(&self) -> RootToken {
×
527
        #[cfg(feature = "sync")]
528
        {
529
            GLOBAL_ROOTS.lock().unwrap().root(self.clone())
×
530
        }
531

532
        #[cfg(not(feature = "sync"))]
533
        {
534
            ROOTS.with(|x| x.borrow_mut().root(self.clone()))
535
        }
536
    }
537

538
    // If we're storing in an external struct that could escape
539
    // the runtime, we probably want to be marked as rooted
540
    pub fn as_rooted(&self) -> RootedSteelVal {
×
541
        let token = self.mark_rooted();
×
542

543
        RootedSteelVal {
544
            token,
545
            value: self.clone(),
×
546
        }
547
    }
548
}
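
A self-contained sketch (std-only, hypothetical names; String stands in for SteelVal) of the rooting scheme above: rooting stores the value under a (generation, offset) key and returns a token, and dropping the token unroots the value, which is how a rooted handle keeps a value alive only for as long as the handle exists.

use std::cell::RefCell;
use std::collections::HashMap;

thread_local! {
    static ROOTS: RefCell<RootSet> = RefCell::new(RootSet::default());
}

#[derive(Default)]
struct RootSet {
    generation: usize,
    offset: usize,
    roots: HashMap<(usize, usize), String>, // stand-in for SteelVal
}

struct Token {
    key: (usize, usize),
}

impl Drop for Token {
    // Dropping the token unroots the value automatically.
    fn drop(&mut self) {
        ROOTS.with(|r| {
            r.borrow_mut().roots.remove(&self.key);
        });
    }
}

fn root(value: String) -> Token {
    ROOTS.with(|r| {
        let mut set = r.borrow_mut();
        let key = (set.generation, set.offset);
        set.offset += 1;
        set.roots.insert(key, value);
        Token { key }
    })
}

fn live_roots() -> usize {
    ROOTS.with(|r| r.borrow().roots.len())
}

fn main() {
    let token = root("escaping value".to_string());
    assert_eq!(live_roots(), 1);
    drop(token); // unroots
    assert_eq!(live_roots(), 0);
}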
549

550
type HeapValue = StandardSharedMut<HeapAllocated<SteelVal>>;
551
type HeapVector = StandardSharedMut<HeapAllocated<Vec<SteelVal>>>;
552

553
type HeapElement<T> = StandardSharedMut<HeapAllocated<T>>;
554

555
#[cfg(feature = "sync")]
556
static MARKER: std::sync::LazyLock<ParallelMarker> = std::sync::LazyLock::new(ParallelMarker::new);
557

558
#[cfg(feature = "sync")]
559
pub struct WillExecutor {
560
    // Determining if these are reachable should be either:
561
    // * Value is a weak reference: then you use weak references
562
    // * Value is contained within _other_ will executors, so the will executors need to have references to
563
    //   each other to check if their value is contained within other ones.
564
    values: Mutex<Vec<Option<Pair>>>,
565

566
    // The incoming queue. We'll drain on this when the reified buffer of values
567
    // is empty since we need to scan the values to find the right one.
568
    incoming: Receiver<Pair>,
569

570
    sender: Sender<Pair>,
571
}
572

573
#[cfg(not(feature = "sync"))]
574
pub struct WillExecutor {}
575

576
#[cfg(feature = "sync")]
577
impl WillExecutor {
578
    fn block_until_incoming(&self) {
×
579
        log::debug!(target: "will", "Blocking until something shows in the queue");
×
580
        if let Ok(next) = self.incoming.recv() {
×
581
            self.values.lock().unwrap().push(Some(next));
582
        }
583
    }
584

585
    // Returns the pair of values next to be used. Not the best data structure choice.
586
    fn find_next(&self) -> Option<Pair> {
×
587
        log::debug!(target: "will", "Finding the next value in the will executor list");
×
588
        // Proactively check if the value is ready to be received.
589
        // TODO: Merge the behavior below and above since we can just return this value
590
        // immediately without putting it onto the queue.
591
        let top_guard = self.values.lock().unwrap();
×
592
        if top_guard.is_empty() && self.incoming.is_empty() {
×
593
            log::debug!(target: "will", "Initial values list is empty. Waiting on the queue.");
×
594
            drop(top_guard);
×
595

596
            // Block until the next thing is pushed on
597
            if let Ok(next) = self.incoming.recv() {
×
598
                log::debug!(target: "will", "Found a value off the queue, trying to lock the values.");
×
599

600
                self.values.lock().unwrap().push(Some(next));
601
            }
602
        } else {
603
            drop(top_guard);
×
604
        }
605

606
        log::debug!(target: "will", "Done optimistically checking off the queue. Locking the values.");
×
607

608
        let mut guard = self.values.lock().unwrap();
×
609

610
        // Drain the incoming, push them into the values:
611

612
        while let Ok(incoming) = self.incoming.try_recv() {
×
613
            // TODO: Use a free list here instead? That way we can
614
            // re-use the space provided from earlier? Run some compaction
615
            // every once in a while?
616
            guard.push(Some(incoming));
617
        }
618

619
        log::debug!(target: "will", "Iterating over the values to drop");
×
620

621
        // TODO:
622
        // Periodically run a compaction on this
623
        for pair_slot in guard.iter_mut() {
×
624
            if let Some(value) = pair_slot {
×
625
                let is_sole_reference = match value.car_ref() {
626
                    SteelVal::Closure(gc) => Gc::strong_count(gc) == 1,
×
627
                    SteelVal::VectorV(steel_vector) => Gc::strong_count(&steel_vector.0) == 1,
×
628
                    SteelVal::StringV(steel_string) => Gc::strong_count(&steel_string.0) == 1,
×
629
                    SteelVal::SymbolV(steel_string) => Gc::strong_count(&steel_string.0) == 1,
×
630
                    SteelVal::Custom(gc) => Gc::strong_count(gc) == 1,
×
631
                    SteelVal::HashMapV(steel_hash_map) => Gc::strong_count(&steel_hash_map.0) == 1,
×
632
                    SteelVal::HashSetV(steel_hash_set) => Gc::strong_count(&steel_hash_set.0) == 1,
×
633
                    SteelVal::CustomStruct(gc) => Gc::strong_count(gc) == 1,
×
634
                    SteelVal::PortV(steel_port) => Gc::strong_count(&steel_port.port) == 1,
×
635
                    SteelVal::IterV(gc) => Gc::strong_count(gc) == 1,
×
636
                    SteelVal::ReducerV(gc) => Gc::strong_count(gc) == 1,
×
637
                    SteelVal::FutureV(gc) => Gc::strong_count(gc) == 1,
×
638
                    SteelVal::StreamV(gc) => Gc::strong_count(gc) == 1,
×
639
                    SteelVal::BoxedFunction(gc) => Gc::strong_count(gc) == 1,
×
640
                    SteelVal::ContinuationFunction(continuation) => {
×
641
                        StandardShared::strong_count(&continuation.inner) == 1
×
642
                    }
643
                    SteelVal::ListV(generic_list) => generic_list.strong_count() == 1,
×
644
                    SteelVal::Pair(gc) => Gc::strong_count(gc) == 1,
×
645

646
                    // In theory, if this is a weak value, this can be the thing to check
647
                    // the values.
648
                    SteelVal::MutableVector(heap_ref) => {
×
649
                        WeakShared::weak_count(&heap_ref.inner) == 1
×
650
                            || !heap_ref.inner.upgrade().unwrap().read().is_reachable()
×
651
                    }
652
                    SteelVal::BoxedIterator(gc) => Gc::strong_count(gc) == 1,
×
653
                    SteelVal::SyntaxObject(gc) => Gc::strong_count(gc) == 1,
×
654
                    SteelVal::Boxed(gc) => Gc::strong_count(gc) == 1,
×
655
                    SteelVal::HeapAllocated(heap_ref) => {
×
656
                        WeakShared::weak_count(&heap_ref.inner) == 1
×
657
                            || !heap_ref.inner.upgrade().unwrap().read().is_reachable()
×
658
                    }
659
                    SteelVal::Reference(gc) => Gc::strong_count(gc) == 1,
×
660
                    SteelVal::BigNum(gc) => Gc::strong_count(gc) == 1,
×
661
                    SteelVal::BigRational(gc) => Gc::strong_count(gc) == 1,
×
662
                    SteelVal::Complex(gc) => Gc::strong_count(gc) == 1,
×
663
                    SteelVal::ByteVector(steel_byte_vector) => {
×
664
                        Gc::strong_count(&steel_byte_vector.vec) == 1
×
665
                    }
666
                    _ => false,
×
667
                };
668

669
                if is_sole_reference {
670
                    let value = std::mem::take(pair_slot).unwrap();
×
671
                    return Some(value);
672
                }
673
            }
674
        }
675

676
        // Compact the values
677
        guard.retain(|x| x.is_some());
×
678

679
        None
680
    }
681

682
    pub fn register(&self, key: SteelVal, func: SteelVal) -> Result<(), SteelErr> {
×
683
        log::debug!(target: "will", "Registering: {}", key);
×
684
        if !func.is_function() {
×
685
            stop!(TypeMismatch => "will-register expects a function, found: {}", func)
×
686
        }
687

688
        self.sender
×
689
            .send(Pair {
×
690
                car: key,
×
691
                cdr: func,
×
692
            })
693
            .unwrap();
694

695
        Ok(())
×
696
    }
697
}
698

699
impl Custom for WillExecutor {}
700

701
#[cfg(feature = "sync")]
702
#[steel_derive::function(name = "make-will-executor")]
703
pub fn make_will_executor() -> Result<SteelVal, SteelErr> {
223✔
704
    let (sender, incoming) = crossbeam_channel::unbounded();
669✔
705
    WillExecutor {
706
        values: Mutex::new(Vec::new()),
669✔
707
        sender,
708
        incoming,
709
    }
710
    .into_steelval()
711
}
712

713
#[cfg(not(feature = "sync"))]
714
#[steel_derive::function(name = "make-will-executor")]
715
pub fn make_will_executor() -> Result<SteelVal, SteelErr> {
716
    WillExecutor {}.into_steelval()
717
}
718

719
#[cfg(feature = "sync")]
720
#[steel_derive::context(name = "will-execute", arity = "Exact(1)")]
721
pub fn will_execute(ctx: &mut VmCore, args: &[SteelVal]) -> Option<Result<SteelVal, SteelErr>> {
722
    let executor = WillExecutor::as_ref(&args[0]).unwrap();
723

724
    // TODO: Make this find the next thing? Perhaps just have this be a channel
725
    // or something that waits until the next thing is found?
726
    // How to do this _with_ blocking? Maybe have a queue for values that are being registered,
727
    // and then otherwise drain the vector and put them in when found.
728
    if let SteelVal::Pair(pair) = ctx
729
        .thread
730
        .enter_safepoint(|_| loop {
731
            if let Some(value) = executor.find_next() {
732
                return Ok(SteelVal::Pair(Gc::new(value)));
733
            } else {
734
                // Control frequency here, but for when there is no throughput,
735
                // we want to at least poll a bit
736
                executor.block_until_incoming();
737
            }
738
        })
739
        .unwrap()
740
    {
741
        let Pair { car, cdr } = pair.try_unwrap().unwrap();
742
        apply(ctx, &[cdr, SteelVal::ListV(vec![car].into())])
743
    } else {
744
        unreachable!()
745
    }
746
}
747

748
#[cfg(not(feature = "sync"))]
749
#[steel_derive::context(name = "will-execute", arity = "Exact(1)")]
750
pub fn will_execute(_ctx: &mut VmCore, args: &[SteelVal]) -> Option<Result<SteelVal, SteelErr>> {
751
    let _ = WillExecutor::as_ref(&args[0]).unwrap();
752
    Some(Ok(SteelVal::Void))
753
}
754

755
#[cfg(feature = "sync")]
756
#[steel_derive::function(name = "will-register")]
757
pub fn will_register(
×
758
    will: SteelVal,
759
    value: SteelVal,
760
    func: SteelVal,
761
) -> Result<SteelVal, SteelErr> {
762
    let will = WillExecutor::as_ref(&will)?;
×
763
    will.register(value, func)?;
×
764
    Ok(SteelVal::Void)
×
765
}
766

767
#[cfg(not(feature = "sync"))]
768
#[steel_derive::function(name = "will-register")]
769
pub fn will_register(
770
    _will: SteelVal,
771
    _value: SteelVal,
772
    _func: SteelVal,
773
) -> Result<SteelVal, SteelErr> {
774
    Ok(SteelVal::Void)
775
}
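
The will-executor machinery above is feature-gated and leans on crate-internal types. The following is a minimal, hypothetical sketch of the same finalizer idea using only std, with Arc strong counts standing in for the Gc reference-count checks and a boxed closure standing in for the registered Steel procedure: an entry becomes eligible once the executor itself holds the only remaining strong reference.

use std::sync::Arc;

type Finalizer = Box<dyn FnOnce(&str)>;

struct ToyWillExecutor {
    entries: Vec<Option<(Arc<String>, Finalizer)>>,
}

impl ToyWillExecutor {
    fn new() -> Self {
        Self { entries: Vec::new() }
    }

    fn register(&mut self, value: Arc<String>, finalizer: Finalizer) {
        self.entries.push(Some((value, finalizer)));
    }

    // Find an entry whose value is only kept alive by the executor itself
    // (strong count == 1), take it out of its slot, and run its finalizer.
    fn execute_one(&mut self) -> bool {
        for slot in self.entries.iter_mut() {
            let ready = match slot {
                Some((value, _)) => Arc::strong_count(value) == 1,
                None => false,
            };
            if ready {
                let (value, finalizer) = slot.take().unwrap();
                finalizer(&value);
                return true;
            }
        }
        // Compact slots emptied by earlier calls.
        self.entries.retain(|slot| slot.is_some());
        false
    }
}

fn main() {
    let mut executor = ToyWillExecutor::new();
    let value = Arc::new("resource".to_string());
    executor.register(value.clone(), Box::new(|v| println!("finalizing {v}")));

    assert!(!executor.execute_one()); // still reachable from `value`
    drop(value);                      // executor now holds the last reference
    assert!(executor.execute_one());  // finalizer runs
}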
776

777
// Not considered part of the reachability graph? i.e. we simply don't traverse it.
778
pub struct WeakBox {
779
    value: HeapRef<SteelVal>,
780
}
781

782
impl Custom for WeakBox {}
783

784
/// Allocates a weak box.
785
///
786
/// A weak box is similar to a box, but when the garbage collector can prove
787
/// that the value of a weak box is only reachable through weak references,
788
/// the weak box value will always return #false.
789
///
790
/// In other words, a weak box does not keep the value contained alive through
791
/// a gc collection.
792
#[steel_derive::context(name = "make-weak-box", arity = "Exact(1)")]
793
pub fn make_weak_box(ctx: &mut VmCore, args: &[SteelVal]) -> Option<Result<SteelVal, SteelErr>> {
794
    let value = args[0].clone();
795
    if let SteelVal::HeapAllocated(made_box) = ctx.make_box(value) {
796
        Some(WeakBox { value: made_box }.into_steelval())
797
    } else {
798
        unreachable!()
799
    }
800
}
801

802
/// Returns the value contained in the weak box.
803
/// If the garbage collector has proven that the previous content
804
/// value of weak-box was reachable only through a weak reference,
805
/// then default-value (which defaults to #f) is returned.
806
///
807
/// ```scheme
808
/// (define value (make-weak-box 10))
809
/// (weak-box-value value) ;; => 10
810
/// (set! value #f) ;; Wipe out the previous value
811
/// (#%gc-collect)
812
/// (weak-box-value value) ;; => #false
813
/// ```
814
#[steel_derive::function(name = "weak-box-value")]
815
pub fn weak_box_value(
×
816
    value: &SteelVal,
817
    mut rest: RestArgsIter<&SteelVal>,
818
) -> Result<SteelVal, SteelErr> {
819
    let inner = WeakBox::as_ref(value)?;
×
820

821
    // Check if it's reachable?
822
    if let Some(value) = inner.value.maybe_get_from_weak() {
×
823
        Ok(value)
824
    } else {
825
        Ok(rest
×
826
            .next()
×
827
            .map(|x| x.unwrap().clone())
×
828
            .unwrap_or(SteelVal::BoolV(false)))
×
829
    }
830
}
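
A tiny std-only illustration of the weak-box contract documented above, with Rc/Weak standing in for the heap's shared pointers: the box does not keep its contents alive, and reading it after the last strong reference is gone yields the default.

use std::rc::{Rc, Weak};

struct ToyWeakBox<T> {
    inner: Weak<T>,
}

impl<T: Clone> ToyWeakBox<T> {
    fn new(strong: &Rc<T>) -> Self {
        Self { inner: Rc::downgrade(strong) }
    }

    // Mirrors weak-box-value: return the contents if still alive,
    // otherwise fall back to a default.
    fn value_or(&self, default: T) -> T {
        self.inner.upgrade().map(|rc| (*rc).clone()).unwrap_or(default)
    }
}

fn main() {
    let strong = Rc::new(10);
    let weak_box = ToyWeakBox::new(&strong);
    assert_eq!(weak_box.value_or(-1), 10);

    drop(strong); // the weak box alone does not keep the value alive
    assert_eq!(weak_box.value_or(-1), -1);
}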
831

832
// Have free lists for vectors and values separately. Can keep some of the vectors preallocated
833
// as well, as necessary.
834
//
835
// Also -> implement will / executor functionality for implementing destructors for objects.
836
// This should be _relatively_ straightforward. When allocating a value, send it on the destructor
837
// thread.
838
#[derive(Debug)]
839
struct FreeList<T: HeapAble> {
840
    // TODO: Make this a linked list of vectors. Can reuse im-lists?
841
    // Pointer chasing may not be worth it. The issue is every doubling of size _also_
842
    // allocates a whole bunch more. Can probably just get away with a vec of vecs to avoid
843
    // copying everything.
844
    elements: Vec<HeapElement<T>>,
845
    cursor: usize,
846

847
    // Available count
848
    alloc_count: usize,
849

850
    grow_count: usize,
851

852
    #[cfg(feature = "sync")]
853
    forward: Option<Sender<Vec<HeapElement<T>>>>,
854
    #[cfg(feature = "sync")]
855
    backward: Option<Receiver<Vec<HeapElement<T>>>>,
856
}
857

858
impl<T: HeapAble> Clone for FreeList<T> {
859
    fn clone(&self) -> Self {
788✔
860
        Self {
861
            cursor: self.cursor,
788✔
862
            alloc_count: self.alloc_count,
788✔
863
            grow_count: self.grow_count,
788✔
864
            elements: self
788✔
865
                .elements
866
                .iter()
867
                .map(|x| {
868
                    let guard = x.read();
869
                    let inner = guard.value.clone();
870
                    StandardShared::new(MutContainer::new(HeapAllocated {
871
                        reachable: guard.reachable,
872
                        finalizer: guard.finalizer,
873
                        value: inner,
874
                    }))
875
                })
876
                .collect(),
877
            #[cfg(feature = "sync")]
878
            forward: None,
879
            #[cfg(feature = "sync")]
880
            backward: None,
881
        }
882
    }
883
}
884

885
#[test]
886
fn basic_free_list_usage() {
887
    // Pre allocate some slots
888
    let mut free_list: FreeList<SteelVal> = FreeList::new();
889

890
    let pointers = (0..100)
891
        .into_iter()
892
        .map(|x| free_list.allocate(SteelVal::IntV(x)))
893
        .collect::<Vec<_>>();
894

895
    drop(pointers);
896

897
    free_list.weak_collection();
898

899
    for var in &free_list.elements {
900
        assert!(!var.read().is_reachable());
901
    }
902
}
903

904
#[test]
905
fn free_list_continues_allocating_when_full() {
906
    // Pre allocate some slots
907
    let mut free_list: FreeList<SteelVal> = FreeList::new();
908

909
    let count: usize = 10000;
910

911
    let pointers = (0..count)
912
        .into_iter()
913
        .map(|x| free_list.allocate(SteelVal::IntV(x as isize)))
914
        .collect::<Vec<_>>();
915

916
    for var in &free_list.elements[0..count] {
917
        assert!(var.read().is_reachable());
918
    }
919

920
    for var in &free_list.elements[count..] {
921
        assert!(!var.read().is_reachable());
922
    }
923

924
    drop(pointers);
925

926
    free_list.weak_collection();
927

928
    for var in &free_list.elements {
929
        assert!(!var.read().is_reachable());
930
    }
931
}
932

933
#[test]
934
fn free_list_continues_allocating_in_the_middle() {
935
    // Pre allocate some slots
936
    let mut free_list: FreeList<SteelVal> = FreeList::new();
937

938
    let count: usize = 10000;
939

940
    let mut pointers = (0..count)
941
        .into_iter()
942
        .map(|x| free_list.allocate(SteelVal::IntV(x as isize)))
943
        .collect::<Vec<_>>();
944

945
    for var in &free_list.elements[0..count] {
946
        assert!(var.read().is_reachable());
947
    }
948

949
    for var in &free_list.elements[count..] {
950
        assert!(!var.read().is_reachable());
951
    }
952

953
    let right_half = pointers.split_off(100);
954

955
    drop(pointers);
956

957
    free_list.weak_collection();
958

959
    // Check that the first 100 elements are, in fact, gone
960

961
    for var in &free_list.elements[0..100] {
962
        assert!(!var.read().is_reachable());
963
    }
964

965
    drop(right_half)
966
}
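
The tests above exercise the real FreeList; the following is a compact, hypothetical model of the same technique (Rc/RefCell in place of StandardShared/MutContainer): allocation writes into the slot under the cursor and hands out a Weak pointer, and a weak collection marks every slot whose handles have all been dropped (weak count zero) as free again.

use std::cell::RefCell;
use std::rc::{Rc, Weak};

struct Slot {
    reachable: bool,
    value: i64,
}

struct ToyFreeList {
    elements: Vec<Rc<RefCell<Slot>>>,
    cursor: usize,
}

impl ToyFreeList {
    fn with_capacity(n: usize) -> Self {
        let elements = (0..n)
            .map(|_| Rc::new(RefCell::new(Slot { reachable: false, value: 0 })))
            .collect();
        Self { elements, cursor: 0 }
    }

    // Write into the slot under the cursor, hand back a weak handle,
    // then advance the cursor to the next free slot.
    fn allocate(&mut self, value: i64) -> Weak<RefCell<Slot>> {
        let slot = &self.elements[self.cursor];
        {
            let mut guard = slot.borrow_mut();
            guard.value = value;
            guard.reachable = true;
        }
        let handle = Rc::downgrade(slot);
        self.cursor = self
            .elements
            .iter()
            .position(|s| !s.borrow().reachable)
            .expect("toy heap full");
        handle
    }

    // A slot is dead once every weak handle to it has been dropped.
    fn weak_collection(&mut self) -> usize {
        let mut freed = 0;
        for slot in &self.elements {
            if Rc::weak_count(slot) == 0 && slot.borrow().reachable {
                slot.borrow_mut().reachable = false;
                freed += 1;
            }
        }
        freed
    }
}

fn main() {
    let mut list = ToyFreeList::with_capacity(8);
    let a = list.allocate(1);
    let b = list.allocate(2);
    drop(a);
    assert_eq!(list.weak_collection(), 1); // slot for `a` is reclaimed
    drop(b);
    assert_eq!(list.weak_collection(), 1);
}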
967

968
#[cfg(feature = "sync")]
969
impl<T: HeapAble + Sync + Send + 'static> FreeList<T> {
970
    // TODO: Calculate the overhead!
971
    // How big is this?
972
    const EXTEND_CHUNK: usize = 256 * 100;
973

974
    fn new() -> Self {
19✔
975
        #[cfg(feature = "sync")]
976
        let (forward_sender, backward_receiver) = spawn_background_dropper();
57✔
977

978
        let mut res = FreeList {
979
            elements: Vec::new(),
38✔
980
            cursor: 0,
981
            alloc_count: 0,
982
            grow_count: 0,
983
            #[cfg(feature = "sync")]
984
            forward: Some(forward_sender),
19✔
985
            #[cfg(feature = "sync")]
986
            backward: Some(backward_receiver),
19✔
987
        };
988

989
        res.grow();
38✔
990

991
        res
19✔
992
    }
993

994
    // Update the counts for things
995
    fn recount(&mut self) {
446✔
996
        self.alloc_count = 0;
446✔
997
        for element in &mut self.elements {
22,835,646✔
998
            let guard = element.read();
×
999
            if !guard.is_reachable() {
11,417,418✔
1000
                // Replace with an empty value... for now.
1001
                // Want to keep the memory counts down.
1002
                // let value = std::mem::replace(&mut guard.value, T::empty());
1003
                self.alloc_count += 1;
11,417,418✔
1004
            }
1005
        }
1006
    }
1007

1008
    fn percent_full(&self) -> f64 {
9,232,806✔
1009
        let count = self.elements.len() as f64;
18,465,612✔
1010

1011
        let percent = (count - self.alloc_count as f64) / count;
18,465,612✔
1012

1013
        assert!(percent < 1.01);
18,465,612✔
1014

1015
        percent
9,232,806✔
1016
    }
1017

1018
    fn is_heap_full(&self) -> bool {
32✔
1019
        self.alloc_count == 0
32✔
1020
    }
1021

1022
    fn grow(&mut self) {
48✔
1023
        let now = std::time::Instant::now();
96✔
1024
        // Can probably make this a lot bigger
1025
        let current = self.elements.len().max(Self::EXTEND_CHUNK);
192✔
1026

1027
        self.cursor = self.elements.len();
48✔
1028

1029
        self.elements.reserve(current);
144✔
1030

1031
        log::debug!(target: "gc", "Time to extend the heap vec -> {:?}", now.elapsed());
48✔
1032

1033
        let now = std::time::Instant::now();
96✔
1034

1035
        // Can we pre allocate this somewhere else? Incrementally allocate the values?
1036
        // So basically we can have the elements be allocated vs not, and just have them
1037
        // be snatched on demand?
1038
        self.elements.extend(
96✔
1039
            std::iter::repeat_with(|| {
9,088,048✔
1040
                StandardShared::new(MutContainer::new(HeapAllocated::new(T::empty())))
36,352,000✔
1041
            })
1042
            .take(current),
96✔
1043
        );
1044

1045
        log::debug!(target: "gc", "Growing the heap by: {} -> {:?}", current, now.elapsed());
48✔
1046

1047
        self.alloc_count += current;
48✔
1048
        self.grow_count += 1;
48✔
1049

1050
        #[cfg(debug_assertions)]
1051
        {
1052
            assert!(!self.elements[self.cursor].read().is_reachable());
96✔
1053
        }
1054
    }
1055

1056
    // Extend the heap
1057
    fn extend_heap(&mut self) {
×
1058
        self.grow();
×
1059

1060
        #[cfg(debug_assertions)]
1061
        {
1062
            assert!(!self.elements[self.cursor].read().is_reachable());
×
1063
        }
1064
    }
1065

1066
    // if it points to another thing, consider marking it as unreachable?
1067
    fn seek_to_next_free(&mut self) {
×
1068
        todo!()
1069
    }
1070

1071
    fn allocate(&mut self, value: T) -> HeapRef<T> {
9,227,684✔
1072
        // Drain, moving values around...
1073
        // is that expensive?
1074
        let guard = &mut self.elements[self.cursor];
18,455,368✔
1075

1076
        // Allocate into this field
1077
        let mut heap_guard = guard.write();
27,683,052✔
1078

1079
        // TODO: If the guard is registered with a will executor,
1080
        // then we shouldn't mark this value as eligible to be
1081
        // freed?
1082
        heap_guard.value = value;
18,455,368✔
1083

1084
        heap_guard.reachable = true;
9,227,684✔
1085
        let weak_ptr = StandardShared::downgrade(guard);
27,683,052✔
1086
        drop(heap_guard);
18,455,368✔
1087

1088
        // self.elements[self.cursor] = pointer;
1089
        self.alloc_count -= 1;
9,227,684✔
1090

1091
        // Find where to assign the next slot optimistically?
1092
        let next_slot = self.elements[self.cursor..]
18,455,368✔
1093
            .iter()
1094
            .position(|x| !x.read().is_reachable());
27,837,186✔
1095

1096
        if let Some(next_slot) = next_slot {
18,455,336✔
1097
            self.cursor += next_slot;
×
1098

1099
            // #[cfg(debug_assertions)]
1100
            // {
1101
            // assert!(!self.elements[self.cursor].read().is_reachable());
1102
            // }
1103
        } else {
1104
            // TODO: Handle compaction and moving things around so the
1105
            // cursor has a chance to actually find stuff that has been
1106
            // freed. It would also be nice
1107
            if self.is_heap_full() {
64✔
1108
                log::debug!(target: "gc", "Extending the heap in `allocate`");
×
1109

1110
                // Extend the heap, move the cursor to the end
1111
                self.extend_heap();
×
1112

1113
                // assert!(!self.elements[self.cursor].read().is_reachable());
1114
            } else {
1115
                // Move to the beginning.
1116
                self.cursor = self
32✔
1117
                    .elements
32✔
1118
                    .iter()
32✔
1119
                    .position(|x| !x.read().is_reachable())
64✔
1120
                    .unwrap();
×
1121

1122
                // assert!(!self.elements[self.cursor].read().is_reachable());
1123
            }
1124
        }
1125

1126
        // assert!(!self.elements[self.cursor].read().is_reachable());
1127

1128
        HeapRef { inner: weak_ptr }
1129
    }
1130

1131
    // Can incrementally collect with the from / to space, assuming that the collections
1132
    // are done incrementally from the other side.
1133
    fn collect_on_condition(&mut self, func: fn(&HeapElement<T>) -> bool) -> usize {
55✔
1134
        log::debug!(target: "gc", "Free count before weak collection: {}", self.alloc_count);
55✔
1135
        let mut amount_dropped = 0;
110✔
1136

1137
        self.elements.iter_mut().for_each(|x| {
9,548,910✔
1138
            // This is... a little gnarly? We don't want to lock each time, but it could
1139
            // help. Allocations can now be genuinely reused since we're manipulating
1140
            // what is inside the pointer
1141
            if func(x) {
9,548,800✔
1142
                let mut guard = x.write();
2,065,242✔
1143

1144
                if guard.reachable {
937,451✔
1145
                    guard.reachable = false;
249,037✔
1146
                    amount_dropped += 1;
249,037✔
1147
                }
1148
            }
1149
        });
1150

1151
        self.alloc_count += amount_dropped;
55✔
1152
        log::debug!(target: "gc", "Free count after weak collection: {}", self.alloc_count);
55✔
1153

1154
        amount_dropped
55✔
1155
    }
1156

1157
    // Full weak collection
1158
    fn weak_collection(&mut self) -> usize {
55✔
1159
        // Just mark them to be dead
1160
        let res = self.collect_on_condition(|inner| StandardShared::weak_count(inner) == 0);
19,097,765✔
1161
        #[cfg(debug_assertions)]
1162
        {
1163
            assert!(!self.elements[self.cursor].read().is_reachable());
110✔
1164
        }
1165
        res
55✔
1166
    }
1167

1168
    fn mark_all_unreachable(&mut self) {
504✔
1169
        self.elements.iter_mut().for_each(|x| x.write().reset());
41,524,208✔
1170
    }
1171

1172
    // Compact every once in a while
1173
    // TODO: Move this on to its own thread
1174
    fn compact(&mut self) {
×
1175
        #[cfg(feature = "sync")]
×
1176
        if let Some(sender) = &self.forward {
×
1177
            sender.send(std::mem::take(&mut self.elements)).unwrap();
×
1178
            self.elements = self.backward.as_ref().unwrap().recv().unwrap();
×
1179
        } else {
1180
            self.elements.retain(|x| x.read().is_reachable());
×
1181
            self.elements.shrink_to_fit();
×
1182
        }
1183

1184
        #[cfg(not(feature = "sync"))]
1185
        {
1186
            self.elements.retain(|x| x.read().is_reachable());
×
1187
            self.elements.shrink_to_fit();
×
1188
        }
1189

1190
        log::debug!(target: "gc", "Heap size after compaction: {}", self.elements.len());
×
1191
        self.alloc_count = 0;
×
1192
        self.grow_count = 0;
×
1193
        self.extend_heap();
×
1194
        #[cfg(debug_assertions)]
1195
        {
1196
            assert!(!self.elements[self.cursor].read().is_reachable());
×
1197
        }
1198
    }
1199

1200
    fn strong_collection(&mut self) -> usize {
×
1201
        self.collect_on_condition(|inner| !inner.read().is_reachable())
×
1202
    }
1203
}
1204

1205
#[cfg(not(feature = "sync"))]
1206
impl<T: HeapAble + 'static> FreeList<T> {
1207
    // TODO: Calculate the overhead!
1208
    // How big is this?
1209
    const EXTEND_CHUNK: usize = 256 * 100;
1210

1211
    fn new() -> Self {
×
1212
        #[cfg(feature = "sync")]
1213
        let (forward_sender, backward_receiver) = spawn_background_dropper();
×
1214

1215
        let mut res = FreeList {
1216
            elements: Vec::new(),
×
1217
            cursor: 0,
1218
            alloc_count: 0,
1219
            grow_count: 0,
1220
            #[cfg(feature = "sync")]
1221
            forward: Some(forward_sender),
×
1222
            #[cfg(feature = "sync")]
1223
            backward: Some(backward_receiver),
×
1224
        };
1225

1226
        res.grow();
×
1227

1228
        res
×
1229
    }
1230

1231
    // Update the counts for things
1232
    fn recount(&mut self) {
×
1233
        self.alloc_count = 0;
×
1234
        for element in &mut self.elements {
×
1235
            let guard = element.read();
×
1236
            if !guard.is_reachable() {
×
1237
                // Replace with an empty value... for now.
1238
                // Want to keep the memory counts down.
1239
                // let value = std::mem::replace(&mut guard.value, T::empty());
1240
                self.alloc_count += 1;
×
1241
            }
1242
        }
1243
    }
1244

1245
    fn percent_full(&self) -> f64 {
×
1246
        let count = self.elements.len() as f64;
×
1247

1248
        let percent = (count - self.alloc_count as f64) / count;
×
1249

1250
        assert!(percent < 1.01);
×
1251

1252
        percent
×
1253
    }
1254

1255
    fn is_heap_full(&self) -> bool {
×
1256
        self.alloc_count == 0
×
1257
    }
1258

1259
    fn grow(&mut self) {
×
1260
        let now = std::time::Instant::now();
×
1261
        // Can probably make this a lot bigger
1262
        let current = self.elements.len().max(Self::EXTEND_CHUNK);
×
1263

1264
        self.cursor = self.elements.len();
×
1265

1266
        self.elements.reserve(current);
×
1267

1268
        log::debug!(target: "gc", "Time to extend the heap vec -> {:?}", now.elapsed());
×
1269

1270
        let now = std::time::Instant::now();
×
1271

1272
        // Can we pre allocate this somewhere else? Incrementally allocate the values?
1273
        // So basically we can have the elements be allocated vs not, and just have them
1274
        // be snatched on demand?
1275
        self.elements.extend(
×
1276
            std::iter::repeat_with(|| {
×
1277
                StandardShared::new(MutContainer::new(HeapAllocated::new(T::empty())))
×
1278
            })
1279
            .take(current),
×
1280
        );
1281

1282
        log::debug!(target: "gc", "Growing the heap by: {} -> {:?}", current, now.elapsed());
×
1283

1284
        self.alloc_count += current;
×
1285
        self.grow_count += 1;
×
1286

1287
        #[cfg(debug_assertions)]
1288
        {
1289
            assert!(!self.elements[self.cursor].read().is_reachable());
×
1290
        }
1291
    }
1292

1293
    // Extend the heap
1294
    fn extend_heap(&mut self) {
×
1295
        self.grow();
×
1296

1297
        #[cfg(debug_assertions)]
1298
        {
1299
            assert!(!self.elements[self.cursor].read().is_reachable());
×
1300
        }
1301
    }
1302

1303
    // if it points to another thing, consider marking it as unreachable?
1304
    fn seek_to_next_free(&mut self) {
×
1305
        todo!()
1306
    }
1307

1308
    fn allocate(&mut self, value: T) -> HeapRef<T> {
×
1309
        // Drain, moving values around...
1310
        // is that expensive?
1311
        let guard = &mut self.elements[self.cursor];
×
1312

1313
        // Allocate into this field
1314
        let mut heap_guard = guard.write();
×
1315

1316
        // TODO: If the guard is registered with a will executor,
1317
        // then we shouldn't mark this value as eligible to be
1318
        // freed?
1319
        heap_guard.value = value;
×
1320

1321
        heap_guard.reachable = true;
×
1322
        let weak_ptr = StandardShared::downgrade(&guard);
×
1323
        drop(heap_guard);
×
1324

1325
        // self.elements[self.cursor] = pointer;
1326
        self.alloc_count -= 1;
×
1327

1328
        // Find where to assign the next slot optimistically?
1329
        let next_slot = self.elements[self.cursor..]
×
1330
            .iter()
1331
            .position(|x| !x.read().is_reachable());
×
1332

1333
        if let Some(next_slot) = next_slot {
×
1334
            self.cursor += next_slot;
×
1335

1336
            // #[cfg(debug_assertions)]
1337
            // {
1338
            // assert!(!self.elements[self.cursor].read().is_reachable());
1339
            // }
1340
        } else {
1341
            // TODO: Handle compaction and moving things around so the
1342
            // cursor has a chance to actually find stuff that has been
1343
            // freed. It would also be nice
1344
            if self.is_heap_full() {
×
1345
                log::debug!(target: "gc", "Extending the heap in `allocate`");
×
1346

1347
                // Extend the heap, move the cursor to the end
1348
                self.extend_heap();
×
1349

1350
                // assert!(!self.elements[self.cursor].read().is_reachable());
1351
            } else {
1352
                // Move to the beginning.
1353
                self.cursor = self
×
1354
                    .elements
×
1355
                    .iter()
×
1356
                    .position(|x| !x.read().is_reachable())
×
1357
                    .unwrap();
×
1358

1359
                // assert!(!self.elements[self.cursor].read().is_reachable());
1360
            }
1361
        }
1362

1363
        // assert!(!self.elements[self.cursor].read().is_reachable());
1364

1365
        HeapRef { inner: weak_ptr }
1366
    }
1367

1368
    // Can incrementally collect with the from / to space, assuming that the collections
1369
    // are done incrementally from the other side.
1370
    fn collect_on_condition(&mut self, func: fn(&HeapElement<T>) -> bool) -> usize {
×
1371
        log::debug!(target: "gc", "Free count before weak collection: {}", self.alloc_count);
×
1372
        let mut amount_dropped = 0;
×
1373

1374
        self.elements.iter_mut().for_each(|x| {
×
1375
            // This is... a little gnarly? We don't want to lock each time, but it could
1376
            // help. Allocations can now be genuinely reused since we're manipulating
1377
            // what is inside the pointer
1378
            if func(x) {
×
1379
                let mut guard = x.write();
×
1380

1381
                if guard.reachable {
×
1382
                    guard.reachable = false;
×
1383
                    amount_dropped += 1;
×
1384
                }
1385
            }
1386
        });
1387

1388
        self.alloc_count += amount_dropped;
×
1389
        log::debug!(target: "gc", "Free count after weak collection: {}", self.alloc_count);
×
1390

1391
        amount_dropped
×
1392
    }
1393

1394
    // Full weak collection
1395
    fn weak_collection(&mut self) -> usize {
×
1396
        // Just mark them to be dead
1397
        let res = self.collect_on_condition(|inner| StandardShared::weak_count(inner) == 0);
×
1398
        #[cfg(debug_assertions)]
1399
        {
1400
            assert!(!self.elements[self.cursor].read().is_reachable());
×
1401
        }
1402
        res
×
1403
    }
1404

1405
    fn mark_all_unreachable(&mut self) {
×
1406
        self.elements.iter_mut().for_each(|x| x.write().reset());
×
1407
    }
1408

1409
    // Compact every once in a while
1410
    // TODO: Move this on to its own thread
1411
    fn compact(&mut self) {
×
1412
        #[cfg(feature = "sync")]
×
1413
        if let Some(sender) = &self.forward {
×
1414
            sender.send(std::mem::take(&mut self.elements)).unwrap();
×
1415
            self.elements = self.backward.as_ref().unwrap().recv().unwrap();
×
1416
        } else {
1417
            self.elements.retain(|x| x.read().is_reachable());
×
1418
            self.elements.shrink_to_fit();
×
1419
        }
1420

1421
        #[cfg(not(feature = "sync"))]
1422
        {
1423
            self.elements.retain(|x| x.read().is_reachable());
×
1424
            self.elements.shrink_to_fit();
×
1425
        }
1426

1427
        log::debug!(target: "gc", "Heap size after compaction: {}", self.elements.len());
×
1428
        self.alloc_count = 0;
×
1429
        self.grow_count = 0;
×
1430
        self.extend_heap();
×
1431
        #[cfg(debug_assertions)]
1432
        {
1433
            assert!(!self.elements[self.cursor].read().is_reachable());
×
1434
        }
1435
    }
1436

1437
    fn strong_collection(&mut self) -> usize {
×
1438
        self.collect_on_condition(|inner| !inner.read().is_reachable())
×
1439
    }
1440
}
1441

1442
impl FreeList<Vec<SteelVal>> {
1443
    fn allocate_vec(&mut self, value: impl Iterator<Item = SteelVal>) -> HeapRef<Vec<SteelVal>> {
25,170✔
1444
        // Drain, moving values around...
1445
        // is that expensive?
1446
        let guard = &mut self.elements[self.cursor];
50,340✔
1447

1448
        // Allocate into this field
1449
        let mut heap_guard = guard.write();
75,510✔
1450

1451
        // Check that this fits:
1452
        heap_guard.value.clear();
50,340✔
1453
        for v in value {
1,722,466✔
1454
            heap_guard.value.push(v);
×
1455
        }
1456

1457
        heap_guard.value.shrink_to_fit();
50,340✔
1458

1459
        heap_guard.reachable = true;
25,170✔
1460
        let weak_ptr = StandardShared::downgrade(guard);
75,510✔
1461
        drop(heap_guard);
50,340✔
1462

1463
        // self.elements[self.cursor] = pointer;
1464
        self.alloc_count -= 1;
25,170✔
1465

1466
        // Find where to assign the next slot optimistically?
1467
        let next_slot = self.elements[self.cursor..]
50,340✔
1468
            .iter()
1469
            .position(|x| !x.read().is_reachable());
75,510✔
1470

1471
        if let Some(next_slot) = next_slot {
50,340✔
1472
            self.cursor += next_slot;
×
1473

1474
            // #[cfg(debug_assertions)]
1475
            // {
1476
            // assert!(!self.elements[self.cursor].read().is_reachable());
1477
            // }
1478
        } else {
1479
            // TODO: Handle compaction and moving things around so the
1480
            // cursor has a chance to actually find stuff that has been
1481
            // freed. It would also be nice
1482
            if self.is_heap_full() {
×
1483
                log::debug!(target: "gc", "Extending the heap in `allocate`");
×
1484

1485
                // Extend the heap, move the cursor to the end
1486
                self.extend_heap();
×
1487

1488
                // assert!(!self.elements[self.cursor].read().is_reachable());
1489
            } else {
1490
                // Move to the beginning.
1491
                self.cursor = self
×
1492
                    .elements
×
1493
                    .iter()
×
1494
                    .position(|x| !x.read().is_reachable())
×
1495
                    .unwrap();
×
1496

1497
                // assert!(!self.elements[self.cursor].read().is_reachable());
1498
            }
1499
        }
1500

1501
        // assert!(!self.elements[self.cursor].read().is_reachable());
1502

1503
        HeapRef { inner: weak_ptr }
1504
    }
1505
}
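
// A minimal standalone sketch of the cursor policy used by `allocate_vec` above:
// scan forward from the current cursor for a free slot, otherwise wrap around to the
// first free slot anywhere, otherwise the heap is full and the caller must extend it.
// The `next_cursor` helper and boolean free-bitmap are illustrative, not this crate's API.
fn next_cursor(free: &[bool], cursor: usize) -> Option<usize> {
    // Forward scan first: cheap, and keeps fresh allocations roughly clustered.
    if let Some(offset) = free[cursor..].iter().position(|slot| *slot) {
        return Some(cursor + offset);
    }
    // Nothing ahead of the cursor: wrap around and take the first free slot.
    free.iter().position(|slot| *slot)
}

#[test]
fn cursor_wraps_before_extending() {
    let free = [false, true, false, false];
    assert_eq!(next_cursor(&free, 2), Some(1)); // nothing ahead of index 2, wrap to 1
    assert_eq!(next_cursor(&[false, false], 0), None); // completely full: extend the heap
}
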
1506

1507
#[cfg(feature = "sync")]
1508
fn spawn_background_dropper<T: HeapAble + Sync + Send + 'static>(
19✔
1509
) -> (Sender<Vec<HeapElement<T>>>, Receiver<Vec<HeapElement<T>>>) {
1510
    let (forward_sender, forward_receiver) = crossbeam_channel::bounded(0);
57✔
1511
    let (backward_sender, backward_receiver) = crossbeam_channel::bounded(0);
57✔
1512

1513
    // Worker thread, capable of dropping the background values of lists
1514
    std::thread::spawn(move || {
38✔
1515
        let mut current: Vec<HeapElement<T>> = Vec::new();
57✔
1516
        for mut block in forward_receiver {
19✔
1517
            std::mem::swap(&mut current, &mut block);
×
1518
            // Get the value, move on.
1519
            backward_sender.send(block).unwrap();
×
1520

1521
            // TODO: when compacting, keep values that have been registered with a will
1522
            // executor.
1523
            current.retain(|x| x.read().is_reachable());
×
1524
            current.shrink_to_fit();
×
1525
        }
1526
    });
1527
    (forward_sender, backward_receiver)
19✔
1528
}
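
// A minimal standalone sketch (using std channels rather than crossbeam) of the
// two-channel ping-pong in `spawn_background_dropper`: the caller hands its full
// buffer to the worker and immediately receives the previously filtered buffer back,
// so the expensive drops happen off the allocating thread. The `Vec<i32>` payload is
// illustrative only.
#[test]
fn background_swap_sketch() {
    use std::sync::mpsc::sync_channel;

    let (forward_tx, forward_rx) = sync_channel::<Vec<i32>>(0);
    let (backward_tx, backward_rx) = sync_channel::<Vec<i32>>(0);

    let worker = std::thread::spawn(move || {
        let mut current: Vec<i32> = Vec::new();
        for mut block in forward_rx {
            // Hand the previously filtered buffer straight back to the caller...
            std::mem::swap(&mut current, &mut block);
            backward_tx.send(block).unwrap();
            // ...then do the slow retain/drop work on this thread's own time.
            current.retain(|x| x % 2 == 0);
        }
    });

    forward_tx.send(vec![1, 2, 3, 4]).unwrap();
    let recycled = backward_rx.recv().unwrap(); // the worker's previous (empty) buffer
    assert!(recycled.is_empty());

    drop(forward_tx); // closing the channel lets the worker exit
    worker.join().unwrap();
}
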
1529

1530
/// The heap for steel currently uses an allocation scheme based on weak references
1531
/// to reference-counted pointers. Allocation is just a `Vec<Rc<RefCell<T>>>`, where
1532
/// allocating simply pushes a new value onto the end. When we do a collection,
1533
/// we first attempt a small collection by dropping any values with no weak counts
1534
/// pointing to them.
1535
#[derive(Clone)]
1536
pub struct Heap {
1537
    count: usize,
1538
    mark_and_sweep_queue: Vec<SteelVal>,
1539

1540
    maybe_memory_size: usize,
1541

1542
    skip_minor_collection: bool,
1543
    memory_free_list: FreeList<SteelVal>,
1544
    vector_free_list: FreeList<Vec<SteelVal>>,
1545
}
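
// A minimal standalone sketch of the ownership shape described in the doc comment
// above: the heap side holds the strong pointer, user-facing handles hold only a weak
// one, so a slot whose weak count drops to zero can be reclaimed by a weak collection.
// `MiniHeap` and `MiniRef` are illustrative stand-ins, not this crate's `Heap`/`HeapRef`.
#[test]
fn weak_handle_sketch() {
    use std::cell::RefCell;
    use std::rc::{Rc, Weak};

    struct MiniHeap {
        slots: Vec<Rc<RefCell<i32>>>,
    }
    struct MiniRef {
        inner: Weak<RefCell<i32>>,
    }

    let mut heap = MiniHeap { slots: Vec::new() };
    let slot = Rc::new(RefCell::new(42));
    let handle = MiniRef { inner: Rc::downgrade(&slot) };
    heap.slots.push(slot);

    // While a handle exists, the slot is clearly still in use.
    assert_eq!(*handle.inner.upgrade().unwrap().borrow(), 42);
    assert_eq!(Rc::weak_count(&heap.slots[0]), 1);

    // Once every handle is gone, a weak collection is free to reuse the slot.
    drop(handle);
    assert_eq!(Rc::weak_count(&heap.slots[0]), 0);
}
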
1546

1547
#[cfg(feature = "sync")]
1548
unsafe impl Send for Heap {}
1549
#[cfg(feature = "sync")]
1550
unsafe impl Sync for Heap {}
1551

1552
// Contiguous... no good? Perhaps a free list is actually better here?
1553
// Can reuse the allocations more effectively, and can compact where needed.
1554
struct MemorySpace {
1555
    memory: Vec<HeapValue>,
1556
    vectors: Vec<HeapVector>,
1557
}
1558

1559
type MemoryBlock = (Vec<HeapValue>, Vec<HeapVector>);
1560

1561
impl Heap {
1562
    pub fn new() -> Self {
8✔
1563
        Heap {
1564
            count: 0,
1565
            // mark_and_sweep_queue: VecDeque::with_capacity(256),
1566
            mark_and_sweep_queue: Vec::with_capacity(256),
16✔
1567

1568
            maybe_memory_size: 0,
1569

1570
            skip_minor_collection: false,
1571

1572
            memory_free_list: FreeList::new(),
8✔
1573
            vector_free_list: FreeList::new(),
8✔
1574
        }
1575
    }
1576

1577
    pub fn new_empty() -> Self {
×
1578
        Heap {
1579
            count: 0,
1580
            mark_and_sweep_queue: Vec::new(),
×
1581
            maybe_memory_size: 0,
1582
            skip_minor_collection: false,
1583
            memory_free_list: FreeList::new(),
×
1584
            vector_free_list: FreeList::new(),
×
1585
        }
1586
    }
1587

1588
    pub fn collection<'a>(
×
1589
        &mut self,
1590
        roots: &'a [SteelVal],
1591
        live_functions: impl Iterator<Item = &'a ByteCodeLambda>,
1592
        globals: &'a [SteelVal],
1593
        tls: &'a [SteelVal],
1594
        synchronizer: &'a mut Synchronizer,
1595
        force_full: bool,
1596
    ) {
1597
        self.value_collection(
×
1598
            &SteelVal::Void,
×
1599
            roots,
×
1600
            live_functions,
×
1601
            globals,
×
1602
            tls,
×
1603
            synchronizer,
×
1604
            force_full,
×
1605
        );
1606
    }
1607

1608
    // Clean up the values?
1609
    pub fn allocate<'a>(
3,196,807✔
1610
        &mut self,
1611
        value: SteelVal,
1612
        roots: &'a [SteelVal],
1613
        live_functions: impl Iterator<Item = &'a ByteCodeLambda>,
1614
        globals: &'a [SteelVal],
1615
        tls: &'a [SteelVal],
1616
        synchronizer: &'a mut Synchronizer,
1617
    ) -> HeapRef<SteelVal> {
1618
        self.value_collection(
6,393,614✔
1619
            &value,
3,196,807✔
1620
            roots,
3,196,807✔
1621
            live_functions,
3,196,807✔
1622
            globals,
3,196,807✔
1623
            tls,
3,196,807✔
1624
            synchronizer,
3,196,807✔
1625
            false,
1626
        );
1627
        self.memory_free_list.allocate(value)
9,590,421✔
1628
    }
1629

1630
    fn value_collection<'a>(
3,196,807✔
1631
        &mut self,
1632
        value: &SteelVal,
1633
        roots: &'a [SteelVal],
1634
        live_functions: impl Iterator<Item = &'a ByteCodeLambda>,
1635
        globals: &'a [SteelVal],
1636
        tls: &'a [SteelVal],
1637
        synchronizer: &mut Synchronizer,
1638
        force: bool,
1639
    ) {
1640
        if self.memory_free_list.percent_full() > 0.95 || force {
6,393,588✔
1641
            let now = std::time::Instant::now();
52✔
1642
            // Attempt a weak collection
1643
            log::debug!(target: "gc", "SteelVal gc invocation");
26✔
1644
            self.memory_free_list.weak_collection();
52✔
1645

1646
            log::debug!(target: "gc", "Memory size post weak collection: {}", self.memory_free_list.percent_full());
26✔
1647
            log::debug!(target: "gc", "Weak collection time: {:?}", now.elapsed());
26✔
1648

1649
            if self.memory_free_list.percent_full() > 0.95 || force {
31✔
1650
                // New generation
1651
                self.memory_free_list.mark_all_unreachable();
42✔
1652
                self.vector_free_list.mark_all_unreachable();
42✔
1653

1654
                // Just reset the counter
1655
                let stats = self.mark_and_sweep_new(
63✔
1656
                    Some(value.clone()),
21✔
1657
                    std::iter::empty(),
21✔
1658
                    roots,
21✔
1659
                    live_functions,
21✔
1660
                    globals,
21✔
1661
                    tls,
21✔
1662
                    synchronizer,
21✔
1663
                );
1664

1665
                self.memory_free_list.alloc_count =
21✔
1666
                    self.memory_free_list.elements.len() - stats.memory_reached_count;
42✔
1667

1668
                self.vector_free_list.alloc_count =
21✔
1669
                    self.vector_free_list.elements.len() - stats.vector_reached_count;
42✔
1670

1671
                // Estimate what that memory size is?
1672
                if self.memory_free_list.grow_count > RESET_LIMIT {
21✔
1673
                    // Compact the free list.
1674
                    self.memory_free_list.compact();
×
1675
                } else {
1676
                    self.memory_free_list.grow();
21✔
1677
                }
1678

1679
                // synchronizer.resume_threads();
1680

1681
                log::debug!(target: "gc", "Memory size post mark and sweep: {}", self.memory_free_list.percent_full());
21✔
1682

1683
                log::debug!(target: "gc", "---- TOTAL GC TIME: {:?} ----", now.elapsed());
21✔
1684
            }
1685
        }
1686
    }
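
// A minimal standalone sketch of the collection policy in `value_collection` above:
// at roughly 95% occupancy try the cheap weak collection first, and only if the list
// is still nearly full fall back to a full mark pass, then either grow or (past a
// grow limit) compact. `MiniList`, its fields, and the constant are illustrative
// placeholders, not this crate's `FreeList`.
struct MiniList {
    used: usize,
    capacity: usize,
    grow_count: usize,
}

const SKETCH_RESET_LIMIT: usize = 5;

fn collect_if_needed(list: &mut MiniList, weakly_freeable: usize, reachable: usize) {
    let full = |l: &MiniList| l.used as f64 / l.capacity as f64 > 0.95;
    if full(list) {
        // Cheap pass: drop slots that nothing outside the heap points at any more.
        list.used -= weakly_freeable;
        if full(list) {
            // Expensive pass: only what the mark phase reached stays allocated.
            list.used = reachable;
            if list.grow_count > SKETCH_RESET_LIMIT {
                list.capacity = reachable.max(1); // compact
                list.grow_count = 0;
            } else {
                list.capacity *= 2; // grow
                list.grow_count += 1;
            }
        }
    }
}

#[test]
fn full_collection_runs_when_the_weak_pass_is_not_enough() {
    let mut list = MiniList { used: 96, capacity: 100, grow_count: 0 };
    collect_if_needed(&mut list, 0, 40); // the weak pass frees nothing here
    assert_eq!(list.used, 40);
    assert_eq!(list.capacity, 200); // grew rather than compacted
}
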
1687

1688
    pub fn allocate_vector<'a>(
6,010,777✔
1689
        &mut self,
1690
        values: Vec<SteelVal>,
1691
        roots: &'a [SteelVal],
1692
        live_functions: impl Iterator<Item = &'a ByteCodeLambda>,
1693
        globals: &'a [SteelVal],
1694
        tls: &'a [SteelVal],
1695
        synchronizer: &'a mut Synchronizer,
1696
    ) -> HeapRef<Vec<SteelVal>> {
1697
        // todo!();
1698

1699
        self.vector_collection(
12,021,554✔
1700
            &values,
6,010,777✔
1701
            roots,
6,010,777✔
1702
            live_functions,
6,010,777✔
1703
            globals,
6,010,777✔
1704
            tls,
6,010,777✔
1705
            synchronizer,
6,010,777✔
1706
            false,
1707
        );
1708

1709
        // TODO: Optimize this a lot!
1710
        self.vector_free_list.allocate(values)
18,032,331✔
1711
    }
1712

1713
    fn vector_collection<'a>(
6,010,777✔
1714
        &mut self,
1715
        values: &[SteelVal],
1716
        roots: &'a [SteelVal],
1717
        live_functions: impl Iterator<Item = &'a ByteCodeLambda>,
1718
        globals: &'a [SteelVal],
1719
        tls: &'a [SteelVal],
1720
        synchronizer: &'a mut Synchronizer,
1721
        force: bool,
1722
    ) {
1723
        if self.vector_free_list.percent_full() > 0.95 || force {
12,021,528✔
1724
            let now = std::time::Instant::now();
52✔
1725
            // Attempt a weak collection
1726
            log::debug!(target: "gc", "Vec<SteelVal> gc invocation");
26✔
1727
            self.vector_free_list.weak_collection();
52✔
1728
            log::debug!(target: "gc", "Weak collection time: {:?}", now.elapsed());
26✔
1729

1730
            if self.vector_free_list.percent_full() > 0.95 || force {
44✔
1731
                self.vector_free_list.mark_all_unreachable();
16✔
1732
                self.memory_free_list.mark_all_unreachable();
16✔
1733

1734
                let stats = self.mark_and_sweep_new(
24✔
1735
                    None,
8✔
1736
                    values.iter().cloned(),
24✔
1737
                    roots,
8✔
1738
                    live_functions,
8✔
1739
                    globals,
8✔
1740
                    tls,
8✔
1741
                    synchronizer,
8✔
1742
                );
1743

1744
                self.vector_free_list.alloc_count =
8✔
1745
                    self.vector_free_list.elements.len() - stats.vector_reached_count;
16✔
1746

1747
                self.memory_free_list.alloc_count =
8✔
1748
                    self.memory_free_list.elements.len() - stats.memory_reached_count;
16✔
1749

1750
                // if self.vector_free_list.percent_full() > 0.75 {
1751
                if self.vector_free_list.grow_count > RESET_LIMIT {
8✔
1752
                    // Compact the free list.
1753
                    self.vector_free_list.compact();
×
1754
                } else {
1755
                    self.vector_free_list.grow();
8✔
1756
                }
1757

1758
                log::debug!(target: "gc", "Memory size post mark and sweep: {}", self.vector_free_list.percent_full());
8✔
1759

1760
                log::debug!(target: "gc", "---- TOTAL VECTOR GC TIME: {:?} ----", now.elapsed());
8✔
1761
            }
1762
        }
1763
    }
1764

1765
    pub fn allocate_vector_iter<'a>(
25,170✔
1766
        &mut self,
1767
        values: impl Iterator<Item = SteelVal> + Clone,
1768
        roots: &'a [SteelVal],
1769
        live_functions: impl Iterator<Item = &'a ByteCodeLambda>,
1770
        globals: &'a [SteelVal],
1771
        tls: &'a [SteelVal],
1772
        synchronizer: &'a mut Synchronizer,
1773
    ) -> HeapRef<Vec<SteelVal>> {
1774
        if self.vector_free_list.percent_full() > 0.95 {
25,170✔
1775
            let now = std::time::Instant::now();
×
1776
            // Attempt a weak collection
1777
            log::debug!(target: "gc", "Vec<SteelVal> gc invocation");
×
1778
            self.vector_free_list.weak_collection();
×
1779
            log::debug!(target: "gc", "Weak collection time: {:?}", now.elapsed());
×
1780

1781
            if self.vector_free_list.percent_full() > 0.95 {
×
1782
                self.vector_free_list.mark_all_unreachable();
×
1783
                self.memory_free_list.mark_all_unreachable();
×
1784

1785
                let stats = self.mark_and_sweep_new(
×
1786
                    None,
×
1787
                    values.clone(),
×
1788
                    roots,
×
1789
                    live_functions,
×
1790
                    globals,
×
1791
                    tls,
×
1792
                    synchronizer,
×
1793
                );
1794

1795
                self.vector_free_list.alloc_count =
×
1796
                    self.vector_free_list.elements.len() - stats.vector_reached_count;
×
1797

1798
                self.memory_free_list.alloc_count =
×
1799
                    self.memory_free_list.elements.len() - stats.memory_reached_count;
×
1800

1801
                // if self.vector_free_list.percent_full() > 0.75 {
1802
                if self.vector_free_list.grow_count > RESET_LIMIT {
×
1803
                    // Compact the free list.
1804
                    self.vector_free_list.compact();
×
1805
                } else {
1806
                    self.vector_free_list.grow();
×
1807
                }
1808

1809
                log::debug!(target: "gc", "Memory size post mark and sweep: {}", self.vector_free_list.percent_full());
×
1810

1811
                log::debug!(target: "gc", "---- TOTAL VECTOR GC TIME: {:?} ----", now.elapsed());
×
1812
            }
1813
        }
1814

1815
        self.vector_free_list.allocate_vec(values)
75,510✔
1816
    }
1817

1818
    fn mark_and_sweep_new<'a>(
29✔
1819
        &mut self,
1820
        root_value: Option<SteelVal>,
1821
        root_vector: impl Iterator<Item = SteelVal>,
1822
        roots: &'a [SteelVal],
1823
        function_stack: impl Iterator<Item = &'a ByteCodeLambda>,
1824
        globals: &'a [SteelVal],
1825
        tls: &'a [SteelVal],
1826
        synchronizer: &mut Synchronizer,
1827
    ) -> MarkAndSweepStats {
1828
        let stats = self.mark(
87✔
1829
            root_value,
29✔
1830
            root_vector,
29✔
1831
            roots,
29✔
1832
            function_stack,
29✔
1833
            globals,
29✔
1834
            tls,
29✔
1835
            synchronizer,
29✔
1836
        );
1837

1838
        // #[cfg(feature = "profiling")]
1839
        let now = std::time::Instant::now();
58✔
1840

1841
        #[cfg(feature = "sync")]
1842
        {
1843
            GLOBAL_ROOTS.lock().unwrap().increment_generation();
29✔
1844
        }
1845

1846
        #[cfg(not(feature = "sync"))]
1847
        {
1848
            ROOTS.with(|x| x.borrow_mut().increment_generation());
×
1849
        }
1850

1851
        // #[cfg(feature = "profiling")]
1852
        log::debug!(target: "gc", "Sweep: Time taken: {:?}", now.elapsed());
29✔
1853

1854
        synchronizer.resume_threads();
58✔
1855

1856
        stats
29✔
1857

1858
        // object_count.saturating_sub(amount_freed)
1859
        // 0
1860
    }
1861

1862
    fn mark<'a>(
29✔
1863
        &mut self,
1864
        root_value: Option<SteelVal>,
1865
        root_vector: impl Iterator<Item = SteelVal>,
1866
        roots: &[SteelVal],
1867
        function_stack: impl Iterator<Item = &'a ByteCodeLambda>,
1868
        globals: &[SteelVal],
1869
        tls: &[SteelVal],
1870
        synchronizer: &mut Synchronizer,
1871
    ) -> MarkAndSweepStats {
1872
        log::debug!(target: "gc", "Marking the heap");
29✔
1873

1874
        // #[cfg(feature = "profiling")]
1875
        let now = std::time::Instant::now();
58✔
1876

1877
        let mut context = MarkAndSweepContext {
1878
            queue: &mut self.mark_and_sweep_queue,
29✔
1879
            stats: MarkAndSweepStats::default(),
29✔
1880
        };
1881

1882
        // Pause all threads
1883
        synchronizer.stop_threads();
58✔
1884
        unsafe {
1885
            synchronizer.enumerate_stacks(&mut context);
58✔
1886
        }
1887

1888
        if let Some(root_value) = root_value {
50✔
1889
            context.push_back(root_value);
×
1890
        }
1891

1892
        for value in root_vector {
109✔
1893
            context.push_back(value.clone());
×
1894
        }
1895

1896
        for root in tls {
203✔
1897
            context.push_back(root.clone());
×
1898
        }
1899

1900
        log::debug!(target: "gc", "Roots size: {}", roots.len());
29✔
1901
        for root in roots {
1,324,853✔
1902
            context.push_back(root.clone());
×
1903
        }
1904

1905
        log::debug!(target: "gc", "Globals size: {}", globals.len());
29✔
1906
        for root in globals {
105,005✔
1907
            context.push_back(root.clone());
×
1908
        }
1909

1910
        for function in function_stack {
221,281✔
1911
            for value in function.captures() {
5,872✔
1912
                context.push_back(value.clone());
×
1913
            }
1914
        }
1915

1916
        #[cfg(feature = "sync")]
1917
        {
1918
            GLOBAL_ROOTS
29✔
1919
                .lock()
29✔
1920
                .unwrap()
29✔
1921
                .roots
29✔
1922
                .values()
1923
                .for_each(|value| context.push_back(value.clone()))
29✔
1924
        }
1925

1926
        #[cfg(not(feature = "sync"))]
1927
        {
1928
            ROOTS.with(|x| {
×
1929
                x.borrow()
×
1930
                    .roots
×
1931
                    .values()
×
1932
                    .for_each(|value| context.push_back(value.clone()))
×
1933
            });
1934
        }
1935

1936
        // TODO: Can we do this in parallel? Divide up the iterator into separate components
1937
        // and do them that way?
1938
        log::debug!(target: "gc", "Stack size: {}", context.queue.len());
29✔
1939

1940
        #[cfg(feature = "sync")]
1941
        let count = MARKER.mark(context.queue);
87✔
1942

1943
        #[cfg(not(feature = "sync"))]
1944
        let count = {
×
1945
            context.visit();
×
1946
            context.stats
×
1947
        };
1948

1949
        log::debug!(target: "gc", "Mark: Time taken: {:?}", now.elapsed());
29✔
1950
        count
29✔
1951
    }
1952
}
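
// A minimal standalone sketch of the breadth-first marking that `mark` above drives
// through the visitor: leaf values are counted but never enqueued, container values
// push their children onto the work queue until it drains. `ToyVal` is an illustrative
// stand-in for `SteelVal`, not part of this crate.
#[derive(Clone)]
enum ToyVal {
    Int(i64),          // leaf: nothing further to trace
    List(Vec<ToyVal>), // container: children still need to be traced
}

fn toy_mark(roots: &[ToyVal]) -> usize {
    let mut queue: Vec<ToyVal> = roots.to_vec();
    let mut visited = 0;
    while let Some(value) = queue.pop() {
        visited += 1;
        match value {
            ToyVal::Int(_) => {}
            ToyVal::List(children) => queue.extend(children),
        }
    }
    visited
}

#[test]
fn toy_mark_counts_every_reachable_value() {
    let graph = ToyVal::List(vec![ToyVal::Int(1), ToyVal::List(vec![ToyVal::Int(2)])]);
    assert_eq!(toy_mark(&[graph]), 4); // outer list, 1, inner list, 2
}
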
1953

1954
#[cfg(feature = "sync")]
1955
#[derive(Clone)]
1956
struct ParallelMarker {
1957
    // Chunks of the roots for marking
1958
    senders: Arc<Mutex<Vec<MarkerWorker>>>,
1959
    queue: Arc<crossbeam_queue::SegQueue<SteelValPointer>>,
1960
}
1961

1962
#[cfg(feature = "sync")]
1963
unsafe impl Sync for ParallelMarker {}
1964
#[cfg(feature = "sync")]
1965
unsafe impl Send for ParallelMarker {}
1966

1967
#[cfg(feature = "sync")]
1968
struct MarkerWorker {
1969
    // Tell the thread to wake up
1970
    sender: Sender<()>,
1971
    ack: Receiver<MarkAndSweepStats>,
1972
    handle: JoinHandle<()>,
1973
}
1974

1975
#[cfg(feature = "sync")]
1976
impl ParallelMarker {
1977
    pub fn new() -> Self {
1✔
1978
        // Fall back to a single worker if the available parallelism cannot be determined.
1979
        let parallelism = std::thread::available_parallelism()
2✔
1980
            .map(|x| x.get() + 1)
3✔
1981
            .unwrap_or(1);
1982

1983
        let mut workers = Vec::with_capacity(parallelism);
3✔
1984
        let queue = Arc::new(crossbeam_queue::SegQueue::new());
3✔
1985

1986
        for _ in 0..parallelism {
1✔
1987
            let cloned_queue = queue.clone();
5✔
1988
            let (sender, receiver) = crossbeam_channel::unbounded();
1989
            let (ack_sender, ack_receiver) = crossbeam_channel::unbounded();
1990

1991
            let handle = std::thread::spawn(move || {
5✔
1992
                let cloned_queue = cloned_queue;
10✔
1993
                // This... might be too big?
1994
                let mut local_queue = Vec::with_capacity(4096);
10✔
1995
                for _ in receiver {
150✔
1996
                    let now = std::time::Instant::now();
145✔
1997

1998
                    let mut context = MarkAndSweepContextRefQueue {
1999
                        queue: &cloned_queue,
2000
                        local_queue: &mut local_queue,
2001
                        stats: MarkAndSweepStats::default(),
2002
                    };
2003

2004
                    context.visit();
2005

2006
                    log::debug!(target: "gc",
×
2007
                        "{:?}: {:?} -> {:?}",
×
2008
                        std::thread::current().id(),
×
2009
                        now.elapsed(),
×
2010
                        context.stats
2011
                    );
2012

2013
                    ack_sender.send(context.stats).unwrap();
2014
                }
2015
            });
2016

2017
            workers.push(MarkerWorker {
2018
                sender,
2019
                handle,
2020
                ack: ack_receiver,
2021
            });
2022
        }
2023

2024
        Self {
2025
            senders: Arc::new(Mutex::new(workers)),
3✔
2026
            queue,
2027
        }
2028
    }
2029

2030
    pub fn mark(&self, queue: &[SteelVal]) -> MarkAndSweepStats {
29✔
2031
        let guard = self.senders.lock().unwrap();
87✔
2032

2033
        for value in queue.iter() {
72,765✔
2034
            if let Some(p) = SteelValPointer::from_value(value) {
72,629✔
2035
                // Start with a local queue first?
2036
                self.queue.push(p);
2037
            }
2038
        }
2039

2040
        for worker in guard.iter() {
203✔
2041
            worker.sender.send(()).unwrap();
2042
        }
2043

2044
        let mut count = MarkAndSweepStats::default();
58✔
2045

2046
        for worker in guard.iter() {
203✔
2047
            count = count + worker.ack.recv().unwrap();
2048
        }
2049

2050
        count
29✔
2051
    }
2052
}
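
// A minimal standalone sketch (std-only, no crossbeam) of the fan-out/fan-in shape in
// `ParallelMarker::mark` above: work items go into one shared queue, every worker is
// woken with a unit message, drains what it can, and reports a partial count; the
// caller sums the acknowledgements. The worker count and `u64` work items are
// illustrative only.
#[test]
fn fan_out_fan_in_sketch() {
    use std::sync::mpsc;
    use std::sync::{Arc, Mutex};

    let shared: Arc<Mutex<Vec<u64>>> = Arc::new(Mutex::new((0..100).collect()));
    let (ack_tx, ack_rx) = mpsc::channel::<usize>();
    let mut wake_senders = Vec::new();

    for _ in 0..4 {
        let (wake_tx, wake_rx) = mpsc::channel::<()>();
        let queue = Arc::clone(&shared);
        let ack = ack_tx.clone();
        std::thread::spawn(move || {
            for () in wake_rx {
                let mut handled = 0;
                while let Some(_item) = queue.lock().unwrap().pop() {
                    handled += 1; // "mark" the item
                }
                ack.send(handled).unwrap();
            }
        });
        wake_senders.push(wake_tx);
    }

    for wake in &wake_senders {
        wake.send(()).unwrap(); // wake every worker
    }
    let total: usize = (0..wake_senders.len()).map(|_| ack_rx.recv().unwrap()).sum();
    assert_eq!(total, 100); // every item was handled exactly once
}
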
2053

2054
pub trait HeapAble: Clone + std::fmt::Debug + PartialEq + Eq {
2055
    fn empty() -> Self;
2056
}
2057
impl HeapAble for SteelVal {
2058
    fn empty() -> Self {
2,355,200✔
2059
        SteelVal::Void
2,355,200✔
2060
    }
2061
}
2062
impl HeapAble for Vec<SteelVal> {
2063
    fn empty() -> Self {
6,732,800✔
2064
        Self::new()
6,732,800✔
2065
    }
2066
}
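
// A minimal standalone sketch of what `HeapAble::empty` is for: when a slot is
// reclaimed, its payload is swapped out for a cheap placeholder so the old value is
// dropped right away, even though the slot object itself stays in the heap's vector
// for reuse. The `reclaim` helper and `String` payload are illustrative; `Default`
// plays the role of `HeapAble::empty` here.
fn reclaim<T: Default>(slot: &mut T) -> T {
    std::mem::replace(slot, T::default())
}

#[test]
fn reclaiming_a_slot_drops_the_old_payload() {
    let mut slot = String::from("a large allocation");
    let old = reclaim(&mut slot);
    assert_eq!(old, "a large allocation");
    assert!(slot.is_empty()); // the slot lives on, holding only a cheap placeholder
}
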
2067

2068
#[derive(Clone, Debug)]
2069
pub struct HeapRef<T: HeapAble> {
2070
    pub(crate) inner: WeakShared<MutContainer<HeapAllocated<T>>>,
2071
}
2072

2073
impl<T: HeapAble> HeapRef<T> {
2074
    #[inline(always)]
2075
    pub fn get(&self) -> T {
84,569,921✔
2076
        self.inner.upgrade().unwrap().read().value.clone()
253,709,763✔
2077
    }
2078

2079
    /// Get the value if the pointer is still valid.
2080
    /// If this is the only thing pointing at it, then we need to drop it.
2081
    pub(crate) fn maybe_get_from_weak(&self) -> Option<T> {
×
2082
        let inner = self.inner.upgrade()?;
×
2083

2084
        // Check if this thing is reachable? How?
2085
        if StandardShared::weak_count(&inner) == 1 {
×
2086
            // We can go ahead and nuke this since we're
2087
            // the only things pointing to it
2088
            let mut value = inner.write();
×
2089

2090
            if value.is_reachable() {
×
2091
                Some(value.value.clone())
×
2092
            } else {
2093
                value.reachable = false;
×
2094
                value.value = T::empty();
×
2095
                None
×
2096
            }
2097
        } else {
2098
            // Get the inner value on this, assuming we are calling
2099
            // this just from the heap ref. Anywhere else, we'll want
2100
            // to eliminate this?
2101
            Some(inner.read().value.clone())
×
2102
        }
2103
    }
2104

2105
    pub fn borrow<O>(&self, thunk: impl FnOnce(&T) -> O) -> O {
18,152✔
2106
        let value = self.inner.upgrade().unwrap();
72,608✔
2107
        let value = value.read();
54,456✔
2108
        thunk(&value.value)
18,152✔
2109
    }
2110

2111
    pub fn as_ptr_usize(&self) -> usize {
4,118✔
2112
        self.inner.as_ptr() as usize
4,118✔
2113
    }
2114

2115
    pub fn set(&mut self, value: T) -> T {
×
2116
        let inner = self.inner.upgrade().unwrap();
×
2117

2118
        let ret = { inner.read().value.clone() };
×
2119

2120
        inner.write().value = value;
×
2121
        ret
×
2122
    }
2123

2124
    pub fn set_and_return(&self, value: T) -> T {
8,023,673✔
2125
        let inner = self.inner.upgrade().unwrap();
32,094,692✔
2126

2127
        let mut guard = inner.write();
24,071,019✔
2128
        std::mem::replace(&mut guard.value, value)
24,071,019✔
2129
    }
2130

2131
    pub(crate) fn set_interior_mut(&self, value: T) -> T {
×
2132
        let inner = self.inner.upgrade().unwrap();
×
2133

2134
        let ret = { inner.read().value.clone() };
×
2135

2136
        inner.write().value = value;
×
2137
        ret
×
2138
    }
2139

2140
    pub(crate) fn strong_ptr(&self) -> StandardSharedMut<HeapAllocated<T>> {
107,511,809✔
2141
        self.inner.upgrade().unwrap()
322,535,427✔
2142
    }
2143

2144
    pub(crate) fn ptr_eq(&self, other: &Self) -> bool {
139,831✔
2145
        WeakShared::ptr_eq(&self.inner, &other.inner)
419,493✔
2146
    }
2147
}
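
// A minimal standalone sketch of the access pattern behind `get` and `set_and_return`
// above: upgrade the weak pointer, then read or swap the value behind the cell. The
// `Rc<RefCell<_>>` stand-in mirrors the non-sync configuration only by assumption;
// under the `sync` feature the shared pointer types differ.
#[test]
fn upgrade_then_swap_sketch() {
    use std::cell::RefCell;
    use std::rc::Rc;

    let strong = Rc::new(RefCell::new(String::from("old")));
    let weak = Rc::downgrade(&strong);

    // `get`: upgrade, then clone the current value out.
    let snapshot = weak.upgrade().unwrap().borrow().clone();
    assert_eq!(snapshot, "old");

    // `set_and_return`: upgrade, swap in the new value, and hand back the old one.
    let previous = {
        let inner = weak.upgrade().unwrap();
        let mut guard = inner.borrow_mut();
        std::mem::replace(&mut *guard, String::from("new"))
    };
    assert_eq!(previous, "old");
    assert_eq!(*strong.borrow(), "new");
}
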
2148

2149
#[derive(Clone, Debug, PartialEq, Eq)]
2150
pub struct HeapAllocated<T: Clone + std::fmt::Debug + PartialEq + Eq> {
2151
    pub(crate) reachable: bool,
2152
    pub(crate) finalizer: bool,
2153
    pub(crate) value: T,
2154
}
2155

2156
#[test]
2157
fn check_size_of_heap_allocated_value() {
2158
    println!("{:?}", std::mem::size_of::<HeapAllocated<SteelVal>>());
2159
}
2160

2161
impl<T: Clone + std::fmt::Debug + PartialEq + Eq> HeapAllocated<T> {
2162
    pub fn new(value: T) -> Self {
9,088,000✔
2163
        Self {
2164
            reachable: false,
2165
            finalizer: false,
2166
            value,
2167
        }
2168
    }
2169

UNCOV
2170
    pub fn is_reachable(&self) -> bool {
×
UNCOV
2171
        self.reachable
×
2172
    }
2173

2174
    pub(crate) fn mark_reachable(&mut self) {
6,575,137✔
2175
        self.reachable = true;
6,575,137✔
2176
    }
2177

2178
    pub(crate) fn reset(&mut self) {
20,761,600✔
2179
        self.reachable = false;
20,761,600✔
2180
    }
2181
}
2182

2183
pub struct MarkAndSweepContext<'a> {
2184
    queue: &'a mut Vec<SteelVal>,
2185
    stats: MarkAndSweepStats,
2186
}
2187

2188
impl<'a> MarkAndSweepContext<'a> {
2189
    pub(crate) fn mark_heap_reference(
1,237✔
2190
        &mut self,
2191
        heap_ref: &StandardSharedMut<HeapAllocated<SteelVal>>,
2192
    ) {
2193
        if heap_ref.read().is_reachable() {
1,237✔
2194
            return;
1,055✔
2195
        }
2196

2197
        {
2198
            heap_ref.write().mark_reachable();
×
2199
        }
2200

2201
        self.stats.memory_reached_count += 1;
×
2202

2203
        self.push_back(heap_ref.read().value.clone());
×
2204
    }
2205

2206
    // Visit the heap vector, mark it as visited!
2207
    pub(crate) fn mark_heap_vector(
×
2208
        &mut self,
2209
        heap_vector: &StandardSharedMut<HeapAllocated<Vec<SteelVal>>>,
2210
    ) {
2211
        if heap_vector.read().is_reachable() {
×
2212
            return;
×
2213
        }
2214

2215
        {
2216
            heap_vector.write().mark_reachable();
×
2217
        }
2218

2219
        self.stats.vector_reached_count += 1;
×
2220

2221
        for value in heap_vector.read().value.iter() {
×
2222
            self.push_back(value.clone());
×
2223
        }
2224
    }
2225
}
2226

2227
#[derive(Debug, Clone, Default)]
2228
struct MarkAndSweepStats {
2229
    object_count: usize,
2230
    memory_reached_count: usize,
2231
    vector_reached_count: usize,
2232
    keep_alive: Vec<SteelVal>,
2233
}
2234

2235
impl std::ops::Add for MarkAndSweepStats {
2236
    type Output = MarkAndSweepStats;
2237

2238
    fn add(mut self, rhs: Self) -> Self::Output {
145✔
2239
        self.object_count += rhs.object_count;
145✔
2240
        self.memory_reached_count += rhs.memory_reached_count;
145✔
2241
        self.vector_reached_count += rhs.vector_reached_count;
145✔
2242
        self
145✔
2243
    }
2244
}
2245

2246
#[cfg(feature = "sync")]
2247
pub struct MarkAndSweepContextRefQueue<'a> {
2248
    // Thread local queue + larger queue when it runs out?
2249
    // Try to push on to local queue first.
2250
    local_queue: &'a mut Vec<SteelValPointer>,
2251
    queue: &'a crossbeam_queue::SegQueue<SteelValPointer>,
2252
    stats: MarkAndSweepStats,
2253
}
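
// A minimal standalone sketch of the spill-over policy used by this context's
// `push_back` below: fill a fixed-capacity local buffer first and only fall back to
// the shared queue once the local one is full. The `push_with_spill` helper and the
// `Vec`/`VecDeque` pair stand in for the real local buffer and `SegQueue`.
fn push_with_spill(
    local: &mut Vec<u32>,
    shared: &mut std::collections::VecDeque<u32>,
    item: u32,
) {
    if local.len() == local.capacity() {
        shared.push_back(item); // local buffer is full: overflow to the shared queue
    } else {
        local.push(item);
    }
}

#[test]
fn overflow_goes_to_the_shared_queue() {
    let mut local = Vec::with_capacity(2);
    let mut shared = std::collections::VecDeque::new();
    for item in 0..3 {
        push_with_spill(&mut local, &mut shared, item);
    }
    assert_eq!(local.len() + shared.len(), 3); // nothing is lost
    assert!(local.len() <= local.capacity()); // the local buffer never reallocates
}
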
2254

2255
#[cfg(feature = "sync")]
2256
impl<'a> MarkAndSweepContextRefQueue<'a> {
2257
    pub(crate) fn mark_heap_reference(
376,648✔
2258
        &mut self,
2259
        heap_ref: &StandardSharedMut<HeapAllocated<SteelVal>>,
2260
    ) {
2261
        if heap_ref.read().is_reachable() {
376,648✔
2262
            return;
11,363✔
2263
        }
2264

2265
        {
2266
            heap_ref.write().mark_reachable();
×
2267
        }
2268

2269
        self.stats.memory_reached_count += 1;
×
2270

2271
        self.push_back(&heap_ref.read().value);
×
2272
    }
2273

2274
    // Visit the heap vector, mark it as visited!
2275
    pub(crate) fn mark_heap_vector(
36,182,453✔
2276
        &mut self,
2277
        heap_vector: &StandardSharedMut<HeapAllocated<Vec<SteelVal>>>,
2278
    ) {
2279
        if heap_vector.read().is_reachable() {
36,182,453✔
2280
            return;
29,972,783✔
2281
        }
2282

2283
        {
2284
            heap_vector.write().mark_reachable();
×
2285
        }
2286

2287
        self.stats.vector_reached_count += 1;
×
2288

2289
        for value in heap_vector.read().value.iter() {
31,953,869✔
2290
            self.push_back(value);
×
2291
        }
2292
    }
2293

2294
    pub(crate) fn save(&mut self, value: SteelVal) {
20✔
2295
        self.stats.keep_alive.push(value);
60✔
2296
    }
2297
}
2298

2299
impl<'a> BreadthFirstSearchSteelValVisitor for MarkAndSweepContext<'a> {
2300
    type Output = ();
2301

2302
    fn default_output(&mut self) -> Self::Output {}
×
2303

2304
    // TODO: Do this in parallel, if possible?
2305
    fn pop_front(&mut self) -> Option<SteelVal> {
×
2306
        self.queue.pop()
×
2307
    }
2308

2309
    fn push_back(&mut self, value: SteelVal) {
721,102✔
2310
        self.stats.object_count += 1;
721,102✔
2311

2312
        // TODO: Determine if all numbers should push back.
2313
        match &value {
721,102✔
2314
            SteelVal::BoolV(_)
×
2315
            | SteelVal::NumV(_)
×
2316
            | SteelVal::IntV(_)
×
2317
            | SteelVal::CharV(_)
×
2318
            | SteelVal::Void
×
2319
            | SteelVal::StringV(_)
×
2320
            | SteelVal::FuncV(_)
×
2321
            | SteelVal::SymbolV(_)
×
2322
            | SteelVal::FutureFunc(_)
×
2323
            | SteelVal::FutureV(_)
×
2324
            | SteelVal::BoxedFunction(_)
×
2325
            | SteelVal::MutFunc(_)
×
2326
            | SteelVal::BuiltIn(_)
×
2327
            | SteelVal::ByteVector(_)
×
2328
            | SteelVal::BigNum(_) => (),
697,287✔
2329
            _ => {
23,815✔
2330
                self.queue.push(value);
47,630✔
2331
            }
2332
        }
2333
    }
2334

2335
    fn visit_bytevector(&mut self, _bytevector: crate::rvals::SteelByteVector) -> Self::Output {}
×
2336
    fn visit_bignum(&mut self, _: Gc<BigInt>) -> Self::Output {}
×
2337
    fn visit_complex(&mut self, _: Gc<SteelComplex>) -> Self::Output {}
×
2338
    fn visit_bool(&mut self, _boolean: bool) -> Self::Output {}
×
2339
    fn visit_boxed_function(&mut self, _function: Gc<BoxedDynFunction>) -> Self::Output {}
×
2340
    // TODO: Revisit this when the boxed iterator is cleaned up
2341
    fn visit_boxed_iterator(&mut self, iterator: GcMut<OpaqueIterator>) -> Self::Output {
×
2342
        self.push_back(iterator.read().root.clone());
×
2343
    }
2344
    fn visit_boxed_value(&mut self, boxed_value: GcMut<SteelVal>) -> Self::Output {
×
2345
        self.push_back(boxed_value.read().clone());
×
2346
    }
2347

2348
    fn visit_builtin_function(&mut self, _function: BuiltInSignature) -> Self::Output {}
×
2349

2350
    fn visit_char(&mut self, _c: char) -> Self::Output {}
×
2351
    fn visit_closure(&mut self, closure: Gc<ByteCodeLambda>) -> Self::Output {
×
2352
        // for heap_ref in closure.heap_allocated.borrow().iter() {
2353
        //     self.mark_heap_reference(&heap_ref.strong_ptr())
2354
        // }
2355

2356
        for capture in closure.captures() {
×
2357
            self.push_back(capture.clone());
×
2358
        }
2359

2360
        if let Some(contract) = closure.get_contract_information().as_ref() {
×
2361
            self.push_back(contract.clone());
×
2362
        }
2363
    }
2364
    fn visit_continuation(&mut self, continuation: Continuation) -> Self::Output {
×
2365
        // TODO: Don't clone this here!
2366
        let continuation = (*continuation.inner.read()).clone();
×
2367

2368
        match continuation {
×
2369
            ContinuationMark::Closed(continuation) => {
×
2370
                for value in continuation.stack {
×
2371
                    self.push_back(value);
×
2372
                }
2373

2374
                for value in &continuation.current_frame.function.captures {
×
2375
                    self.push_back(value.clone());
×
2376
                }
2377

2378
                for frame in continuation.stack_frames {
×
2379
                    for value in &frame.function.captures {
×
2380
                        self.push_back(value.clone());
×
2381
                    }
2382

2383
                    // if let Some(handler) = &frame.handler {
2384
                    //     self.push_back((*handler.as_ref()).clone());
2385
                    // }
2386

2387
                    if let Some(handler) =
×
2388
                        frame.attachments.as_ref().and_then(|x| x.handler.clone())
×
2389
                    {
2390
                        self.push_back(handler);
×
2391
                    }
2392
                }
2393
            }
2394

2395
            ContinuationMark::Open(continuation) => {
×
2396
                for value in &continuation.current_stack_values {
×
2397
                    self.push_back(value.clone());
×
2398
                }
2399

2400
                for value in &continuation.current_frame.function.captures {
×
2401
                    self.push_back(value.clone());
×
2402
                }
2403
            }
2404
        }
2405
    }
2406
    // TODO: Come back to this
2407
    fn visit_custom_type(&mut self, custom_type: GcMut<Box<dyn CustomType>>) -> Self::Output {
×
2408
        custom_type.read().visit_children(self);
×
2409
    }
2410

2411
    fn visit_float(&mut self, _float: f64) -> Self::Output {}
×
2412

2413
    fn visit_function_pointer(&mut self, _ptr: FunctionSignature) -> Self::Output {}
×
2414

2415
    fn visit_future(&mut self, _future: Gc<FutureResult>) -> Self::Output {}
×
2416

2417
    fn visit_future_function(&mut self, _function: BoxedAsyncFunctionSignature) -> Self::Output {}
×
2418

2419
    fn visit_hash_map(&mut self, hashmap: SteelHashMap) -> Self::Output {
×
2420
        for (key, value) in hashmap.iter() {
×
2421
            self.push_back(key.clone());
×
2422
            self.push_back(value.clone());
×
2423
        }
2424
    }
2425

2426
    fn visit_hash_set(&mut self, hashset: SteelHashSet) -> Self::Output {
×
2427
        for value in hashset.iter() {
×
2428
            self.push_back(value.clone());
×
2429
        }
2430
    }
2431

2432
    fn visit_heap_allocated(&mut self, heap_ref: HeapRef<SteelVal>) -> Self::Output {
×
2433
        self.mark_heap_reference(&heap_ref.strong_ptr());
×
2434
    }
2435

2436
    fn visit_immutable_vector(&mut self, vector: SteelVector) -> Self::Output {
×
2437
        for value in vector.iter() {
×
2438
            self.push_back(value.clone());
×
2439
        }
2440
    }
2441
    fn visit_int(&mut self, _int: isize) -> Self::Output {}
×
2442
    fn visit_rational(&mut self, _: Rational32) -> Self::Output {}
×
2443
    fn visit_bigrational(&mut self, _: Gc<BigRational>) -> Self::Output {}
×
2444

2445
    fn visit_list(&mut self, list: List<SteelVal>) -> Self::Output {
×
2446
        for value in list {
×
2447
            self.push_back(value);
×
2448
        }
2449
    }
2450

2451
    fn visit_mutable_function(&mut self, _function: MutFunctionSignature) -> Self::Output {}
×
2452

2453
    fn visit_mutable_vector(&mut self, vector: HeapRef<Vec<SteelVal>>) -> Self::Output {
×
2454
        self.mark_heap_vector(&vector.strong_ptr())
×
2455
    }
2456

2457
    fn visit_port(&mut self, _port: SteelPort) -> Self::Output {}
×
2458

2459
    fn visit_reducer(&mut self, reducer: Gc<Reducer>) -> Self::Output {
×
2460
        match reducer.as_ref().clone() {
×
2461
            Reducer::ForEach(f) => self.push_back(f),
×
2462
            Reducer::Generic(rf) => {
×
2463
                self.push_back(rf.initial_value);
×
2464
                self.push_back(rf.function);
×
2465
            }
2466
            _ => {}
×
2467
        }
2468
    }
2469

2470
    // TODO: Revisit this
2471
    fn visit_reference_value(&mut self, _reference: Gc<OpaqueReference<'static>>) -> Self::Output {}
×
2472

2473
    fn visit_steel_struct(&mut self, steel_struct: Gc<UserDefinedStruct>) -> Self::Output {
×
2474
        for field in steel_struct.fields.iter() {
×
2475
            self.push_back(field.clone());
×
2476
        }
2477
    }
2478

2479
    fn visit_stream(&mut self, stream: Gc<LazyStream>) -> Self::Output {
×
2480
        self.push_back(stream.initial_value.clone());
×
2481
        self.push_back(stream.stream_thunk.clone());
×
2482
    }
2483

2484
    fn visit_string(&mut self, _string: SteelString) -> Self::Output {}
×
2485

2486
    fn visit_symbol(&mut self, _symbol: SteelString) -> Self::Output {}
×
2487

2488
    fn visit_syntax_object(&mut self, syntax_object: Gc<Syntax>) -> Self::Output {
×
2489
        if let Some(raw) = syntax_object.raw.clone() {
×
2490
            self.push_back(raw);
×
2491
        }
2492

2493
        self.push_back(syntax_object.syntax.clone());
×
2494
    }
2495

2496
    fn visit_transducer(&mut self, transducer: Gc<Transducer>) -> Self::Output {
×
2497
        for transducer in transducer.ops.iter() {
×
2498
            match transducer.clone() {
×
2499
                crate::values::transducers::Transducers::Map(m) => self.push_back(m),
×
2500
                crate::values::transducers::Transducers::Filter(v) => self.push_back(v),
×
2501
                crate::values::transducers::Transducers::Take(t) => self.push_back(t),
×
2502
                crate::values::transducers::Transducers::Drop(d) => self.push_back(d),
×
2503
                crate::values::transducers::Transducers::FlatMap(fm) => self.push_back(fm),
×
2504
                crate::values::transducers::Transducers::Flatten => {}
×
2505
                crate::values::transducers::Transducers::Window(w) => self.push_back(w),
×
2506
                crate::values::transducers::Transducers::TakeWhile(tw) => self.push_back(tw),
×
2507
                crate::values::transducers::Transducers::DropWhile(dw) => self.push_back(dw),
×
2508
                crate::values::transducers::Transducers::Extend(e) => self.push_back(e),
×
2509
                crate::values::transducers::Transducers::Cycle => {}
×
2510
                crate::values::transducers::Transducers::Enumerating => {}
×
2511
                crate::values::transducers::Transducers::Zipping(z) => self.push_back(z),
×
2512
                crate::values::transducers::Transducers::Interleaving(i) => self.push_back(i),
×
2513
            }
2514
        }
2515
    }
2516

2517
    fn visit_void(&mut self) -> Self::Output {}
×
2518

2519
    fn visit_pair(&mut self, pair: Gc<super::lists::Pair>) -> Self::Output {
×
2520
        self.push_back(pair.car());
×
2521
        self.push_back(pair.cdr());
×
2522
    }
2523
}
2524

2525
#[cfg(feature = "sync")]
2526
impl<'a> BreadthFirstSearchSteelValReferenceVisitor2<'a> for MarkAndSweepContextRefQueue<'a> {
2527
    type Output = ();
2528

2529
    fn default_output(&mut self) -> Self::Output {}
290✔
2530

2531
    fn pop_front(&mut self) -> Option<SteelValPointer> {
36,873,279✔
2532
        self.local_queue.pop().or_else(|| self.queue.pop())
134,729,463✔
2533
    }
2534

2535
    fn push_back(&mut self, value: &SteelVal) {
45,121,563✔
2536
        self.stats.object_count += 1;
45,121,563✔
2537

2538
        match value {
45,121,563✔
2539
            SteelVal::BoolV(_)
×
2540
            | SteelVal::NumV(_)
×
2541
            | SteelVal::IntV(_)
×
2542
            | SteelVal::CharV(_)
×
2543
            | SteelVal::Void
×
2544
            | SteelVal::StringV(_)
×
2545
            | SteelVal::FuncV(_)
×
2546
            | SteelVal::SymbolV(_)
×
2547
            | SteelVal::FutureFunc(_)
×
2548
            | SteelVal::FutureV(_)
×
2549
            | SteelVal::BoxedFunction(_)
×
2550
            | SteelVal::MutFunc(_)
×
2551
            | SteelVal::BuiltIn(_)
×
2552
            | SteelVal::ByteVector(_)
×
2553
            | SteelVal::BigNum(_) => (),
8,320,894✔
2554
            _ => {
×
2555
                if let Some(p) = SteelValPointer::from_value(value) {
73,601,174✔
2556
                    if self.local_queue.len() == self.local_queue.capacity() {
11,982,039✔
2557
                        self.queue.push(p);
23,964,078✔
2558
                    } else {
2559
                        self.local_queue.push(p);
24,818,466✔
2560
                    }
2561
                }
2562
            }
2563
        }
2564
    }
2565

2566
    // TODO: Revisit this when the boxed iterator is cleaned up
2567
    fn visit_boxed_iterator(&mut self, iterator: &MutContainer<OpaqueIterator>) -> Self::Output {
×
2568
        let guard = iterator.read();
×
2569
        self.save(guard.root.clone());
×
2570
        self.push_back(&guard.root);
×
2571
    }
2572

2573
    fn visit_boxed_value(&mut self, boxed_value: &MutContainer<SteelVal>) -> Self::Output {
×
2574
        let guard = boxed_value.read();
×
2575
        self.save(guard.clone());
×
2576
        self.push_back(&guard);
×
2577
    }
2578

2579
    fn visit_closure(&mut self, closure: &ByteCodeLambda) -> Self::Output {
53,706✔
2580
        for capture in closure.captures() {
118,329✔
2581
            self.push_back(capture);
×
2582
        }
2583

2584
        if let Some(contract) = closure.get_contract_information().as_ref() {
54,174✔
2585
            self.push_back(contract);
×
2586
        }
2587
    }
2588
    fn visit_continuation(
20✔
2589
        &mut self,
2590
        continuation: &MutContainer<ContinuationMark>,
2591
    ) -> Self::Output {
2592
        // TODO: Don't clone this here!
2593
        let continuation = continuation.read();
60✔
2594

2595
        match &(*continuation) {
20✔
2596
            ContinuationMark::Closed(continuation) => {
×
2597
                for value in &continuation.stack {
×
2598
                    self.save(value.clone());
×
2599
                    self.push_back(value);
×
2600
                }
2601

2602
                for value in &continuation.current_frame.function.captures {
×
2603
                    self.save(value.clone());
×
2604
                    self.push_back(value);
×
2605
                }
2606

2607
                for frame in &continuation.stack_frames {
×
2608
                    for value in &frame.function.captures {
×
2609
                        self.save(value.clone());
×
2610
                        self.push_back(value);
×
2611
                    }
2612

2613
                    if let Some(handler) =
×
2614
                        frame.attachments.as_ref().and_then(|x| x.handler.clone())
×
2615
                    {
2616
                        self.save(handler.clone());
×
2617
                        self.push_back(&handler);
×
2618
                    }
2619
                }
2620
            }
2621

2622
            ContinuationMark::Open(continuation) => {
20✔
2623
                for value in &continuation.current_stack_values {
60✔
2624
                    self.save(value.clone());
×
2625
                    self.push_back(value);
×
2626
                }
2627

2628
                for value in &continuation.current_frame.function.captures {
20✔
2629
                    self.save(value.clone());
×
2630
                    self.push_back(value);
×
2631
                }
2632
            }
2633
        }
2634
    }
2635
    // TODO: Come back to this?
2636
    fn visit_custom_type(
5,032✔
2637
        &mut self,
2638
        custom_type: &'a MutContainer<Box<dyn CustomType>>,
2639
    ) -> Self::Output {
2640
        // Use mark and sweep queue:
2641
        let mut queue = Vec::new();
10,064✔
2642
        let mut temporary_queue = MarkAndSweepContext {
2643
            queue: &mut queue,
5,032✔
2644
            stats: MarkAndSweepStats::default(),
5,032✔
2645
        };
2646

2647
        // Intercept the downstream ones, keep them alive by using
2648
        // a local queue first, and then spilling over to the larger one.
2649
        custom_type.read().visit_children(&mut temporary_queue);
10,064✔
2650

2651
        self.stats.memory_reached_count += temporary_queue.stats.memory_reached_count;
5,032✔
2652
        self.stats.object_count += temporary_queue.stats.object_count;
5,032✔
2653
        self.stats.vector_reached_count += temporary_queue.stats.vector_reached_count;
5,032✔
2654

2655
        for value in queue {
5,032✔
2656
            self.push_back(&value);
×
2657
            self.save(value);
×
2658
        }
2659
    }
2660

2661
    fn visit_hash_map(&mut self, hashmap: &crate::collections::persistent::HashMap<SteelVal, SteelVal>) -> Self::Output {
1,904✔
2662
        for (key, value) in hashmap.iter() {
20,722✔
2663
            self.push_back(key);
×
2664
            self.push_back(value);
×
2665
        }
2666
    }
2667

2668
    fn visit_hash_set(&mut self, hashset: &crate::collections::persistent::HashSet<SteelVal>) -> Self::Output {
86✔
2669
        for value in hashset.iter() {
946✔
2670
            self.push_back(value);
×
2671
        }
2672
    }
2673

2674
    fn visit_heap_allocated(&mut self, heap_ref: HeapRef<SteelVal>) -> Self::Output {
376,648✔
2675
        self.mark_heap_reference(&heap_ref.strong_ptr());
1,129,944✔
2676
    }
2677

2678
    fn visit_immutable_vector(&mut self, vector: &Vector<SteelVal>) -> Self::Output {
×
2679
        for value in vector.iter() {
×
2680
            self.push_back(value);
×
2681
        }
2682
    }
2683
    fn visit_list(&mut self, list: super::lists::CellPointer<SteelVal>) -> Self::Output {
63,834✔
2684
        unsafe {
2685
            super::lists::List::<SteelVal>::call_from_raw(list, |lst| {
191,502✔
2686
                for value in lst {
24,812,938✔
2687
                    self.push_back(value);
×
2688
                }
2689
            })
2690
        }
2691
    }
2692

2693
    fn visit_mutable_vector(&mut self, vector: HeapRef<Vec<SteelVal>>) -> Self::Output {
36,182,453✔
2694
        self.mark_heap_vector(&vector.strong_ptr())
108,547,359✔
2695
    }
2696

2697
    fn visit_reducer(&mut self, reducer: &Reducer) -> Self::Output {
×
2698
        match reducer {
×
2699
            Reducer::ForEach(f) => self.push_back(f),
×
2700
            Reducer::Generic(rf) => {
×
2701
                self.push_back(&rf.initial_value);
×
2702
                self.push_back(&rf.function);
×
2703
            }
2704
            _ => {}
×
2705
        }
2706
    }
2707

2708
    fn visit_steel_struct(&mut self, steel_struct: &UserDefinedStruct) -> Self::Output {
189,254✔
2709
        for field in steel_struct.fields.iter() {
759,964✔
2710
            self.push_back(field);
×
2711
        }
2712
    }
2713

2714
    fn visit_stream(&mut self, stream: &LazyStream) -> Self::Output {
172✔
2715
        self.push_back(&stream.initial_value);
516✔
2716
        self.push_back(&stream.stream_thunk);
516✔
2717
    }
2718

2719
    fn visit_syntax_object(&mut self, syntax_object: &Syntax) -> Self::Output {
×
2720
        if let Some(raw) = &syntax_object.raw {
×
2721
            self.push_back(raw);
×
2722
        }
2723

2724
        self.push_back(&syntax_object.syntax);
×
2725
    }
2726

2727
    fn visit_transducer(&mut self, transducer: &Transducer) -> Self::Output {
×
2728
        for transducer in transducer.ops.iter() {
×
2729
            match &transducer {
×
2730
                crate::values::transducers::Transducers::Map(m) => self.push_back(m),
×
2731
                crate::values::transducers::Transducers::Filter(v) => self.push_back(v),
×
2732
                crate::values::transducers::Transducers::Take(t) => self.push_back(t),
×
2733
                crate::values::transducers::Transducers::Drop(d) => self.push_back(d),
×
2734
                crate::values::transducers::Transducers::FlatMap(fm) => self.push_back(fm),
×
2735
                crate::values::transducers::Transducers::Flatten => {}
×
2736
                crate::values::transducers::Transducers::Window(w) => self.push_back(w),
×
2737
                crate::values::transducers::Transducers::TakeWhile(tw) => self.push_back(tw),
×
2738
                crate::values::transducers::Transducers::DropWhile(dw) => self.push_back(dw),
×
2739
                crate::values::transducers::Transducers::Extend(e) => self.push_back(e),
×
2740
                crate::values::transducers::Transducers::Cycle => {}
×
2741
                crate::values::transducers::Transducers::Enumerating => {}
×
2742
                crate::values::transducers::Transducers::Zipping(z) => self.push_back(z),
×
2743
                crate::values::transducers::Transducers::Interleaving(i) => self.push_back(i),
×
2744
            }
2745
        }
2746
    }
2747

2748
    fn visit_pair(&mut self, pair: &super::lists::Pair) -> Self::Output {
25✔
2749
        self.push_back(pair.car_ref());
75✔
2750
        self.push_back(pair.cdr_ref());
75✔
2751
    }
2752
}