• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

djeedai / bevy_hanabi / 11638278810

02 Nov 2024 12:30AM UTC coverage: 57.846% (-0.08%) from 57.93%
11638278810

push

github

web-flow
Improve debugging (#396)

- Several asset-related errors now emit the asset name.
- `BufferTable` now emits a table name in its logs.
- Improved `ExprError` error messages by implementing `Display` for
  `ValueType`.
- `SetAttributeModifier::eval()` now attempts to check that the type of the
  value emitted by the expression, if available, matches the type of the
  attribute being assigned. This prevents generating invalid shader code.

21 of 53 new or added lines in 5 files covered. (39.62%)

1 existing line in 1 file now uncovered.

3561 of 6156 relevant lines covered (57.85%)

22.92 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

61.49
/src/render/effect_cache.rs
1
use std::{
2
    cmp::Ordering,
3
    num::NonZeroU64,
4
    ops::Range,
5
    sync::atomic::{AtomicU64, Ordering as AtomicOrdering},
6
};
7

8
use bevy::{
9
    asset::Handle,
10
    ecs::system::Resource,
11
    log::{trace, warn},
12
    render::{render_resource::*, renderer::RenderDevice},
13
    utils::HashMap,
14
};
15
use bytemuck::cast_slice_mut;
16

17
use super::buffer_table::BufferTableId;
18
use crate::{
19
    asset::EffectAsset,
20
    render::{
21
        GpuDispatchIndirect, GpuParticleGroup, GpuSpawnerParams, LayoutFlags, StorageType as _,
22
    },
23
    ParticleLayout, PropertyLayout,
24
};
25

26
/// Describes all particle groups' slices of particles in the particle buffer
/// for a single effect.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct EffectSlices {
    /// Slices into the underlying BufferVec of the group.
    ///
    /// The length of this vector is the number of particle groups plus one.
    /// The range of the first group is (slices[0]..slices[1]), the index of
    /// the second group is (slices[1]..slices[2]), etc.
    ///
    /// This is measured in items, not bytes.
    pub slices: Vec<u32>,
    /// The index of the buffer. Primary sort key of the [`Ord`] implementation
    /// (ties broken by the first slice boundary).
    pub buffer_index: u32,
    /// Particle layout of the slice.
    pub particle_layout: ParticleLayout,
}
43

44
impl Ord for EffectSlices {
45
    fn cmp(&self, other: &Self) -> Ordering {
8✔
46
        match self.buffer_index.cmp(&other.buffer_index) {
8✔
47
            Ordering::Equal => self.slices.first().cmp(&other.slices.first()),
4✔
48
            ord => ord,
4✔
49
        }
50
    }
51
}
52

53
impl PartialOrd for EffectSlices {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        // Delegate to the total order defined by `Ord`, as is conventional
        // when both traits are implemented.
        Some(self.cmp(other))
    }
}
58

59
/// Describes all particle groups' slices of particles in the particle buffer
/// for a single effect, as well as the [`DispatchBufferIndices`].
pub struct SlicesRef {
    /// Boundaries of the group slices, in item count. Presumably one entry per
    /// particle group plus one, like [`EffectSlices::slices`] — TODO confirm
    /// with callers.
    pub ranges: Vec<u32>,
    /// Size of a single item in the slice. Currently equal to the unique size
    /// of all items in an [`EffectBuffer`] (no mixed size supported in same
    /// buffer), so cached only for convenience.
    particle_layout: ParticleLayout,
    /// Indices of this effect's entries in the indirect dispatch buffers.
    pub dispatch_buffer_indices: DispatchBufferIndices,
}
69

70
/// A reference to a slice allocated inside an [`EffectBuffer`].
///
/// Obtained from [`EffectBuffer::allocate_slice()`] and released with
/// [`EffectBuffer::free_slice()`].
#[derive(Debug, Default, Clone, PartialEq, Eq)]
pub struct SliceRef {
    /// Range into an [`EffectBuffer`], in item count.
    range: Range<u32>,
    /// Size of a single item in the slice. Currently equal to the unique size
    /// of all items in an [`EffectBuffer`] (no mixed size supported in same
    /// buffer), so cached only for convenience.
    particle_layout: ParticleLayout,
}
80

81
impl SliceRef {
82
    /// The length of the slice, in number of items.
83
    #[allow(dead_code)]
84
    pub fn len(&self) -> u32 {
8✔
85
        self.range.end - self.range.start
8✔
86
    }
87

88
    /// The size in bytes of the slice.
89
    #[allow(dead_code)]
90
    pub fn byte_size(&self) -> usize {
4✔
91
        (self.len() as usize) * (self.particle_layout.min_binding_size().get() as usize)
4✔
92
    }
93
}
94

95
/// Storage for a single kind of effects, sharing the same buffer(s).
///
/// Currently only accepts a single unique item size (particle size), fixed at
/// creation. Also currently only accepts instances of a unique effect asset,
/// although this restriction is purely for convenience and may be relaxed in
/// the future to improve batching.
#[derive(Debug)]
pub struct EffectBuffer {
    /// GPU buffer holding all particles for the entire group of effects.
    particle_buffer: Buffer,
    /// GPU buffer holding the indirection indices for the entire group of
    /// effects. This is a triple buffer containing:
    /// - the ping-pong alive particles and render indirect indices at offsets 0
    ///   and 1
    /// - the dead particle indices at offset 2
    indirect_buffer: Buffer,
    /// GPU buffer holding the properties of the effect(s), if any. This is
    /// always `None` if the property layout is empty.
    properties_buffer: Option<Buffer>,
    /// Layout of particles.
    particle_layout: ParticleLayout,
    /// Layout of properties of the effect(s), if using properties.
    property_layout: PropertyLayout,
    /// Flags describing the layout of the effect(s) stored in this buffer.
    layout_flags: LayoutFlags,
    /// Bind group layout of the particle buffers for the init and update
    /// (simulation) compute passes.
    particles_buffer_layout_sim: BindGroupLayout,
    /// Bind group layout of the particle buffers for the rendering (vertex)
    /// stage, which additionally binds the indirect dispatch buffer.
    particles_buffer_layout_with_dispatch: BindGroupLayout,
    /// Total buffer capacity, in number of particles.
    capacity: u32,
    /// Used buffer size, in number of particles, either from allocated slices
    /// or from slices in the free list.
    used_size: u32,
    /// Array of free slices for new allocations, sorted in increasing order in
    /// the buffer.
    free_slices: Vec<Range<u32>>,
    // Compute pipeline for the effect update pass.
    // pub compute_pipeline: ComputePipeline, // FIXME - ComputePipelineId, to avoid duplicating per
    // instance!
    /// Handle of all effects common in this buffer. TODO - replace with
    /// compatible layout.
    asset: Handle<EffectAsset>,
    /// Bind group for the per-buffer data (group @1) of the init and update
    /// passes.
    simulate_bind_group: Option<BindGroup>,
}
142

143
/// Outcome of freeing a slice, reporting whether the owning buffer is still in
/// use afterwards.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum BufferState {
    /// The buffer still contains allocated slices and must be kept alive.
    Used,
    /// The buffer contains no allocated slice anymore, and can be freed.
    Free,
}
148

149
impl EffectBuffer {
150
    /// Minimum buffer capacity to allocate, in number of particles.
    /// Any smaller requested capacity is rounded up to this value; see
    /// [`EffectBuffer::new()`].
    // FIXME - Batching is broken due to binding a single GpuSpawnerParam instead of
    // N, and inability for a particle index to tell which Spawner it should
    // use. Setting this to 1 effectively ensures that all new buffers just fit
    // the effect, so batching never occurs.
    pub const MIN_CAPACITY: u32 = 1; // 65536; // at least 64k particles
156

157
    /// Create a new group and a GPU buffer to back it up.
    ///
    /// The buffer cannot contain less than [`MIN_CAPACITY`] particles. If
    /// `capacity` is smaller, it's rounded up to [`MIN_CAPACITY`].
    ///
    /// [`MIN_CAPACITY`]: EffectBuffer::MIN_CAPACITY
    pub fn new(
        asset: Handle<EffectAsset>,
        capacity: u32,
        particle_layout: ParticleLayout,
        property_layout: PropertyLayout,
        layout_flags: LayoutFlags,
        render_device: &RenderDevice,
        label: Option<&str>,
    ) -> Self {
        trace!(
            "EffectBuffer::new(capacity={}, particle_layout={:?}, property_layout={:?}, layout_flags={:?}, item_size={}B, properties_size={}B)",
            capacity,
            particle_layout,
            property_layout,
            layout_flags,
            particle_layout.min_binding_size().get(),
            if property_layout.is_empty() { 0 } else { property_layout.min_binding_size().get() },
        );

        // Round the capacity up to the minimum, then sanity-check it.
        let capacity = capacity.max(Self::MIN_CAPACITY);
        debug_assert!(
            capacity > 0,
            "Attempted to create a zero-sized effect buffer."
        );

        // Particle storage buffer, sized from the particle layout.
        let particle_capacity_bytes: BufferAddress =
            capacity as u64 * particle_layout.min_binding_size().get();
        let particle_buffer = render_device.create_buffer(&BufferDescriptor {
            label,
            size: particle_capacity_bytes,
            usage: BufferUsages::COPY_DST | BufferUsages::STORAGE,
            mapped_at_creation: false,
        });

        // One u32 index per particle, per sub-buffer (see below).
        let capacity_bytes: BufferAddress = capacity as u64 * 4;

        let indirect_label = if let Some(label) = label {
            format!("{label}_indirect")
        } else {
            "hanabi:buffer:effect_indirect".to_owned()
        };
        // Triple buffer: ping-pong indices at offsets 0 and 1, dead list at
        // offset 2. Mapped at creation so the dead list can be seeded below.
        let indirect_buffer = render_device.create_buffer(&BufferDescriptor {
            label: Some(&indirect_label),
            size: capacity_bytes * 3, // ping-pong + deadlist
            usage: BufferUsages::COPY_DST | BufferUsages::STORAGE,
            mapped_at_creation: true,
        });
        // Set content
        {
            // Scope get_mapped_range_mut() to force a drop before unmap()
            {
                let slice = &mut indirect_buffer.slice(..).get_mapped_range_mut()
                    [..capacity_bytes as usize * 3];
                let slice: &mut [u32] = cast_slice_mut(slice);
                // Seed the dead list (third u32 of each entry): entry `i`
                // holds the particle index `capacity - 1 - i`.
                for index in 0..capacity {
                    slice[3 * index as usize + 2] = capacity - 1 - index;
                }
            }
            indirect_buffer.unmap();
        }

        // Allocate a properties buffer only when the effect has properties.
        let properties_buffer = if property_layout.is_empty() {
            None
        } else {
            let properties_label = if let Some(label) = label {
                format!("{}_properties", label)
            } else {
                "hanabi:buffer:effect_properties".to_owned()
            };
            let size = property_layout.min_binding_size().get(); // TODO: * num_effects_in_buffer (once batching works again)
            let properties_buffer = render_device.create_buffer(&BufferDescriptor {
                label: Some(&properties_label),
                size,
                usage: BufferUsages::COPY_DST | BufferUsages::STORAGE,
                mapped_at_creation: false,
            });
            Some(properties_buffer)
        };

        // TODO - Cache particle_layout and associated bind group layout, instead of
        // creating one bind group layout per buffer using that layout...
        let particle_group_size = GpuParticleGroup::aligned_size(
            render_device.limits().min_storage_buffer_offset_alignment,
        );
        let mut entries = vec![
            // @binding(0) var<storage, read_write> particle_buffer : ParticleBuffer
            BindGroupLayoutEntry {
                binding: 0,
                visibility: ShaderStages::COMPUTE,
                ty: BindingType::Buffer {
                    ty: BufferBindingType::Storage { read_only: false },
                    has_dynamic_offset: false,
                    min_binding_size: Some(particle_layout.min_binding_size()),
                },
                count: None,
            },
            // @binding(1) var<storage, read_write> indirect_buffer : IndirectBuffer
            BindGroupLayoutEntry {
                binding: 1,
                visibility: ShaderStages::COMPUTE,
                ty: BindingType::Buffer {
                    ty: BufferBindingType::Storage { read_only: false },
                    has_dynamic_offset: false,
                    min_binding_size: BufferSize::new(12),
                },
                count: None,
            },
            // @binding(2) var<storage, read> particle_groups : array<ParticleGroup>
            BindGroupLayoutEntry {
                binding: 2,
                visibility: ShaderStages::COMPUTE,
                ty: BindingType::Buffer {
                    ty: BufferBindingType::Storage { read_only: true },
                    has_dynamic_offset: false,
                    // Despite no dynamic offset, we do bind a non-zero offset sometimes,
                    // so keep this aligned
                    min_binding_size: Some(particle_group_size),
                },
                count: None,
            },
        ];
        // @binding(3) is only present when the effect uses properties.
        if !property_layout.is_empty() {
            entries.push(BindGroupLayoutEntry {
                binding: 3,
                visibility: ShaderStages::COMPUTE,
                ty: BindingType::Buffer {
                    ty: BufferBindingType::Storage { read_only: true },
                    has_dynamic_offset: false, // TODO
                    min_binding_size: Some(property_layout.min_binding_size()),
                },
                count: None,
            });
        }
        let label = "hanabi:sim_particles_buffer_layout";
        trace!(
            "Creating particle bind group layout '{}' for simulation passes with {} entries.",
            label,
            entries.len(),
        );
        let particles_buffer_layout_sim = render_device.create_bind_group_layout(label, &entries);

        // Create the render layout.
        let dispatch_indirect_size = GpuDispatchIndirect::aligned_size(
            render_device.limits().min_storage_buffer_offset_alignment,
        );
        let mut entries = vec![
            BindGroupLayoutEntry {
                binding: 0,
                visibility: ShaderStages::VERTEX,
                ty: BindingType::Buffer {
                    ty: BufferBindingType::Storage { read_only: true },
                    has_dynamic_offset: false,
                    min_binding_size: Some(particle_layout.min_binding_size()),
                },
                count: None,
            },
            BindGroupLayoutEntry {
                binding: 1,
                visibility: ShaderStages::VERTEX,
                ty: BindingType::Buffer {
                    ty: BufferBindingType::Storage { read_only: true },
                    has_dynamic_offset: false,
                    min_binding_size: BufferSize::new(std::mem::size_of::<u32>() as u64),
                },
                count: None,
            },
            BindGroupLayoutEntry {
                binding: 2,
                visibility: ShaderStages::VERTEX,
                ty: BindingType::Buffer {
                    ty: BufferBindingType::Storage { read_only: true },
                    has_dynamic_offset: true,
                    min_binding_size: Some(dispatch_indirect_size),
                },
                count: None,
            },
        ];
        // Local-space simulation additionally binds the spawner parameters in
        // the vertex stage.
        if layout_flags.contains(LayoutFlags::LOCAL_SPACE_SIMULATION) {
            entries.push(BindGroupLayoutEntry {
                binding: 3,
                visibility: ShaderStages::VERTEX,
                ty: BindingType::Buffer {
                    ty: BufferBindingType::Storage { read_only: true },
                    has_dynamic_offset: true,
                    min_binding_size: Some(GpuSpawnerParams::min_size()), // TODO - array
                },
                count: None,
            });
        }
        trace!(
            "Creating render layout with {} entries (flags: {:?})",
            entries.len(),
            layout_flags
        );
        let particles_buffer_layout_with_dispatch =
            render_device.create_bind_group_layout("hanabi:buffer_layout_render", &entries);

        Self {
            particle_buffer,
            indirect_buffer,
            properties_buffer,
            particle_layout,
            property_layout,
            layout_flags,
            particles_buffer_layout_sim,
            particles_buffer_layout_with_dispatch,
            capacity,
            used_size: 0,
            free_slices: vec![],
            asset,
            simulate_bind_group: None,
        }
    }
376

377
    /// The GPU buffer holding the effect properties, if any.
    pub fn properties_buffer(&self) -> Option<&Buffer> {
        self.properties_buffer.as_ref()
    }

    /// The layout of a single particle stored in this buffer.
    pub fn particle_layout(&self) -> &ParticleLayout {
        &self.particle_layout
    }

    /// The layout of the effect properties, if the effect uses properties.
    pub fn property_layout(&self) -> &PropertyLayout {
        &self.property_layout
    }

    /// The layout flags of the effect(s) stored in this buffer.
    pub fn layout_flags(&self) -> LayoutFlags {
        self.layout_flags
    }

    /// Bind group layout used by the init and update (simulation) passes.
    pub fn particle_layout_bind_group_sim(&self) -> &BindGroupLayout {
        &self.particles_buffer_layout_sim
    }

    /// Bind group layout used for rendering, which additionally contains the
    /// indirect dispatch buffer entry.
    pub fn particle_layout_bind_group_with_dispatch(&self) -> &BindGroupLayout {
        &self.particles_buffer_layout_with_dispatch
    }
400

401
    /// Return a binding for the entire particle buffer.
402
    pub fn max_binding(&self) -> BindingResource {
×
403
        let capacity_bytes = self.capacity as u64 * self.particle_layout.min_binding_size().get();
×
404
        BindingResource::Buffer(BufferBinding {
×
405
            buffer: &self.particle_buffer,
×
406
            offset: 0,
×
407
            size: Some(NonZeroU64::new(capacity_bytes).unwrap()),
×
408
        })
409
    }
410

411
    /// Return a binding of the buffer for a starting range of a given size (in
    /// bytes).
    ///
    /// # Panics
    ///
    /// Panics if `size` is zero (`NonZeroU64::new()` returns `None`).
    #[allow(dead_code)]
    pub fn binding(&self, size: u32) -> BindingResource {
        BindingResource::Buffer(BufferBinding {
            buffer: &self.particle_buffer,
            offset: 0,
            size: Some(NonZeroU64::new(size as u64).unwrap()),
        })
    }
421

422
    /// Return a binding for the entire indirect buffer associated with the
    /// current effect buffer.
    pub fn indirect_max_binding(&self) -> BindingResource {
        // 4 bytes (one u32) per particle, times 3 sub-buffers (ping-pong
        // indices + dead list).
        let capacity_bytes = self.capacity as u64 * 4;
        BindingResource::Buffer(BufferBinding {
            buffer: &self.indirect_buffer,
            offset: 0,
            size: Some(NonZeroU64::new(capacity_bytes * 3).unwrap()),
        })
    }
432

433
    /// Return a binding for the entire properties buffer associated with the
    /// current effect buffer, if any.
    ///
    /// Returns `None` when the effect uses no properties (the properties
    /// buffer is only allocated for a non-empty property layout).
    pub fn properties_max_binding(&self) -> Option<BindingResource> {
        self.properties_buffer.as_ref().map(|buffer| {
            // The buffer was sized for a single effect's properties.
            let capacity_bytes = self.property_layout.min_binding_size().get();
            BindingResource::Buffer(BufferBinding {
                buffer,
                offset: 0,
                size: Some(NonZeroU64::new(capacity_bytes).unwrap()),
            })
        })
    }
445

446
    /// Create the bind group for the init and update passes if needed.
    ///
    /// The `buffer_index` must be the index of the current [`EffectBuffer`]
    /// inside the [`EffectCache`]. The `group_binding` is the binding resource
    /// for the particle groups of this buffer.
    ///
    /// This is a no-op if the bind group was already created; retrieve the
    /// cached bind group with [`sim_bind_group()`].
    ///
    /// [`sim_bind_group()`]: EffectBuffer::sim_bind_group
    pub fn create_sim_bind_group(
        &mut self,
        buffer_index: u32,
        render_device: &RenderDevice,
        group_binding: BufferBinding,
    ) {
        // Lazy creation: keep any previously created bind group.
        if self.simulate_bind_group.is_some() {
            return;
        }

        let layout = self.particle_layout_bind_group_sim();
        let label = format!("hanabi:bind_group_sim_batch{}", buffer_index);
        // Binding numbers mirror the simulation layout created in new().
        let mut bindings = vec![
            BindGroupEntry {
                binding: 0,
                resource: self.max_binding(),
            },
            BindGroupEntry {
                binding: 1,
                resource: self.indirect_max_binding(),
            },
            BindGroupEntry {
                binding: 2,
                resource: BindingResource::Buffer(group_binding),
            },
        ];
        // @binding(3) is only present when the effect uses properties.
        if let Some(property_binding) = self.properties_max_binding() {
            bindings.push(BindGroupEntry {
                binding: 3,
                resource: property_binding,
            });
        }
        trace!(
            "Create simulate bind group '{}' with {} entries",
            label,
            bindings.len()
        );
        let bind_group = render_device.create_bind_group(Some(&label[..]), layout, &bindings);
        self.simulate_bind_group = Some(bind_group);
    }
491

492
    /// Return the cached bind group for the init and update passes.
    ///
    /// This is the per-buffer bind group at binding @1 which binds all
    /// per-buffer resources shared by all effect instances batched in a single
    /// buffer.
    ///
    /// Returns `None` until [`create_sim_bind_group()`] has been called.
    ///
    /// [`create_sim_bind_group()`]: EffectBuffer::create_sim_bind_group
    pub fn sim_bind_group(&self) -> Option<&BindGroup> {
        self.simulate_bind_group.as_ref()
    }
500

501
    /// Try to recycle a free slice to store `size` items.
502
    fn pop_free_slice(&mut self, size: u32) -> Option<Range<u32>> {
17✔
503
        if self.free_slices.is_empty() {
17✔
504
            return None;
14✔
505
        }
506

507
        struct BestRange {
508
            range: Range<u32>,
509
            capacity: u32,
510
            index: usize,
511
        }
512

513
        let mut result = BestRange {
514
            range: 0..0, // marker for "invalid"
515
            capacity: u32::MAX,
516
            index: usize::MAX,
517
        };
518
        for (index, slice) in self.free_slices.iter().enumerate() {
3✔
519
            let capacity = slice.end - slice.start;
3✔
520
            if size > capacity {
3✔
521
                continue;
1✔
522
            }
523
            if capacity < result.capacity {
4✔
524
                result = BestRange {
2✔
525
                    range: slice.clone(),
2✔
526
                    capacity,
2✔
527
                    index,
2✔
528
                };
529
            }
530
        }
531
        if !result.range.is_empty() {
3✔
532
            if result.capacity > size {
2✔
533
                // split
534
                let start = result.range.start;
1✔
535
                let used_end = start + size;
1✔
536
                let free_end = result.range.end;
1✔
537
                let range = start..used_end;
1✔
538
                self.free_slices[result.index] = used_end..free_end;
1✔
539
                Some(range)
1✔
540
            } else {
541
                // recycle entirely
542
                self.free_slices.remove(result.index);
1✔
543
                Some(result.range)
1✔
544
            }
545
        } else {
546
            None
1✔
547
        }
548
    }
549

550
    /// Allocate a new slice in the buffer to store the particles of a single
    /// effect.
    ///
    /// Returns `None` if `capacity` items cannot fit, either because the total
    /// buffer capacity is too small or because the remaining space is
    /// exhausted.
    pub fn allocate_slice(
        &mut self,
        capacity: u32,
        particle_layout: &ParticleLayout,
    ) -> Option<SliceRef> {
        trace!(
            "EffectBuffer::allocate_slice: capacity={} particle_layout={:?} item_size={}",
            capacity,
            particle_layout,
            particle_layout.min_binding_size().get(),
        );

        // A slice larger than the entire buffer can never fit.
        if capacity > self.capacity {
            return None;
        }

        // First try to recycle a hole from the free list; otherwise grow the
        // used range at the end of the buffer.
        let range = if let Some(range) = self.pop_free_slice(capacity) {
            range
        } else {
            let new_size = self.used_size.checked_add(capacity).unwrap();
            if new_size <= self.capacity {
                let range = self.used_size..new_size;
                self.used_size = new_size;
                range
            } else {
                // NOTE(review): given the early `capacity > self.capacity`
                // check above, `used_size == 0` implies the allocation
                // succeeds, so this warning looks unreachable — confirm.
                if self.used_size == 0 {
                    warn!(
                        "Cannot allocate slice of size {} in effect cache buffer of capacity {}.",
                        capacity, self.capacity
                    );
                }
                return None;
            }
        };

        Some(SliceRef {
            range,
            particle_layout: particle_layout.clone(),
        })
    }
592

593
    /// Free an allocated slice, and if this was the last allocated slice also
    /// free the buffer.
    ///
    /// Returns [`BufferState::Free`] if the buffer no longer contains any
    /// allocated slice, and [`BufferState::Used`] otherwise.
    pub fn free_slice(&mut self, slice: SliceRef) -> BufferState {
        // If slice is at the end of the buffer, reduce total used size
        if slice.range.end == self.used_size {
            self.used_size = slice.range.start;
            // Check other free slices to further reduce used size and drain the free slice
            // list. The free list is sorted by increasing start offset, so only
            // its tail can be adjacent to the shrinking used range.
            while let Some(free_slice) = self.free_slices.last() {
                if free_slice.end == self.used_size {
                    self.used_size = free_slice.start;
                    self.free_slices.pop();
                } else {
                    break;
                }
            }
            if self.used_size == 0 {
                assert!(self.free_slices.is_empty());
                // The buffer is not used anymore, free it too
                BufferState::Free
            } else {
                // There are still some slices used, the last one of which ends at
                // self.used_size
                BufferState::Used
            }
        } else {
            // Free slice is not at end; insert it in free list
            let range = slice.range;
            // Keep the free list sorted by start offset. An `Equal` result
            // means the candidate overlaps `range`, i.e. a double free.
            match self.free_slices.binary_search_by(|s| {
                if s.end <= range.start {
                    Ordering::Less
                } else if s.start >= range.end {
                    Ordering::Greater
                } else {
                    Ordering::Equal
                }
            }) {
                Ok(_) => warn!("Range {:?} already present in free list!", range),
                Err(index) => self.free_slices.insert(index, range),
            }
            BufferState::Used
        }
    }
636

637
    /// Check whether effect instances of the given asset can share this
    /// buffer.
    pub fn is_compatible(&self, handle: &Handle<EffectAsset>) -> bool {
        // TODO - replace with check particle layout is compatible to allow tighter
        // packing in less buffers, and update in the less dispatch calls
        self.asset == *handle
    }
642
}
643

644
/// Identifier referencing an effect cached in an internal effect cache.
///
/// Note: the `Default` value is `0`, which is a *valid* identifier; use
/// [`EffectCacheId::INVALID`] as the explicit "no effect" marker.
#[derive(Debug, Default, Clone, Copy, PartialEq, Eq, Hash)]
pub(crate) struct EffectCacheId(/* TEMP */ pub(crate) u64);
647

648
impl EffectCacheId {
    /// An invalid handle, corresponding to nothing.
    pub const INVALID: Self = Self(u64::MAX);

    /// Generate a new valid effect cache identifier.
    ///
    /// Identifiers are drawn from a process-wide atomic counter, so this is
    /// thread-safe and yields a distinct value on each call (until the u64
    /// counter wraps).
    pub fn new() -> Self {
        static NEXT_EFFECT_CACHE_ID: AtomicU64 = AtomicU64::new(0);
        Self(NEXT_EFFECT_CACHE_ID.fetch_add(1, AtomicOrdering::Relaxed))
    }

    /// Check if the ID is valid.
    #[allow(dead_code)]
    pub fn is_valid(&self) -> bool {
        *self != Self::INVALID
    }
}
664

665
/// Cache for effect instances sharing common GPU data structures.
#[derive(Resource)]
pub struct EffectCache {
    /// Render device the GPU resources (buffers) are allocated from.
    device: RenderDevice,
    /// Collection of effect buffers managed by this cache. Some buffers might
    /// be `None` if the entry is not used. Since the buffers are referenced
    /// by index, we cannot move them once they're allocated.
    buffers: Vec<Option<EffectBuffer>>,
    /// Map from an effect cache ID to the cached effect data (buffer index,
    /// group slices, and group evaluation order).
    effects: HashMap<EffectCacheId, CachedEffect>,
}
677

678
/// Stores various data, including the buffer index and slice boundaries within
/// the buffer for all groups in a single effect.
pub(crate) struct CachedEffect {
    /// The index of the buffer. This indexes into [`EffectCache::buffers`].
    pub(crate) buffer_index: u32,
    /// The slices within that buffer.
    pub(crate) slices: SlicesRef,
    /// The order in which we evaluate groups.
    pub(crate) group_order: Vec<u32>,
}
688

689
/// The indices in the indirect dispatch buffers for a single effect, as well as
/// that of the metadata buffer.
#[derive(Clone, Debug)]
pub(crate) struct DispatchBufferIndices {
    /// The index of the first update group indirect dispatch buffer.
    ///
    /// There will be one such dispatch buffer for each particle group.
    pub(crate) first_update_group_dispatch_buffer_index: BufferTableId,
    /// The index of the first render group indirect dispatch buffer.
    ///
    /// There will be one such dispatch buffer for each particle group.
    pub(crate) first_render_group_dispatch_buffer_index: BufferTableId,
    /// The index of the render indirect metadata buffer.
    pub(crate) render_effect_metadata_buffer_index: BufferTableId,
    /// Trail dispatch buffer indices, keyed by what is presumably the particle
    /// group index — TODO confirm with callers.
    pub(crate) trail_dispatch_buffer_indices: HashMap<u32, TrailDispatchBufferIndices>,
}
705

706
/// Source and destination buffer table entries for a trail dispatch.
/// NOTE(review): semantics inferred from field names; confirm against usage.
#[derive(Clone, Copy, Debug)]
pub(crate) struct TrailDispatchBufferIndices {
    /// Destination buffer table entry.
    pub(crate) dest: BufferTableId,
    /// Source buffer table entry.
    pub(crate) src: BufferTableId,
}
711

712
impl Default for DispatchBufferIndices {
    // For testing purposes only.
    fn default() -> Self {
        // All indices point to table entry 0, with no trail entries.
        DispatchBufferIndices {
            first_update_group_dispatch_buffer_index: BufferTableId(0),
            first_render_group_dispatch_buffer_index: BufferTableId(0),
            render_effect_metadata_buffer_index: BufferTableId(0),
            trail_dispatch_buffer_indices: HashMap::default(),
        }
    }
}
723

724
impl EffectCache {
    /// Create a new, empty effect cache using the given render device to
    /// allocate GPU effect buffers on demand.
    pub fn new(device: RenderDevice) -> Self {
        Self {
            device,
            buffers: vec![],
            effects: HashMap::default(),
        }
    }

    /// Access all the GPU effect buffers of this cache.
    ///
    /// Entries are `None` for slots whose buffer was deallocated by
    /// [`EffectCache::remove()`]; the slot is kept so that the indices of the
    /// other buffers remain stable, and may be reused by a later insertion.
    #[allow(dead_code)]
    pub fn buffers(&self) -> &[Option<EffectBuffer>] {
        &self.buffers
    }

    /// Mutable variant of [`EffectCache::buffers()`].
    #[allow(dead_code)]
    pub fn buffers_mut(&mut self) -> &mut [Option<EffectBuffer>] {
        &mut self.buffers
    }

    /// Insert a new effect into the cache, allocating storage for all its
    /// particle groups.
    ///
    /// The total capacity (sum of `capacities`) is allocated as a single
    /// contiguous slice in the first compatible existing buffer with enough
    /// room, or in a newly created buffer otherwise. Returns the unique id
    /// under which the effect is cached.
    ///
    /// # Panics
    ///
    /// Panics if the total byte size of the effect overflows a `u32`.
    pub fn insert(
        &mut self,
        asset: Handle<EffectAsset>,
        capacities: Vec<u32>,
        particle_layout: &ParticleLayout,
        property_layout: &PropertyLayout,
        layout_flags: LayoutFlags,
        dispatch_buffer_indices: DispatchBufferIndices,
        group_order: Vec<u32>,
    ) -> EffectCacheId {
        // All groups share one contiguous slice, so allocate their sum.
        let total_capacity = capacities.iter().cloned().sum();
        let (buffer_index, slice) = self
            .buffers
            .iter_mut()
            .enumerate()
            .find_map(|(buffer_index, buffer)| {
                if let Some(buffer) = buffer {
                    // The buffer must be compatible with the effect layout, to allow the update pass
                    // to update all particles at once from all compatible effects in a single dispatch.
                    if !buffer.is_compatible(&asset) {
                        return None;
                    }

                    // Try to allocate a slice into the buffer
                    buffer
                        .allocate_slice(total_capacity, particle_layout)
                        .map(|slice| (buffer_index, slice))
                } else {
                    // Freed slot; skipped here, but may be reused below when
                    // creating a new buffer.
                    None
                }
            })
            .or_else(|| {
                // Cannot find any suitable buffer; allocate a new one.
                // Prefer recycling a freed (None) slot to keep indices dense.
                let buffer_index = self.buffers.iter().position(|buf| buf.is_none()).unwrap_or(self.buffers.len());
                let byte_size = total_capacity.checked_mul(particle_layout.min_binding_size().get() as u32).unwrap_or_else(|| panic!(
                    "Effect size overflow: capacities={:?} particle_layout={:?} item_size={}",
                    capacities, particle_layout, particle_layout.min_binding_size().get()
                ));
                trace!(
                    "Creating new effect buffer #{} for effect {:?} (capacities={:?}, particle_layout={:?} item_size={}, byte_size={})",
                    buffer_index,
                    asset,
                    capacities,
                    particle_layout,
                    particle_layout.min_binding_size().get(),
                    byte_size
                );
                let mut buffer = EffectBuffer::new(
                    asset,
                    total_capacity,
                    particle_layout.clone(),
                    property_layout.clone(),
                    layout_flags,
                    &self.device,
                    Some(&format!("hanabi:buffer:effect{buffer_index}_particles")),
                );
                // Cannot fail: the buffer was just created with exactly this capacity.
                let slice_ref = buffer.allocate_slice(total_capacity, particle_layout).unwrap();
                if buffer_index >= self.buffers.len() {
                    self.buffers.push(Some(buffer));
                } else {
                    debug_assert!(self.buffers[buffer_index].is_none());
                    self.buffers[buffer_index] = Some(buffer);
                }
                Some((buffer_index, slice_ref))
            })
            .unwrap();
        let id = EffectCacheId::new();

        // Build the N+1 group boundary offsets from the per-group capacities,
        // starting at the allocated slice's start offset.
        let mut ranges = vec![slice.range.start];
        let group_count = capacities.len();
        for capacity in capacities {
            let start_index = ranges.last().unwrap();
            ranges.push(start_index + capacity);
        }
        debug_assert_eq!(ranges.len(), group_count + 1);

        let slices = SlicesRef {
            ranges,
            particle_layout: slice.particle_layout,
            dispatch_buffer_indices,
        };

        trace!(
            "Insert effect id={:?} buffer_index={} slice={}B particle_layout={:?}",
            id,
            buffer_index,
            slices.particle_layout.min_binding_size().get(),
            slices.particle_layout,
        );
        self.effects.insert(
            id,
            CachedEffect {
                buffer_index: buffer_index as u32,
                slices,
                group_order,
            },
        );
        id
    }

    /// Get the particle slices of a cached effect.
    ///
    /// # Panics
    ///
    /// Panics if `id` is not in the cache.
    pub fn get_slices(&self, id: EffectCacheId) -> EffectSlices {
        self.effects
            .get(&id)
            .map(|indices| EffectSlices {
                slices: indices.slices.ranges.clone(),
                buffer_index: indices.buffer_index,
                particle_layout: indices.slices.particle_layout.clone(),
            })
            .unwrap()
    }

    /// Get the indirect dispatch buffer indices of a cached effect.
    ///
    /// # Panics
    ///
    /// Panics if `id` is not in the cache.
    pub(crate) fn get_dispatch_buffer_indices(&self, id: EffectCacheId) -> &DispatchBufferIndices {
        &self.effects[&id].slices.dispatch_buffer_indices
    }

    /// Get the group evaluation order of a cached effect.
    ///
    /// # Panics
    ///
    /// Panics if `id` is not in the cache.
    pub(crate) fn get_group_order(&self, id: EffectCacheId) -> &[u32] {
        &self.effects[&id].group_order
    }

    /// Get the init bind group for a cached effect.
    ///
    /// Returns `None` if the effect is not cached, its buffer was freed, or
    /// the buffer has no simulation bind group yet.
    pub fn init_bind_group(&self, id: EffectCacheId) -> Option<&BindGroup> {
        if let Some(indices) = self.effects.get(&id) {
            if let Some(effect_buffer) = &self.buffers[indices.buffer_index as usize] {
                return effect_buffer.sim_bind_group();
            }
        }
        None
    }

    /// Get the update bind group for a cached effect.
    ///
    /// This is the same bind group as [`EffectCache::init_bind_group()`]; init
    /// and update passes share it.
    #[inline]
    pub fn update_bind_group(&self, id: EffectCacheId) -> Option<&BindGroup> {
        self.init_bind_group(id)
    }

    /// Get the GPU buffer holding the properties of a cached effect, if the
    /// effect exists, its buffer is still allocated, and the effect uses
    /// properties.
    pub fn get_property_buffer(&self, id: EffectCacheId) -> Option<&Buffer> {
        if let Some(cached_effect_indices) = self.effects.get(&id) {
            let buffer_index = cached_effect_indices.buffer_index as usize;
            self.buffers[buffer_index]
                .as_ref()
                .and_then(|eb| eb.properties_buffer())
        } else {
            None
        }
    }

    /// Remove an effect from the cache. If this was the last effect in its
    /// buffer, drop the underlying buffer and return the removed
    /// [`CachedEffect`]; otherwise (effect removed but buffer still in use, or
    /// unknown id) return `None`.
    pub fn remove(&mut self, id: EffectCacheId) -> Option<CachedEffect> {
        let indices = self.effects.remove(&id)?;
        let &mut Some(ref mut buffer) = &mut self.buffers[indices.buffer_index as usize] else {
            return None;
        };

        let slice = SliceRef {
            // The full allocated slice spans from the first group's start to
            // the last group's end.
            range: indices.slices.ranges[0]..*indices.slices.ranges.last().unwrap(),
            // FIXME: clone() needed to return CachedEffectIndices, but really we don't care about
            // returning the ParticleLayout, so should split...
            particle_layout: indices.slices.particle_layout.clone(),
        };

        if buffer.free_slice(slice) == BufferState::Free {
            // Buffer is now empty; drop it but keep the slot to preserve the
            // indices of the remaining buffers.
            self.buffers[indices.buffer_index as usize] = None;
            return Some(indices);
        }

        None
    }
}
912

913
#[cfg(all(test, feature = "gpu_tests"))]
mod gpu_tests {
    //! Tests exercising the effect cache and buffer allocator against a mock
    //! GPU device. Gated behind the `gpu_tests` feature since they require a
    //! (mock) render device.

    use std::borrow::Cow;

    use bevy::math::Vec4;

    use super::*;
    use crate::{
        graph::{Value, VectorValue},
        test_utils::MockRenderer,
        Attribute, AttributeInner,
    };

    // Slices order first by buffer index, then by start offset within the
    // buffer.
    #[test]
    fn effect_slice_ord() {
        let particle_layout = ParticleLayout::new().append(Attribute::POSITION).build();
        let slice1 = EffectSlices {
            slices: vec![0, 32],
            buffer_index: 1,
            particle_layout: particle_layout.clone(),
        };
        let slice2 = EffectSlices {
            slices: vec![32, 64],
            buffer_index: 1,
            particle_layout: particle_layout.clone(),
        };
        assert!(slice1 < slice2);
        assert!(slice1 <= slice2);
        assert!(slice2 > slice1);
        assert!(slice2 >= slice1);

        let slice3 = EffectSlices {
            slices: vec![0, 32],
            buffer_index: 0,
            particle_layout,
        };
        assert!(slice3 < slice1);
        assert!(slice3 < slice2);
        assert!(slice1 > slice3);
        assert!(slice2 > slice3);
    }

    // Synthetic 16-byte (vec4) attributes used to build particle layouts of
    // known sizes (16/32/48/64 bytes).
    const F4A_INNER: &AttributeInner = &AttributeInner::new(
        Cow::Borrowed("F4A"),
        Value::Vector(VectorValue::new_vec4(Vec4::ONE)),
    );
    const F4B_INNER: &AttributeInner = &AttributeInner::new(
        Cow::Borrowed("F4B"),
        Value::Vector(VectorValue::new_vec4(Vec4::ONE)),
    );
    const F4C_INNER: &AttributeInner = &AttributeInner::new(
        Cow::Borrowed("F4C"),
        Value::Vector(VectorValue::new_vec4(Vec4::ONE)),
    );
    const F4D_INNER: &AttributeInner = &AttributeInner::new(
        Cow::Borrowed("F4D"),
        Value::Vector(VectorValue::new_vec4(Vec4::ONE)),
    );

    const F4A: Attribute = Attribute(F4A_INNER);
    const F4B: Attribute = Attribute(F4B_INNER);
    const F4C: Attribute = Attribute(F4C_INNER);
    const F4D: Attribute = Attribute(F4D_INNER);

    // SliceRef::len() is in particles; byte_size() is len * layout size.
    #[test]
    fn slice_ref() {
        let l16 = ParticleLayout::new().append(F4A).build();
        assert_eq!(16, l16.size());
        let l32 = ParticleLayout::new().append(F4A).append(F4B).build();
        assert_eq!(32, l32.size());
        let l48 = ParticleLayout::new()
            .append(F4A)
            .append(F4B)
            .append(F4C)
            .build();
        assert_eq!(48, l48.size());
        for (range, particle_layout, len, byte_size) in [
            (0..0, &l16, 0, 0),
            (0..16, &l16, 16, 16 * 16),
            (0..16, &l32, 16, 16 * 32),
            (240..256, &l48, 16, 16 * 48),
        ] {
            let sr = SliceRef {
                range,
                particle_layout: particle_layout.clone(),
            };
            assert_eq!(sr.len(), len);
            assert_eq!(sr.byte_size(), byte_size);
        }
    }

    // Exercises slice allocation and freeing within a single EffectBuffer,
    // including the collapsing of adjacent free slices.
    #[test]
    fn effect_buffer() {
        let renderer = MockRenderer::new();
        let render_device = renderer.device();

        let l64 = ParticleLayout::new()
            .append(F4A)
            .append(F4B)
            .append(F4C)
            .append(F4D)
            .build();
        assert_eq!(64, l64.size());

        let asset = Handle::<EffectAsset>::default();
        let capacity = 4096;
        let mut buffer = EffectBuffer::new(
            asset,
            capacity,
            l64.clone(),
            PropertyLayout::empty(), // not using properties
            LayoutFlags::NONE,
            &render_device,
            Some("my_buffer"),
        );

        assert_eq!(buffer.capacity, capacity.max(EffectBuffer::MIN_CAPACITY));
        assert_eq!(64, buffer.particle_layout.size());
        assert_eq!(64, buffer.particle_layout.min_binding_size().get());
        assert_eq!(0, buffer.used_size);
        assert!(buffer.free_slices.is_empty());

        // Over-capacity allocation must fail.
        assert_eq!(None, buffer.allocate_slice(buffer.capacity + 1, &l64));

        // Successive allocations are laid out contiguously.
        let mut offset = 0;
        let mut slices = vec![];
        for size in [32, 128, 55, 148, 1, 2048, 42] {
            let slice = buffer.allocate_slice(size, &l64);
            assert!(slice.is_some());
            let slice = slice.unwrap();
            assert_eq!(64, slice.particle_layout.size());
            assert_eq!(64, buffer.particle_layout.min_binding_size().get());
            assert_eq!(offset..offset + size, slice.range);
            slices.push(slice);
            offset += size;
        }
        assert_eq!(offset, buffer.used_size);

        assert_eq!(BufferState::Used, buffer.free_slice(slices[2].clone()));
        assert_eq!(1, buffer.free_slices.len());
        let free_slice = &buffer.free_slices[0];
        assert_eq!(160..215, *free_slice);
        assert_eq!(offset, buffer.used_size); // didn't move

        assert_eq!(BufferState::Used, buffer.free_slice(slices[3].clone()));
        assert_eq!(BufferState::Used, buffer.free_slice(slices[4].clone()));
        assert_eq!(BufferState::Used, buffer.free_slice(slices[5].clone()));
        assert_eq!(4, buffer.free_slices.len());
        assert_eq!(offset, buffer.used_size); // didn't move

        // this will collapse all the way to slices[1], the highest allocated
        assert_eq!(BufferState::Used, buffer.free_slice(slices[6].clone()));
        assert_eq!(0, buffer.free_slices.len()); // collapsed
        assert_eq!(160, buffer.used_size); // collapsed

        assert_eq!(BufferState::Used, buffer.free_slice(slices[0].clone()));
        assert_eq!(1, buffer.free_slices.len());
        assert_eq!(160, buffer.used_size); // didn't move

        // collapse all, and free buffer
        assert_eq!(BufferState::Free, buffer.free_slice(slices[1].clone()));
        assert_eq!(0, buffer.free_slices.len());
        assert_eq!(0, buffer.used_size); // collapsed and empty
    }

    // Exercises recycling of freed slices on new allocations, including
    // splitting a free slice when the request is smaller than the hole.
    #[test]
    fn pop_free_slice() {
        let renderer = MockRenderer::new();
        let render_device = renderer.device();

        let l64 = ParticleLayout::new()
            .append(F4A)
            .append(F4B)
            .append(F4C)
            .append(F4D)
            .build();
        assert_eq!(64, l64.size());

        let asset = Handle::<EffectAsset>::default();
        let capacity = 2048; // EffectBuffer::MIN_CAPACITY;
        assert!(capacity >= 2048); // otherwise the logic below breaks
        let mut buffer = EffectBuffer::new(
            asset,
            capacity,
            l64.clone(),
            PropertyLayout::empty(), // not using properties
            LayoutFlags::NONE,
            &render_device,
            Some("my_buffer"),
        );

        let slice0 = buffer.allocate_slice(32, &l64);
        assert!(slice0.is_some());
        let slice0 = slice0.unwrap();
        assert_eq!(slice0.range, 0..32);
        assert!(buffer.free_slices.is_empty());

        let slice1 = buffer.allocate_slice(1024, &l64);
        assert!(slice1.is_some());
        let slice1 = slice1.unwrap();
        assert_eq!(slice1.range, 32..1056);
        assert!(buffer.free_slices.is_empty());

        let state = buffer.free_slice(slice0);
        assert_eq!(state, BufferState::Used);
        assert_eq!(buffer.free_slices.len(), 1);
        assert_eq!(buffer.free_slices[0], 0..32);

        // Try to allocate a slice larger than slice0, such that slice0 cannot be
        // recycled, and instead the new slice has to be appended after all
        // existing ones.
        let slice2 = buffer.allocate_slice(64, &l64);
        assert!(slice2.is_some());
        let slice2 = slice2.unwrap();
        assert_eq!(slice2.range.start, slice1.range.end); // after slice1
        assert_eq!(slice2.range, 1056..1120);
        assert_eq!(buffer.free_slices.len(), 1);

        // Now allocate a small slice that fits, to recycle (part of) slice0.
        let slice3 = buffer.allocate_slice(16, &l64);
        assert!(slice3.is_some());
        let slice3 = slice3.unwrap();
        assert_eq!(slice3.range, 0..16);
        assert_eq!(buffer.free_slices.len(), 1); // split
        assert_eq!(buffer.free_slices[0], 16..32);

        // Allocate a second small slice that fits exactly the left space, completely
        // recycling
        let slice4 = buffer.allocate_slice(16, &l64);
        assert!(slice4.is_some());
        let slice4 = slice4.unwrap();
        assert_eq!(slice4.range, 16..32);
        assert!(buffer.free_slices.is_empty()); // recycled
    }

    // End-to-end cache test: insert, remove (buffer slot becomes None), and
    // re-insert reusing the freed slot (regression test for issue #60).
    #[test]
    fn effect_cache() {
        let renderer = MockRenderer::new();
        let render_device = renderer.device();

        let empty_property_layout = PropertyLayout::empty(); // not using properties

        let l32 = ParticleLayout::new().append(F4A).append(F4B).build();
        assert_eq!(32, l32.size());

        let mut effect_cache = EffectCache::new(render_device);
        assert_eq!(effect_cache.buffers().len(), 0);

        let asset = Handle::<EffectAsset>::default();
        let capacity = EffectBuffer::MIN_CAPACITY;
        let capacities = vec![capacity];
        let group_order = vec![0];
        let item_size = l32.size();

        let id1 = effect_cache.insert(
            asset.clone(),
            capacities.clone(),
            &l32,
            &empty_property_layout,
            LayoutFlags::NONE,
            DispatchBufferIndices::default(),
            group_order.clone(),
        );
        assert!(id1.is_valid());
        let slice1 = effect_cache.get_slices(id1);
        assert_eq!(
            slice1.particle_layout.min_binding_size().get() as u32,
            item_size
        );
        assert_eq!(slice1.slices, vec![0, capacity]);
        assert_eq!(effect_cache.buffers().len(), 1);

        let id2 = effect_cache.insert(
            asset.clone(),
            capacities.clone(),
            &l32,
            &empty_property_layout,
            LayoutFlags::NONE,
            DispatchBufferIndices::default(),
            group_order.clone(),
        );
        assert!(id2.is_valid());
        let slice2 = effect_cache.get_slices(id2);
        assert_eq!(
            slice2.particle_layout.min_binding_size().get() as u32,
            item_size
        );
        assert_eq!(slice2.slices, vec![0, capacity]);
        assert_eq!(effect_cache.buffers().len(), 2);

        let cached_effect_indices = effect_cache.remove(id1).unwrap();
        assert_eq!(cached_effect_indices.buffer_index, 0);
        assert_eq!(effect_cache.buffers().len(), 2);
        {
            let buffers = effect_cache.buffers();
            assert!(buffers[0].is_none());
            assert!(buffers[1].is_some()); // id2
        }

        // Regression #60
        let id3 = effect_cache.insert(
            asset,
            capacities,
            &l32,
            &empty_property_layout,
            LayoutFlags::NONE,
            DispatchBufferIndices::default(),
            group_order,
        );
        assert!(id3.is_valid());
        let slice3 = effect_cache.get_slices(id3);
        assert_eq!(
            slice3.particle_layout.min_binding_size().get() as u32,
            item_size
        );
        assert_eq!(slice3.slices, vec![0, capacity]);
        assert_eq!(effect_cache.buffers().len(), 2);
        {
            let buffers = effect_cache.buffers();
            assert!(buffers[0].is_some()); // id3
            assert!(buffers[1].is_some()); // id2
        }
    }
}
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc