djeedai / bevy_hanabi / 12443878803

21 Dec 2024 09:01AM UTC coverage: 48.369% (-0.3%) from 48.638%
Build 12443878803 · push · github · web-flow

Start transitioning to retained render world (#409)

Start to transition the internal rendering implementation to leverage
the retained render world introduced in Bevy 0.15.

This change moves `CachedEffect` and `DispatchBufferIndices` out of
the `EffectCache`, and converts them into components added to the render
world entity spawned and synchronized via `SyncToRenderWorld` with the
main world entity owning the effect instance.

As part of this change, split the addition and removal of effect
instances into a separate `add_remove_effects()` system which runs
before the `prepare_effects()` system.

Also fix a small bug where opaque particles were not rendered in some
cases; this is hypothetical, but `EffectShaderSource::generate()` was
not setting the `LayoutFlags::OPAQUE` flag.
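
For context, the retained-render-world pattern this change builds on looks roughly as follows. This is a minimal sketch assuming Bevy 0.15's `SyncToRenderWorld` marker; `MyEffect` and `spawn_effect` are hypothetical stand-ins rather than actual bevy_hanabi API (the real main-world component is `ParticleEffect`):

use bevy::prelude::*;
use bevy::render::sync_world::SyncToRenderWorld;

// Hypothetical stand-in for the main-world component owning the effect
// instance (in bevy_hanabi proper this is `ParticleEffect`).
#[derive(Component)]
struct MyEffect;

// Spawning with the `SyncToRenderWorld` marker makes Bevy 0.15 create and
// retain a matching render-world entity, onto which render-side data such
// as `CachedEffect` and `DispatchBufferIndices` can be attached as
// components instead of living inside a central `EffectCache`.
fn spawn_effect(mut commands: Commands) {
    commands.spawn((MyEffect, SyncToRenderWorld));
}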

22 of 146 new or added lines in 4 files covered. (15.07%)

18 existing lines in 2 files now uncovered.

3100 of 6409 relevant lines covered (48.37%)

21.52 hits per line

Source File: /src/render/effect_cache.rs (59.3% covered)

use std::{cmp::Ordering, num::NonZeroU64, ops::Range};

use bevy::{
    asset::Handle,
    ecs::{component::Component, system::Resource},
    log::{trace, warn},
    render::{render_resource::*, renderer::RenderDevice},
    utils::{default, HashMap},
};
use bytemuck::cast_slice_mut;

use super::{buffer_table::BufferTableId, AddedEffectGroup};
use crate::{
    asset::EffectAsset,
    render::{
        GpuDispatchIndirect, GpuParticleGroup, GpuSpawnerParams, LayoutFlags, StorageType as _,
    },
    ParticleLayout, PropertyLayout,
};

/// Describes all particle groups' slices of particles in the particle buffer
/// for a single effect.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct EffectSlices {
    /// Slices into the underlying BufferVec of the group.
    ///
    /// The length of this vector is the number of particle groups plus one.
    /// The range of the first group is (slices[0]..slices[1]), the range of
    /// the second group is (slices[1]..slices[2]), etc.
    ///
    /// This is measured in items, not bytes.
    pub slices: Vec<u32>,
    /// The index of the buffer.
    pub buffer_index: u32,
    /// Particle layout of the slice.
    pub particle_layout: ParticleLayout,
}

impl Ord for EffectSlices {
    fn cmp(&self, other: &Self) -> Ordering {
        match self.buffer_index.cmp(&other.buffer_index) {
            Ordering::Equal => self.slices.first().cmp(&other.slices.first()),
            ord => ord,
        }
    }
}

impl PartialOrd for EffectSlices {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}

/// Describes all particle groups' slices of particles in the particle buffer
/// for a single effect, as well as the [`DispatchBufferIndices`].
pub struct SlicesRef {
    /// Group boundaries, in items; one more entry than the number of groups.
    pub ranges: Vec<u32>,
    /// Size of a single item in the slice. Currently equal to the unique size
    /// of all items in an [`EffectBuffer`] (no mixed size supported in same
    /// buffer), so cached only for convenience.
    particle_layout: ParticleLayout,
}

impl SlicesRef {
    pub fn group_count(&self) -> u32 {
        debug_assert!(self.ranges.len() >= 2);
        (self.ranges.len() - 1) as u32
    }

    #[allow(dead_code)]
    pub fn group_capacity(&self, group_index: u32) -> u32 {
        assert!(group_index + 1 < self.ranges.len() as u32);
        let start = self.ranges[group_index as usize];
        let end = self.ranges[group_index as usize + 1];
        end - start
    }

    #[allow(dead_code)]
    pub fn total_capacity(&self) -> u32 {
        if self.ranges.is_empty() {
            0
        } else {
            debug_assert!(self.ranges.len() >= 2);
            let start = self.ranges[0];
            let end = self.ranges[self.ranges.len() - 1];
            end - start
        }
    }

    /// The particle layout shared by all groups of the effect.
    pub fn particle_layout(&self) -> &ParticleLayout {
        &self.particle_layout
    }
}

/// A reference to a slice allocated inside an [`EffectBuffer`].
#[derive(Debug, Default, Clone, PartialEq, Eq)]
pub struct SliceRef {
    /// Range into an [`EffectBuffer`], in item count.
    range: Range<u32>,
    /// Size of a single item in the slice. Currently equal to the unique size
    /// of all items in an [`EffectBuffer`] (no mixed size supported in same
    /// buffer), so cached only for convenience.
    particle_layout: ParticleLayout,
}

impl SliceRef {
    /// The length of the slice, in number of items.
    #[allow(dead_code)]
    pub fn len(&self) -> u32 {
        self.range.end - self.range.start
    }

    /// The size in bytes of the slice.
    #[allow(dead_code)]
    pub fn byte_size(&self) -> usize {
        (self.len() as usize) * (self.particle_layout.min_binding_size().get() as usize)
    }
}

/// Storage for a single kind of effects, sharing the same buffer(s).
///
/// Currently only accepts a single unique item size (particle size), fixed at
/// creation. Also currently only accepts instances of a unique effect asset,
/// although this restriction is purely for convenience and may be relaxed in
/// the future to improve batching.
#[derive(Debug)]
pub struct EffectBuffer {
    /// GPU buffer holding all particles for the entire group of effects.
    particle_buffer: Buffer,
    /// GPU buffer holding the indirection indices for the entire group of
    /// effects. This is a triple buffer containing:
    /// - the ping-pong alive particles and render indirect indices at offsets 0
    ///   and 1
    /// - the dead particle indices at offset 2
    indirect_buffer: Buffer,
    /// GPU buffer holding the properties of the effect(s), if any. This is
    /// always `None` if the property layout is empty.
    properties_buffer: Option<Buffer>,
    /// Layout of particles.
    particle_layout: ParticleLayout,
    /// Layout of properties of the effect(s), if using properties.
    property_layout: PropertyLayout,
    /// Flags describing the layout of the effect(s).
    layout_flags: LayoutFlags,
    /// Bind group layout for the particle buffers of the simulation (init and
    /// update) passes.
    particles_buffer_layout_sim: BindGroupLayout,
    /// Bind group layout for the particle buffers of the render pass,
    /// including the indirect dispatch buffer.
    particles_buffer_layout_with_dispatch: BindGroupLayout,
    /// Total buffer capacity, in number of particles.
    capacity: u32,
    /// Used buffer size, in number of particles, either from allocated slices
    /// or from slices in the free list.
    used_size: u32,
    /// Array of free slices for new allocations, sorted in increasing order in
    /// the buffer.
    free_slices: Vec<Range<u32>>,
    // Compute pipeline for the effect update pass.
    // pub compute_pipeline: ComputePipeline, // FIXME - ComputePipelineId, to avoid duplicating per
    // instance!
    /// Handle of all effects common in this buffer. TODO - replace with
    /// compatible layout.
    asset: Handle<EffectAsset>,
    /// Bind group for the per-buffer data (group @1) of the init and update
    /// passes.
    simulate_bind_group: Option<BindGroup>,
}

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum BufferState {
    /// The buffer is in use, with allocated resources.
    Used,
    /// The buffer is free (its resources were deallocated).
    Free,
}

impl EffectBuffer {
    /// Minimum buffer capacity to allocate, in number of particles.
    // FIXME - Batching is broken due to binding a single GpuSpawnerParam instead of
    // N, and inability for a particle index to tell which Spawner it should
    // use. Setting this to 1 effectively ensures that all new buffers just fit
    // the effect, so batching never occurs.
    pub const MIN_CAPACITY: u32 = 1; // 65536; // at least 64k particles

    /// Create a new group and a GPU buffer to back it up.
    ///
    /// The buffer cannot contain less than [`MIN_CAPACITY`] particles. If
    /// `capacity` is smaller, it's rounded up to [`MIN_CAPACITY`].
    ///
    /// [`MIN_CAPACITY`]: EffectBuffer::MIN_CAPACITY
    pub fn new(
        asset: Handle<EffectAsset>,
        capacity: u32,
        particle_layout: ParticleLayout,
        property_layout: PropertyLayout,
        layout_flags: LayoutFlags,
        render_device: &RenderDevice,
        label: Option<&str>,
    ) -> Self {
        trace!(
            "EffectBuffer::new(capacity={}, particle_layout={:?}, property_layout={:?}, layout_flags={:?}, item_size={}B, properties_size={}B)",
            capacity,
            particle_layout,
            property_layout,
            layout_flags,
            particle_layout.min_binding_size().get(),
            if property_layout.is_empty() { 0 } else { property_layout.min_binding_size().get() },
        );

        let capacity = capacity.max(Self::MIN_CAPACITY);
        debug_assert!(
            capacity > 0,
            "Attempted to create a zero-sized effect buffer."
        );

        let particle_capacity_bytes: BufferAddress =
            capacity as u64 * particle_layout.min_binding_size().get();
        let particle_buffer = render_device.create_buffer(&BufferDescriptor {
            label,
            size: particle_capacity_bytes,
            usage: BufferUsages::COPY_DST | BufferUsages::STORAGE,
            mapped_at_creation: false,
        });

        let capacity_bytes: BufferAddress = capacity as u64 * 4;

        let indirect_label = if let Some(label) = label {
            format!("{label}_indirect")
        } else {
            "hanabi:buffer:effect_indirect".to_owned()
        };
        let indirect_buffer = render_device.create_buffer(&BufferDescriptor {
            label: Some(&indirect_label),
            size: capacity_bytes * 3, // ping-pong + deadlist
            usage: BufferUsages::COPY_DST | BufferUsages::STORAGE,
            mapped_at_creation: true,
        });
        // Set content
        {
            // Scope get_mapped_range_mut() to force a drop before unmap()
            {
                let slice = &mut indirect_buffer.slice(..).get_mapped_range_mut()
                    [..capacity_bytes as usize * 3];
                let slice: &mut [u32] = cast_slice_mut(slice);
                for index in 0..capacity {
                    slice[3 * index as usize + 2] = capacity - 1 - index;
                }
            }
            indirect_buffer.unmap();
        }
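        // Layout note: each particle owns one stride-3 row of u32 in the
        // indirect buffer, and the loop above seeds the third element of each
        // row (the dead list) with descending particle indices, e.g.
        // [3, 2, 1, 0] for a capacity of 4.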

        let properties_buffer = if property_layout.is_empty() {
            None
        } else {
            let properties_label = if let Some(label) = label {
                format!("{}_properties", label)
            } else {
                "hanabi:buffer:effect_properties".to_owned()
            };
            let size = property_layout.min_binding_size().get(); // TODO: * num_effects_in_buffer (once batching works again)
            let properties_buffer = render_device.create_buffer(&BufferDescriptor {
                label: Some(&properties_label),
                size,
                usage: BufferUsages::COPY_DST | BufferUsages::STORAGE,
                mapped_at_creation: false,
            });
            Some(properties_buffer)
        };

        // TODO - Cache particle_layout and associated bind group layout, instead of
        // creating one bind group layout per buffer using that layout...
        let particle_group_size = GpuParticleGroup::aligned_size(
            render_device.limits().min_storage_buffer_offset_alignment,
        );
        let mut entries = vec![
            // @binding(0) var<storage, read_write> particle_buffer : ParticleBuffer
            BindGroupLayoutEntry {
                binding: 0,
                visibility: ShaderStages::COMPUTE,
                ty: BindingType::Buffer {
                    ty: BufferBindingType::Storage { read_only: false },
                    has_dynamic_offset: false,
                    min_binding_size: Some(particle_layout.min_binding_size()),
                },
                count: None,
            },
            // @binding(1) var<storage, read_write> indirect_buffer : IndirectBuffer
            BindGroupLayoutEntry {
                binding: 1,
                visibility: ShaderStages::COMPUTE,
                ty: BindingType::Buffer {
                    ty: BufferBindingType::Storage { read_only: false },
                    has_dynamic_offset: false,
                    min_binding_size: BufferSize::new(12),
                },
                count: None,
            },
            // @binding(2) var<storage, read> particle_groups : array<ParticleGroup>
            BindGroupLayoutEntry {
                binding: 2,
                visibility: ShaderStages::COMPUTE,
                ty: BindingType::Buffer {
                    ty: BufferBindingType::Storage { read_only: true },
                    has_dynamic_offset: false,
                    // Despite no dynamic offset, we do bind a non-zero offset sometimes,
                    // so keep this aligned
                    min_binding_size: Some(particle_group_size),
                },
                count: None,
            },
        ];
        if !property_layout.is_empty() {
            entries.push(BindGroupLayoutEntry {
                binding: 3,
                visibility: ShaderStages::COMPUTE,
                ty: BindingType::Buffer {
                    ty: BufferBindingType::Storage { read_only: true },
                    has_dynamic_offset: false, // TODO
                    min_binding_size: Some(property_layout.min_binding_size()),
                },
                count: None,
            });
        }
        let label = "hanabi:sim_particles_buffer_layout";
        trace!(
            "Creating particle bind group layout '{}' for simulation passes with {} entries.",
            label,
            entries.len(),
        );
        let particles_buffer_layout_sim = render_device.create_bind_group_layout(label, &entries);

        // Create the render layout.
        let dispatch_indirect_size = GpuDispatchIndirect::aligned_size(
            render_device.limits().min_storage_buffer_offset_alignment,
        );
        let mut entries = vec![
            BindGroupLayoutEntry {
                binding: 0,
                visibility: ShaderStages::VERTEX,
                ty: BindingType::Buffer {
                    ty: BufferBindingType::Storage { read_only: true },
                    has_dynamic_offset: false,
                    min_binding_size: Some(particle_layout.min_binding_size()),
                },
                count: None,
            },
            BindGroupLayoutEntry {
                binding: 1,
                visibility: ShaderStages::VERTEX,
                ty: BindingType::Buffer {
                    ty: BufferBindingType::Storage { read_only: true },
                    has_dynamic_offset: false,
                    min_binding_size: BufferSize::new(std::mem::size_of::<u32>() as u64),
                },
                count: None,
            },
            BindGroupLayoutEntry {
                binding: 2,
                visibility: ShaderStages::VERTEX,
                ty: BindingType::Buffer {
                    ty: BufferBindingType::Storage { read_only: true },
                    has_dynamic_offset: true,
                    min_binding_size: Some(dispatch_indirect_size),
                },
                count: None,
            },
        ];
        if layout_flags.contains(LayoutFlags::LOCAL_SPACE_SIMULATION) {
            entries.push(BindGroupLayoutEntry {
                binding: 3,
                visibility: ShaderStages::VERTEX,
                ty: BindingType::Buffer {
                    ty: BufferBindingType::Storage { read_only: true },
                    has_dynamic_offset: true,
                    min_binding_size: Some(GpuSpawnerParams::min_size()), // TODO - array
                },
                count: None,
            });
        }
        trace!(
            "Creating render layout with {} entries (flags: {:?})",
            entries.len(),
            layout_flags
        );
        let particles_buffer_layout_with_dispatch =
            render_device.create_bind_group_layout("hanabi:buffer_layout_render", &entries);

        Self {
            particle_buffer,
            indirect_buffer,
            properties_buffer,
            particle_layout,
            property_layout,
            layout_flags,
            particles_buffer_layout_sim,
            particles_buffer_layout_with_dispatch,
            capacity,
            used_size: 0,
            free_slices: vec![],
            asset,
            simulate_bind_group: None,
        }
    }

    /// The properties buffer, if properties are used.
    pub fn properties_buffer(&self) -> Option<&Buffer> {
        self.properties_buffer.as_ref()
    }

    /// The layout of a single particle stored in this buffer.
    pub fn particle_layout(&self) -> &ParticleLayout {
        &self.particle_layout
    }

    /// The layout of the properties of the effect(s), if any.
    pub fn property_layout(&self) -> &PropertyLayout {
        &self.property_layout
    }

    /// The layout flags of the effect(s) stored in this buffer.
    pub fn layout_flags(&self) -> LayoutFlags {
        self.layout_flags
    }

    /// The bind group layout for the simulation (init and update) passes.
    pub fn particle_layout_bind_group_sim(&self) -> &BindGroupLayout {
        &self.particles_buffer_layout_sim
    }

    /// The bind group layout for the render pass.
    pub fn particle_layout_bind_group_with_dispatch(&self) -> &BindGroupLayout {
        &self.particles_buffer_layout_with_dispatch
    }

    /// Return a binding for the entire particle buffer.
    pub fn max_binding(&self) -> BindingResource {
        let capacity_bytes = self.capacity as u64 * self.particle_layout.min_binding_size().get();
        BindingResource::Buffer(BufferBinding {
            buffer: &self.particle_buffer,
            offset: 0,
            size: Some(NonZeroU64::new(capacity_bytes).unwrap()),
        })
    }

    /// Return a binding of the buffer for a starting range of a given size (in
    /// bytes).
    #[allow(dead_code)]
    pub fn binding(&self, size: u32) -> BindingResource {
        BindingResource::Buffer(BufferBinding {
            buffer: &self.particle_buffer,
            offset: 0,
            size: Some(NonZeroU64::new(size as u64).unwrap()),
        })
    }

    /// Return a binding for the entire indirect buffer associated with the
    /// current effect buffer.
    pub fn indirect_max_binding(&self) -> BindingResource {
        let capacity_bytes = self.capacity as u64 * 4;
        BindingResource::Buffer(BufferBinding {
            buffer: &self.indirect_buffer,
            offset: 0,
            size: Some(NonZeroU64::new(capacity_bytes * 3).unwrap()),
        })
    }

    /// Return a binding for the entire properties buffer associated with the
    /// current effect buffer, if any.
    pub fn properties_max_binding(&self) -> Option<BindingResource> {
        self.properties_buffer.as_ref().map(|buffer| {
            let capacity_bytes = self.property_layout.min_binding_size().get();
            BindingResource::Buffer(BufferBinding {
                buffer,
                offset: 0,
                size: Some(NonZeroU64::new(capacity_bytes).unwrap()),
            })
        })
    }

    /// Create the bind group for the init and update passes if needed.
    ///
    /// The `buffer_index` must be the index of the current [`EffectBuffer`]
    /// inside the [`EffectCache`]. The `group_binding` is the binding resource
    /// for the particle groups of this buffer.
    pub fn create_sim_bind_group(
        &mut self,
        buffer_index: u32,
        render_device: &RenderDevice,
        group_binding: BufferBinding,
    ) {
        if self.simulate_bind_group.is_some() {
            return;
        }

        let layout = self.particle_layout_bind_group_sim();
        let label = format!("hanabi:bind_group_sim_batch{}", buffer_index);
        let mut bindings = vec![
            BindGroupEntry {
                binding: 0,
                resource: self.max_binding(),
            },
            BindGroupEntry {
                binding: 1,
                resource: self.indirect_max_binding(),
            },
            BindGroupEntry {
                binding: 2,
                resource: BindingResource::Buffer(group_binding),
            },
        ];
        if let Some(property_binding) = self.properties_max_binding() {
            bindings.push(BindGroupEntry {
                binding: 3,
                resource: property_binding,
            });
        }
        trace!(
            "Create simulate bind group '{}' with {} entries",
            label,
            bindings.len()
        );
        let bind_group = render_device.create_bind_group(Some(&label[..]), layout, &bindings);
        self.simulate_bind_group = Some(bind_group);
    }

    /// Return the cached bind group for the init and update passes.
    ///
    /// This is the per-buffer bind group at binding @1 which binds all
    /// per-buffer resources shared by all effect instances batched in a single
    /// buffer.
    pub fn sim_bind_group(&self) -> Option<&BindGroup> {
        self.simulate_bind_group.as_ref()
    }
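
    // Allocation from the free list below is best-fit: pop_free_slice() scans
    // all free slices, picks the smallest one that can hold the request,
    // recycles it entirely on an exact fit, and splits it otherwise. For
    // example, with free slices 0..32 and 64..80, a request for 16 items
    // recycles 64..80 exactly, while a request for 20 items splits 0..32 into
    // an allocated 0..20 and a remaining free 20..32.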

    /// Try to recycle a free slice to store `size` items.
    fn pop_free_slice(&mut self, size: u32) -> Option<Range<u32>> {
        if self.free_slices.is_empty() {
            return None;
        }

        struct BestRange {
            range: Range<u32>,
            capacity: u32,
            index: usize,
        }

        let mut result = BestRange {
            range: 0..0, // marker for "invalid"
            capacity: u32::MAX,
            index: usize::MAX,
        };
        for (index, slice) in self.free_slices.iter().enumerate() {
            let capacity = slice.end - slice.start;
            if size > capacity {
                continue;
            }
            if capacity < result.capacity {
                result = BestRange {
                    range: slice.clone(),
                    capacity,
                    index,
                };
            }
        }
        if !result.range.is_empty() {
            if result.capacity > size {
                // split
                let start = result.range.start;
                let used_end = start + size;
                let free_end = result.range.end;
                let range = start..used_end;
                self.free_slices[result.index] = used_end..free_end;
                Some(range)
            } else {
                // recycle entirely
                self.free_slices.remove(result.index);
                Some(result.range)
            }
        } else {
            None
        }
    }

    /// Allocate a new slice in the buffer to store the particles of a single
    /// effect.
    pub fn allocate_slice(
        &mut self,
        capacity: u32,
        particle_layout: &ParticleLayout,
    ) -> Option<SliceRef> {
        trace!(
            "EffectBuffer::allocate_slice: capacity={} particle_layout={:?} item_size={}",
            capacity,
            particle_layout,
            particle_layout.min_binding_size().get(),
        );

        if capacity > self.capacity {
            return None;
        }

        let range = if let Some(range) = self.pop_free_slice(capacity) {
            range
        } else {
            let new_size = self.used_size.checked_add(capacity).unwrap();
            if new_size <= self.capacity {
                let range = self.used_size..new_size;
                self.used_size = new_size;
                range
            } else {
                if self.used_size == 0 {
                    warn!(
                        "Cannot allocate slice of size {} in effect cache buffer of capacity {}.",
                        capacity, self.capacity
                    );
                }
                return None;
            }
        };

        Some(SliceRef {
            range,
            particle_layout: particle_layout.clone(),
        })
    }
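
    // Freeing is the inverse: a slice ending exactly at used_size shrinks the
    // used region directly, then drains any free slices that now touch the new
    // end; any other slice is inserted into the sorted free list for later
    // recycling. Once used_size reaches zero the buffer reports
    // BufferState::Free so the cache can deallocate it.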

    /// Free an allocated slice, and if this was the last allocated slice also
    /// free the buffer.
    pub fn free_slice(&mut self, slice: SliceRef) -> BufferState {
        // If slice is at the end of the buffer, reduce total used size
        if slice.range.end == self.used_size {
            self.used_size = slice.range.start;
            // Check other free slices to further reduce used size and drain the free slice
            // list
            while let Some(free_slice) = self.free_slices.last() {
                if free_slice.end == self.used_size {
                    self.used_size = free_slice.start;
                    self.free_slices.pop();
                } else {
                    break;
                }
            }
            if self.used_size == 0 {
                assert!(self.free_slices.is_empty());
                // The buffer is not used anymore, free it too
                BufferState::Free
            } else {
                // There are still some slices used, the last one of which ends at
                // self.used_size
                BufferState::Used
            }
        } else {
            // Free slice is not at end; insert it in free list
            let range = slice.range;
            match self.free_slices.binary_search_by(|s| {
                if s.end <= range.start {
                    Ordering::Less
                } else if s.start >= range.end {
                    Ordering::Greater
                } else {
                    Ordering::Equal
                }
            }) {
                Ok(_) => warn!("Range {:?} already present in free list!", range),
                Err(index) => self.free_slices.insert(index, range),
            }
            BufferState::Used
        }
    }

    /// Check if this buffer is compatible with (can store instances of) the
    /// given effect asset.
    pub fn is_compatible(&self, handle: &Handle<EffectAsset>) -> bool {
        // TODO - replace with a check that the particle layout is compatible, to
        // allow tighter packing into fewer buffers, and update them in fewer
        // dispatch calls
        *handle == self.asset
    }
}

/// Identifier referencing an effect cached in an internal effect cache.
#[derive(Debug, Default, Clone, Copy, PartialEq, Eq, Hash)]
pub(crate) struct EffectCacheId {
    pub index: u32,
}

impl EffectCacheId {
    /// An invalid handle, corresponding to nothing.
    pub const INVALID: Self = Self { index: u32::MAX };

    /// Generate a new valid effect cache identifier.
    #[allow(dead_code)]
    pub fn new(index: u32) -> Self {
        Self { index }
    }

    /// Check if the ID is valid.
    #[allow(dead_code)]
    pub fn is_valid(&self) -> bool {
        *self != Self::INVALID
    }
}

/// Stores various data, including the buffer index and slice boundaries within
/// the buffer for all groups in a single effect.
#[derive(Component)]
pub(crate) struct CachedEffect {
    /// The index of the [`EffectBuffer`].
    pub(crate) buffer_index: u32,
    /// The slices within that buffer.
    pub(crate) slices: SlicesRef,
    /// The order in which we evaluate groups.
    pub(crate) group_order: Vec<u32>,
}

#[derive(Debug, Clone, Copy)]
pub(crate) struct PendingEffectGroup {
    pub capacity: u32,
    pub src_group_index_if_trail: Option<u32>,
}

impl From<&AddedEffectGroup> for PendingEffectGroup {
    fn from(value: &AddedEffectGroup) -> Self {
        Self {
            capacity: value.capacity,
            src_group_index_if_trail: value.src_group_index_if_trail,
        }
    }
}

#[derive(Debug, Clone)]
pub(crate) enum RenderGroupDispatchIndices {
    Pending {
        groups: Box<[PendingEffectGroup]>,
    },
    Allocated {
        /// The index of the first render group indirect dispatch buffer.
        ///
        /// There will be one such dispatch buffer for each particle group.
        first_render_group_dispatch_buffer_index: BufferTableId,
        /// Map from a group index to its source and destination rows into the
        /// render group dispatch buffer.
        trail_dispatch_buffer_indices: HashMap<u32, TrailDispatchBufferIndices>,
    },
}

impl Default for RenderGroupDispatchIndices {
    fn default() -> Self {
        Self::Pending {
            groups: Box::new([]),
        }
    }
}

/// The indices in the indirect dispatch buffers for a single effect, as well as
/// that of the metadata buffer.
#[derive(Debug, Clone, Component)]
pub(crate) struct DispatchBufferIndices {
    /// The index of the first update group indirect dispatch buffer.
    ///
    /// There will be one such dispatch buffer for each particle group.
    pub(crate) first_update_group_dispatch_buffer_index: BufferTableId,
    /// The index of the render indirect metadata buffer.
    pub(crate) render_effect_metadata_buffer_index: BufferTableId,
    /// Render group dispatch indirect indices for all groups of the effect.
    pub(crate) render_group_dispatch_indices: RenderGroupDispatchIndices,
}

#[derive(Debug, Clone, Copy)]
pub(crate) struct TrailDispatchBufferIndices {
    pub(crate) dest: BufferTableId,
    pub(crate) src: BufferTableId,
}

impl Default for DispatchBufferIndices {
    // For testing purposes only.
    fn default() -> Self {
        DispatchBufferIndices {
            first_update_group_dispatch_buffer_index: BufferTableId::INVALID,
            render_effect_metadata_buffer_index: BufferTableId::INVALID,
            render_group_dispatch_indices: default(),
        }
    }
}

/// Cache for effect instances sharing common GPU data structures.
#[derive(Resource)]
pub struct EffectCache {
    /// Render device the GPU resources (buffers) are allocated from.
    device: RenderDevice,
    /// Collection of effect buffers managed by this cache. Some buffers might
    /// be `None` if the entry is not used. Since the buffers are referenced
    /// by index, we cannot move them once they're allocated.
    buffers: Vec<Option<EffectBuffer>>,
}

impl EffectCache {
    pub fn new(device: RenderDevice) -> Self {
        Self {
            device,
            buffers: vec![],
        }
    }

    #[allow(dead_code)]
    pub fn buffers(&self) -> &[Option<EffectBuffer>] {
        &self.buffers
    }

    #[allow(dead_code)]
    pub fn buffers_mut(&mut self) -> &mut [Option<EffectBuffer>] {
        &mut self.buffers
    }

    /// Insert a new effect instance into the cache, allocating a slice into a
    /// compatible existing buffer, or creating a new buffer if none fits.
    pub fn insert(
        &mut self,
        asset: Handle<EffectAsset>,
        capacities: Vec<u32>,
        particle_layout: &ParticleLayout,
        property_layout: &PropertyLayout,
        layout_flags: LayoutFlags,
        group_order: Vec<u32>,
    ) -> CachedEffect {
        let total_capacity = capacities.iter().cloned().sum();
        let (buffer_index, slice) = self
            .buffers
            .iter_mut()
            .enumerate()
            .find_map(|(buffer_index, buffer)| {
                if let Some(buffer) = buffer {
                    // The buffer must be compatible with the effect layout, to allow the update pass
                    // to update all particles at once from all compatible effects in a single dispatch.
                    if !buffer.is_compatible(&asset) {
                        return None;
                    }

                    // Try to allocate a slice into the buffer
                    buffer
                        .allocate_slice(total_capacity, particle_layout)
                        .map(|slice| (buffer_index, slice))
                } else {
                    None
                }
            })
            .or_else(|| {
                // Cannot find any suitable buffer; allocate a new one
                let buffer_index = self.buffers.iter().position(|buf| buf.is_none()).unwrap_or(self.buffers.len());
                let byte_size = total_capacity.checked_mul(particle_layout.min_binding_size().get() as u32).unwrap_or_else(|| panic!(
                    "Effect size overflow: capacities={:?} particle_layout={:?} item_size={}",
                    capacities, particle_layout, particle_layout.min_binding_size().get()
                ));
                trace!(
                    "Creating new effect buffer #{} for effect {:?} (capacities={:?}, particle_layout={:?} item_size={}, byte_size={})",
                    buffer_index,
                    asset,
                    capacities,
                    particle_layout,
                    particle_layout.min_binding_size().get(),
                    byte_size
                );
                let mut buffer = EffectBuffer::new(
                    asset,
                    total_capacity,
                    particle_layout.clone(),
                    property_layout.clone(),
                    layout_flags,
                    &self.device,
                    Some(&format!("hanabi:buffer:effect{buffer_index}_particles")),
                );
                let slice_ref = buffer.allocate_slice(total_capacity, particle_layout).unwrap();
                if buffer_index >= self.buffers.len() {
                    self.buffers.push(Some(buffer));
                } else {
                    debug_assert!(self.buffers[buffer_index].is_none());
                    self.buffers[buffer_index] = Some(buffer);
                }
                Some((buffer_index, slice_ref))
            })
            .unwrap();

        let mut ranges = vec![slice.range.start];
        let group_count = capacities.len();
        for capacity in capacities {
            let start_index = ranges.last().unwrap();
            ranges.push(start_index + capacity);
        }
        debug_assert_eq!(ranges.len(), group_count + 1);

        let slices = SlicesRef {
            ranges,
            particle_layout: slice.particle_layout,
        };

        trace!(
            "Insert effect buffer_index={} slice={}B particle_layout={:?}",
            buffer_index,
            slices.particle_layout.min_binding_size().get(),
            slices.particle_layout,
        );
        CachedEffect {
            buffer_index: buffer_index as u32,
            slices,
            group_order,
        }
    }

    /// Get the init bind group for a cached effect.
    pub fn init_bind_group(&self, buffer_index: u32) -> Option<&BindGroup> {
        self.buffers[buffer_index as usize]
            .as_ref()
            .and_then(|eb| eb.sim_bind_group())
    }

    /// Get the update bind group for a cached effect.
    #[inline]
    pub fn update_bind_group(&self, buffer_index: u32) -> Option<&BindGroup> {
        self.init_bind_group(buffer_index)
    }

    /// Get the properties buffer of the effect buffer at the given index, if
    /// any.
    pub fn get_property_buffer(&self, buffer_index: u32) -> Option<&Buffer> {
        self.buffers[buffer_index as usize]
            .as_ref()
            .and_then(|eb| eb.properties_buffer())
    }

    /// Remove an effect from the cache. If this was the last effect stored in
    /// its underlying buffer, also deallocate the buffer and return
    /// [`BufferState::Free`].
    pub fn remove(&mut self, cached_effect: &CachedEffect) -> Result<BufferState, ()> {
        // Resolve the buffer by index
        let Some(maybe_buffer) = self.buffers.get_mut(cached_effect.buffer_index as usize) else {
            return Err(());
        };
        let Some(buffer) = maybe_buffer.as_mut() else {
            return Err(());
        };

        // Reconstruct the original slice
        let slice = SliceRef {
            range: cached_effect.slices.ranges[0]..*cached_effect.slices.ranges.last().unwrap(),
            // FIXME: clone() needed to return CachedEffectIndices, but really we don't care about
            // returning the ParticleLayout, so should split...
            particle_layout: cached_effect.slices.particle_layout.clone(),
        };

        // Free the slice inside the resolved buffer
        if buffer.free_slice(slice) == BufferState::Free {
            *maybe_buffer = None;
            return Ok(BufferState::Free);
        }

        Ok(BufferState::Used)
    }
}

#[cfg(all(test, feature = "gpu_tests"))]
mod gpu_tests {
    use std::borrow::Cow;

    use bevy::math::Vec4;

    use super::*;
    use crate::{
        graph::{Value, VectorValue},
        test_utils::MockRenderer,
        Attribute, AttributeInner,
    };

    #[test]
    fn effect_slice_ord() {
        let particle_layout = ParticleLayout::new().append(Attribute::POSITION).build();
        let slice1 = EffectSlices {
            slices: vec![0, 32],
            buffer_index: 1,
            particle_layout: particle_layout.clone(),
        };
        let slice2 = EffectSlices {
            slices: vec![32, 64],
            buffer_index: 1,
            particle_layout: particle_layout.clone(),
        };
        assert!(slice1 < slice2);
        assert!(slice1 <= slice2);
        assert!(slice2 > slice1);
        assert!(slice2 >= slice1);

        let slice3 = EffectSlices {
            slices: vec![0, 32],
            buffer_index: 0,
            particle_layout,
        };
        assert!(slice3 < slice1);
        assert!(slice3 < slice2);
        assert!(slice1 > slice3);
        assert!(slice2 > slice3);
    }

    const F4A_INNER: &AttributeInner = &AttributeInner::new(
        Cow::Borrowed("F4A"),
        Value::Vector(VectorValue::new_vec4(Vec4::ONE)),
    );
    const F4B_INNER: &AttributeInner = &AttributeInner::new(
        Cow::Borrowed("F4B"),
        Value::Vector(VectorValue::new_vec4(Vec4::ONE)),
    );
    const F4C_INNER: &AttributeInner = &AttributeInner::new(
        Cow::Borrowed("F4C"),
        Value::Vector(VectorValue::new_vec4(Vec4::ONE)),
    );
    const F4D_INNER: &AttributeInner = &AttributeInner::new(
        Cow::Borrowed("F4D"),
        Value::Vector(VectorValue::new_vec4(Vec4::ONE)),
    );

    const F4A: Attribute = Attribute(F4A_INNER);
    const F4B: Attribute = Attribute(F4B_INNER);
    const F4C: Attribute = Attribute(F4C_INNER);
    const F4D: Attribute = Attribute(F4D_INNER);

    #[test]
    fn slice_ref() {
        let l16 = ParticleLayout::new().append(F4A).build();
        assert_eq!(16, l16.size());
        let l32 = ParticleLayout::new().append(F4A).append(F4B).build();
        assert_eq!(32, l32.size());
        let l48 = ParticleLayout::new()
            .append(F4A)
            .append(F4B)
            .append(F4C)
            .build();
        assert_eq!(48, l48.size());
        for (range, particle_layout, len, byte_size) in [
            (0..0, &l16, 0, 0),
            (0..16, &l16, 16, 16 * 16),
            (0..16, &l32, 16, 16 * 32),
            (240..256, &l48, 16, 16 * 48),
        ] {
            let sr = SliceRef {
                range,
                particle_layout: particle_layout.clone(),
            };
            assert_eq!(sr.len(), len);
            assert_eq!(sr.byte_size(), byte_size);
        }
    }

    #[test]
    fn effect_buffer() {
        let renderer = MockRenderer::new();
        let render_device = renderer.device();

        let l64 = ParticleLayout::new()
            .append(F4A)
            .append(F4B)
            .append(F4C)
            .append(F4D)
            .build();
        assert_eq!(64, l64.size());

        let asset = Handle::<EffectAsset>::default();
        let capacity = 4096;
        let mut buffer = EffectBuffer::new(
            asset,
            capacity,
            l64.clone(),
            PropertyLayout::empty(), // not using properties
            LayoutFlags::NONE,
            &render_device,
            Some("my_buffer"),
        );

        assert_eq!(buffer.capacity, capacity.max(EffectBuffer::MIN_CAPACITY));
        assert_eq!(64, buffer.particle_layout.size());
        assert_eq!(64, buffer.particle_layout.min_binding_size().get());
        assert_eq!(0, buffer.used_size);
        assert!(buffer.free_slices.is_empty());

        assert_eq!(None, buffer.allocate_slice(buffer.capacity + 1, &l64));

        let mut offset = 0;
        let mut slices = vec![];
        for size in [32, 128, 55, 148, 1, 2048, 42] {
            let slice = buffer.allocate_slice(size, &l64);
            assert!(slice.is_some());
            let slice = slice.unwrap();
            assert_eq!(64, slice.particle_layout.size());
            assert_eq!(64, buffer.particle_layout.min_binding_size().get());
            assert_eq!(offset..offset + size, slice.range);
            slices.push(slice);
            offset += size;
        }
        assert_eq!(offset, buffer.used_size);

        assert_eq!(BufferState::Used, buffer.free_slice(slices[2].clone()));
        assert_eq!(1, buffer.free_slices.len());
        let free_slice = &buffer.free_slices[0];
        assert_eq!(160..215, *free_slice);
        assert_eq!(offset, buffer.used_size); // didn't move

        assert_eq!(BufferState::Used, buffer.free_slice(slices[3].clone()));
        assert_eq!(BufferState::Used, buffer.free_slice(slices[4].clone()));
        assert_eq!(BufferState::Used, buffer.free_slice(slices[5].clone()));
        assert_eq!(4, buffer.free_slices.len());
        assert_eq!(offset, buffer.used_size); // didn't move

        // this will collapse all the way to slices[1], the highest allocated
        assert_eq!(BufferState::Used, buffer.free_slice(slices[6].clone()));
        assert_eq!(0, buffer.free_slices.len()); // collapsed
        assert_eq!(160, buffer.used_size); // collapsed

        assert_eq!(BufferState::Used, buffer.free_slice(slices[0].clone()));
        assert_eq!(1, buffer.free_slices.len());
        assert_eq!(160, buffer.used_size); // didn't move

        // collapse all, and free buffer
        assert_eq!(BufferState::Free, buffer.free_slice(slices[1].clone()));
        assert_eq!(0, buffer.free_slices.len());
        assert_eq!(0, buffer.used_size); // collapsed and empty
    }

    #[test]
    fn pop_free_slice() {
        let renderer = MockRenderer::new();
        let render_device = renderer.device();

        let l64 = ParticleLayout::new()
            .append(F4A)
            .append(F4B)
            .append(F4C)
            .append(F4D)
            .build();
        assert_eq!(64, l64.size());

        let asset = Handle::<EffectAsset>::default();
        let capacity = 2048; // EffectBuffer::MIN_CAPACITY;
        assert!(capacity >= 2048); // otherwise the logic below breaks
        let mut buffer = EffectBuffer::new(
            asset,
            capacity,
            l64.clone(),
            PropertyLayout::empty(), // not using properties
            LayoutFlags::NONE,
            &render_device,
            Some("my_buffer"),
        );

        let slice0 = buffer.allocate_slice(32, &l64);
        assert!(slice0.is_some());
        let slice0 = slice0.unwrap();
        assert_eq!(slice0.range, 0..32);
        assert!(buffer.free_slices.is_empty());

        let slice1 = buffer.allocate_slice(1024, &l64);
        assert!(slice1.is_some());
        let slice1 = slice1.unwrap();
        assert_eq!(slice1.range, 32..1056);
        assert!(buffer.free_slices.is_empty());

        let state = buffer.free_slice(slice0);
        assert_eq!(state, BufferState::Used);
        assert_eq!(buffer.free_slices.len(), 1);
        assert_eq!(buffer.free_slices[0], 0..32);

        // Try to allocate a slice larger than slice0, such that slice0 cannot be
        // recycled, and instead the new slice has to be appended after all
        // existing ones.
        let slice2 = buffer.allocate_slice(64, &l64);
        assert!(slice2.is_some());
        let slice2 = slice2.unwrap();
        assert_eq!(slice2.range.start, slice1.range.end); // after slice1
        assert_eq!(slice2.range, 1056..1120);
        assert_eq!(buffer.free_slices.len(), 1);

        // Now allocate a small slice that fits, to recycle (part of) slice0.
        let slice3 = buffer.allocate_slice(16, &l64);
        assert!(slice3.is_some());
        let slice3 = slice3.unwrap();
        assert_eq!(slice3.range, 0..16);
        assert_eq!(buffer.free_slices.len(), 1); // split
        assert_eq!(buffer.free_slices[0], 16..32);

        // Allocate a second small slice that fits exactly the left space, completely
        // recycling
        let slice4 = buffer.allocate_slice(16, &l64);
        assert!(slice4.is_some());
        let slice4 = slice4.unwrap();
        assert_eq!(slice4.range, 16..32);
        assert!(buffer.free_slices.is_empty()); // recycled
    }

    #[test]
    fn effect_cache() {
        let renderer = MockRenderer::new();
        let render_device = renderer.device();

        let empty_property_layout = PropertyLayout::empty(); // not using properties

        let l32 = ParticleLayout::new().append(F4A).append(F4B).build();
        assert_eq!(32, l32.size());

        let mut effect_cache = EffectCache::new(render_device);
        assert_eq!(effect_cache.buffers().len(), 0);

        let asset = Handle::<EffectAsset>::default();
        let capacity = EffectBuffer::MIN_CAPACITY;
        let capacities = vec![capacity];
        let group_order = vec![0];
        let item_size = l32.size();

        // Insert an effect
        let effect1 = effect_cache.insert(
            asset.clone(),
            capacities.clone(),
            &l32,
            &empty_property_layout,
            LayoutFlags::NONE,
            group_order.clone(),
        );
        //assert!(effect1.is_valid());
        let slice1 = &effect1.slices;
        assert_eq!(slice1.group_count(), 1);
        assert_eq!(slice1.group_capacity(0), capacity);
        assert_eq!(slice1.total_capacity(), capacity);
        assert_eq!(
            slice1.particle_layout.min_binding_size().get() as u32,
            item_size
        );
        assert_eq!(slice1.ranges, vec![0, capacity]);
        assert_eq!(effect_cache.buffers().len(), 1);

        // Insert a second copy of the same effect
        let effect2 = effect_cache.insert(
            asset.clone(),
            capacities.clone(),
            &l32,
            &empty_property_layout,
            LayoutFlags::NONE,
            group_order.clone(),
        );
        //assert!(effect2.is_valid());
        let slice2 = &effect2.slices;
        assert_eq!(slice2.group_count(), 1);
        assert_eq!(slice2.group_capacity(0), capacity);
        assert_eq!(slice2.total_capacity(), capacity);
        assert_eq!(
            slice2.particle_layout.min_binding_size().get() as u32,
            item_size
        );
        assert_eq!(slice2.ranges, vec![0, capacity]);
        assert_eq!(effect_cache.buffers().len(), 2);

        // Remove the first effect instance
        let buffer_state = effect_cache.remove(&effect1).unwrap();
        // Note: currently batching is disabled, so each instance has its own buffer,
        // which becomes unused once the instance is destroyed.
        assert_eq!(buffer_state, BufferState::Free);
        assert_eq!(effect_cache.buffers().len(), 2);
        {
            let buffers = effect_cache.buffers();
            assert!(buffers[0].is_none());
            assert!(buffers[1].is_some()); // id2
        }

        // Regression #60
        let effect3 = effect_cache.insert(
            asset,
            capacities,
            &l32,
            &empty_property_layout,
            LayoutFlags::NONE,
            group_order,
        );
        //assert!(effect3.is_valid());
        let slice3 = &effect3.slices;
        assert_eq!(slice3.group_count(), 1);
        assert_eq!(slice3.group_capacity(0), capacity);
        assert_eq!(slice3.total_capacity(), capacity);
        assert_eq!(
            slice3.particle_layout.min_binding_size().get() as u32,
            item_size
        );
        assert_eq!(slice3.ranges, vec![0, capacity]);
        // Note: currently batching is disabled, so each instance has its own buffer.
        assert_eq!(effect_cache.buffers().len(), 2);
        {
            let buffers = effect_cache.buffers();
            assert!(buffers[0].is_some()); // id3
            assert!(buffers[1].is_some()); // id2
        }
    }
}