• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

djeedai / bevy_hanabi / 12128238298

02 Dec 2024 09:24PM UTC coverage: 48.661% (-7.6%) from 56.217%
12128238298

Pull #401

github

web-flow
Merge 30c486d1a into 19aee8dbc
Pull Request #401: Upgrade to Bevy v0.15.0

39 of 284 new or added lines in 11 files covered. (13.73%)

435 existing lines in 8 files now uncovered.

3106 of 6383 relevant lines covered (48.66%)

21.61 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

59.66
/src/render/effect_cache.rs
1
use std::{
2
    cmp::Ordering,
3
    num::NonZeroU64,
4
    ops::Range,
5
    sync::atomic::{AtomicU64, Ordering as AtomicOrdering},
6
};
7

8
use bevy::{
9
    asset::Handle,
10
    ecs::system::Resource,
11
    log::{trace, warn},
12
    render::{render_resource::*, renderer::RenderDevice},
13
    utils::{default, HashMap},
14
};
15
use bytemuck::cast_slice_mut;
16

17
use super::{buffer_table::BufferTableId, AddedEffectGroup};
18
use crate::{
19
    asset::EffectAsset,
20
    render::{
21
        GpuDispatchIndirect, GpuParticleGroup, GpuSpawnerParams, LayoutFlags, StorageType as _,
22
    },
23
    ParticleLayout, PropertyLayout,
24
};
25

26
/// Describes all particle groups' slices of particles in the particle buffer
/// for a single effect.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct EffectSlices {
    /// Slices into the underlying BufferVec of the group.
    ///
    /// The length of this vector is the number of particle groups plus one.
    /// The range of the first group is (slices[0]..slices[1]), the range of
    /// the second group is (slices[1]..slices[2]), etc.
    ///
    /// This is measured in items, not bytes.
    pub slices: Vec<u32>,
    /// The index of the buffer in the [`EffectCache`].
    pub buffer_index: u32,
    /// Particle layout of the slice.
    pub particle_layout: ParticleLayout,
}
43

44
impl Ord for EffectSlices {
45
    fn cmp(&self, other: &Self) -> Ordering {
8✔
46
        match self.buffer_index.cmp(&other.buffer_index) {
8✔
47
            Ordering::Equal => self.slices.first().cmp(&other.slices.first()),
4✔
48
            ord => ord,
4✔
49
        }
50
    }
51
}
52

53
impl PartialOrd for EffectSlices {
54
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
8✔
55
        Some(self.cmp(other))
8✔
56
    }
57
}
58

59
/// Describes all particle groups' slices of particles in the particle buffer
/// for a single effect, as well as the [`DispatchBufferIndices`].
pub struct SlicesRef {
    /// Group boundaries, in items, within the owning buffer.
    // NOTE(review): presumably the same layout as [`EffectSlices::slices`]
    // (group N spans ranges[N]..ranges[N + 1]) — TODO confirm against callers.
    pub ranges: Vec<u32>,
    /// Size of a single item in the slice. Currently equal to the unique size
    /// of all items in an [`EffectBuffer`] (no mixed size supported in same
    /// buffer), so cached only for convenience.
    particle_layout: ParticleLayout,
    /// Indices of this effect into the GPU indirect dispatch buffers.
    pub dispatch_buffer_indices: DispatchBufferIndices,
}
69

70
/// A reference to a slice allocated inside an [`EffectBuffer`].
#[derive(Debug, Default, Clone, PartialEq, Eq)]
pub struct SliceRef {
    /// Range into an [`EffectBuffer`], in item count (not bytes).
    range: Range<u32>,
    /// Size of a single item in the slice. Currently equal to the unique size
    /// of all items in an [`EffectBuffer`] (no mixed size supported in same
    /// buffer), so cached only for convenience.
    particle_layout: ParticleLayout,
}
80

81
impl SliceRef {
82
    /// The length of the slice, in number of items.
83
    #[allow(dead_code)]
84
    pub fn len(&self) -> u32 {
8✔
85
        self.range.end - self.range.start
8✔
86
    }
87

88
    /// The size in bytes of the slice.
89
    #[allow(dead_code)]
90
    pub fn byte_size(&self) -> usize {
4✔
91
        (self.len() as usize) * (self.particle_layout.min_binding_size().get() as usize)
4✔
92
    }
93
}
94

95
/// Storage for a single kind of effects, sharing the same buffer(s).
///
/// Currently only accepts a single unique item size (particle size), fixed at
/// creation. Also currently only accepts instances of a unique effect asset,
/// although this restriction is purely for convenience and may be relaxed in
/// the future to improve batching.
#[derive(Debug)]
pub struct EffectBuffer {
    /// GPU buffer holding all particles for the entire group of effects.
    particle_buffer: Buffer,
    /// GPU buffer holding the indirection indices for the entire group of
    /// effects. This is a triple buffer containing:
    /// - the ping-pong alive particles and render indirect indices at offsets 0
    ///   and 1
    /// - the dead particle indices at offset 2
    indirect_buffer: Buffer,
    /// GPU buffer holding the properties of the effect(s), if any. This is
    /// always `None` if the property layout is empty.
    properties_buffer: Option<Buffer>,
    /// Layout of particles.
    particle_layout: ParticleLayout,
    /// Layout of properties of the effect(s), if using properties.
    property_layout: PropertyLayout,
    /// Flags describing the layout and features of the effect(s) in this
    /// buffer.
    layout_flags: LayoutFlags,
    /// Bind group layout for the particle, indirect, and particle-group
    /// buffers, used by the simulation (init/update) compute passes.
    particles_buffer_layout_sim: BindGroupLayout,
    /// Bind group layout for the render pass, which additionally binds the
    /// indirect dispatch buffer (and the spawner params for local-space
    /// simulation).
    particles_buffer_layout_with_dispatch: BindGroupLayout,
    /// Total buffer capacity, in number of particles.
    capacity: u32,
    /// Used buffer size, in number of particles, either from allocated slices
    /// or from slices in the free list.
    used_size: u32,
    /// Array of free slices for new allocations, sorted in increasing order in
    /// the buffer.
    free_slices: Vec<Range<u32>>,
    // Compute pipeline for the effect update pass.
    // pub compute_pipeline: ComputePipeline, // FIXME - ComputePipelineId, to avoid duplicating per
    // instance!
    /// Handle of all effects common in this buffer. TODO - replace with
    /// compatible layout.
    asset: Handle<EffectAsset>,
    /// Bind group for the per-buffer data (group @1) of the init and update
    /// passes.
    simulate_bind_group: Option<BindGroup>,
}
142

143
/// State of an [`EffectBuffer`] after an operation like freeing a slice,
/// telling the caller whether the buffer is still in use.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum BufferState {
    /// The buffer still contains allocated slices and must be kept alive.
    Used,
    /// The buffer contains no allocated slice anymore and can be deallocated.
    Free,
}
148

149
impl EffectBuffer {
    /// Minimum buffer capacity to allocate, in number of particles.
    // FIXME - Batching is broken due to binding a single GpuSpawnerParam instead of
    // N, and inability for a particle index to tell which Spawner it should
    // use. Setting this to 1 effectively ensures that all new buffers just fit
    // the effect, so batching never occurs.
    pub const MIN_CAPACITY: u32 = 1; // 65536; // at least 64k particles

    /// Create a new group and a GPU buffer to back it up.
    ///
    /// This allocates the particle buffer, the indirect (ping-pong + dead
    /// list) buffer, the optional properties buffer, and the two bind group
    /// layouts (simulation and render) for the given layouts and flags.
    ///
    /// The buffer cannot contain less than [`MIN_CAPACITY`] particles. If
    /// `capacity` is smaller, it's rounded up to [`MIN_CAPACITY`].
    ///
    /// [`MIN_CAPACITY`]: EffectBuffer::MIN_CAPACITY
    pub fn new(
        asset: Handle<EffectAsset>,
        capacity: u32,
        particle_layout: ParticleLayout,
        property_layout: PropertyLayout,
        layout_flags: LayoutFlags,
        render_device: &RenderDevice,
        label: Option<&str>,
    ) -> Self {
        trace!(
            "EffectBuffer::new(capacity={}, particle_layout={:?}, property_layout={:?}, layout_flags={:?}, item_size={}B, properties_size={}B)",
            capacity,
            particle_layout,
            property_layout,
            layout_flags,
            particle_layout.min_binding_size().get(),
            if property_layout.is_empty() { 0 } else { property_layout.min_binding_size().get() },
        );

        // Round capacity up to the minimum allowed; a zero-sized GPU buffer is
        // invalid.
        let capacity = capacity.max(Self::MIN_CAPACITY);
        debug_assert!(
            capacity > 0,
            "Attempted to create a zero-sized effect buffer."
        );

        // Particle buffer: `capacity` items of `particle_layout` size each.
        let particle_capacity_bytes: BufferAddress =
            capacity as u64 * particle_layout.min_binding_size().get();
        let particle_buffer = render_device.create_buffer(&BufferDescriptor {
            label,
            size: particle_capacity_bytes,
            usage: BufferUsages::COPY_DST | BufferUsages::STORAGE,
            mapped_at_creation: false,
        });

        // One u32 index per particle, per sub-buffer of the triple buffer.
        let capacity_bytes: BufferAddress = capacity as u64 * 4;

        let indirect_label = if let Some(label) = label {
            format!("{label}_indirect")
        } else {
            "hanabi:buffer:effect_indirect".to_owned()
        };
        let indirect_buffer = render_device.create_buffer(&BufferDescriptor {
            label: Some(&indirect_label),
            size: capacity_bytes * 3, // ping-pong + deadlist
            usage: BufferUsages::COPY_DST | BufferUsages::STORAGE,
            mapped_at_creation: true,
        });
        // Set content
        {
            // Scope get_mapped_range_mut() to force a drop before unmap()
            {
                let slice = &mut indirect_buffer.slice(..).get_mapped_range_mut()
                    [..capacity_bytes as usize * 3];
                let slice: &mut [u32] = cast_slice_mut(slice);
                // Fill the dead list (third u32 of each triple) with all the
                // particle indices, in reverse order.
                for index in 0..capacity {
                    slice[3 * index as usize + 2] = capacity - 1 - index;
                }
            }
            indirect_buffer.unmap();
        }

        // Properties buffer, allocated only if the effect uses properties.
        let properties_buffer = if property_layout.is_empty() {
            None
        } else {
            let properties_label = if let Some(label) = label {
                format!("{}_properties", label)
            } else {
                "hanabi:buffer:effect_properties".to_owned()
            };
            let size = property_layout.min_binding_size().get(); // TODO: * num_effects_in_buffer (once batching works again)
            let properties_buffer = render_device.create_buffer(&BufferDescriptor {
                label: Some(&properties_label),
                size,
                usage: BufferUsages::COPY_DST | BufferUsages::STORAGE,
                mapped_at_creation: false,
            });
            Some(properties_buffer)
        };

        // TODO - Cache particle_layout and associated bind group layout, instead of
        // creating one bind group layout per buffer using that layout...
        let particle_group_size = GpuParticleGroup::aligned_size(
            render_device.limits().min_storage_buffer_offset_alignment,
        );
        let mut entries = vec![
            // @binding(0) var<storage, read_write> particle_buffer : ParticleBuffer
            BindGroupLayoutEntry {
                binding: 0,
                visibility: ShaderStages::COMPUTE,
                ty: BindingType::Buffer {
                    ty: BufferBindingType::Storage { read_only: false },
                    has_dynamic_offset: false,
                    min_binding_size: Some(particle_layout.min_binding_size()),
                },
                count: None,
            },
            // @binding(1) var<storage, read_write> indirect_buffer : IndirectBuffer
            BindGroupLayoutEntry {
                binding: 1,
                visibility: ShaderStages::COMPUTE,
                ty: BindingType::Buffer {
                    ty: BufferBindingType::Storage { read_only: false },
                    has_dynamic_offset: false,
                    // One triple of u32 indices per particle.
                    min_binding_size: BufferSize::new(12),
                },
                count: None,
            },
            // @binding(2) var<storage, read> particle_groups : array<ParticleGroup>
            BindGroupLayoutEntry {
                binding: 2,
                visibility: ShaderStages::COMPUTE,
                ty: BindingType::Buffer {
                    ty: BufferBindingType::Storage { read_only: true },
                    has_dynamic_offset: false,
                    // Despite no dynamic offset, we do bind a non-zero offset sometimes,
                    // so keep this aligned
                    min_binding_size: Some(particle_group_size),
                },
                count: None,
            },
        ];
        // @binding(3) is the optional properties buffer.
        if !property_layout.is_empty() {
            entries.push(BindGroupLayoutEntry {
                binding: 3,
                visibility: ShaderStages::COMPUTE,
                ty: BindingType::Buffer {
                    ty: BufferBindingType::Storage { read_only: true },
                    has_dynamic_offset: false, // TODO
                    min_binding_size: Some(property_layout.min_binding_size()),
                },
                count: None,
            });
        }
        // Note: shadows the buffer label parameter, which is no longer needed
        // past this point.
        let label = "hanabi:sim_particles_buffer_layout";
        trace!(
            "Creating particle bind group layout '{}' for simulation passes with {} entries.",
            label,
            entries.len(),
        );
        let particles_buffer_layout_sim = render_device.create_bind_group_layout(label, &entries);

        // Create the render layout.
        let dispatch_indirect_size = GpuDispatchIndirect::aligned_size(
            render_device.limits().min_storage_buffer_offset_alignment,
        );
        let mut entries = vec![
            // @binding(0): the particle buffer, read-only for rendering.
            BindGroupLayoutEntry {
                binding: 0,
                visibility: ShaderStages::VERTEX,
                ty: BindingType::Buffer {
                    ty: BufferBindingType::Storage { read_only: true },
                    has_dynamic_offset: false,
                    min_binding_size: Some(particle_layout.min_binding_size()),
                },
                count: None,
            },
            // @binding(1): the indirect index buffer.
            BindGroupLayoutEntry {
                binding: 1,
                visibility: ShaderStages::VERTEX,
                ty: BindingType::Buffer {
                    ty: BufferBindingType::Storage { read_only: true },
                    has_dynamic_offset: false,
                    min_binding_size: BufferSize::new(std::mem::size_of::<u32>() as u64),
                },
                count: None,
            },
            // @binding(2): the indirect dispatch structs, bound with a dynamic
            // offset per group.
            BindGroupLayoutEntry {
                binding: 2,
                visibility: ShaderStages::VERTEX,
                ty: BindingType::Buffer {
                    ty: BufferBindingType::Storage { read_only: true },
                    has_dynamic_offset: true,
                    min_binding_size: Some(dispatch_indirect_size),
                },
                count: None,
            },
        ];
        // @binding(3): spawner params, only needed to fetch the effect
        // transform when simulating in local space.
        if layout_flags.contains(LayoutFlags::LOCAL_SPACE_SIMULATION) {
            entries.push(BindGroupLayoutEntry {
                binding: 3,
                visibility: ShaderStages::VERTEX,
                ty: BindingType::Buffer {
                    ty: BufferBindingType::Storage { read_only: true },
                    has_dynamic_offset: true,
                    min_binding_size: Some(GpuSpawnerParams::min_size()), // TODO - array
                },
                count: None,
            });
        }
        trace!(
            "Creating render layout with {} entries (flags: {:?})",
            entries.len(),
            layout_flags
        );
        let particles_buffer_layout_with_dispatch =
            render_device.create_bind_group_layout("hanabi:buffer_layout_render", &entries);

        Self {
            particle_buffer,
            indirect_buffer,
            properties_buffer,
            particle_layout,
            property_layout,
            layout_flags,
            particles_buffer_layout_sim,
            particles_buffer_layout_with_dispatch,
            capacity,
            used_size: 0,
            free_slices: vec![],
            asset,
            simulate_bind_group: None,
        }
    }

    /// GPU buffer holding the effect properties, if any.
    pub fn properties_buffer(&self) -> Option<&Buffer> {
        self.properties_buffer.as_ref()
    }

    /// Layout of the particles stored in this buffer.
    pub fn particle_layout(&self) -> &ParticleLayout {
        &self.particle_layout
    }

    /// Layout of the properties of the effect(s) in this buffer.
    pub fn property_layout(&self) -> &PropertyLayout {
        &self.property_layout
    }

    /// Layout flags of the effect(s) in this buffer.
    pub fn layout_flags(&self) -> LayoutFlags {
        self.layout_flags
    }

    /// Bind group layout for the simulation (init/update) passes.
    pub fn particle_layout_bind_group_sim(&self) -> &BindGroupLayout {
        &self.particles_buffer_layout_sim
    }

    /// Bind group layout for the render pass, including the indirect dispatch
    /// buffer binding.
    pub fn particle_layout_bind_group_with_dispatch(&self) -> &BindGroupLayout {
        &self.particles_buffer_layout_with_dispatch
    }

    /// Return a binding for the entire particle buffer.
    pub fn max_binding(&self) -> BindingResource {
        let capacity_bytes = self.capacity as u64 * self.particle_layout.min_binding_size().get();
        BindingResource::Buffer(BufferBinding {
            buffer: &self.particle_buffer,
            offset: 0,
            size: Some(NonZeroU64::new(capacity_bytes).unwrap()),
        })
    }

    /// Return a binding of the buffer for a starting range of a given size (in
    /// bytes).
    #[allow(dead_code)]
    pub fn binding(&self, size: u32) -> BindingResource {
        BindingResource::Buffer(BufferBinding {
            buffer: &self.particle_buffer,
            offset: 0,
            size: Some(NonZeroU64::new(size as u64).unwrap()),
        })
    }

    /// Return a binding for the entire indirect buffer associated with the
    /// current effect buffer.
    pub fn indirect_max_binding(&self) -> BindingResource {
        let capacity_bytes = self.capacity as u64 * 4;
        BindingResource::Buffer(BufferBinding {
            buffer: &self.indirect_buffer,
            offset: 0,
            // x3 for the triple buffer (ping-pong + dead list).
            size: Some(NonZeroU64::new(capacity_bytes * 3).unwrap()),
        })
    }

    /// Return a binding for the entire properties buffer associated with the
    /// current effect buffer, if any.
    pub fn properties_max_binding(&self) -> Option<BindingResource> {
        self.properties_buffer.as_ref().map(|buffer| {
            let capacity_bytes = self.property_layout.min_binding_size().get();
            BindingResource::Buffer(BufferBinding {
                buffer,
                offset: 0,
                size: Some(NonZeroU64::new(capacity_bytes).unwrap()),
            })
        })
    }

    /// Create the bind group for the init and update passes if needed.
    ///
    /// The bind group is cached; this is a no-op if it was already created.
    ///
    /// The `buffer_index` must be the index of the current [`EffectBuffer`]
    /// inside the [`EffectCache`]. The `group_binding` is the binding resource
    /// for the particle groups of this buffer.
    pub fn create_sim_bind_group(
        &mut self,
        buffer_index: u32,
        render_device: &RenderDevice,
        group_binding: BufferBinding,
    ) {
        if self.simulate_bind_group.is_some() {
            return;
        }

        let layout = self.particle_layout_bind_group_sim();
        let label = format!("hanabi:bind_group_sim_batch{}", buffer_index);
        let mut bindings = vec![
            BindGroupEntry {
                binding: 0,
                resource: self.max_binding(),
            },
            BindGroupEntry {
                binding: 1,
                resource: self.indirect_max_binding(),
            },
            BindGroupEntry {
                binding: 2,
                resource: BindingResource::Buffer(group_binding),
            },
        ];
        // @binding(3) only exists in the layout if the effect has properties.
        if let Some(property_binding) = self.properties_max_binding() {
            bindings.push(BindGroupEntry {
                binding: 3,
                resource: property_binding,
            });
        }
        trace!(
            "Create simulate bind group '{}' with {} entries",
            label,
            bindings.len()
        );
        let bind_group = render_device.create_bind_group(Some(&label[..]), layout, &bindings);
        self.simulate_bind_group = Some(bind_group);
    }

    /// Return the cached bind group for the init and update passes.
    ///
    /// This is the per-buffer bind group at binding @1 which binds all
    /// per-buffer resources shared by all effect instances batched in a single
    /// buffer.
    pub fn sim_bind_group(&self) -> Option<&BindGroup> {
        self.simulate_bind_group.as_ref()
    }

    /// Try to recycle a free slice to store `size` items.
    ///
    /// Uses a best-fit strategy: the smallest free slice that can hold `size`
    /// items is selected, and split if strictly larger than needed.
    fn pop_free_slice(&mut self, size: u32) -> Option<Range<u32>> {
        if self.free_slices.is_empty() {
            return None;
        }

        struct BestRange {
            range: Range<u32>,
            capacity: u32,
            index: usize,
        }

        let mut result = BestRange {
            range: 0..0, // marker for "invalid"
            capacity: u32::MAX,
            index: usize::MAX,
        };
        for (index, slice) in self.free_slices.iter().enumerate() {
            let capacity = slice.end - slice.start;
            if size > capacity {
                continue;
            }
            if capacity < result.capacity {
                result = BestRange {
                    range: slice.clone(),
                    capacity,
                    index,
                };
            }
        }
        if !result.range.is_empty() {
            if result.capacity > size {
                // split: keep the unused tail of the free slice in the free list
                let start = result.range.start;
                let used_end = start + size;
                let free_end = result.range.end;
                let range = start..used_end;
                self.free_slices[result.index] = used_end..free_end;
                Some(range)
            } else {
                // recycle entirely
                self.free_slices.remove(result.index);
                Some(result.range)
            }
        } else {
            None
        }
    }

    /// Allocate a new slice in the buffer to store the particles of a single
    /// effect.
    ///
    /// Returns `None` if the buffer doesn't have enough remaining room. First
    /// tries to recycle a free slice, then to grow the used region.
    pub fn allocate_slice(
        &mut self,
        capacity: u32,
        particle_layout: &ParticleLayout,
    ) -> Option<SliceRef> {
        trace!(
            "EffectBuffer::allocate_slice: capacity={} particle_layout={:?} item_size={}",
            capacity,
            particle_layout,
            particle_layout.min_binding_size().get(),
        );

        if capacity > self.capacity {
            return None;
        }

        let range = if let Some(range) = self.pop_free_slice(capacity) {
            range
        } else {
            let new_size = self.used_size.checked_add(capacity).unwrap();
            if new_size <= self.capacity {
                let range = self.used_size..new_size;
                self.used_size = new_size;
                range
            } else {
                // Only warn when the effect doesn't fit even in an empty
                // buffer; otherwise the caller will simply try another buffer.
                if self.used_size == 0 {
                    warn!(
                        "Cannot allocate slice of size {} in effect cache buffer of capacity {}.",
                        capacity, self.capacity
                    );
                }
                return None;
            }
        };

        Some(SliceRef {
            range,
            particle_layout: particle_layout.clone(),
        })
    }

    /// Free an allocated slice, and if this was the last allocated slice also
    /// free the buffer.
    ///
    /// Returns [`BufferState::Free`] if the buffer is now entirely unused.
    pub fn free_slice(&mut self, slice: SliceRef) -> BufferState {
        // If slice is at the end of the buffer, reduce total used size
        if slice.range.end == self.used_size {
            self.used_size = slice.range.start;
            // Check other free slices to further reduce used size and drain the free slice
            // list
            while let Some(free_slice) = self.free_slices.last() {
                if free_slice.end == self.used_size {
                    self.used_size = free_slice.start;
                    self.free_slices.pop();
                } else {
                    break;
                }
            }
            if self.used_size == 0 {
                assert!(self.free_slices.is_empty());
                // The buffer is not used anymore, free it too
                BufferState::Free
            } else {
                // There are still some slices used, the last one of which ends at
                // self.used_size
                BufferState::Used
            }
        } else {
            // Free slice is not at end; insert it in free list, keeping the
            // list sorted by increasing start offset.
            let range = slice.range;
            match self.free_slices.binary_search_by(|s| {
                if s.end <= range.start {
                    Ordering::Less
                } else if s.start >= range.end {
                    Ordering::Greater
                } else {
                    // Overlap with an existing free slice: double free.
                    Ordering::Equal
                }
            }) {
                Ok(_) => warn!("Range {:?} already present in free list!", range),
                Err(index) => self.free_slices.insert(index, range),
            }
            BufferState::Used
        }
    }

    /// Check if an effect asset can share this buffer.
    pub fn is_compatible(&self, handle: &Handle<EffectAsset>) -> bool {
        // TODO - replace with check particle layout is compatible to allow tighter
        // packing in less buffers, and update in the less dispatch calls
        *handle == self.asset
    }
}
643

644
/// Identifier referencing an effect cached in an internal effect cache.
#[derive(Debug, Default, Clone, Copy, PartialEq, Eq, Hash)]
pub(crate) struct EffectCacheId(/* TEMP */ pub(crate) u64);

impl EffectCacheId {
    /// An invalid handle, corresponding to nothing.
    pub const INVALID: Self = Self(u64::MAX);

    /// Generate a new valid effect cache identifier.
    ///
    /// IDs are handed out from a process-wide monotonically increasing
    /// counter, so each call returns a distinct value.
    pub fn new() -> Self {
        static NEXT_EFFECT_CACHE_ID: AtomicU64 = AtomicU64::new(0);
        let id = NEXT_EFFECT_CACHE_ID.fetch_add(1, AtomicOrdering::Relaxed);
        Self(id)
    }

    /// Check if the ID is valid, i.e. different from [`EffectCacheId::INVALID`].
    #[allow(dead_code)]
    pub fn is_valid(&self) -> bool {
        self.0 != Self::INVALID.0
    }
}
664

665
/// Cache for effect instances sharing common GPU data structures.
#[derive(Resource)]
pub struct EffectCache {
    /// Render device the GPU resources (buffers) are allocated from.
    device: RenderDevice,
    /// Collection of effect buffers managed by this cache. Some buffers might
    /// be `None` if the entry is not used. Since the buffers are referenced
    /// by index, we cannot move them once they're allocated.
    buffers: Vec<Option<EffectBuffer>>,
    /// Map from an effect cache ID to various buffer indices.
    effects: HashMap<EffectCacheId, CachedEffect>,
}
677

678
/// Stores various data, including the buffer index and slice boundaries within
/// the buffer for all groups in a single effect.
pub(crate) struct CachedEffect {
    /// The index of the buffer in the [`EffectCache`].
    pub(crate) buffer_index: u32,
    /// The slices within that buffer, one boundary per group plus one.
    pub(crate) slices: SlicesRef,
    /// The order in which we evaluate groups.
    pub(crate) group_order: Vec<u32>,
}
688

689
/// Description of a particle group whose GPU dispatch resources have not been
/// allocated yet.
#[derive(Debug, Clone, Copy)]
pub(crate) struct PendingEffectGroup {
    /// Capacity of the group, in number of particles.
    pub capacity: u32,
    /// Index of the source group if this group is a trail group, or `None`
    /// otherwise.
    // NOTE(review): semantics inferred from the field name and
    // `TrailDispatchBufferIndices` usage — confirm against `AddedEffectGroup`.
    pub src_group_index_if_trail: Option<u32>,
}
694

695
impl From<&AddedEffectGroup> for PendingEffectGroup {
    /// Capture the not-yet-allocated state of a newly added effect group.
    fn from(group: &AddedEffectGroup) -> Self {
        Self {
            capacity: group.capacity,
            src_group_index_if_trail: group.src_group_index_if_trail,
        }
    }
}
703

704
/// Render group indirect dispatch indices for an effect, either still pending
/// allocation or already allocated.
#[derive(Debug, Clone)]
pub(crate) enum RenderGroupDispatchIndices {
    /// The dispatch buffer rows have not been allocated yet; only the group
    /// descriptions are known.
    Pending {
        /// One pending entry per particle group.
        groups: Box<[PendingEffectGroup]>,
    },
    /// The dispatch buffer rows have been allocated.
    Allocated {
        /// The index of the first render group indirect dispatch buffer.
        ///
        /// There will be one such dispatch buffer for each particle group.
        first_render_group_dispatch_buffer_index: BufferTableId,
        /// Map from a group index to its source and destination rows into the
        /// render group dispatch buffer.
        trail_dispatch_buffer_indices: HashMap<u32, TrailDispatchBufferIndices>,
    },
}
719

720
impl Default for RenderGroupDispatchIndices {
721
    fn default() -> Self {
3✔
722
        Self::Pending {
723
            groups: Box::new([]),
3✔
724
        }
725
    }
726
}
727

728
/// The indices in the indirect dispatch buffers for a single effect, as well as
/// that of the metadata buffer.
#[derive(Debug, Clone)]
pub(crate) struct DispatchBufferIndices {
    /// The index of the first update group indirect dispatch buffer.
    ///
    /// There will be one such dispatch buffer for each particle group.
    pub(crate) first_update_group_dispatch_buffer_index: BufferTableId,
    /// The index of the render indirect metadata buffer.
    pub(crate) render_effect_metadata_buffer_index: BufferTableId,
    /// Render group dispatch indirect indices for all groups of the effect.
    pub(crate) render_group_dispatch_indices: RenderGroupDispatchIndices,
}
741

742
/// Source and destination rows into the render group dispatch buffer for a
/// trail group.
#[derive(Debug, Clone, Copy)]
pub(crate) struct TrailDispatchBufferIndices {
    /// Destination row into the render group dispatch buffer.
    pub(crate) dest: BufferTableId,
    /// Source row into the render group dispatch buffer.
    pub(crate) src: BufferTableId,
}
747

748
impl Default for DispatchBufferIndices {
749
    // For testing purposes only.
750
    fn default() -> Self {
3✔
751
        DispatchBufferIndices {
752
            first_update_group_dispatch_buffer_index: BufferTableId::INVALID,
753
            render_effect_metadata_buffer_index: BufferTableId::INVALID,
754
            render_group_dispatch_indices: default(),
3✔
755
        }
756
    }
757
}
758

759
impl EffectCache {
    /// Create a new, empty effect cache using the given render device to
    /// allocate GPU buffers on demand.
    pub fn new(device: RenderDevice) -> Self {
        Self {
            device,
            buffers: vec![],
            effects: HashMap::default(),
        }
    }

    /// Access the list of GPU effect buffers. A `None` entry is a previously
    /// freed slot which may be reused by a later [`insert()`].
    ///
    /// [`insert()`]: Self::insert
    #[allow(dead_code)]
    pub fn buffers(&self) -> &[Option<EffectBuffer>] {
        &self.buffers
    }

    /// Mutable variant of [`buffers()`].
    ///
    /// [`buffers()`]: Self::buffers
    #[allow(dead_code)]
    pub fn buffers_mut(&mut self) -> &mut [Option<EffectBuffer>] {
        &mut self.buffers
    }

    /// Insert a new effect into the cache, and return its cache ID.
    ///
    /// A particle slice covering the sum of all group capacities is allocated
    /// into the first existing compatible buffer with enough room; if none is
    /// found, a new [`EffectBuffer`] is created (reusing a freed `None` slot
    /// in `self.buffers` when available).
    ///
    /// # Panics
    ///
    /// Panics if the total byte size (total capacity times particle item size)
    /// overflows a `u32`.
    pub fn insert(
        &mut self,
        asset: Handle<EffectAsset>,
        capacities: Vec<u32>,
        particle_layout: &ParticleLayout,
        property_layout: &PropertyLayout,
        layout_flags: LayoutFlags,
        dispatch_buffer_indices: DispatchBufferIndices,
        group_order: Vec<u32>,
    ) -> EffectCacheId {
        // All groups of an effect live in a single contiguous slice.
        let total_capacity = capacities.iter().cloned().sum();
        let (buffer_index, slice) = self
            .buffers
            .iter_mut()
            .enumerate()
            .find_map(|(buffer_index, buffer)| {
                if let Some(buffer) = buffer {
                    // The buffer must be compatible with the effect layout, to allow the update pass
                    // to update all particles at once from all compatible effects in a single dispatch.
                    if !buffer.is_compatible(&asset) {
                        return None;
                    }

                    // Try to allocate a slice into the buffer
                    buffer
                        .allocate_slice(total_capacity, particle_layout)
                        .map(|slice| (buffer_index, slice))
                } else {
                    // Freed slot; don't allocate here, but a new buffer below may reuse it.
                    None
                }
            })
            .or_else(|| {
                // Cannot find any suitable buffer; allocate a new one
                // Reuse the first freed (None) slot if any, to keep buffer indices stable.
                let buffer_index = self.buffers.iter().position(|buf| buf.is_none()).unwrap_or(self.buffers.len());
                // byte_size is only used for the trace below, but the overflow check
                // also guards against creating an impossibly large buffer.
                let byte_size = total_capacity.checked_mul(particle_layout.min_binding_size().get() as u32).unwrap_or_else(|| panic!(
                    "Effect size overflow: capacities={:?} particle_layout={:?} item_size={}",
                    capacities, particle_layout, particle_layout.min_binding_size().get()
                ));
                trace!(
                    "Creating new effect buffer #{} for effect {:?} (capacities={:?}, particle_layout={:?} item_size={}, byte_size={})",
                    buffer_index,
                    asset,
                    capacities,
                    particle_layout,
                    particle_layout.min_binding_size().get(),
                    byte_size
                );
                let mut buffer = EffectBuffer::new(
                    asset,
                    total_capacity,
                    particle_layout.clone(),
                    property_layout.clone(),
                    layout_flags,
                    &self.device,
                    Some(&format!("hanabi:buffer:effect{buffer_index}_particles")),
                );
                // A freshly created buffer always has room for the requested slice.
                let slice_ref = buffer.allocate_slice(total_capacity, particle_layout).unwrap();
                if buffer_index >= self.buffers.len() {
                    self.buffers.push(Some(buffer));
                } else {
                    debug_assert!(self.buffers[buffer_index].is_none());
                    self.buffers[buffer_index] = Some(buffer);
                }
                Some((buffer_index, slice_ref))
            })
            .unwrap();
        let id = EffectCacheId::new();

        // Build per-group boundaries: group i occupies ranges[i]..ranges[i+1]
        // (see EffectSlices::slices), starting at the allocated slice start.
        let mut ranges = vec![slice.range.start];
        let group_count = capacities.len();
        for capacity in capacities {
            let start_index = ranges.last().unwrap();
            ranges.push(start_index + capacity);
        }
        debug_assert_eq!(ranges.len(), group_count + 1);

        let slices = SlicesRef {
            ranges,
            particle_layout: slice.particle_layout,
            dispatch_buffer_indices,
        };

        trace!(
            "Insert effect id={:?} buffer_index={} slice={}B particle_layout={:?}",
            id,
            buffer_index,
            slices.particle_layout.min_binding_size().get(),
            slices.particle_layout,
        );
        self.effects.insert(
            id,
            CachedEffect {
                buffer_index: buffer_index as u32,
                slices,
                group_order,
            },
        );
        id
    }

    /// Get the particle slices of all groups of a cached effect.
    ///
    /// # Panics
    ///
    /// Panics if `id` is not a valid ID of a cached effect.
    pub fn get_slices(&self, id: EffectCacheId) -> EffectSlices {
        self.effects
            .get(&id)
            .map(|indices| EffectSlices {
                slices: indices.slices.ranges.clone(),
                buffer_index: indices.buffer_index,
                particle_layout: indices.slices.particle_layout.clone(),
            })
            .unwrap()
    }

    /// Get the indirect dispatch buffer indices of a cached effect.
    ///
    /// # Panics
    ///
    /// Panics if `id` is not a valid ID of a cached effect.
    pub(crate) fn get_dispatch_buffer_indices(&self, id: EffectCacheId) -> &DispatchBufferIndices {
        &self.effects[&id].slices.dispatch_buffer_indices
    }

    /// Mutable variant of [`get_dispatch_buffer_indices()`].
    ///
    /// # Panics
    ///
    /// Panics if `id` is not a valid ID of a cached effect.
    ///
    /// [`get_dispatch_buffer_indices()`]: Self::get_dispatch_buffer_indices
    pub(crate) fn get_dispatch_buffer_indices_mut(
        &mut self,
        id: EffectCacheId,
    ) -> &mut DispatchBufferIndices {
        &mut self
            .effects
            .get_mut(&id)
            .unwrap()
            .slices
            .dispatch_buffer_indices
    }

    /// Get the group ordering of a cached effect.
    ///
    /// # Panics
    ///
    /// Panics if `id` is not a valid ID of a cached effect.
    pub(crate) fn get_group_order(&self, id: EffectCacheId) -> &[u32] {
        &self.effects[&id].group_order
    }

    /// Get the init bind group for a cached effect.
    pub fn init_bind_group(&self, id: EffectCacheId) -> Option<&BindGroup> {
        if let Some(indices) = self.effects.get(&id) {
            if let Some(effect_buffer) = &self.buffers[indices.buffer_index as usize] {
                return effect_buffer.sim_bind_group();
            }
        }
        None
    }

    /// Get the update bind group for a cached effect.
    // NOTE(review): currently the same bind group as init; kept as a separate
    // entry point so the two passes can diverge later.
    #[inline]
    pub fn update_bind_group(&self, id: EffectCacheId) -> Option<&BindGroup> {
        self.init_bind_group(id)
    }

    /// Get the GPU buffer storing the properties of a cached effect, if any.
    ///
    /// Returns `None` if the effect is unknown, its buffer slot was freed, or
    /// the effect has no property buffer.
    pub fn get_property_buffer(&self, id: EffectCacheId) -> Option<&Buffer> {
        if let Some(cached_effect_indices) = self.effects.get(&id) {
            let buffer_index = cached_effect_indices.buffer_index as usize;
            self.buffers[buffer_index]
                .as_ref()
                .and_then(|eb| eb.properties_buffer())
        } else {
            None
        }
    }

    /// Remove an effect from the cache. If this was the last effect, drop the
    /// underlying buffer and return the index of the dropped buffer.
    pub fn remove(&mut self, id: EffectCacheId) -> Option<CachedEffect> {
        let indices = self.effects.remove(&id)?;
        let &mut Some(ref mut buffer) = &mut self.buffers[indices.buffer_index as usize] else {
            return None;
        };

        // The effect's slice spans from the first group's start to the last group's end.
        let slice = SliceRef {
            range: indices.slices.ranges[0]..*indices.slices.ranges.last().unwrap(),
            // FIXME: clone() needed to return CachedEffectIndices, but really we don't care about
            // returning the ParticleLayout, so should split...
            particle_layout: indices.slices.particle_layout.clone(),
        };

        // Freeing the last slice of a buffer drops the buffer itself, leaving a
        // None slot that insert() can later reuse.
        if buffer.free_slice(slice) == BufferState::Free {
            self.buffers[indices.buffer_index as usize] = None;
            return Some(indices);
        }

        None
    }
}
959

960
/// GPU-backed tests for the effect cache, gated behind the `gpu_tests` feature
/// since they require a (mock) render device.
#[cfg(all(test, feature = "gpu_tests"))]
mod gpu_tests {
    use std::borrow::Cow;

    use bevy::math::Vec4;

    use super::*;
    use crate::{
        graph::{Value, VectorValue},
        test_utils::MockRenderer,
        Attribute, AttributeInner,
    };

    /// Check that [`EffectSlices`] orders by buffer index first, then by the
    /// start index of the first slice.
    #[test]
    fn effect_slice_ord() {
        let particle_layout = ParticleLayout::new().append(Attribute::POSITION).build();
        let slice1 = EffectSlices {
            slices: vec![0, 32],
            buffer_index: 1,
            particle_layout: particle_layout.clone(),
        };
        let slice2 = EffectSlices {
            slices: vec![32, 64],
            buffer_index: 1,
            particle_layout: particle_layout.clone(),
        };
        assert!(slice1 < slice2);
        assert!(slice1 <= slice2);
        assert!(slice2 > slice1);
        assert!(slice2 >= slice1);

        // Same start slice, but a lower buffer index sorts first.
        let slice3 = EffectSlices {
            slices: vec![0, 32],
            buffer_index: 0,
            particle_layout,
        };
        assert!(slice3 < slice1);
        assert!(slice3 < slice2);
        assert!(slice1 > slice3);
        assert!(slice2 > slice3);
    }

    // Dummy Vec4 attributes (16 bytes each) used to build particle layouts of
    // various sizes in the tests below.
    const F4A_INNER: &AttributeInner = &AttributeInner::new(
        Cow::Borrowed("F4A"),
        Value::Vector(VectorValue::new_vec4(Vec4::ONE)),
    );
    const F4B_INNER: &AttributeInner = &AttributeInner::new(
        Cow::Borrowed("F4B"),
        Value::Vector(VectorValue::new_vec4(Vec4::ONE)),
    );
    const F4C_INNER: &AttributeInner = &AttributeInner::new(
        Cow::Borrowed("F4C"),
        Value::Vector(VectorValue::new_vec4(Vec4::ONE)),
    );
    const F4D_INNER: &AttributeInner = &AttributeInner::new(
        Cow::Borrowed("F4D"),
        Value::Vector(VectorValue::new_vec4(Vec4::ONE)),
    );

    const F4A: Attribute = Attribute(F4A_INNER);
    const F4B: Attribute = Attribute(F4B_INNER);
    const F4C: Attribute = Attribute(F4C_INNER);
    const F4D: Attribute = Attribute(F4D_INNER);

    /// Check that `SliceRef::len()` counts items while `byte_size()` scales by
    /// the particle layout size.
    #[test]
    fn slice_ref() {
        let l16 = ParticleLayout::new().append(F4A).build();
        assert_eq!(16, l16.size());
        let l32 = ParticleLayout::new().append(F4A).append(F4B).build();
        assert_eq!(32, l32.size());
        let l48 = ParticleLayout::new()
            .append(F4A)
            .append(F4B)
            .append(F4C)
            .build();
        assert_eq!(48, l48.size());
        for (range, particle_layout, len, byte_size) in [
            (0..0, &l16, 0, 0),
            (0..16, &l16, 16, 16 * 16),
            (0..16, &l32, 16, 16 * 32),
            (240..256, &l48, 16, 16 * 48),
        ] {
            let sr = SliceRef {
                range,
                particle_layout: particle_layout.clone(),
            };
            assert_eq!(sr.len(), len);
            assert_eq!(sr.byte_size(), byte_size);
        }
    }

    /// Exercise slice allocation and freeing inside a single [`EffectBuffer`],
    /// including free-slice collapsing and the final transition to
    /// [`BufferState::Free`].
    #[test]
    fn effect_buffer() {
        let renderer = MockRenderer::new();
        let render_device = renderer.device();

        let l64 = ParticleLayout::new()
            .append(F4A)
            .append(F4B)
            .append(F4C)
            .append(F4D)
            .build();
        assert_eq!(64, l64.size());

        let asset = Handle::<EffectAsset>::default();
        let capacity = 4096;
        let mut buffer = EffectBuffer::new(
            asset,
            capacity,
            l64.clone(),
            PropertyLayout::empty(), // not using properties
            LayoutFlags::NONE,
            &render_device,
            Some("my_buffer"),
        );

        assert_eq!(buffer.capacity, capacity.max(EffectBuffer::MIN_CAPACITY));
        assert_eq!(64, buffer.particle_layout.size());
        assert_eq!(64, buffer.particle_layout.min_binding_size().get());
        assert_eq!(0, buffer.used_size);
        assert!(buffer.free_slices.is_empty());

        // Over-capacity allocation must fail.
        assert_eq!(None, buffer.allocate_slice(buffer.capacity + 1, &l64));

        // Successive allocations are laid out contiguously.
        let mut offset = 0;
        let mut slices = vec![];
        for size in [32, 128, 55, 148, 1, 2048, 42] {
            let slice = buffer.allocate_slice(size, &l64);
            assert!(slice.is_some());
            let slice = slice.unwrap();
            assert_eq!(64, slice.particle_layout.size());
            assert_eq!(64, buffer.particle_layout.min_binding_size().get());
            assert_eq!(offset..offset + size, slice.range);
            slices.push(slice);
            offset += size;
        }
        assert_eq!(offset, buffer.used_size);

        assert_eq!(BufferState::Used, buffer.free_slice(slices[2].clone()));
        assert_eq!(1, buffer.free_slices.len());
        let free_slice = &buffer.free_slices[0];
        assert_eq!(160..215, *free_slice);
        assert_eq!(offset, buffer.used_size); // didn't move

        assert_eq!(BufferState::Used, buffer.free_slice(slices[3].clone()));
        assert_eq!(BufferState::Used, buffer.free_slice(slices[4].clone()));
        assert_eq!(BufferState::Used, buffer.free_slice(slices[5].clone()));
        assert_eq!(4, buffer.free_slices.len());
        assert_eq!(offset, buffer.used_size); // didn't move

        // this will collapse all the way to slices[1], the highest allocated
        assert_eq!(BufferState::Used, buffer.free_slice(slices[6].clone()));
        assert_eq!(0, buffer.free_slices.len()); // collapsed
        assert_eq!(160, buffer.used_size); // collapsed

        assert_eq!(BufferState::Used, buffer.free_slice(slices[0].clone()));
        assert_eq!(1, buffer.free_slices.len());
        assert_eq!(160, buffer.used_size); // didn't move

        // collapse all, and free buffer
        assert_eq!(BufferState::Free, buffer.free_slice(slices[1].clone()));
        assert_eq!(0, buffer.free_slices.len());
        assert_eq!(0, buffer.used_size); // collapsed and empty
    }

    /// Check that freed slices are recycled by later allocations that fit,
    /// including partial recycling (splitting a free slice).
    #[test]
    fn pop_free_slice() {
        let renderer = MockRenderer::new();
        let render_device = renderer.device();

        let l64 = ParticleLayout::new()
            .append(F4A)
            .append(F4B)
            .append(F4C)
            .append(F4D)
            .build();
        assert_eq!(64, l64.size());

        let asset = Handle::<EffectAsset>::default();
        let capacity = 2048; // EffectBuffer::MIN_CAPACITY;
        assert!(capacity >= 2048); // otherwise the logic below breaks
        let mut buffer = EffectBuffer::new(
            asset,
            capacity,
            l64.clone(),
            PropertyLayout::empty(), // not using properties
            LayoutFlags::NONE,
            &render_device,
            Some("my_buffer"),
        );

        let slice0 = buffer.allocate_slice(32, &l64);
        assert!(slice0.is_some());
        let slice0 = slice0.unwrap();
        assert_eq!(slice0.range, 0..32);
        assert!(buffer.free_slices.is_empty());

        let slice1 = buffer.allocate_slice(1024, &l64);
        assert!(slice1.is_some());
        let slice1 = slice1.unwrap();
        assert_eq!(slice1.range, 32..1056);
        assert!(buffer.free_slices.is_empty());

        let state = buffer.free_slice(slice0);
        assert_eq!(state, BufferState::Used);
        assert_eq!(buffer.free_slices.len(), 1);
        assert_eq!(buffer.free_slices[0], 0..32);

        // Try to allocate a slice larger than slice0, such that slice0 cannot be
        // recycled, and instead the new slice has to be appended after all
        // existing ones.
        let slice2 = buffer.allocate_slice(64, &l64);
        assert!(slice2.is_some());
        let slice2 = slice2.unwrap();
        assert_eq!(slice2.range.start, slice1.range.end); // after slice1
        assert_eq!(slice2.range, 1056..1120);
        assert_eq!(buffer.free_slices.len(), 1);

        // Now allocate a small slice that fits, to recycle (part of) slice0.
        let slice3 = buffer.allocate_slice(16, &l64);
        assert!(slice3.is_some());
        let slice3 = slice3.unwrap();
        assert_eq!(slice3.range, 0..16);
        assert_eq!(buffer.free_slices.len(), 1); // split
        assert_eq!(buffer.free_slices[0], 16..32);

        // Allocate a second small slice that fits exactly the left space, completely
        // recycling
        let slice4 = buffer.allocate_slice(16, &l64);
        assert!(slice4.is_some());
        let slice4 = slice4.unwrap();
        assert_eq!(slice4.range, 16..32);
        assert!(buffer.free_slices.is_empty()); // recycled
    }

    /// End-to-end insert/remove through [`EffectCache`], checking that freed
    /// buffer slots are reused by later inserts (regression test for #60).
    #[test]
    fn effect_cache() {
        let renderer = MockRenderer::new();
        let render_device = renderer.device();

        let empty_property_layout = PropertyLayout::empty(); // not using properties

        let l32 = ParticleLayout::new().append(F4A).append(F4B).build();
        assert_eq!(32, l32.size());

        let mut effect_cache = EffectCache::new(render_device);
        assert_eq!(effect_cache.buffers().len(), 0);

        let asset = Handle::<EffectAsset>::default();
        let capacity = EffectBuffer::MIN_CAPACITY;
        let capacities = vec![capacity];
        let group_order = vec![0];
        let item_size = l32.size();

        let id1 = effect_cache.insert(
            asset.clone(),
            capacities.clone(),
            &l32,
            &empty_property_layout,
            LayoutFlags::NONE,
            DispatchBufferIndices::default(),
            group_order.clone(),
        );
        assert!(id1.is_valid());
        let slice1 = effect_cache.get_slices(id1);
        assert_eq!(
            slice1.particle_layout.min_binding_size().get() as u32,
            item_size
        );
        assert_eq!(slice1.slices, vec![0, capacity]);
        assert_eq!(effect_cache.buffers().len(), 1);

        // A second insert with a full first buffer allocates a second buffer.
        let id2 = effect_cache.insert(
            asset.clone(),
            capacities.clone(),
            &l32,
            &empty_property_layout,
            LayoutFlags::NONE,
            DispatchBufferIndices::default(),
            group_order.clone(),
        );
        assert!(id2.is_valid());
        let slice2 = effect_cache.get_slices(id2);
        assert_eq!(
            slice2.particle_layout.min_binding_size().get() as u32,
            item_size
        );
        assert_eq!(slice2.slices, vec![0, capacity]);
        assert_eq!(effect_cache.buffers().len(), 2);

        // Removing the only effect of buffer #0 frees the buffer (None slot),
        // but keeps the slot in the list so indices stay stable.
        let cached_effect_indices = effect_cache.remove(id1).unwrap();
        assert_eq!(cached_effect_indices.buffer_index, 0);
        assert_eq!(effect_cache.buffers().len(), 2);
        {
            let buffers = effect_cache.buffers();
            assert!(buffers[0].is_none());
            assert!(buffers[1].is_some()); // id2
        }

        // Regression #60
        let id3 = effect_cache.insert(
            asset,
            capacities,
            &l32,
            &empty_property_layout,
            LayoutFlags::NONE,
            DispatchBufferIndices::default(),
            group_order,
        );
        assert!(id3.is_valid());
        let slice3 = effect_cache.get_slices(id3);
        assert_eq!(
            slice3.particle_layout.min_binding_size().get() as u32,
            item_size
        );
        assert_eq!(slice3.slices, vec![0, capacity]);
        assert_eq!(effect_cache.buffers().len(), 2);
        {
            let buffers = effect_cache.buffers();
            assert!(buffers[0].is_some()); // id3
            assert!(buffers[1].is_some()); // id2
        }
    }
}
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc