
djeedai / bevy_hanabi / 21565578469

01 Feb 2026 03:38PM UTC coverage: 58.351% (-8.1%) from 66.442%
push · github · web-flow
Update to Bevy v0.18 (#521)

Thanks to @morgenthum for the original work.

93 of 170 new or added lines in 6 files covered. (54.71%)

968 existing lines in 17 files now uncovered.

4954 of 8490 relevant lines covered (58.35%)

190.51 hits per line

Source File

/src/render/effect_cache.rs (71.21%)
1
use std::{
2
    cmp::Ordering,
3
    num::{NonZeroU32, NonZeroU64},
4
    ops::Range,
5
};
6

7
use bevy::{
8
    asset::Handle,
9
    ecs::{component::Component, resource::Resource},
10
    log::{trace, warn},
11
    platform::collections::HashMap,
12
    render::{mesh::allocator::MeshBufferSlice, render_resource::*, renderer::RenderDevice},
13
    utils::default,
14
};
15
use bytemuck::cast_slice_mut;
16

17
use super::{buffer_table::BufferTableId, BufferBindingSource};
18
use crate::{
19
    asset::EffectAsset,
20
    render::{
21
        calc_hash, event::GpuChildInfo, GpuDrawIndexedIndirectArgs, GpuDrawIndirectArgs,
22
        GpuEffectMetadata, GpuSpawnerParams, StorageType as _, INDIRECT_INDEX_SIZE,
23
    },
24
    ParticleLayout,
25
};
26

27
/// Describes the slice of particles in the particle buffer
28
/// for a single effect.
29
#[derive(Debug, Clone, PartialEq, Eq)]
30
pub struct EffectSlice {
31
    /// Slice into the underlying [`BufferVec`].
32
    ///
33
    /// This is measured in items, not bytes.
34
    pub slice: Range<u32>,
35
    /// ID of the particle slab in the [`EffectCache`].
36
    pub slab_id: SlabId,
37
    /// Particle layout of the effect.
38
    pub particle_layout: ParticleLayout,
39
}
40

41
impl Ord for EffectSlice {
42
    fn cmp(&self, other: &Self) -> Ordering {
8✔
43
        match self.slab_id.cmp(&other.slab_id) {
16✔
44
            Ordering::Equal => self.slice.start.cmp(&other.slice.start),
4✔
45
            ord => ord,
8✔
46
        }
47
    }
48
}
49

50
impl PartialOrd for EffectSlice {
51
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
8✔
52
        Some(self.cmp(other))
16✔
53
    }
54
}
55

56
/// A reference to a slice allocated inside a [`ParticleSlab`].
57
#[derive(Debug, Default, Clone, PartialEq, Eq)]
58
pub struct SlabSliceRef {
59
    /// Range into a [`ParticleSlab`], in item count.
60
    range: Range<u32>,
61
    /// Particle layout for the effect stored in that slice.
62
    pub(crate) particle_layout: ParticleLayout,
63
}
64

65
impl SlabSliceRef {
66
    /// The length of the slice, in number of items.
67
    #[allow(dead_code)]
68
    pub fn len(&self) -> u32 {
14✔
69
        self.range.end - self.range.start
14✔
70
    }
71

72
    /// The size in bytes of the slice.
73
    #[allow(dead_code)]
74
    pub fn byte_size(&self) -> usize {
4✔
75
        (self.len() as usize) * (self.particle_layout.min_binding_size().get() as usize)
12✔
76
    }
77

78
    pub fn range(&self) -> Range<u32> {
624✔
79
        self.range.clone()
1,248✔
80
    }
81
}
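
// Illustrative sketch (added for documentation; not part of the original file):
// `byte_size()` is just `len()` scaled by the layout's per-particle binding
// size. The layout chosen below is an assumption made only for this example.
#[cfg(test)]
mod slab_slice_ref_sketch {
    use super::*;
    use crate::Attribute;

    #[test]
    fn byte_size_scales_with_layout() {
        let layout = ParticleLayout::new().append(Attribute::POSITION).build();
        let item_size = layout.min_binding_size().get() as usize;
        let slice = SlabSliceRef {
            range: 16..48,
            particle_layout: layout,
        };
        // 48 - 16 = 32 items, each `item_size` bytes.
        assert_eq!(slice.len(), 32);
        assert_eq!(slice.byte_size(), 32 * item_size);
    }
}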
82

83
#[derive(Debug, Default, Clone, Copy, PartialEq, Eq)]
84
struct SimBindGroupKey {
85
    buffer: Option<BufferId>,
86
    offset: u32,
87
    size: u32,
88
}
89

90
impl SimBindGroupKey {
91
    /// Invalid key, often used as placeholder.
92
    pub const INVALID: Self = Self {
93
        buffer: None,
94
        offset: u32::MAX,
95
        size: 0,
96
    };
97
}
98

99
impl From<&BufferBindingSource> for SimBindGroupKey {
100
    fn from(value: &BufferBindingSource) -> Self {
×
101
        Self {
102
            buffer: Some(value.buffer.id()),
×
103
            offset: value.offset,
×
104
            size: value.size.get(),
×
105
        }
106
    }
107
}
108

109
impl From<Option<&BufferBindingSource>> for SimBindGroupKey {
110
    fn from(value: Option<&BufferBindingSource>) -> Self {
312✔
111
        if let Some(bbs) = value {
312✔
112
            Self {
UNCOV
113
                buffer: Some(bbs.buffer.id()),
×
UNCOV
114
                offset: bbs.offset,
×
UNCOV
115
                size: bbs.size.get(),
×
116
            }
117
        } else {
118
            Self::INVALID
312✔
119
        }
120
    }
121
}
122

123
/// State of a [`ParticleSlab`] after an insertion or removal operation.
124
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
125
pub enum SlabState {
126
    /// The slab is in use, with allocated resources.
127
    Used,
128
    /// Like `Used`, but the slab was resized, so any bind group is
129
    /// nonetheless invalid.
130
    Resized,
131
    /// The slab is free (its resources were deallocated).
132
    Free,
133
}
134

135
/// ID of a [`ParticleSlab`] inside an [`EffectCache`].
136
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
137
pub struct SlabId(u32);
138

139
impl SlabId {
140
    /// An invalid value, often used as placeholder.
141
    pub const INVALID: SlabId = SlabId(u32::MAX);
142

143
    /// Create a new slab ID from its underlying index.
144
    pub const fn new(index: u32) -> Self {
326✔
145
        assert!(index != u32::MAX);
652✔
146
        Self(index)
326✔
147
    }
148

149
    /// Check if the current ID is valid, that is, is different from
150
    /// [`INVALID`].
151
    ///
152
    /// [`INVALID`]: Self::INVALID
153
    #[inline]
154
    #[allow(dead_code)]
155
    pub const fn is_valid(&self) -> bool {
×
156
        self.0 != Self::INVALID.0
×
157
    }
158

159
    /// Get the raw underlying index.
160
    ///
161
    /// This is mostly used for debugging / logging.
162
    #[inline]
163
    pub const fn index(&self) -> u32 {
1,879✔
164
        self.0
1,879✔
165
    }
166
}
167

168
impl Default for SlabId {
169
    fn default() -> Self {
×
170
        Self::INVALID
×
171
    }
172
}
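
// Illustrative sketch (not part of the original file): basic `SlabId`
// invariants. A freshly created ID keeps its index, while the default value is
// the `INVALID` placeholder.
#[cfg(test)]
mod slab_id_sketch {
    use super::*;

    #[test]
    fn slab_id_basics() {
        let id = SlabId::new(3);
        assert!(id.is_valid());
        assert_eq!(id.index(), 3);
        assert_eq!(SlabId::default(), SlabId::INVALID);
        assert!(!SlabId::INVALID.is_valid());
    }
}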
173

174
/// Storage for the per-particle data of effects sharing compatible layouts.
175
///
176
/// Currently only accepts a single unique particle layout, fixed at creation.
177
/// If an effect has a different particle layout, it needs to be stored in a
178
/// different slab.
179
///
180
/// Also currently only accepts instances of a unique effect asset, although
181
/// this restriction is purely for convenience and may be relaxed in the future
182
/// to improve batching.
183
#[derive(Debug)]
184
pub struct ParticleSlab {
185
    /// GPU buffer storing all particles for the entire slab of effects.
186
    ///
187
    /// Each particle is a collection of attributes arranged according to
188
    /// [`Self::particle_layout`]. The buffer contains storage for exactly
189
    /// [`Self::capacity`] particles.
190
    particle_buffer: Buffer,
191
    /// GPU buffer storing the indirection indices for the entire slab of
192
    /// effects.
193
    ///
194
    /// Each indirection item contains 3 values:
195
    /// - the ping-pong alive particles and render indirect indices at offsets 0
196
    ///   and 1
197
    /// - the dead particle indices at offset 2
198
    ///
199
    /// The buffer contains storage for exactly [`Self::capacity`] items.
200
    indirect_index_buffer: Buffer,
201
    /// Layout of particles.
202
    particle_layout: ParticleLayout,
203
    /// Total slab capacity, in number of particles.
204
    capacity: u32,
205
    /// Used slab size, in number of particles, either from allocated slices
206
    /// or from slices in the free list.
207
    used_size: u32,
208
    /// Array of free slices for new allocations, sorted in increasing order
209
    /// inside the slab buffers.
210
    free_slices: Vec<Range<u32>>,
211

212
    /// Handle of the effect asset common to all effects in this slab. TODO - replace with
213
    /// compatible layout.
214
    asset: Handle<EffectAsset>,
215
    /// Layout of the particle@1 bind group for the render pass.
216
    // TODO - move; this only depends on the particle and spawner layouts, can be shared across
217
    // slabs
218
    render_particles_buffer_layout: BindGroupLayout,
219
    /// Bind group particle@1 of the simulation passes (init and update).
220
    sim_bind_group: Option<BindGroup>,
221
    /// Key the `sim_bind_group` was created from.
222
    sim_bind_group_key: SimBindGroupKey,
223
}
224

225
impl ParticleSlab {
226
    /// Minimum buffer capacity to allocate, in number of particles.
227
    pub const MIN_CAPACITY: u32 = 65536; // at least 64k particles
228

229
    /// Create a new slab and the GPU resources to back it up.
230
    ///
231
    /// The slab cannot contain less than [`MIN_CAPACITY`] particles. If the
232
    /// input `capacity` is smaller, it's rounded up to [`MIN_CAPACITY`].
233
    ///
234
    /// # Panics
235
    ///
236
    /// This panics if the `capacity` is zero.
237
    ///
238
    /// [`MIN_CAPACITY`]: Self::MIN_CAPACITY
239
    pub fn new(
8✔
240
        slab_id: SlabId,
241
        asset: Handle<EffectAsset>,
242
        capacity: u32,
243
        particle_layout: ParticleLayout,
244
        render_device: &RenderDevice,
245
    ) -> Self {
246
        trace!(
8✔
247
            "ParticleSlab::new(slab_id={}, capacity={}, particle_layout={:?}, item_size={}B)",
248
            slab_id.0,
249
            capacity,
250
            particle_layout,
251
            particle_layout.min_binding_size().get(),
9✔
252
        );
253

254
        // Calculate the clamped capacity of the group, in number of particles.
255
        let capacity = capacity.max(Self::MIN_CAPACITY);
24✔
256
        assert!(
8✔
257
            capacity > 0,
8✔
258
            "Attempted to create a zero-sized effect buffer."
259
        );
260

261
        // Allocate the particle buffer itself, containing the attributes of each
262
        // particle.
263
        #[cfg(debug_assertions)]
264
        let mapped_at_creation = true;
16✔
265
        #[cfg(not(debug_assertions))]
266
        let mapped_at_creation = false;
267
        let particle_capacity_bytes: BufferAddress =
16✔
268
            capacity as u64 * particle_layout.min_binding_size().get();
24✔
269
        let particle_label = format!("hanabi:buffer:slab{}:particle", slab_id.0);
16✔
270
        let particle_buffer = render_device.create_buffer(&BufferDescriptor {
32✔
271
            label: Some(&particle_label),
16✔
272
            size: particle_capacity_bytes,
16✔
273
            usage: BufferUsages::COPY_DST | BufferUsages::STORAGE,
8✔
274
            mapped_at_creation,
8✔
275
        });
276
        // Set content
277
        #[cfg(debug_assertions)]
278
        {
279
            // Scope get_mapped_range_mut() to force a drop before unmap()
280
            {
281
                let slice: &mut [u8] = &mut particle_buffer
48✔
282
                    .slice(..particle_capacity_bytes)
24✔
283
                    .get_mapped_range_mut();
24✔
284
                let slice: &mut [u32] = cast_slice_mut(slice);
40✔
285
                slice.fill(0xFFFFFFFF);
16✔
286
            }
287
            particle_buffer.unmap();
8✔
288
        }
289

290
        // Each indirect buffer stores 3 arrays of u32, of length the number of
291
        // particles.
292
        let indirect_capacity_bytes: BufferAddress = capacity as u64 * 4 * 3;
24✔
293
        let indirect_label = format!("hanabi:buffer:slab{}:indirect", slab_id.0);
16✔
294
        let indirect_index_buffer = render_device.create_buffer(&BufferDescriptor {
32✔
295
            label: Some(&indirect_label),
16✔
296
            size: indirect_capacity_bytes,
8✔
297
            usage: BufferUsages::COPY_DST | BufferUsages::STORAGE,
8✔
298
            mapped_at_creation: true,
8✔
299
        });
300
        // Set content
301
        {
302
            // Scope get_mapped_range_mut() to force a drop before unmap()
303
            {
304
                let slice: &mut [u8] = &mut indirect_index_buffer
32✔
305
                    .slice(..indirect_capacity_bytes)
8✔
306
                    .get_mapped_range_mut();
8✔
307
                let slice: &mut [u32] = cast_slice_mut(slice);
24✔
308
                for index in 0..capacity {
524,296✔
309
                    slice[3 * index as usize + 2] = index;
310
                }
311
            }
312
            indirect_index_buffer.unmap();
16✔
313
        }
314

315
        // Create the render layout.
316
        // TODO - move; this only depends on the particle and spawner layouts, can be
317
        // shared across slabs
318
        let spawner_params_size = GpuSpawnerParams::aligned_size(
319
            render_device.limits().min_storage_buffer_offset_alignment,
8✔
320
        );
321
        let entries = [
16✔
322
            // @group(1) @binding(0) var<storage, read> particle_buffer : ParticleBuffer;
323
            BindGroupLayoutEntry {
16✔
324
                binding: 0,
16✔
325
                visibility: ShaderStages::VERTEX_FRAGMENT,
16✔
326
                ty: BindingType::Buffer {
16✔
327
                    ty: BufferBindingType::Storage { read_only: true },
24✔
328
                    has_dynamic_offset: false,
16✔
329
                    min_binding_size: Some(particle_layout.min_binding_size()),
16✔
330
                },
331
                count: None,
16✔
332
            },
333
            // @group(1) @binding(1) var<storage, read> indirect_buffer : IndirectBuffer;
334
            BindGroupLayoutEntry {
16✔
335
                binding: 1,
16✔
336
                visibility: ShaderStages::VERTEX,
16✔
337
                ty: BindingType::Buffer {
16✔
338
                    ty: BufferBindingType::Storage { read_only: true },
24✔
339
                    has_dynamic_offset: false,
16✔
340
                    min_binding_size: Some(NonZeroU64::new(INDIRECT_INDEX_SIZE as u64).unwrap()),
24✔
341
                },
342
                count: None,
16✔
343
            },
344
            // @group(1) @binding(2) var<storage, read> spawner : Spawner;
345
            BindGroupLayoutEntry {
8✔
346
                binding: 2,
8✔
347
                visibility: ShaderStages::VERTEX,
8✔
348
                ty: BindingType::Buffer {
8✔
349
                    ty: BufferBindingType::Storage { read_only: true },
8✔
350
                    has_dynamic_offset: true,
8✔
351
                    min_binding_size: Some(spawner_params_size),
8✔
352
                },
353
                count: None,
8✔
354
            },
355
        ];
356
        let label = format!(
16✔
357
            "hanabi:bind_group_layout:render:particles@1:slab{}",
358
            slab_id.0
359
        );
360
        trace!(
8✔
361
            "Creating particles@1 layout '{}' for render pass with {} entries",
362
            label,
363
            entries.len(),
6✔
364
        );
365
        let render_particles_buffer_layout =
8✔
366
            render_device.create_bind_group_layout(&label[..], &entries[..]);
32✔
367

368
        Self {
369
            particle_buffer,
370
            indirect_index_buffer,
371
            particle_layout,
372
            render_particles_buffer_layout,
373
            capacity,
374
            used_size: 0,
375
            free_slices: vec![],
16✔
376
            asset,
377
            sim_bind_group: None,
378
            sim_bind_group_key: SimBindGroupKey::INVALID,
379
        }
380
    }
381

382
    // TODO - move; this only depends on the particle and spawner layouts, can be
383
    // shared across slabs
384
    pub fn render_particles_buffer_layout(&self) -> &BindGroupLayout {
2✔
385
        &self.render_particles_buffer_layout
2✔
386
    }
387

388
    #[inline]
389
    pub fn particle_buffer(&self) -> &Buffer {
×
390
        &self.particle_buffer
×
391
    }
392

393
    #[inline]
394
    pub fn indirect_index_buffer(&self) -> &Buffer {
×
395
        &self.indirect_index_buffer
×
396
    }
397

398
    /// Return a binding for the entire particle buffer.
399
    pub fn as_entire_binding_particle(&self) -> BindingResource<'_> {
5✔
400
        let capacity_bytes = self.capacity as u64 * self.particle_layout.min_binding_size().get();
20✔
401
        BindingResource::Buffer(BufferBinding {
5✔
402
            buffer: &self.particle_buffer,
10✔
403
            offset: 0,
5✔
404
            size: Some(NonZeroU64::new(capacity_bytes).unwrap()),
10✔
405
        })
406
        //self.particle_buffer.as_entire_binding()
407
    }
408

409
    /// Return a binding source for the entire particle buffer.
410
    pub fn max_binding_source(&self) -> BufferBindingSource {
×
411
        let capacity_bytes = self.capacity * self.particle_layout.min_binding_size32().get();
×
412
        BufferBindingSource {
413
            buffer: self.particle_buffer.clone(),
×
414
            offset: 0,
415
            size: NonZeroU32::new(capacity_bytes).unwrap(),
×
416
        }
417
    }
418

419
    /// Return a binding for the entire indirect buffer associated with the
420
    /// current effect buffer.
421
    pub fn as_entire_binding_indirect(&self) -> BindingResource<'_> {
5✔
422
        let capacity_bytes = self.capacity as u64 * 12;
10✔
423
        BindingResource::Buffer(BufferBinding {
5✔
424
            buffer: &self.indirect_index_buffer,
10✔
425
            offset: 0,
5✔
426
            size: Some(NonZeroU64::new(capacity_bytes).unwrap()),
10✔
427
        })
428
        //self.indirect_index_buffer.as_entire_binding()
429
    }
430

431
    /// Create the "particle" bind group @1 for the init and update passes if
432
    /// needed.
433
    ///
434
    /// The `slab_id` must be the ID of the current [`ParticleSlab`] inside the
435
    /// [`EffectCache`].
436
    pub fn create_particle_sim_bind_group(
312✔
437
        &mut self,
438
        layout: &BindGroupLayout,
439
        slab_id: &SlabId,
440
        render_device: &RenderDevice,
441
        parent_binding_source: Option<&BufferBindingSource>,
442
    ) {
443
        let key: SimBindGroupKey = parent_binding_source.into();
1,248✔
444
        if self.sim_bind_group.is_some() && self.sim_bind_group_key == key {
933✔
445
            return;
309✔
446
        }
447

UNCOV
448
        let label = format!("hanabi:bind_group:sim:particle@1:vfx{}", slab_id.index());
×
449
        let entries: &[BindGroupEntry] = if let Some(parent_binding) =
×
450
            parent_binding_source.as_ref().map(|bbs| bbs.as_binding())
×
451
        {
UNCOV
452
            &[
×
UNCOV
453
                BindGroupEntry {
×
UNCOV
454
                    binding: 0,
×
UNCOV
455
                    resource: self.as_entire_binding_particle(),
×
456
                },
UNCOV
457
                BindGroupEntry {
×
UNCOV
458
                    binding: 1,
×
UNCOV
459
                    resource: self.as_entire_binding_indirect(),
×
460
                },
UNCOV
461
                BindGroupEntry {
×
UNCOV
462
                    binding: 2,
×
UNCOV
463
                    resource: parent_binding,
×
464
                },
465
            ]
466
        } else {
467
            &[
3✔
468
                BindGroupEntry {
6✔
469
                    binding: 0,
6✔
470
                    resource: self.as_entire_binding_particle(),
6✔
471
                },
472
                BindGroupEntry {
3✔
473
                    binding: 1,
3✔
474
                    resource: self.as_entire_binding_indirect(),
3✔
475
                },
476
            ]
477
        };
478

UNCOV
479
        trace!(
×
480
            "Create particle simulation bind group '{}' with {} entries (has_parent:{})",
481
            label,
482
            entries.len(),
6✔
483
            parent_binding_source.is_some(),
6✔
484
        );
UNCOV
485
        let bind_group = render_device.create_bind_group(Some(&label[..]), layout, entries);
×
UNCOV
486
        self.sim_bind_group = Some(bind_group);
×
UNCOV
487
        self.sim_bind_group_key = key;
×
488
    }
489

490
    /// Invalidate any existing simulate bind group.
491
    ///
492
    /// Invalidate any existing bind group previously created by
493
    /// [`create_particle_sim_bind_group()`], generally because a buffer was
494
    /// re-allocated. This forces a re-creation of the bind group
495
    /// next time [`create_particle_sim_bind_group()`] is called.
496
    ///
497
    /// [`create_particle_sim_bind_group()`]: self::ParticleSlab::create_particle_sim_bind_group
498
    #[allow(dead_code)] // FIXME - review this...
499
    fn invalidate_particle_sim_bind_group(&mut self) {
×
500
        self.sim_bind_group = None;
×
501
        self.sim_bind_group_key = SimBindGroupKey::INVALID;
×
502
    }
503

504
    /// Return the cached particle@1 bind group for the simulation (init and
505
    /// update) passes.
506
    ///
507
    /// This is the per-buffer bind group at binding @1 which binds all
508
    /// per-buffer resources shared by all effect instances batched in a single
509
    /// buffer. The bind group is created by
510
    /// [`create_particle_sim_bind_group()`], and cached until a call to
511
    /// [`invalidate_particle_sim_bind_group()`] clears the
512
    /// cached reference.
513
    ///
514
    /// [`create_particle_sim_bind_group()`]: self::ParticleSlab::create_particle_sim_bind_group
515
    /// [`invalidate_particle_sim_bind_group()`]: self::ParticleSlab::invalidate_particle_sim_bind_group
516
    pub fn particle_sim_bind_group(&self) -> Option<&BindGroup> {
609✔
517
        self.sim_bind_group.as_ref()
1,218✔
518
    }
519

520
    /// Try to recycle a free slice to store `size` items.
521
    fn pop_free_slice(&mut self, size: u32) -> Option<Range<u32>> {
20✔
522
        if self.free_slices.is_empty() {
40✔
523
            return None;
17✔
524
        }
525

526
        struct BestRange {
527
            range: Range<u32>,
528
            capacity: u32,
529
            index: usize,
530
        }
531

532
        let mut result = BestRange {
533
            range: 0..0, // marker for "invalid"
534
            capacity: u32::MAX,
535
            index: usize::MAX,
536
        };
537
        for (index, slice) in self.free_slices.iter().enumerate() {
3✔
538
            let capacity = slice.end - slice.start;
539
            if size > capacity {
540
                continue;
1✔
541
            }
542
            if capacity < result.capacity {
4✔
543
                result = BestRange {
2✔
544
                    range: slice.clone(),
6✔
545
                    capacity,
2✔
546
                    index,
2✔
547
                };
548
            }
549
        }
550
        if !result.range.is_empty() {
551
            if result.capacity > size {
2✔
552
                // split
553
                let start = result.range.start;
2✔
554
                let used_end = start + size;
2✔
555
                let free_end = result.range.end;
2✔
556
                let range = start..used_end;
2✔
557
                self.free_slices[result.index] = used_end..free_end;
2✔
558
                Some(range)
1✔
559
            } else {
560
                // recycle entirely
561
                self.free_slices.remove(result.index);
1✔
562
                Some(result.range)
563
            }
564
        } else {
565
            None
1✔
566
        }
567
    }
568

569
    /// Allocate a new entry in the slab to store the particles of a single
570
    /// effect.
571
    pub fn allocate(&mut self, capacity: u32) -> Option<SlabSliceRef> {
21✔
572
        trace!("ParticleSlab::allocate(capacity={})", capacity);
21✔
573

574
        if capacity > self.capacity {
21✔
575
            return None;
1✔
576
        }
577

578
        let range = if let Some(range) = self.pop_free_slice(capacity) {
20✔
579
            range
580
        } else {
581
            let new_size = self.used_size.checked_add(capacity).unwrap();
90✔
582
            if new_size <= self.capacity {
18✔
583
                let range = self.used_size..new_size;
32✔
584
                self.used_size = new_size;
16✔
585
                range
16✔
586
            } else {
587
                if self.used_size == 0 {
2✔
588
                    warn!(
×
589
                        "Cannot allocate slice of size {} in particle slab of capacity {}.",
590
                        capacity, self.capacity
591
                    );
592
                }
593
                return None;
594
            }
595
        };
596

597
        trace!("-> allocated slice {:?}", range);
598
        Some(SlabSliceRef {
599
            range,
600
            particle_layout: self.particle_layout.clone(),
601
        })
602
    }
603

604
    /// Free an allocated slice, and if this was the last allocated slice also
605
    /// free the buffer.
606
    pub fn free_slice(&mut self, slice: SlabSliceRef) -> SlabState {
11✔
607
        // If slice is at the end of the buffer, reduce total used size
608
        if slice.range.end == self.used_size {
11✔
609
            self.used_size = slice.range.start;
5✔
610
            // Check other free slices to further reduce used size and drain the free slice
611
            // list
612
            while let Some(free_slice) = self.free_slices.last() {
15✔
613
                if free_slice.end == self.used_size {
5✔
614
                    self.used_size = free_slice.start;
5✔
615
                    self.free_slices.pop();
5✔
616
                } else {
617
                    break;
×
618
                }
619
            }
620
            if self.used_size == 0 {
5✔
621
                assert!(self.free_slices.is_empty());
12✔
622
                // The buffer is not used anymore, free it too
623
                SlabState::Free
4✔
624
            } else {
625
                // There are still some slices used, the last one of which ends at
626
                // self.used_size
627
                SlabState::Used
1✔
628
            }
629
        } else {
630
            // Free slice is not at end; insert it in free list
631
            let range = slice.range;
6✔
632
            match self.free_slices.binary_search_by(|s| {
6✔
633
                if s.end <= range.start {
6✔
634
                    Ordering::Less
6✔
635
                } else if s.start >= range.end {
×
636
                    Ordering::Greater
×
637
                } else {
638
                    Ordering::Equal
×
639
                }
640
            }) {
641
                Ok(_) => warn!("Range {:?} already present in free list!", range),
×
642
                Err(index) => self.free_slices.insert(index, range),
30✔
643
            }
644
            SlabState::Used
645
        }
646
    }
647

648
    /// Check whether this slab is compatible with the given asset.
649
    ///
650
    /// This allows determining whether an instance of the effect can be stored
651
    /// inside this slab.
652
    pub fn is_compatible(
2✔
653
        &self,
654
        handle: &Handle<EffectAsset>,
655
        _particle_layout: &ParticleLayout,
656
    ) -> bool {
657
        // TODO - replace with check particle layout is compatible to allow tighter
658
        // packing in less buffers, and update in the less dispatch calls
659
        *handle == self.asset
2✔
660
    }
661
}
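
// Illustrative sketch (not part of the original file): how `allocate()`,
// `pop_free_slice()` and `free_slice()` interact. This mirrors the setup of
// the `gpu_tests` module at the end of this file; the capacity and particle
// layout are assumptions made only for the example.
#[cfg(all(test, feature = "gpu_tests"))]
mod slab_alloc_sketch {
    use super::*;
    use crate::{test_utils::MockRenderer, Attribute};

    #[test]
    fn freed_slice_is_reused_best_fit() {
        let renderer = MockRenderer::new();
        let render_device = renderer.device();
        let layout = ParticleLayout::new().append(Attribute::POSITION).build();
        let mut slab = ParticleSlab::new(
            SlabId::new(0),
            Handle::<EffectAsset>::default(),
            1024, // rounded up to ParticleSlab::MIN_CAPACITY
            layout,
            &render_device,
        );

        let a = slab.allocate(256).unwrap(); // items 0..256
        let b = slab.allocate(256).unwrap(); // items 256..512

        // Freeing a slice that is not at the end of the used region keeps the
        // slab in use and records the hole in the free list.
        assert_eq!(SlabState::Used, slab.free_slice(a));

        // A new allocation of the same size reuses the freed range.
        let c = slab.allocate(256).unwrap();
        assert_eq!(0..256, c.range());

        // Freeing the remaining slices releases the slab entirely.
        assert_eq!(SlabState::Used, slab.free_slice(b));
        assert_eq!(SlabState::Free, slab.free_slice(c));
    }
}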
662

663
/// A single cached effect in the [`EffectCache`].
664
#[derive(Debug, Component)]
665
pub(crate) struct CachedEffect {
666
    /// ID of the slab storing the particles for this effect in the
667
    /// [`EffectCache`].
668
    pub slab_id: SlabId,
669
    /// The allocated effect slice within that slab.
670
    pub slice: SlabSliceRef,
671
}
672

673
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
674
pub(crate) enum AnyDrawIndirectArgs {
675
    /// Args of a non-indexed draw call.
676
    NonIndexed(GpuDrawIndirectArgs),
677
    /// Args of an indexed draw call.
678
    Indexed(GpuDrawIndexedIndirectArgs),
679
}
680

681
impl AnyDrawIndirectArgs {
682
    /// Create from a vertex buffer slice and an optional index buffer slice.
683
    pub fn from_slices(
314✔
684
        vertex_slice: &MeshBufferSlice<'_>,
685
        index_slice: Option<&MeshBufferSlice<'_>>,
686
    ) -> Self {
687
        if let Some(index_slice) = index_slice {
628✔
UNCOV
688
            Self::Indexed(GpuDrawIndexedIndirectArgs {
×
UNCOV
689
                index_count: index_slice.range.len() as u32,
×
UNCOV
690
                instance_count: 0,
×
UNCOV
691
                first_index: index_slice.range.start,
×
UNCOV
692
                base_vertex: vertex_slice.range.start as i32,
×
UNCOV
693
                first_instance: 0,
×
694
            })
695
        } else {
696
            Self::NonIndexed(GpuDrawIndirectArgs {
×
697
                vertex_count: vertex_slice.range.len() as u32,
×
698
                instance_count: 0,
×
699
                first_vertex: vertex_slice.range.start,
×
700
                first_instance: 0,
×
701
            })
702
        }
703
    }
704

705
    /// Check if these args are for an indexed draw call.
706
    #[inline(always)]
707
    #[allow(dead_code)]
708
    pub fn is_indexed(&self) -> bool {
×
709
        matches!(*self, Self::Indexed(..))
×
710
    }
711

712
    /// Bit-cast the args to the row entry of the GPU buffer.
713
    ///
714
    /// If non-indexed, this returns an indexed struct bit-cast from the actual
715
    /// non-indexed one, ready for GPU upload.
716
    pub fn bitcast_to_row_entry(&self) -> GpuDrawIndexedIndirectArgs {
2✔
717
        match self {
2✔
718
            AnyDrawIndirectArgs::NonIndexed(args) => GpuDrawIndexedIndirectArgs {
UNCOV
719
                index_count: args.vertex_count,
×
UNCOV
720
                instance_count: args.instance_count,
×
UNCOV
721
                first_index: args.first_vertex,
×
UNCOV
722
                base_vertex: args.first_instance as i32,
×
723
                first_instance: 0,
724
            },
725
            AnyDrawIndirectArgs::Indexed(args) => *args,
4✔
726
        }
727
    }
728
}
729

730
impl From<GpuDrawIndirectArgs> for AnyDrawIndirectArgs {
731
    fn from(args: GpuDrawIndirectArgs) -> Self {
×
732
        Self::NonIndexed(args)
×
733
    }
734
}
735

736
impl From<GpuDrawIndexedIndirectArgs> for AnyDrawIndirectArgs {
737
    fn from(args: GpuDrawIndexedIndirectArgs) -> Self {
×
738
        Self::Indexed(args)
×
739
    }
740
}
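
// Illustrative sketch (not part of the original file): a non-indexed args
// value is re-interpreted field-by-field into the indexed layout, so both
// variants can share a single row format in the GPU args table. The field
// values below are arbitrary.
#[cfg(test)]
mod draw_args_sketch {
    use super::*;

    #[test]
    fn non_indexed_bitcast_to_row_entry() {
        let args = AnyDrawIndirectArgs::NonIndexed(GpuDrawIndirectArgs {
            vertex_count: 6,
            instance_count: 0,
            first_vertex: 12,
            first_instance: 3,
        });
        let row = args.bitcast_to_row_entry();
        // vertex_count -> index_count, first_vertex -> first_index,
        // first_instance -> base_vertex, and first_instance is reset to 0.
        assert_eq!(row.index_count, 6);
        assert_eq!(row.instance_count, 0);
        assert_eq!(row.first_index, 12);
        assert_eq!(row.base_vertex, 3);
        assert_eq!(row.first_instance, 0);
    }
}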
741

742
/// Index of a row (entry) into the [`BufferTable`] storing the indirect draw
743
/// args of a single draw call.
744
#[derive(Debug, Clone, Copy, Component)]
745
pub(crate) struct CachedDrawIndirectArgs {
746
    pub row: BufferTableId,
747
    pub args: AnyDrawIndirectArgs,
748
}
749

750
impl Default for CachedDrawIndirectArgs {
751
    fn default() -> Self {
×
752
        Self {
753
            row: BufferTableId::INVALID,
754
            args: AnyDrawIndirectArgs::NonIndexed(default()),
×
755
        }
756
    }
757
}
758

759
impl CachedDrawIndirectArgs {
760
    /// Check if the index is valid.
761
    ///
762
    /// An invalid index doesn't correspond to any allocated args entry. A valid
763
    /// one may, but note that the args entry in the buffer may have been freed
764
    /// already with this index. There's no mechanism to detect reuse either.
765
    #[inline(always)]
766
    #[allow(dead_code)]
767
    pub fn is_valid(&self) -> bool {
×
768
        self.get_row_raw().is_valid()
×
769
    }
770

771
    /// Check if this row index refers to an indexed draw args entry.
772
    #[inline(always)]
773
    #[allow(dead_code)]
774
    pub fn is_indexed(&self) -> bool {
×
775
        self.args.is_indexed()
×
776
    }
777

778
    /// Get the raw index value.
779
    ///
780
    /// Retrieve the raw index value, losing the discriminant between indexed
781
    /// and non-indexed draw. This is useful when storing the index value into a
782
    /// GPU buffer. The rest of the time, prefer retaining the typed enum for
783
    /// safety.
784
    ///
785
    /// # Panics
786
    ///
787
    /// Panics if the index is invalid, whether indexed or non-indexed.
788
    pub fn get_row(&self) -> BufferTableId {
316✔
789
        let idx = self.get_row_raw();
948✔
790
        assert!(idx.is_valid());
948✔
791
        idx
316✔
792
    }
793

794
    #[inline(always)]
795
    fn get_row_raw(&self) -> BufferTableId {
316✔
796
        self.row
316✔
797
    }
798
}
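
// Illustrative sketch (not part of the original file): a default-constructed
// `CachedDrawIndirectArgs` points at no buffer row and reports itself as
// invalid and non-indexed.
#[cfg(test)]
mod cached_draw_args_sketch {
    use super::*;

    #[test]
    fn default_args_are_invalid() {
        let args = CachedDrawIndirectArgs::default();
        assert!(!args.is_valid());
        assert!(!args.is_indexed());
    }
}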
799

800
/// The indices in the indirect dispatch buffers for a single effect, as well as
801
/// that of the metadata buffer.
802
#[derive(Debug, Default, Clone, Copy, Component)]
803
pub(crate) struct DispatchBufferIndices {
804
    /// The index of the [`GpuDispatchIndirect`] row in the GPU buffer
805
    /// [`EffectsMeta::update_dispatch_indirect_buffer`].
806
    ///
807
    /// [`GpuDispatchIndirect`]: super::GpuDispatchIndirect
808
    /// [`EffectsMeta::update_dispatch_indirect_buffer`]: super::EffectsMeta::dispatch_indirect_buffer
809
    pub(crate) update_dispatch_indirect_buffer_row_index: u32,
810
}
811

812
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
813
struct ParticleBindGroupLayoutKey {
814
    pub min_binding_size: NonZeroU32,
815
    pub parent_min_binding_size: Option<NonZeroU32>,
816
}
817

818
/// Cache for effect instances sharing common GPU data structures.
819
#[derive(Resource)]
820
pub struct EffectCache {
821
    /// Render device the GPU resources (buffers) are allocated from.
822
    render_device: RenderDevice,
823
    /// Collection of particle slabs managed by this cache. Some slabs might be
824
    /// `None` if the entry is not used. Since the slabs are referenced
825
    /// by index, we cannot move them once they're allocated.
826
    particle_slabs: Vec<Option<ParticleSlab>>,
827
    /// Cache of bind group layouts for the particle@1 bind groups of the
828
    /// simulation passes (init and update). Since all bindings depend only
829
    /// on buffers managed by the [`EffectCache`], we also cache the layouts
830
    /// here for convenience.
831
    particle_bind_group_layout_descs:
832
        HashMap<ParticleBindGroupLayoutKey, BindGroupLayoutDescriptor>,
833
    /// Cache of bind group layouts for the metadata@3 bind group of the init
834
    /// pass.
835
    metadata_init_bind_group_layout_desc: [Option<BindGroupLayoutDescriptor>; 2],
836
    /// Cache of bind group layouts for the metadata@3 bind group of the
837
    /// update pass.
838
    metadata_update_bind_group_layout_descs: HashMap<u32, BindGroupLayoutDescriptor>,
839
}
840

841
impl EffectCache {
842
    /// Create a new empty cache.
843
    pub fn new(device: RenderDevice) -> Self {
4✔
844
        Self {
845
            render_device: device,
846
            particle_slabs: vec![],
8✔
847
            particle_bind_group_layout_descs: default(),
8✔
848
            metadata_init_bind_group_layout_desc: [None, None],
4✔
849
            metadata_update_bind_group_layout_descs: default(),
4✔
850
        }
851
    }
852

853
    /// Get all the particle slab slots. Unallocated slots are `None`. This can
854
    /// be indexed by the slab index.
855
    #[allow(dead_code)]
856
    #[inline]
857
    pub fn slabs(&self) -> &[Option<ParticleSlab>] {
319✔
858
        &self.particle_slabs
319✔
859
    }
860

861
    /// Get all the particle slab slots. Unallocated slots are `None`. This can
862
    /// be indexed by the slab ID.
863
    #[allow(dead_code)]
864
    #[inline]
865
    pub fn slabs_mut(&mut self) -> &mut [Option<ParticleSlab>] {
×
866
        &mut self.particle_slabs
×
867
    }
868

869
    /// Fetch a specific slab by ID.
870
    #[inline]
871
    pub fn get_slab(&self, slab_id: &SlabId) -> Option<&ParticleSlab> {
609✔
872
        self.particle_slabs.get(slab_id.0 as usize)?.as_ref()
1,827✔
873
    }
874

875
    /// Fetch a specific slab by ID, mutably.
876
    #[allow(dead_code)]
877
    #[inline]
878
    pub fn get_slab_mut(&mut self, slab_id: &SlabId) -> Option<&mut ParticleSlab> {
×
879
        self.particle_slabs.get_mut(slab_id.0 as usize)?.as_mut()
×
880
    }
881

882
    /// Invalidate the particle@1 bind groups of all slabs.
883
    ///
884
    /// This iterates over all allocated slabs and calls
885
    /// [`ParticleSlab::invalidate_particle_sim_bind_group()`] on each one.
886
    #[allow(dead_code)] // FIXME - review this...
887
    pub fn invalidate_particle_sim_bind_groups(&mut self) {
×
888
        for buffer in self.particle_slabs.iter_mut().flatten() {
×
UNCOV
889
            buffer.invalidate_particle_sim_bind_group();
×
890
        }
891
    }
892

893
    /// Insert a new effect instance in the cache.
894
    pub fn insert(
6✔
895
        &mut self,
896
        asset: Handle<EffectAsset>,
897
        capacity: u32,
898
        particle_layout: &ParticleLayout,
899
    ) -> CachedEffect {
900
        trace!("Inserting new effect into cache: capacity={capacity}");
6✔
901
        let (slab_id, slice) = self
18✔
902
            .particle_slabs
6✔
903
            .iter_mut()
904
            .enumerate()
905
            .find_map(|(slab_index, maybe_slab)| {
10✔
906
                // Ignore empty (non-allocated) entries as we're trying to fit the new allocation inside an existing slab.
907
                let Some(slab) = maybe_slab else { return None; };
8✔
908

909
                // The slab must be compatible with the effect's layout, otherwise ignore it.
910
                if !slab.is_compatible(&asset, particle_layout) {
911
                    return None;
×
912
                }
913

914
                // Try to allocate a slice into the slab
915
                slab
2✔
916
                    .allocate(capacity)
4✔
917
                    .map(|slice| (SlabId::new(slab_index as u32), slice))
2✔
918
            })
919
            .unwrap_or_else(|| {
12✔
920
                // Cannot find any suitable slab; allocate a new one
921
                let index = self.particle_slabs.iter().position(|buf| buf.is_none()).unwrap_or(self.particle_slabs.len());
42✔
922
                let byte_size = capacity.checked_mul(particle_layout.min_binding_size().get() as u32).unwrap_or_else(|| panic!(
36✔
923
                    "Effect size overflow: capacity={:?} particle_layout={:?} item_size={}",
924
                    capacity, particle_layout, particle_layout.min_binding_size().get()
×
925
                ));
926
                trace!(
6✔
927
                    "Creating new particle slab #{} for effect {:?} (capacity={:?}, particle_layout={:?} item_size={}, byte_size={})",
928
                    index,
929
                    asset,
930
                    capacity,
931
                    particle_layout,
932
                    particle_layout.min_binding_size().get(),
9✔
933
                    byte_size
934
                );
935
                let slab_id = SlabId::new(index as u32);
18✔
936
                let mut slab = ParticleSlab::new(
12✔
937
                    slab_id,
6✔
938
                    asset,
6✔
939
                    capacity,
6✔
940
                    particle_layout.clone(),
12✔
941
                    &self.render_device,
6✔
942
                );
943
                let slice_ref = slab.allocate(capacity).unwrap();
30✔
944
                if index >= self.particle_slabs.len() {
16✔
945
                    self.particle_slabs.push(Some(slab));
8✔
946
                } else {
947
                    debug_assert!(self.particle_slabs[index].is_none());
2✔
948
                    self.particle_slabs[index] = Some(slab);
4✔
949
                }
950
                (slab_id, slice_ref)
6✔
951
            });
952

953
        let slice = SlabSliceRef {
954
            range: slice.range.clone(),
12✔
955
            particle_layout: slice.particle_layout,
6✔
956
        };
957

958
        trace!(
6✔
959
            "Insert effect slab_id={} slice={}B particle_layout={:?}",
960
            slab_id.0,
961
            slice.particle_layout.min_binding_size().get(),
9✔
962
            slice.particle_layout,
963
        );
964
        CachedEffect { slab_id, slice }
965
    }
966

967
    /// Remove an effect from the cache. If this was the last effect, drop the
968
    /// underlying slab and return [`SlabState::Free`].
969
    pub fn remove(&mut self, cached_effect: &CachedEffect) -> Result<SlabState, ()> {
3✔
970
        // Resolve the buffer by index
971
        let Some(maybe_buffer) = self
9✔
972
            .particle_slabs
6✔
973
            .get_mut(cached_effect.slab_id.0 as usize)
3✔
974
        else {
975
            return Err(());
×
976
        };
977
        let Some(buffer) = maybe_buffer.as_mut() else {
3✔
978
            return Err(());
×
979
        };
980

981
        // Free the slice inside the resolved buffer
982
        if buffer.free_slice(cached_effect.slice.clone()) == SlabState::Free {
983
            *maybe_buffer = None;
6✔
984
            return Ok(SlabState::Free);
3✔
985
        }
986

987
        Ok(SlabState::Used)
988
    }
989

990
    //
991
    // Bind group layouts
992
    //
993

994
    /// Ensure a bind group layout exists for the bind group @1 ("particles")
995
    /// for use with the given min binding sizes.
996
    pub fn ensure_particle_bind_group_layout_desc(
315✔
997
        &mut self,
998
        min_binding_size: NonZeroU32,
999
        parent_min_binding_size: Option<NonZeroU32>,
1000
    ) -> &BindGroupLayoutDescriptor {
1001
        // FIXME - This "ensure" pattern means we never de-allocate entries. This is
1002
        // probably fine, because there's a limited number of realistic combinations,
1003
        // but could cause waste if e.g. loading widely different scenes.
1004
        let key = ParticleBindGroupLayoutKey {
1005
            min_binding_size,
1006
            parent_min_binding_size,
1007
        };
1008
        self.particle_bind_group_layout_descs
315✔
1009
            .entry(key)
630✔
1010
            .or_insert_with(|| {
317✔
1011
                trace!("Creating new particle sim bind group @1 for min_binding_size={} parent_min_binding_size={:?}", min_binding_size, parent_min_binding_size);
2✔
1012
                create_particle_sim_bind_group_layout_desc(
2✔
1013
                    min_binding_size,
2✔
1014
                    parent_min_binding_size,
2✔
1015
                )
1016
            })
1017
    }
1018

1019
    /// Get the bind group layout for the bind group @1 ("particles") for use
1020
    /// with the given min binding sizes.
1021
    pub fn particle_bind_group_layout_desc(
314✔
1022
        &self,
1023
        min_binding_size: NonZeroU32,
1024
        parent_min_binding_size: Option<NonZeroU32>,
1025
    ) -> Option<&BindGroupLayoutDescriptor> {
1026
        let key = ParticleBindGroupLayoutKey {
1027
            min_binding_size,
1028
            parent_min_binding_size,
1029
        };
1030
        self.particle_bind_group_layout_descs.get(&key)
942✔
1031
    }
1032

1033
    /// Ensure a bind group layout exists for the metadata@3 bind group of
1034
    /// the init pass.
1035
    pub fn ensure_metadata_init_bind_group_layout_desc(&mut self, consume_gpu_spawn_events: bool) {
3✔
1036
        let layout =
3✔
1037
            &mut self.metadata_init_bind_group_layout_desc[consume_gpu_spawn_events as usize];
3✔
1038
        if layout.is_none() {
8✔
1039
            *layout = Some(create_metadata_init_bind_group_layout_desc(
6✔
1040
                &self.render_device,
2✔
1041
                consume_gpu_spawn_events,
2✔
1042
            ));
1043
        }
1044
    }
1045

1046
    /// Get the bind group layout for the metadata@3 bind group of the init
1047
    /// pass.
1048
    pub fn metadata_init_bind_group_layout_desc(
314✔
1049
        &self,
1050
        consume_gpu_spawn_events: bool,
1051
    ) -> Option<&BindGroupLayoutDescriptor> {
1052
        self.metadata_init_bind_group_layout_desc[consume_gpu_spawn_events as usize].as_ref()
628✔
1053
    }
1054

1055
    /// Ensure a bind group layout exists for the metadata@3 bind group of
1056
    /// the update pass.
1057
    pub fn ensure_metadata_update_bind_group_layout_desc(&mut self, num_event_buffers: u32) {
2✔
1058
        self.metadata_update_bind_group_layout_descs
2✔
1059
            .entry(num_event_buffers)
4✔
1060
            .or_insert_with(|| {
4✔
1061
                create_metadata_update_bind_group_layout_desc(
2✔
1062
                    &self.render_device,
2✔
1063
                    num_event_buffers,
2✔
1064
                )
1065
            });
1066
    }
1067

1068
    /// Get the bind group layout for the metadata@3 bind group of the
1069
    /// update pass.
1070
    pub fn metadata_update_bind_group_layout_desc(
314✔
1071
        &self,
1072
        num_event_buffers: u32,
1073
    ) -> Option<&BindGroupLayoutDescriptor> {
1074
        self.metadata_update_bind_group_layout_descs
314✔
1075
            .get(&num_event_buffers)
628✔
1076
    }
1077

1078
    //
1079
    // Bind groups
1080
    //
1081

1082
    /// Get the "particle" bind group for the simulation (init and update)
1083
    /// passes, for a cached effect stored in a given particle slab.
1084
    pub fn particle_sim_bind_group(&self, slab_id: &SlabId) -> Option<&BindGroup> {
609✔
1085
        self.get_slab(slab_id)
1,827✔
1086
            .and_then(|slab| slab.particle_sim_bind_group())
1,827✔
1087
    }
1088

1089
    pub fn create_particle_sim_bind_group(
312✔
1090
        &mut self,
1091
        slab_id: &SlabId,
1092
        render_device: &RenderDevice,
1093
        min_binding_size: NonZeroU32,
1094
        parent_min_binding_size: Option<NonZeroU32>,
1095
        parent_binding_source: Option<&BufferBindingSource>,
1096
        pipeline_cache: &PipelineCache,
1097
    ) -> Result<(), ()> {
1098
        // Create the bind group
1099
        let layout = self
936✔
1100
            .ensure_particle_bind_group_layout_desc(min_binding_size, parent_min_binding_size)
624✔
1101
            .clone();
1102
        let slot = self
936✔
1103
            .particle_slabs
624✔
1104
            .get_mut(slab_id.index() as usize)
624✔
1105
            .ok_or(())?;
624✔
1106
        let effect_buffer = slot.as_mut().ok_or(())?;
312✔
UNCOV
1107
        effect_buffer.create_particle_sim_bind_group(
×
NEW
1108
            &pipeline_cache.get_bind_group_layout(&layout),
×
UNCOV
1109
            slab_id,
×
UNCOV
1110
            render_device,
×
UNCOV
1111
            parent_binding_source,
×
1112
        );
UNCOV
1113
        Ok(())
×
1114
    }
1115
}
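
// Illustrative sketch (not part of the original file): two instances of the
// same asset with the same particle layout share one slab, and removing the
// last instance frees that slab slot. The capacities and layout are example
// assumptions; the setup mirrors the `gpu_tests` module below.
#[cfg(all(test, feature = "gpu_tests"))]
mod effect_cache_sketch {
    use super::*;
    use crate::{test_utils::MockRenderer, Attribute};

    #[test]
    fn insert_shares_slab_and_remove_frees_it() {
        let renderer = MockRenderer::new();
        let mut cache = EffectCache::new(renderer.device());
        let asset = Handle::<EffectAsset>::default();
        let layout = ParticleLayout::new().append(Attribute::POSITION).build();

        let e1 = cache.insert(asset.clone(), 256, &layout);
        let e2 = cache.insert(asset.clone(), 256, &layout);
        // Same asset and layout: both slices land in the same slab.
        assert_eq!(e1.slab_id, e2.slab_id);

        // Removing one slice keeps the slab alive...
        assert_eq!(Ok(SlabState::Used), cache.remove(&e1));
        // ...removing the last one frees the slab slot.
        assert_eq!(Ok(SlabState::Free), cache.remove(&e2));
    }
}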
1116

1117
/// Create the bind group layout for the "particle" group (@1) of the init and
1118
/// update passes.
1119
fn create_particle_sim_bind_group_layout_desc(
2✔
1120
    particle_layout_min_binding_size: NonZeroU32,
1121
    parent_particle_layout_min_binding_size: Option<NonZeroU32>,
1122
) -> BindGroupLayoutDescriptor {
1123
    let mut entries = Vec::with_capacity(3);
4✔
1124

1125
    // @group(1) @binding(0) var<storage, read_write> particle_buffer :
1126
    // ParticleBuffer
1127
    entries.push(BindGroupLayoutEntry {
6✔
1128
        binding: 0,
2✔
1129
        visibility: ShaderStages::COMPUTE,
2✔
1130
        ty: BindingType::Buffer {
2✔
1131
            ty: BufferBindingType::Storage { read_only: false },
4✔
1132
            has_dynamic_offset: false,
2✔
1133
            min_binding_size: Some(particle_layout_min_binding_size.into()),
2✔
1134
        },
1135
        count: None,
2✔
1136
    });
1137

1138
    // @group(1) @binding(1) var<storage, read_write> indirect_buffer :
1139
    // IndirectBuffer
1140
    entries.push(BindGroupLayoutEntry {
6✔
1141
        binding: 1,
2✔
1142
        visibility: ShaderStages::COMPUTE,
2✔
1143
        ty: BindingType::Buffer {
2✔
1144
            ty: BufferBindingType::Storage { read_only: false },
4✔
1145
            has_dynamic_offset: false,
2✔
1146
            min_binding_size: Some(NonZeroU64::new(INDIRECT_INDEX_SIZE as _).unwrap()),
4✔
1147
        },
1148
        count: None,
2✔
1149
    });
1150

1151
    // @group(1) @binding(2) var<storage, read> parent_particle_buffer :
1152
    // ParentParticleBuffer;
1153
    if let Some(min_binding_size) = parent_particle_layout_min_binding_size {
2✔
UNCOV
1154
        entries.push(BindGroupLayoutEntry {
×
UNCOV
1155
            binding: 2,
×
UNCOV
1156
            visibility: ShaderStages::COMPUTE,
×
UNCOV
1157
            ty: BindingType::Buffer {
×
UNCOV
1158
                ty: BufferBindingType::Storage { read_only: true },
×
UNCOV
1159
                has_dynamic_offset: false,
×
UNCOV
1160
                min_binding_size: Some(min_binding_size.into()),
×
1161
            },
UNCOV
1162
            count: None,
×
1163
        });
1164
    }
1165

1166
    let hash = calc_hash(&entries);
6✔
1167
    let label = format!("hanabi:bind_group_layout:sim:particles_{:016X}", hash);
4✔
1168
    trace!(
2✔
1169
        "Creating particle bind group layout '{}' for init pass with {} entries. (parent_buffer:{})",
1170
        label,
1171
        entries.len(),
4✔
1172
        parent_particle_layout_min_binding_size.is_some(),
4✔
1173
    );
1174
    BindGroupLayoutDescriptor::new(label, &entries)
6✔
1175
}
1176

1177
/// Create the bind group layout for the metadata@3 bind group of the init pass.
1178
fn create_metadata_init_bind_group_layout_desc(
2✔
1179
    render_device: &RenderDevice,
1180
    consume_gpu_spawn_events: bool,
1181
) -> BindGroupLayoutDescriptor {
1182
    let storage_alignment = render_device.limits().min_storage_buffer_offset_alignment;
4✔
1183
    let effect_metadata_size = GpuEffectMetadata::aligned_size(storage_alignment);
6✔
1184

1185
    let mut entries = Vec::with_capacity(3);
4✔
1186

1187
    // @group(3) @binding(0) var<storage, read_write> effect_metadata :
1188
    // EffectMetadata;
1189
    entries.push(BindGroupLayoutEntry {
6✔
1190
        binding: 0,
2✔
1191
        visibility: ShaderStages::COMPUTE,
2✔
1192
        ty: BindingType::Buffer {
2✔
1193
            ty: BufferBindingType::Storage { read_only: false },
2✔
1194
            has_dynamic_offset: false,
2✔
1195
            // This WGSL struct is manually padded, so the Rust type GpuEffectMetadata doesn't
1196
            // reflect its true min size.
1197
            min_binding_size: Some(effect_metadata_size),
2✔
1198
        },
1199
        count: None,
2✔
1200
    });
1201

1202
    if consume_gpu_spawn_events {
2✔
1203
        // @group(3) @binding(1) var<storage, read> child_info_buffer : ChildInfoBuffer;
1204
        entries.push(BindGroupLayoutEntry {
×
1205
            binding: 1,
×
1206
            visibility: ShaderStages::COMPUTE,
×
1207
            ty: BindingType::Buffer {
×
1208
                ty: BufferBindingType::Storage { read_only: true },
×
1209
                has_dynamic_offset: false,
×
1210
                min_binding_size: Some(GpuChildInfo::min_size()),
×
1211
            },
1212
            count: None,
×
1213
        });
1214

1215
        // @group(3) @binding(2) var<storage, read> event_buffer : EventBuffer;
1216
        entries.push(BindGroupLayoutEntry {
×
1217
            binding: 2,
×
1218
            visibility: ShaderStages::COMPUTE,
×
1219
            ty: BindingType::Buffer {
×
1220
                ty: BufferBindingType::Storage { read_only: true },
×
1221
                has_dynamic_offset: false,
×
1222
                min_binding_size: Some(NonZeroU64::new(4).unwrap()),
×
1223
            },
1224
            count: None,
×
1225
        });
1226
    }
1227

1228
    let hash = calc_hash(&entries);
6✔
1229
    let label = format!(
4✔
1230
        "hanabi:bind_group_layout:init:metadata@3_{}{:016X}",
1231
        if consume_gpu_spawn_events {
2✔
1232
            "events"
×
1233
        } else {
1234
            "noevent"
2✔
1235
        },
1236
        hash
1237
    );
1238
    trace!(
2✔
1239
        "Creating metadata@3 bind group layout '{}' for init pass with {} entries. (consume_gpu_spawn_events:{})",
1240
        label,
1241
        entries.len(),
4✔
1242
        consume_gpu_spawn_events,
1243
    );
1244
    BindGroupLayoutDescriptor::new(label, &entries)
6✔
1245
}
1246

1247
/// Create the bind group layout for the metadata@3 bind group of the update
1248
/// pass.
1249
fn create_metadata_update_bind_group_layout_desc(
2✔
1250
    render_device: &RenderDevice,
1251
    num_event_buffers: u32,
1252
) -> BindGroupLayoutDescriptor {
1253
    let storage_alignment = render_device.limits().min_storage_buffer_offset_alignment;
4✔
1254
    let effect_metadata_size = GpuEffectMetadata::aligned_size(storage_alignment);
6✔
1255

1256
    let mut entries = Vec::with_capacity(num_event_buffers as usize + 2);
6✔
1257

1258
    // @group(3) @binding(0) var<storage, read_write> effect_metadata :
1259
    // EffectMetadata;
1260
    entries.push(BindGroupLayoutEntry {
6✔
1261
        binding: 0,
2✔
1262
        visibility: ShaderStages::COMPUTE,
2✔
1263
        ty: BindingType::Buffer {
2✔
1264
            ty: BufferBindingType::Storage { read_only: false },
2✔
1265
            has_dynamic_offset: false,
2✔
1266
            // This WGSL struct is manually padded, so the Rust type GpuEffectMetadata doesn't
1267
            // reflect its true min size.
1268
            min_binding_size: Some(effect_metadata_size),
2✔
1269
        },
1270
        count: None,
2✔
1271
    });
1272

1273
    if num_event_buffers > 0 {
2✔
1274
        // @group(3) @binding(1) var<storage, read_write> child_infos : array<ChildInfo,
1275
        // N>;
1276
        entries.push(BindGroupLayoutEntry {
×
1277
            binding: 1,
×
1278
            visibility: ShaderStages::COMPUTE,
×
1279
            ty: BindingType::Buffer {
×
1280
                ty: BufferBindingType::Storage { read_only: false },
×
1281
                has_dynamic_offset: false,
×
1282
                min_binding_size: Some(GpuChildInfo::min_size()),
×
1283
            },
1284
            count: None,
×
1285
        });
1286

1287
        for i in 0..num_event_buffers {
×
1288
            // @group(3) @binding(2+i) var<storage, read_write> event_buffer_#i :
1289
            // EventBuffer;
UNCOV
1290
            entries.push(BindGroupLayoutEntry {
×
UNCOV
1291
                binding: 2 + i,
×
UNCOV
1292
                visibility: ShaderStages::COMPUTE,
×
UNCOV
1293
                ty: BindingType::Buffer {
×
UNCOV
1294
                    ty: BufferBindingType::Storage { read_only: false },
×
UNCOV
1295
                    has_dynamic_offset: false,
×
UNCOV
1296
                    min_binding_size: Some(NonZeroU64::new(4).unwrap()),
×
1297
                },
UNCOV
1298
                count: None,
×
1299
            });
1300
        }
1301
    }
1302

1303
    let hash = calc_hash(&entries);
6✔
1304
    let label = format!("hanabi:bind_group_layout:update:metadata_{:016X}", hash);
4✔
1305
    trace!(
2✔
1306
        "Creating particle bind group layout '{}' for init update with {} entries. (num_event_buffers:{})",
1307
        label,
1308
        entries.len(),
4✔
1309
        num_event_buffers,
1310
    );
1311
    BindGroupLayoutDescriptor::new(label, &entries)
6✔
1312
}
1313

1314
#[cfg(all(test, feature = "gpu_tests"))]
1315
mod gpu_tests {
1316
    use std::borrow::Cow;
1317

1318
    use bevy::math::Vec4;
1319

1320
    use super::*;
1321
    use crate::{
1322
        graph::{Value, VectorValue},
1323
        test_utils::MockRenderer,
1324
        Attribute, AttributeInner,
1325
    };
1326

1327
    #[test]
1328
    fn effect_slice_ord() {
1329
        let particle_layout = ParticleLayout::new().append(Attribute::POSITION).build();
1330
        let slice1 = EffectSlice {
1331
            slice: 0..32,
1332
            slab_id: SlabId::new(1),
1333
            particle_layout: particle_layout.clone(),
1334
        };
1335
        let slice2 = EffectSlice {
1336
            slice: 32..64,
1337
            slab_id: SlabId::new(1),
1338
            particle_layout: particle_layout.clone(),
1339
        };
1340
        assert!(slice1 < slice2);
1341
        assert!(slice1 <= slice2);
1342
        assert!(slice2 > slice1);
1343
        assert!(slice2 >= slice1);
1344

1345
        let slice3 = EffectSlice {
1346
            slice: 0..32,
1347
            slab_id: SlabId::new(0),
1348
            particle_layout,
1349
        };
1350
        assert!(slice3 < slice1);
1351
        assert!(slice3 < slice2);
1352
        assert!(slice1 > slice3);
1353
        assert!(slice2 > slice3);
1354
    }
1355

    const F4A_INNER: &AttributeInner = &AttributeInner::new(
        Cow::Borrowed("F4A"),
        Value::Vector(VectorValue::new_vec4(Vec4::ONE)),
    );
    const F4B_INNER: &AttributeInner = &AttributeInner::new(
        Cow::Borrowed("F4B"),
        Value::Vector(VectorValue::new_vec4(Vec4::ONE)),
    );
    const F4C_INNER: &AttributeInner = &AttributeInner::new(
        Cow::Borrowed("F4C"),
        Value::Vector(VectorValue::new_vec4(Vec4::ONE)),
    );
    const F4D_INNER: &AttributeInner = &AttributeInner::new(
        Cow::Borrowed("F4D"),
        Value::Vector(VectorValue::new_vec4(Vec4::ONE)),
    );

    const F4A: Attribute = Attribute(F4A_INNER);
    const F4B: Attribute = Attribute(F4B_INNER);
    const F4C: Attribute = Attribute(F4C_INNER);
    const F4D: Attribute = Attribute(F4D_INNER);

    #[test]
    fn slice_ref() {
        let l16 = ParticleLayout::new().append(F4A).build();
        assert_eq!(16, l16.size());
        let l32 = ParticleLayout::new().append(F4A).append(F4B).build();
        assert_eq!(32, l32.size());
        let l48 = ParticleLayout::new()
            .append(F4A)
            .append(F4B)
            .append(F4C)
            .build();
        assert_eq!(48, l48.size());
        for (range, particle_layout, len, byte_size) in [
            (0..0, &l16, 0, 0),
            (0..16, &l16, 16, 16 * 16),
            (0..16, &l32, 16, 16 * 32),
            (240..256, &l48, 16, 16 * 48),
        ] {
            let sr = SlabSliceRef {
                range,
                particle_layout: particle_layout.clone(),
            };
            assert_eq!(sr.len(), len);
            assert_eq!(sr.byte_size(), byte_size);
        }
    }

    #[test]
    fn effect_buffer() {
        let renderer = MockRenderer::new();
        let render_device = renderer.device();

        let l64 = ParticleLayout::new()
            .append(F4A)
            .append(F4B)
            .append(F4C)
            .append(F4D)
            .build();
        assert_eq!(64, l64.size());

        let asset = Handle::<EffectAsset>::default();
        let capacity = 4096;
        let mut buffer = ParticleSlab::new(
            SlabId::new(42),
            asset,
            capacity,
            l64.clone(),
            &render_device,
        );

        assert_eq!(buffer.capacity, capacity.max(ParticleSlab::MIN_CAPACITY));
        assert_eq!(64, buffer.particle_layout.size());
        assert_eq!(64, buffer.particle_layout.min_binding_size().get());
        assert_eq!(0, buffer.used_size);
        assert!(buffer.free_slices.is_empty());

        assert_eq!(None, buffer.allocate(buffer.capacity + 1));

        let mut offset = 0;
        let mut slices = vec![];
        for size in [32, 128, 55, 148, 1, 2048, 42] {
            let slice = buffer.allocate(size);
            assert!(slice.is_some());
            let slice = slice.unwrap();
            assert_eq!(64, slice.particle_layout.size());
            assert_eq!(64, buffer.particle_layout.min_binding_size().get());
            assert_eq!(offset..offset + size, slice.range);
            slices.push(slice);
            offset += size;
        }
        assert_eq!(offset, buffer.used_size);

        assert_eq!(SlabState::Used, buffer.free_slice(slices[2].clone()));
        assert_eq!(1, buffer.free_slices.len());
        let free_slice = &buffer.free_slices[0];
        assert_eq!(160..215, *free_slice);
        assert_eq!(offset, buffer.used_size); // didn't move

        assert_eq!(SlabState::Used, buffer.free_slice(slices[3].clone()));
        assert_eq!(SlabState::Used, buffer.free_slice(slices[4].clone()));
        assert_eq!(SlabState::Used, buffer.free_slice(slices[5].clone()));
        assert_eq!(4, buffer.free_slices.len());
        assert_eq!(offset, buffer.used_size); // didn't move

        // this will collapse all the way to slices[1], the highest allocated
        assert_eq!(SlabState::Used, buffer.free_slice(slices[6].clone()));
        assert_eq!(0, buffer.free_slices.len()); // collapsed
        assert_eq!(160, buffer.used_size); // collapsed

        assert_eq!(SlabState::Used, buffer.free_slice(slices[0].clone()));
        assert_eq!(1, buffer.free_slices.len());
        assert_eq!(160, buffer.used_size); // didn't move

        // collapse all, and free buffer
        assert_eq!(SlabState::Free, buffer.free_slice(slices[1].clone()));
        assert_eq!(0, buffer.free_slices.len());
        assert_eq!(0, buffer.used_size); // collapsed and empty
    }

    #[test]
    fn pop_free_slice() {
        let renderer = MockRenderer::new();
        let render_device = renderer.device();

        let l64 = ParticleLayout::new()
            .append(F4A)
            .append(F4B)
            .append(F4C)
            .append(F4D)
            .build();
        assert_eq!(64, l64.size());

        let asset = Handle::<EffectAsset>::default();
        let capacity = 2048; // ParticleSlab::MIN_CAPACITY;
        assert!(capacity >= 2048); // otherwise the logic below breaks
        let mut buffer = ParticleSlab::new(
            SlabId::new(42),
            asset,
            capacity,
            l64.clone(),
            &render_device,
        );

        let slice0 = buffer.allocate(32);
        assert!(slice0.is_some());
        let slice0 = slice0.unwrap();
        assert_eq!(slice0.range, 0..32);
        assert!(buffer.free_slices.is_empty());

        let slice1 = buffer.allocate(1024);
        assert!(slice1.is_some());
        let slice1 = slice1.unwrap();
        assert_eq!(slice1.range, 32..1056);
        assert!(buffer.free_slices.is_empty());

        let state = buffer.free_slice(slice0);
        assert_eq!(state, SlabState::Used);
        assert_eq!(buffer.free_slices.len(), 1);
        assert_eq!(buffer.free_slices[0], 0..32);

        // Try to allocate a slice larger than slice0, such that slice0 cannot be
        // recycled, and instead the new slice has to be appended after all
        // existing ones.
        let slice2 = buffer.allocate(64);
        assert!(slice2.is_some());
        let slice2 = slice2.unwrap();
        assert_eq!(slice2.range.start, slice1.range.end); // after slice1
        assert_eq!(slice2.range, 1056..1120);
        assert_eq!(buffer.free_slices.len(), 1);

        // Now allocate a small slice that fits, to recycle (part of) slice0.
        let slice3 = buffer.allocate(16);
        assert!(slice3.is_some());
        let slice3 = slice3.unwrap();
        assert_eq!(slice3.range, 0..16);
        assert_eq!(buffer.free_slices.len(), 1); // split
        assert_eq!(buffer.free_slices[0], 16..32);

        // Allocate a second small slice that exactly fits the remaining space,
        // completely recycling it.
        let slice4 = buffer.allocate(16);
        assert!(slice4.is_some());
        let slice4 = slice4.unwrap();
        assert_eq!(slice4.range, 16..32);
        assert!(buffer.free_slices.is_empty()); // recycled
    }

    #[test]
    fn effect_cache() {
        let renderer = MockRenderer::new();
        let render_device = renderer.device();

        let l32 = ParticleLayout::new().append(F4A).append(F4B).build();
        assert_eq!(32, l32.size());

        let mut effect_cache = EffectCache::new(render_device);
        assert_eq!(effect_cache.slabs().len(), 0);

        let asset = Handle::<EffectAsset>::default();
        let capacity = ParticleSlab::MIN_CAPACITY;
        let item_size = l32.size();

        // Insert an effect
        let effect1 = effect_cache.insert(asset.clone(), capacity, &l32);
        //assert!(effect1.is_valid());
        let slice1 = &effect1.slice;
        assert_eq!(slice1.len(), capacity);
        assert_eq!(
            slice1.particle_layout.min_binding_size().get() as u32,
            item_size
        );
        assert_eq!(slice1.range, 0..capacity);
        assert_eq!(effect_cache.slabs().len(), 1);

        // Insert a second copy of the same effect
        let effect2 = effect_cache.insert(asset.clone(), capacity, &l32);
        //assert!(effect2.is_valid());
        let slice2 = &effect2.slice;
        assert_eq!(slice2.len(), capacity);
        assert_eq!(
            slice2.particle_layout.min_binding_size().get() as u32,
            item_size
        );
        assert_eq!(slice2.range, 0..capacity);
        assert_eq!(effect_cache.slabs().len(), 2);

        // Remove the first effect instance
        let buffer_state = effect_cache.remove(&effect1).unwrap();
        // Note: currently batching is disabled, so each instance has its own buffer,
        // which becomes unused once the instance is destroyed.
        assert_eq!(buffer_state, SlabState::Free);
        assert_eq!(effect_cache.slabs().len(), 2);
        {
            let slabs = effect_cache.slabs();
            assert!(slabs[0].is_none());
            assert!(slabs[1].is_some()); // id2
        }

        // Regression #60
        let effect3 = effect_cache.insert(asset, capacity, &l32);
        //assert!(effect3.is_valid());
        let slice3 = &effect3.slice;
        assert_eq!(slice3.len(), capacity);
        assert_eq!(
            slice3.particle_layout.min_binding_size().get() as u32,
            item_size
        );
        assert_eq!(slice3.range, 0..capacity);
        // Note: currently batching is disabled, so each instance has its own buffer.
        assert_eq!(effect_cache.slabs().len(), 2);
        {
            let slabs = effect_cache.slabs();
            assert!(slabs[0].is_some()); // id3
            assert!(slabs[1].is_some()); // id2
        }
    }
}