djeedai / bevy_hanabi / build 18260856969

05 Oct 2025 03:39PM UTC coverage: 66.606% (+0.03%) from 66.58%

Push build via GitHub (committer: web-flow)
Upgrade to Bevy v0.17 (#502)

31 of 38 new or added lines in 11 files covered (81.58%).
2 existing lines in 1 file are now uncovered.
5120 of 7687 relevant lines covered (66.61%).
148.45 hits per line.

Source file: /src/render/aligned_buffer_vec.rs (54.86% covered)
1
use std::{num::NonZeroU64, ops::Range};
2

3
use bevy::{
4
    log::trace,
5
    render::{
6
        render_resource::{
7
            BindingResource, Buffer, BufferAddress, BufferBinding, BufferDescriptor, BufferUsages,
8
            ShaderSize, ShaderType,
9
        },
10
        renderer::{RenderDevice, RenderQueue},
11
    },
12
};
13
use bytemuck::{cast_slice, Pod};
14

15
/// Like Bevy's [`BufferVec`], but with extra per-item alignment.
16
///
17
/// This helper ensures the individual array elements are properly aligned,
18
/// depending on the device constraints and the WGSL rules. In general using
19
/// [`BufferVec`] is enough to ensure alignment; however when some array items
20
/// also need to be bound individually, then each item (not only the array
21
/// itself) needs to be aligned to the device requirements. This is admittedly a
22
/// very specific case, because the device alignment might be very large (256
23
/// bytes) and this causes a lot of wasted space (padding per-element, instead
24
/// of padding for the entire array).
25
///
26
/// For this buffer to work correctly and items be bindable individually, the
27
/// alignment must come from one of the [`WgpuLimits`]. For example for a
28
/// storage buffer, to be able to bind the entire buffer but also any subset of
29
/// it (including individual elements), the extra alignment must
30
/// be [`WgpuLimits::min_storage_buffer_offset_alignment`].
31
///
32
/// The element type `T` needs to implement the following traits:
33
/// - [`Pod`] to allow copy.
34
/// - [`ShaderType`] because it needs to be mapped for a shader.
35
/// - [`ShaderSize`] to ensure a fixed footprint, to allow packing multiple
36
///   instances inside a single buffer. This therefore excludes any
37
///   runtime-sized array.
38
///
39
/// [`BufferVec`]: bevy::render::render_resource::BufferVec
40
/// [`WgpuLimits`]: bevy::render::settings::WgpuLimits
41
pub struct AlignedBufferVec<T: Pod + ShaderSize> {
42
    /// Pending values accumulated on CPU and not yet written to GPU.
43
    values: Vec<T>,
44
    /// GPU buffer if already allocated, or `None` otherwise.
45
    buffer: Option<Buffer>,
46
    /// Capacity of the buffer, in number of elements.
47
    capacity: usize,
48
    /// Size of a single buffer element, in bytes, in CPU memory (Rust layout).
49
    item_size: usize,
50
    /// Size of a single buffer element, in bytes, aligned to GPU memory
51
    /// constraints.
52
    aligned_size: usize,
53
    /// GPU buffer usages.
54
    buffer_usage: BufferUsages,
55
    /// Optional GPU buffer name, for debugging.
56
    label: Option<String>,
57
}
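
// Illustrative sketch (not part of the original file): the per-item padding cost the doc
// comment above describes. With a 16-byte WGSL element and a 256-byte device alignment,
// each element occupies a full 256-byte slot; the rounding matches what `new()` does below.
#[allow(dead_code)]
fn example_padding_cost() {
    let item_size: usize = 16; // e.g. <T as ShaderSize>::SHADER_SIZE for a padded Vec3
    let device_align: usize = 256; // e.g. WgpuLimits::min_storage_buffer_offset_alignment
    let aligned_size = item_size.next_multiple_of(device_align);
    assert_eq!(aligned_size, 256); // 240 bytes of padding per element
}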
58

59
impl<T: Pod + ShaderSize> Default for AlignedBufferVec<T> {
60
    fn default() -> Self {
29✔
61
        let item_size = std::mem::size_of::<T>();
58✔
62
        let aligned_size = <T as ShaderSize>::SHADER_SIZE.get() as usize;
58✔
63
        assert!(aligned_size >= item_size);
58✔
64
        Self {
65
            values: Vec::new(),
58✔
66
            buffer: None,
67
            capacity: 0,
68
            buffer_usage: BufferUsages::all(),
58✔
69
            item_size,
70
            aligned_size,
71
            label: None,
72
        }
73
    }
74
}
75

76
impl<T: Pod + ShaderSize> AlignedBufferVec<T> {
77
    /// Create a new collection.
78
    ///
79
    /// `item_align` is an optional additional alignment for items in the
80
    /// collection. If greater than the natural alignment dictated by WGSL
81
    /// rules, this extra alignment is enforced. Otherwise it's ignored (so you
82
    /// can pass `0` to ignore).
83
    ///
84
    /// # Panics
85
    ///
86
    /// Panics if `buffer_usage` contains [`BufferUsages::UNIFORM`] and the
87
    /// layout of the element type `T` does not meet the requirements of the
88
    /// uniform address space, as tested by
89
    /// [`ShaderType::assert_uniform_compat()`].
90
    ///
91
    /// [`BufferUsages::UNIFORM`]: bevy::render::render_resource::BufferUsages::UNIFORM
92
    pub fn new(
29✔
93
        buffer_usage: BufferUsages,
94
        item_align: Option<NonZeroU64>,
95
        label: Option<String>,
96
    ) -> Self {
97
        // GPU-aligned item size, compatible with WGSL rules
98
        let item_size = <T as ShaderSize>::SHADER_SIZE.get() as usize;
58✔
99
        // Extra manual alignment for device constraints
100
        let aligned_size = if let Some(item_align) = item_align {
84✔
101
            let item_align = item_align.get() as usize;
×
102
            let aligned_size = item_size.next_multiple_of(item_align);
×
103
            assert!(aligned_size >= item_size);
×
104
            assert!(aligned_size.is_multiple_of(item_align));
104✔
105
            aligned_size
26✔
106
        } else {
107
            item_size
3✔
108
        };
109
        trace!(
×
110
            "AlignedBufferVec['{}']: item_size={} aligned_size={}",
4✔
111
            label.as_ref().map(|s| &s[..]).unwrap_or(""),
24✔
112
            item_size,
×
113
            aligned_size
×
114
        );
115
        if buffer_usage.contains(BufferUsages::UNIFORM) {
4✔
116
            <T as ShaderType>::assert_uniform_compat();
4✔
117
        }
118
        Self {
119
            buffer_usage,
120
            aligned_size,
121
            label,
122
            ..Default::default()
123
        }
124
    }
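
    // Illustrative sketch (not part of the original file), mirroring the GPU tests at the
    // bottom of this file: construct a buffer whose individual elements can be bound as
    // storage bindings, by passing the device's minimum storage-buffer offset alignment.
    #[allow(dead_code)]
    fn example_new(device: &RenderDevice) -> Self {
        let item_align = device.limits().min_storage_buffer_offset_alignment as u64;
        Self::new(
            BufferUsages::STORAGE,
            NonZeroU64::new(item_align),
            Some("example_buffer".to_string()),
        )
    }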
125

126
    fn safe_label(&self) -> &str {
628✔
127
        self.label.as_ref().map(|s| &s[..]).unwrap_or("")
3,768✔
128
    }
129

130
    #[inline]
131
    pub fn buffer(&self) -> Option<&Buffer> {
1,267✔
132
        self.buffer.as_ref()
2,534✔
133
    }
134

135
    /// Get a binding for the entire buffer.
136
    #[inline]
137
    #[allow(dead_code)]
138
    pub fn binding(&self) -> Option<BindingResource<'_>> {
×
139
        // FIXME - Return a Buffer wrapper first, which can be unwrapped, then from that
140
        // wrapper implement all the xxx_binding() helpers. That avoids a bunch of "if
141
        // let Some()" everywhere when we know the buffer is valid. The only reason the
142
        // buffer might not be valid is if it was not created, and in that case
143
        // we wouldn't be calling the xxx_bindings() helpers; we'd have returned early
144
        // before.
145
        let buffer = self.buffer()?;
×
146
        Some(BindingResource::Buffer(BufferBinding {
×
147
            buffer,
×
148
            offset: 0,
×
149
            size: None, // entire buffer
×
150
        }))
151
    }
152

153
    /// Get a binding for a subset of the elements of the buffer.
154
    ///
155
    /// Returns a binding for the elements in the range `offset..offset+count`.
156
    ///
157
    /// # Panics
158
    ///
159
    /// Panics if `count` is zero.
160
    #[inline]
161
    #[allow(dead_code)]
162
    pub fn range_binding(&self, offset: u32, count: u32) -> Option<BindingResource<'_>> {
×
163
        assert!(count > 0);
×
164
        let buffer = self.buffer()?;
×
165
        let offset = self.aligned_size as u64 * offset as u64;
×
166
        let size = NonZeroU64::new(self.aligned_size as u64 * count as u64).unwrap();
×
167
        Some(BindingResource::Buffer(BufferBinding {
×
168
            buffer,
×
169
            offset,
×
170
            size: Some(size),
×
171
        }))
172
    }
173

174
    #[inline]
175
    #[allow(dead_code)]
176
    pub fn capacity(&self) -> usize {
×
177
        self.capacity
×
178
    }
179

180
    #[inline]
181
    pub fn len(&self) -> usize {
1,311✔
182
        self.values.len()
2,622✔
183
    }
184

185
    /// Size in bytes of a single item in the buffer, aligned to the item
186
    /// alignment.
187
    #[inline]
188
    pub fn aligned_size(&self) -> usize {
1,254✔
189
        self.aligned_size
1,254✔
190
    }
191

192
    /// Calculate a dynamic byte offset for a bind group from an array element
193
    /// index.
194
    ///
195
    /// This returns the product of `index` and the internal [`aligned_size()`].
196
    ///
197
    /// # Panics
198
    ///
199
    /// Panics if the `index` is too large, producing a byte offset larger than
200
    /// `u32::MAX`.
201
    ///
202
    /// [`aligned_size()`]: crate::AlignedBufferVec::aligned_size
203
    #[inline]
204
    pub fn dynamic_offset(&self, index: usize) -> u32 {
×
205
        let offset = self.aligned_size * index;
×
206
        assert!(offset <= u32::MAX as usize);
×
207
        u32::try_from(offset).expect("AlignedBufferVec index out of bounds")
×
208
    }
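
    // Illustrative sketch (not part of the original file): binding a single element, which
    // is what the per-item alignment exists for. Either pass `dynamic_offset(index)` as the
    // dynamic offset of a bind group entry, or build a fixed binding for that one element
    // with `range_binding(index, 1)`.
    #[allow(dead_code)]
    fn example_bind_single_element(&self, index: u32) -> Option<BindingResource<'_>> {
        debug_assert_eq!(
            self.dynamic_offset(index as usize) as usize,
            index as usize * self.aligned_size()
        );
        self.range_binding(index, 1)
    }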
209

210
    #[inline]
211
    #[allow(dead_code)]
212
    pub fn is_empty(&self) -> bool {
1,002✔
213
        self.values.is_empty()
2,004✔
214
    }
215

216
    /// Append a value to the buffer.
217
    ///
218
    /// The content is stored on the CPU and uploaded to the GPU once
219
    /// [`write_buffer()`] is called.
220
    ///
221
    /// [`write_buffer()`]: crate::AlignedBufferVec::write_buffer
222
    pub fn push(&mut self, value: T) -> usize {
340✔
223
        let index = self.values.len();
1,020✔
224
        self.values.push(value);
1,020✔
225
        index
340✔
226
    }
227

228
    /// Reserve some capacity into the buffer.
229
    ///
230
    /// If the buffer is reallocated, the old content (on the GPU) is lost, and
231
    /// needs to be re-uploaded to the newly-created buffer. This is done with
232
    /// [`write_buffer()`].
233
    ///
234
    /// # Returns
235
    ///
236
    /// `true` if the buffer was (re)allocated, or `false` if an existing buffer
237
    /// was reused which already had enough capacity.
238
    ///
239
    /// [`write_buffer()`]: crate::AlignedBufferVec::write_buffer
240
    pub fn reserve(&mut self, capacity: usize, device: &RenderDevice) -> bool {
317✔
241
        if capacity > self.capacity {
317✔
242
            let size = self.aligned_size * capacity;
10✔
243
            trace!(
5✔
244
                "reserve['{}']: increase capacity from {} to {} elements, new size {} bytes",
2✔
245
                self.safe_label(),
4✔
246
                self.capacity,
×
247
                capacity,
×
248
                size
×
249
            );
250
            self.capacity = capacity;
5✔
251
            if let Some(old_buffer) = self.buffer.take() {
6✔
252
                trace!(
×
253
                    "reserve['{}']: destroying old buffer #{:?}",
×
254
                    self.safe_label(),
×
255
                    old_buffer.id()
×
256
                );
257
                old_buffer.destroy();
×
258
            }
259
            let new_buffer = device.create_buffer(&BufferDescriptor {
15✔
260
                label: self.label.as_ref().map(|s| &s[..]),
19✔
261
                size: size as BufferAddress,
5✔
262
                usage: BufferUsages::COPY_DST | self.buffer_usage,
5✔
263
                mapped_at_creation: false,
×
264
            });
265
            trace!(
5✔
266
                "reserve['{}']: created new buffer #{:?}",
2✔
267
                self.safe_label(),
4✔
268
                new_buffer.id(),
4✔
269
            );
270
            self.buffer = Some(new_buffer);
10✔
271
            // FIXME - this discards the old content if any!!!
272
            true
5✔
273
        } else {
274
            false
312✔
275
        }
276
    }
277

278
    /// Schedule the buffer write to GPU.
279
    ///
280
    /// # Returns
281
    ///
282
    /// `true` if the buffer was (re)allocated, `false` otherwise.
283
    pub fn write_buffer(&mut self, device: &RenderDevice, queue: &RenderQueue) -> bool {
664✔
284
        if self.values.is_empty() {
1,328✔
285
            return false;
348✔
286
        }
287
        trace!(
×
288
            "write_buffer['{}']: values.len={} item_size={} aligned_size={}",
312✔
289
            self.safe_label(),
624✔
290
            self.values.len(),
624✔
291
            self.item_size,
×
292
            self.aligned_size
×
293
        );
294
        let buffer_changed = self.reserve(self.values.len(), device);
×
295
        if let Some(buffer) = &self.buffer {
316✔
296
            let aligned_size = self.aligned_size * self.values.len();
×
297
            trace!(
×
298
                "aligned_buffer['{}']: size={} buffer={:?}",
312✔
299
                self.safe_label(),
624✔
300
                aligned_size,
×
301
                buffer.id(),
624✔
302
            );
303
            let mut aligned_buffer: Vec<u8> = vec![0; aligned_size];
×
304
            for i in 0..self.values.len() {
319✔
305
                let src: &[u8] = cast_slice(std::slice::from_ref(&self.values[i]));
×
306
                let dst_offset = i * self.aligned_size;
×
307
                let dst_range = dst_offset..dst_offset + self.item_size;
×
308
                trace!("+ copy: src={:?} dst={:?}", src.as_ptr(), dst_range);
936✔
309
                let dst = &mut aligned_buffer[dst_range];
×
310
                dst.copy_from_slice(src);
×
311
            }
312
            let bytes: &[u8] = cast_slice(&aligned_buffer);
×
313
            queue.write_buffer(buffer, 0, bytes);
×
314
        }
315
        buffer_changed
×
316
    }
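
    // Illustrative sketch (not part of the original file): a typical per-frame flow. The
    // CPU-side values are rebuilt, then uploaded in a single call; the returned `bool`
    // tells the caller that the GPU buffer was re-allocated and that any bind group
    // referencing the old buffer must be re-created.
    #[allow(dead_code)]
    fn example_frame_upload(
        &mut self,
        items: impl IntoIterator<Item = T>,
        device: &RenderDevice,
        queue: &RenderQueue,
    ) -> bool {
        self.clear();
        for item in items {
            self.push(item);
        }
        self.write_buffer(device, queue)
    }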
317

318
    pub fn clear(&mut self) {
663✔
319
        self.values.clear();
1,326✔
320
    }
321
}
322

323
impl<T: Pod + ShaderSize> std::ops::Index<usize> for AlignedBufferVec<T> {
324
    type Output = T;
325

326
    fn index(&self, index: usize) -> &Self::Output {
×
327
        &self.values[index]
×
328
    }
329
}
330

331
impl<T: Pod + ShaderSize> std::ops::IndexMut<usize> for AlignedBufferVec<T> {
332
    fn index_mut(&mut self, index: usize) -> &mut Self::Output {
×
333
        &mut self.values[index]
×
334
    }
335
}
336

337
#[derive(Debug, Clone, PartialEq, Eq)]
338
struct FreeRow(pub Range<u32>);
339

340
impl PartialOrd for FreeRow {
341
    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
×
342
        Some(self.cmp(other))
×
343
    }
344
}
345

346
impl Ord for FreeRow {
347
    fn cmp(&self, other: &Self) -> std::cmp::Ordering {
6✔
348
        self.0.start.cmp(&other.0.start)
18✔
349
    }
350
}
351

352
/// Like [`AlignedBufferVec`], but for heterogeneous data.
353
#[derive(Debug)]
354
pub struct HybridAlignedBufferVec {
355
    /// Pending values accumulated on CPU and not yet written to GPU.
356
    values: Vec<u8>,
357
    /// GPU buffer if already allocated, or `None` otherwise.
358
    buffer: Option<Buffer>,
359
    /// Capacity of the buffer, in bytes.
360
    capacity: usize,
361
    /// Alignment of each element, in bytes.
362
    item_align: usize,
363
    /// GPU buffer usages.
364
    buffer_usage: BufferUsages,
365
    /// Optional GPU buffer name, for debugging.
366
    label: Option<String>,
367
    /// Free ranges available for re-allocation. Those are row ranges; byte
368
    /// ranges are obtained by multiplying these by `item_align`.
369
    free_rows: Vec<FreeRow>,
370
    /// Is the GPU buffer stale and the CPU one need to be re-uploaded?
371
    is_stale: bool,
372
}
373

374
impl HybridAlignedBufferVec {
375
    /// Create a new collection.
376
    ///
377
    /// `item_align` is the alignment for items in the collection.
378
    pub fn new(buffer_usage: BufferUsages, item_align: NonZeroU64, label: Option<String>) -> Self {
5✔
379
        let item_align = item_align.get() as usize;
10✔
380
        trace!(
5✔
381
            "HybridAlignedBufferVec['{}']: item_align={} byte",
3✔
382
            label.as_ref().map(|s| &s[..]).unwrap_or(""),
18✔
383
            item_align,
384
        );
385
        Self {
386
            values: vec![],
10✔
387
            buffer: None,
388
            capacity: 0,
389
            item_align,
390
            buffer_usage,
391
            label,
392
            free_rows: vec![],
5✔
393
            is_stale: true,
394
        }
395
    }
396

397
    #[inline]
398
    pub fn buffer(&self) -> Option<&Buffer> {
638✔
399
        self.buffer.as_ref()
1,276✔
400
    }
401

402
    /// Get a binding for the entire buffer.
403
    #[allow(dead_code)]
404
    #[inline]
405
    pub fn max_binding(&self) -> Option<BindingResource<'_>> {
×
406
        // FIXME - Return a Buffer wrapper first, which can be unwrapped, then from that
407
        // wrapper implement all the xxx_binding() helpers. That avoids a bunch of "if
408
        // let Some()" everywhere when we know the buffer is valid. The only reason the
409
        // buffer might not be valid is if it was not created, and in that case
410
        // we wouldn't be calling the xxx_bindings() helpers; we'd have returned early
411
        // before.
412
        let buffer = self.buffer()?;
×
413
        Some(BindingResource::Buffer(BufferBinding {
×
414
            buffer,
×
415
            offset: 0,
×
416
            size: None, // entire buffer
417
        }))
418
    }
419

420
    /// Get a binding for the first `size` bytes of the buffer.
421
    ///
422
    /// # Panics
423
    ///
424
    /// Panics if `size` is zero.
425
    #[allow(dead_code)]
426
    #[inline]
427
    pub fn lead_binding(&self, size: u32) -> Option<BindingResource<'_>> {
×
428
        let buffer = self.buffer()?;
×
429
        let size = NonZeroU64::new(size as u64).unwrap();
×
430
        Some(BindingResource::Buffer(BufferBinding {
×
431
            buffer,
×
432
            offset: 0,
×
433
            size: Some(size),
×
434
        }))
435
    }
436

437
    /// Get a binding for a subset of the elements of the buffer.
438
    ///
439
    /// Returns a binding for the elements in the range `offset..offset+count`.
440
    ///
441
    /// # Panics
442
    ///
443
    /// Panics if `offset` is not a multiple of the alignment specified on
444
    /// construction.
445
    ///
446
    /// Panics if `size` is zero.
447
    #[allow(dead_code)]
448
    #[inline]
449
    pub fn range_binding(&self, offset: u32, size: u32) -> Option<BindingResource<'_>> {
×
NEW
450
        assert!((offset as usize).is_multiple_of(self.item_align));
×
451
        let buffer = self.buffer()?;
×
452
        let size = NonZeroU64::new(size as u64).unwrap();
×
453
        Some(BindingResource::Buffer(BufferBinding {
×
454
            buffer,
×
455
            offset: offset as u64,
×
456
            size: Some(size),
×
457
        }))
458
    }
459

460
    /// Capacity of the allocated GPU buffer, in bytes.
461
    ///
462
    /// This may be zero if the buffer was not allocated yet. In general, this
463
    /// can differ from the actual data size cached on CPU and waiting to be
464
    /// uploaded to GPU.
465
    #[inline]
466
    #[allow(dead_code)]
467
    pub fn capacity(&self) -> usize {
×
468
        self.capacity
×
469
    }
470

471
    /// Current buffer size, in bytes.
472
    ///
473
    /// This represents the size of the CPU data uploaded to GPU. Pending a GPU
474
    /// buffer re-allocation or re-upload, this size might differ from the
475
    /// actual GPU buffer size. But they're eventually consistent.
476
    #[inline]
477
    pub fn len(&self) -> usize {
1✔
478
        self.values.len()
2✔
479
    }
480

481
    /// Alignment, in bytes, of all the elements.
482
    #[allow(dead_code)]
483
    #[inline]
484
    pub fn item_align(&self) -> usize {
×
485
        self.item_align
×
486
    }
487

488
    /// Calculate a dynamic byte offset for a bind group from an array element
489
    /// index.
490
    ///
491
    /// This returns the product of `index` and the internal [`item_align()`].
492
    ///
493
    /// # Panics
494
    ///
495
    /// Panics if the `index` is too large, producing a byte offset larger than
496
    /// `u32::MAX`.
497
    ///
498
    /// [`item_align()`]: crate::HybridAlignedBufferVec::item_align
499
    #[allow(dead_code)]
500
    #[inline]
501
    pub fn dynamic_offset(&self, index: usize) -> u32 {
×
502
        let offset = self.item_align * index;
×
503
        assert!(offset <= u32::MAX as usize);
×
504
        u32::try_from(offset).expect("HybridAlignedBufferVec index out of bounds")
×
505
    }
506

507
    #[inline]
508
    #[allow(dead_code)]
509
    pub fn is_empty(&self) -> bool {
664✔
510
        self.values.is_empty()
1,328✔
511
    }
512

513
    /// Append a value to the buffer.
514
    ///
515
    /// As with [`set_content()`], the content is stored on the CPU and uploaded
516
    /// to the GPU once [`write_buffer()`] is called.
517
    ///
518
    /// # Returns
519
    ///
520
    /// Returns a range starting at the byte offset at which the new element was
521
    /// inserted, which is guaranteed to be a multiple of [`item_align()`].
522
    /// The range span is the item byte size.
523
    ///
524
    /// [`item_align()`]: self::HybridAlignedBufferVec::item_align
525
    pub fn push<T: Pod + ShaderSize>(&mut self, value: &T) -> Range<u32> {
15✔
526
        let src: &[u8] = cast_slice(std::slice::from_ref(value));
60✔
527
        assert_eq!(value.size().get() as usize, src.len());
75✔
528
        self.push_raw(src)
45✔
529
    }
530

531
    /// Append a slice of values to the buffer.
532
    ///
533
    /// The values are assumed to be tightly packed, and will be copied
534
    /// back-to-back into the buffer, without any padding between them. This
535
    /// means that the individual slice items must be properly aligned relative
536
    /// to the beginning of the slice.
537
    ///
538
    /// As with [`set_content()`], the content is stored on the CPU and uploaded
539
    /// to the GPU once [`write_buffer()`] is called.
540
    ///
541
    /// # Returns
542
    ///
543
    /// Returns a range starting at the byte offset at which the new element
544
    /// (the slice) was inserted, which is guaranteed to be a multiple of
545
    /// [`item_align()`]. The range length is the total byte size of the slice.
546
    ///
547
    /// # Panics
548
    ///
549
    /// Panics if the byte size of the element `T` is not a multiple of
550
    /// the minimum GPU alignment, which is 4 bytes. Note that this doesn't
551
    /// guarantee that the written data is well-formed for use on GPU, as array
552
    /// elements on GPU have other alignment requirements according to WGSL, but
553
    /// at least this catches obvious errors.
554
    ///
555
    /// [`item_align()`]: self::HybridAlignedBufferVec::item_align
556
    #[allow(dead_code)]
557
    pub fn push_many<T: Pod + ShaderSize>(&mut self, value: &[T]) -> Range<u32> {
×
558
        assert_eq!(size_of::<T>() % 4, 0);
×
559
        let src: &[u8] = cast_slice(value);
×
560
        self.push_raw(src)
×
561
    }
562

563
    pub fn push_raw(&mut self, src: &[u8]) -> Range<u32> {
16✔
564
        self.is_stale = true;
16✔
565

566
        // Calculate the number of (aligned) rows to allocate
567
        let num_rows = src.len().div_ceil(self.item_align) as u32;
64✔
568

569
        // Try to find a block of free rows which can accommodate it, and pick the
570
        // smallest one in order to limit wasted space.
571
        let mut best_slot: Option<(u32, usize)> = None;
48✔
572
        for (index, range) in self.free_rows.iter().enumerate() {
32✔
573
            let free_rows = range.0.end - range.0.start;
×
574
            if free_rows >= num_rows {
×
575
                let wasted_rows = free_rows - num_rows;
×
576
                // If we found a slot with the exact size, just use it already
577
                if wasted_rows == 0 {
×
578
                    best_slot = Some((0, index));
×
579
                    break;
580
                }
581
                // Otherwise try to find the smallest oversized slot to reduce wasted space
582
                if let Some(best_slot) = best_slot.as_mut() {
×
583
                    if wasted_rows < best_slot.0 {
×
584
                        *best_slot = (wasted_rows, index);
×
585
                    }
586
                } else {
587
                    best_slot = Some((wasted_rows, index));
×
588
                }
589
            }
590
        }
591

592
        // Insert into existing space
593
        if let Some((_, index)) = best_slot {
16✔
594
            let row_range = self.free_rows.remove(index);
595
            let offset = row_range.0.start as usize * self.item_align;
596
            let free_size = (row_range.0.end - row_range.0.start) as usize * self.item_align;
597
            let size = src.len();
598
            assert!(size <= free_size);
599

600
            let dst = self.values.as_mut_ptr();
×
601
            // SAFETY: dst is guaranteed to point to allocated bytes, which are already
602
            // initialized from a previous call, and are initialized by overwriting the
603
            // bytes with those of a POD type.
604
            #[allow(unsafe_code)]
605
            unsafe {
606
                let dst = dst.add(offset);
×
607
                dst.copy_from_nonoverlapping(src.as_ptr(), size);
×
608
            }
609

610
            let start = offset as u32;
×
611
            let end = start + size as u32;
×
612
            start..end
×
613
        }
614
        // Insert at end of vector, after resizing it
615
        else {
616
            // Calculate new aligned insertion offset and new capacity
617
            let offset = self.values.len().next_multiple_of(self.item_align);
80✔
618
            let size = src.len();
48✔
619
            let new_capacity = offset + size;
32✔
620
            if new_capacity > self.values.capacity() {
32✔
621
                let additional = new_capacity - self.values.len();
15✔
622
                self.values.reserve(additional)
15✔
623
            }
624

625
            // Insert padding if needed
626
            if offset > self.values.len() {
42✔
627
                self.values.resize(offset, 0);
20✔
628
            }
629

630
            // Insert serialized value
631
            // Dealing with safe code via Vec::spare_capacity_mut() is quite difficult
632
            // without the upcoming (unstable) additions to MaybeUninit to deal with arrays.
633
            // To prevent having to loop over individual u8, we use direct pointers instead.
634
            assert!(self.values.capacity() >= offset + size);
64✔
635
            assert_eq!(self.values.len(), offset);
48✔
636
            let dst = self.values.as_mut_ptr();
48✔
637
            // SAFETY: dst is guaranteed to point to allocated (offset+size) bytes, which
638
            // are written by copying a Pod type, so ensures those values are initialized,
639
            // and the final size is set to exactly (offset+size).
640
            #[allow(unsafe_code)]
641
            unsafe {
642
                let dst = dst.add(offset);
80✔
643
                dst.copy_from_nonoverlapping(src.as_ptr(), size);
96✔
644
                self.values.set_len(offset + size);
48✔
645
            }
646

647
            debug_assert_eq!(offset % self.item_align, 0);
32✔
648
            let start = offset as u32;
32✔
649
            let end = start + size as u32;
32✔
650
            start..end
16✔
651
        }
652
    }
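
    // Illustrative sketch (not part of the original file), following the unit tests at the
    // bottom of this file: with a 32-byte row alignment, each pushed value starts on its own
    // row, even when the values have different types and sizes.
    #[allow(dead_code)]
    fn example_push_rows() {
        let mut habv =
            HybridAlignedBufferVec::new(BufferUsages::STORAGE, NonZeroU64::new(32).unwrap(), None);
        let r0 = habv.push(&42u32); // first row
        let r1 = habv.push(&1.5f32); // next aligned row, different type
        assert_eq!(r0, 0..4);
        assert_eq!(r1, 32..36);
    }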
653

654
    /// Remove a range of bytes previously added.
655
    ///
656
    /// Remove a range of bytes previously returned by adding one or more
657
    /// elements with [`push()`] or [`push_many()`].
658
    ///
659
    /// # Returns
660
    ///
661
    /// Returns `true` if the range was valid and the corresponding data was
662
    /// removed, or `false` otherwise. In that case, the buffer is not modified.
663
    ///
664
    /// [`push()`]: Self::push
665
    /// [`push_many()`]: Self::push_many
666
    pub fn remove(&mut self, range: Range<u32>) -> bool {
17✔
667
        // Can only remove entire blocks starting at an aligned size
668
        let align = self.item_align as u32;
34✔
669
        if !range.start.is_multiple_of(align) {
34✔
670
            return false;
×
671
        }
672

673
        // Check for out of bounds argument
674
        let end = self.values.len() as u32;
34✔
675
        if range.start >= end || range.end > end {
34✔
676
            return false;
×
677
        }
678

679
        // Note: see below; sometimes `self.values` has some padding left that we couldn't
680
        // recover earlier because we didn't know the size of this allocation, but we
681
        // still need to deallocate the row here.
682
        if range.end == end || range.end.next_multiple_of(align) == end {
11✔
683
            // If the allocation is at the end of the buffer, shorten the CPU values. This
684
            // ensures is_empty() eventually returns true.
685
            let mut new_row_end = range.start.div_ceil(align);
8✔
686

687
            // Walk the (sorted) free list to also dequeue any range which is now at the end
688
            // of the buffer
689
            while let Some(free_row) = self.free_rows.pop() {
16✔
690
                if free_row.0.end == new_row_end {
4✔
691
                    new_row_end = free_row.0.start;
4✔
692
                } else {
693
                    self.free_rows.push(free_row);
×
694
                    break;
695
                }
696
            }
697

698
            // Note: we can't really recover any padding here because we don't know the
699
            // exact size of that allocation, only its row-aligned size.
700
            self.values.truncate((new_row_end * align) as usize);
701
        } else {
702
            // Otherwise, save the row into the free list.
703
            let start = range.start / align;
9✔
704
            let end = range.end.div_ceil(align);
705
            let free_row = FreeRow(start..end);
706

707
            // Insert as sorted
708
            if self.free_rows.is_empty() {
4✔
709
                // Special case to simplify below, and to avoid binary_search()
710
                self.free_rows.push(free_row);
8✔
711
            } else if let Err(index) = self.free_rows.binary_search(&free_row) {
13✔
712
                if index >= self.free_rows.len() {
713
                    // insert at end
714
                    let prev = self.free_rows.last_mut().unwrap(); // known
3✔
715
                    if prev.0.end == free_row.0.start {
2✔
716
                        // merge with last value
717
                        prev.0.end = free_row.0.end;
1✔
718
                    } else {
719
                        // insert last, with gap
720
                        self.free_rows.push(free_row);
×
721
                    }
722
                } else if index == 0 {
3✔
723
                    // insert at start
724
                    let next = &mut self.free_rows[0];
4✔
725
                    if free_row.0.end == next.0.start {
3✔
726
                        // merge with next
727
                        next.0.start = free_row.0.start;
1✔
728
                    } else {
729
                        // insert first, with gap
730
                        self.free_rows.insert(0, free_row);
1✔
731
                    }
732
                } else {
733
                    // insert between 2 existing elements
734
                    let prev = &mut self.free_rows[index - 1];
1✔
735
                    if prev.0.end == free_row.0.start {
736
                        // merge with previous value
737
                        prev.0.end = free_row.0.end;
1✔
738

739
                        let prev = self.free_rows[index - 1].clone();
3✔
740
                        let next = &mut self.free_rows[index];
2✔
741
                        if prev.0.end == next.0.start {
2✔
742
                            // also merge prev with next, and remove prev
743
                            next.0.start = prev.0.start;
2✔
744
                            self.free_rows.remove(index - 1);
2✔
745
                        }
746
                    } else {
747
                        let next = &mut self.free_rows[index];
×
748
                        if free_row.0.end == next.0.start {
×
749
                            // merge with next value
750
                            next.0.start = free_row.0.start;
×
751
                        } else {
752
                            // insert between 2 values, with gaps on both sides
753
                            self.free_rows.insert(0, free_row);
×
754
                        }
755
                    }
756
                }
757
            } else {
758
                // The range exists in the free list, this means it's already removed. This is a
759
                // duplicate; ignore it.
760
                return false;
1✔
761
            }
762
        }
763
        self.is_stale = true;
16✔
764
        true
765
    }
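
    // Illustrative sketch (not part of the original file), condensed from the `habv_remove`
    // test below: freeing an interior row records it in `free_rows`, adjacent free rows are
    // merged, and freeing the last row shrinks the CPU storage again.
    #[allow(dead_code)]
    fn example_remove_merging() {
        let mut habv =
            HybridAlignedBufferVec::new(BufferUsages::STORAGE, NonZeroU64::new(32).unwrap(), None);
        let r0 = habv.push(&1u32); // row 0
        let r1 = habv.push(&2u32); // row 1
        let r2 = habv.push(&3u32); // row 2
        assert!(habv.remove(r0)); // row 0 goes to the free list
        assert!(habv.remove(r1)); // merged with row 0
        assert_eq!(habv.free_rows, vec![FreeRow(0..2)]);
        assert!(habv.remove(r2)); // tail row freed; buffer is empty again
        assert!(habv.is_empty());
    }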
766

767
    /// Update an allocated entry with a new value.
768
    #[inline]
769
    pub fn update<T: Pod + ShaderSize>(&mut self, offset: u32, value: &T) {
×
770
        let data: &[u8] = cast_slice(std::slice::from_ref(value));
×
771
        assert_eq!(value.size().get() as usize, data.len());
×
772
        self.update_raw(offset, data);
×
773
    }
774

775
    /// Update an allocated entry with new data.
776
    pub fn update_raw(&mut self, offset: u32, data: &[u8]) {
×
777
        // Can only update entire blocks starting at an aligned size
778
        let align = self.item_align as u32;
×
NEW
779
        if !offset.is_multiple_of(align) {
×
780
            return;
×
781
        }
782

783
        // Check for out of bounds argument
UNCOV
784
        let end = self.values.len() as u32;
×
UNCOV
785
        let data_end = offset + data.len() as u32;
×
786
        if offset >= end || data_end > end {
×
787
            return;
×
788
        }
789

790
        let dst: &mut [u8] = &mut self.values[offset as usize..data_end as usize];
791
        dst.copy_from_slice(data);
792

793
        self.is_stale = true;
794
    }
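
    // Illustrative sketch (not part of the original file): updating a previously pushed
    // value in place. The offset must be the row-aligned start returned by `push()`, and
    // the new data must fit within the bytes already stored at that location.
    #[allow(dead_code)]
    fn example_update() {
        let mut habv =
            HybridAlignedBufferVec::new(BufferUsages::STORAGE, NonZeroU64::new(32).unwrap(), None);
        let range = habv.push(&10u32);
        habv.update(range.start, &20u32);
        assert_eq!(
            &habv.values[range.start as usize..range.end as usize],
            &20u32.to_ne_bytes()
        );
    }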
795

796
    /// Reserve some capacity into the buffer.
797
    ///
798
    /// If the buffer is reallocated, the old content (on the GPU) is lost, and
799
    /// needs to be re-uploaded to the newly-created buffer. This is done with
800
    /// [`write_buffer()`].
801
    ///
802
    /// # Returns
803
    ///
804
    /// `true` if the buffer was (re)allocated, or `false` if an existing buffer
805
    /// was reused which already had enough capacity.
806
    ///
807
    /// [`write_buffer()`]: crate::AlignedBufferVec::write_buffer
808
    pub fn reserve(&mut self, capacity: usize, device: &RenderDevice) -> bool {
1✔
809
        if capacity > self.capacity {
1✔
810
            trace!(
1✔
811
                "reserve: increase capacity from {} to {} bytes",
1✔
812
                self.capacity,
813
                capacity,
814
            );
815
            self.capacity = capacity;
1✔
816
            if let Some(buffer) = self.buffer.take() {
1✔
817
                buffer.destroy();
818
            }
819
            self.buffer = Some(device.create_buffer(&BufferDescriptor {
3✔
820
                label: self.label.as_ref().map(|s| &s[..]),
4✔
821
                size: capacity as BufferAddress,
1✔
822
                usage: BufferUsages::COPY_DST | self.buffer_usage,
1✔
823
                mapped_at_creation: false,
824
            }));
825
            self.is_stale = !self.values.is_empty();
1✔
826
            // FIXME - this discards the old content if any!!!
827
            true
1✔
828
        } else {
829
            false
×
830
        }
831
    }
832

833
    /// Schedule the buffer write to GPU.
834
    ///
835
    /// # Returns
836
    ///
837
    /// `true` if the buffer was (re)allocated, `false` otherwise. If the buffer
838
    /// was reallocated, all bind groups referencing the old buffer should be
839
    /// destroyed.
840
    pub fn write_buffer(&mut self, device: &RenderDevice, queue: &RenderQueue) -> bool {
350✔
841
        if self.values.is_empty() || !self.is_stale {
714✔
842
            return false;
349✔
843
        }
844
        let size = self.values.len();
3✔
845
        trace!(
1✔
846
            "hybrid abv: write_buffer: size={}B item_align={}B",
1✔
847
            size,
848
            self.item_align,
849
        );
850
        let buffer_changed = self.reserve(size, device);
5✔
851
        if let Some(buffer) = &self.buffer {
2✔
852
            queue.write_buffer(buffer, 0, self.values.as_slice());
853
            self.is_stale = false;
854
        }
855
        buffer_changed
1✔
856
    }
857

858
    #[allow(dead_code)]
859
    pub fn clear(&mut self) {
×
860
        if !self.values.is_empty() {
×
861
            self.is_stale = true;
×
862
        }
863
        self.values.clear();
×
864
    }
865
}
866

867
#[cfg(test)]
868
mod tests {
869
    use std::num::NonZeroU64;
870

871
    use bevy::math::Vec3;
872
    use bytemuck::{Pod, Zeroable};
873

874
    use super::*;
875

876
    #[repr(C)]
877
    #[derive(Debug, Default, Clone, Copy, Pod, Zeroable, ShaderType)]
878
    pub(crate) struct GpuDummy {
879
        pub v: Vec3,
880
    }
881

882
    #[repr(C)]
883
    #[derive(Debug, Default, Clone, Copy, Pod, Zeroable, ShaderType)]
884
    pub(crate) struct GpuDummyComposed {
885
        pub simple: GpuDummy,
886
        pub tag: u32,
887
        // GPU padding to 16 bytes due to GpuDummy forcing align to 16 bytes
888
    }
889

890
    #[repr(C)]
891
    #[derive(Debug, Clone, Copy, Pod, Zeroable, ShaderType)]
892
    pub(crate) struct GpuDummyLarge {
893
        pub simple: GpuDummy,
894
        pub tag: u32,
895
        pub large: [f32; 128],
896
    }
897

898
    #[test]
899
    fn abv_sizes() {
900
        // Rust
901
        assert_eq!(std::mem::size_of::<GpuDummy>(), 12);
902
        assert_eq!(std::mem::align_of::<GpuDummy>(), 4);
903
        assert_eq!(std::mem::size_of::<GpuDummyComposed>(), 16); // tight packing
904
        assert_eq!(std::mem::align_of::<GpuDummyComposed>(), 4);
905
        assert_eq!(std::mem::size_of::<GpuDummyLarge>(), 132 * 4); // tight packing
906
        assert_eq!(std::mem::align_of::<GpuDummyLarge>(), 4);
907

908
        // GPU
909
        assert_eq!(<GpuDummy as ShaderType>::min_size().get(), 16); // Vec3 gets padded to 16 bytes
910
        assert_eq!(<GpuDummy as ShaderSize>::SHADER_SIZE.get(), 16);
911
        assert_eq!(<GpuDummyComposed as ShaderType>::min_size().get(), 32); // align is 16 bytes, forces padding
912
        assert_eq!(<GpuDummyComposed as ShaderSize>::SHADER_SIZE.get(), 32);
913
        assert_eq!(<GpuDummyLarge as ShaderType>::min_size().get(), 544); // align is 16 bytes, forces padding
914
        assert_eq!(<GpuDummyLarge as ShaderSize>::SHADER_SIZE.get(), 544);
915

916
        for (item_align, expected_aligned_size) in [
917
            (0, 16),
918
            (4, 16),
919
            (8, 16),
920
            (16, 16),
921
            (32, 32),
922
            (256, 256),
923
            (512, 512),
924
        ] {
925
            let mut abv = AlignedBufferVec::<GpuDummy>::new(
926
                BufferUsages::STORAGE,
927
                NonZeroU64::new(item_align),
928
                None,
929
            );
930
            assert_eq!(abv.aligned_size(), expected_aligned_size);
931
            assert!(abv.is_empty());
932
            abv.push(GpuDummy::default());
933
            assert!(!abv.is_empty());
934
            assert_eq!(abv.len(), 1);
935
        }
936

937
        for (item_align, expected_aligned_size) in [
938
            (0, 32),
939
            (4, 32),
940
            (8, 32),
941
            (16, 32),
942
            (32, 32),
943
            (256, 256),
944
            (512, 512),
945
        ] {
946
            let mut abv = AlignedBufferVec::<GpuDummyComposed>::new(
947
                BufferUsages::STORAGE,
948
                NonZeroU64::new(item_align),
949
                None,
950
            );
951
            assert_eq!(abv.aligned_size(), expected_aligned_size);
952
            assert!(abv.is_empty());
953
            abv.push(GpuDummyComposed::default());
954
            assert!(!abv.is_empty());
955
            assert_eq!(abv.len(), 1);
956
        }
957

958
        for (item_align, expected_aligned_size) in [
959
            (0, 544),
960
            (4, 544),
961
            (8, 544),
962
            (16, 544),
963
            (32, 544),
964
            (256, 768),
965
            (512, 1024),
966
        ] {
967
            let mut abv = AlignedBufferVec::<GpuDummyLarge>::new(
968
                BufferUsages::STORAGE,
969
                NonZeroU64::new(item_align),
970
                None,
971
            );
972
            assert_eq!(abv.aligned_size(), expected_aligned_size);
973
            assert!(abv.is_empty());
974
            abv.push(GpuDummyLarge {
975
                simple: Default::default(),
976
                tag: 0,
977
                large: [0.; 128],
978
            });
979
            assert!(!abv.is_empty());
980
            assert_eq!(abv.len(), 1);
981
        }
982
    }
983

984
    #[test]
985
    fn habv_remove() {
986
        let mut habv =
987
            HybridAlignedBufferVec::new(BufferUsages::STORAGE, NonZeroU64::new(32).unwrap(), None);
988
        assert!(habv.is_empty());
989
        assert_eq!(habv.item_align, 32);
990

991
        // +r -r
992
        {
993
            let r = habv.push(&42u32);
994
            assert_eq!(r, 0..4);
995
            assert!(!habv.is_empty());
996
            assert_eq!(habv.values.len(), 4);
997
            assert!(habv.free_rows.is_empty());
998

999
            assert!(habv.remove(r));
1000
            assert!(habv.is_empty());
1001
            assert!(habv.values.is_empty());
1002
            assert!(habv.free_rows.is_empty());
1003
        }
1004

1005
        // +r0 +r1 +r2 -r0 -r0 -r1 -r2
1006
        {
1007
            let r0 = habv.push(&42u32);
1008
            let r1 = habv.push(&84u32);
1009
            let r2 = habv.push(&84u32);
1010
            assert_eq!(r0, 0..4);
1011
            assert_eq!(r1, 32..36);
1012
            assert_eq!(r2, 64..68);
1013
            assert!(!habv.is_empty());
1014
            assert_eq!(habv.values.len(), 68);
1015
            assert!(habv.free_rows.is_empty());
1016

1017
            assert!(habv.remove(r0.clone()));
1018
            assert!(!habv.is_empty());
1019
            assert_eq!(habv.values.len(), 68);
1020
            assert_eq!(habv.free_rows.len(), 1);
1021
            assert_eq!(habv.free_rows[0], FreeRow(0..1));
1022

1023
            // dupe; no-op
1024
            assert!(!habv.remove(r0));
1025

1026
            assert!(habv.remove(r1.clone()));
1027
            assert!(!habv.is_empty());
1028
            assert_eq!(habv.values.len(), 68);
1029
            assert_eq!(habv.free_rows.len(), 1); // merged!
1030
            assert_eq!(habv.free_rows[0], FreeRow(0..2));
1031

1032
            assert!(habv.remove(r2));
1033
            assert!(habv.is_empty());
1034
            assert_eq!(habv.values.len(), 0);
1035
            assert!(habv.free_rows.is_empty());
1036
        }
1037

1038
        // +r0 +r1 +r2 -r1 -r0 -r2
1039
        {
1040
            let r0 = habv.push(&42u32);
1041
            let r1 = habv.push(&84u32);
1042
            let r2 = habv.push(&84u32);
1043
            assert_eq!(r0, 0..4);
1044
            assert_eq!(r1, 32..36);
1045
            assert_eq!(r2, 64..68);
1046
            assert!(!habv.is_empty());
1047
            assert_eq!(habv.values.len(), 68);
1048
            assert!(habv.free_rows.is_empty());
1049

1050
            assert!(habv.remove(r1.clone()));
1051
            assert!(!habv.is_empty());
1052
            assert_eq!(habv.values.len(), 68);
1053
            assert_eq!(habv.free_rows.len(), 1);
1054
            assert_eq!(habv.free_rows[0], FreeRow(1..2));
1055

1056
            assert!(habv.remove(r0.clone()));
1057
            assert!(!habv.is_empty());
1058
            assert_eq!(habv.values.len(), 68);
1059
            assert_eq!(habv.free_rows.len(), 1); // merged!
1060
            assert_eq!(habv.free_rows[0], FreeRow(0..2));
1061

1062
            assert!(habv.remove(r2));
1063
            assert!(habv.is_empty());
1064
            assert_eq!(habv.values.len(), 0);
1065
            assert!(habv.free_rows.is_empty());
1066
        }
1067

1068
        // +r0 +r1 +r2 -r1 -r2 -r0
1069
        {
1070
            let r0 = habv.push(&42u32);
1071
            let r1 = habv.push(&84u32);
1072
            let r2 = habv.push(&84u32);
1073
            assert_eq!(r0, 0..4);
1074
            assert_eq!(r1, 32..36);
1075
            assert_eq!(r2, 64..68);
1076
            assert!(!habv.is_empty());
1077
            assert_eq!(habv.values.len(), 68);
1078
            assert!(habv.free_rows.is_empty());
1079

1080
            assert!(habv.remove(r1.clone()));
1081
            assert!(!habv.is_empty());
1082
            assert_eq!(habv.values.len(), 68);
1083
            assert_eq!(habv.free_rows.len(), 1);
1084
            assert_eq!(habv.free_rows[0], FreeRow(1..2));
1085

1086
            assert!(habv.remove(r2.clone()));
1087
            assert!(!habv.is_empty());
1088
            assert_eq!(habv.values.len(), 32); // can't recover exact alloc (4), only row-aligned size (32)
1089
            assert!(habv.free_rows.is_empty()); // merged!
1090

1091
            assert!(habv.remove(r0));
1092
            assert!(habv.is_empty());
1093
            assert_eq!(habv.values.len(), 0);
1094
            assert!(habv.free_rows.is_empty());
1095
        }
1096

1097
        // +r0 +r1 +r2 +r3 +r4 -r3 -r1 -r2 -r4 r0
1098
        {
1099
            let r0 = habv.push(&42u32);
1100
            let r1 = habv.push(&84u32);
1101
            let r2 = habv.push(&84u32);
1102
            let r3 = habv.push(&84u32);
1103
            let r4 = habv.push(&84u32);
1104
            assert_eq!(r0, 0..4);
1105
            assert_eq!(r1, 32..36);
1106
            assert_eq!(r2, 64..68);
1107
            assert_eq!(r3, 96..100);
1108
            assert_eq!(r4, 128..132);
1109
            assert!(!habv.is_empty());
1110
            assert_eq!(habv.values.len(), 132);
1111
            assert!(habv.free_rows.is_empty());
1112

1113
            assert!(habv.remove(r3.clone()));
1114
            assert!(!habv.is_empty());
1115
            assert_eq!(habv.values.len(), 132);
1116
            assert_eq!(habv.free_rows.len(), 1);
1117
            assert_eq!(habv.free_rows[0], FreeRow(3..4));
1118

1119
            assert!(habv.remove(r1.clone()));
1120
            assert!(!habv.is_empty());
1121
            assert_eq!(habv.values.len(), 132);
1122
            assert_eq!(habv.free_rows.len(), 2);
1123
            assert_eq!(habv.free_rows[0], FreeRow(1..2)); // sorted!
1124
            assert_eq!(habv.free_rows[1], FreeRow(3..4));
1125

1126
            assert!(habv.remove(r2.clone()));
1127
            assert!(!habv.is_empty());
1128
            assert_eq!(habv.values.len(), 132);
1129
            assert_eq!(habv.free_rows.len(), 1); // merged!
1130
            assert_eq!(habv.free_rows[0], FreeRow(1..4)); // merged!
1131

1132
            assert!(habv.remove(r4.clone()));
1133
            assert!(!habv.is_empty());
1134
            assert_eq!(habv.values.len(), 32); // can't recover exact alloc (4), only row-aligned size (32)
1135
            assert!(habv.free_rows.is_empty());
1136

1137
            assert!(habv.remove(r0));
1138
            assert!(habv.is_empty());
1139
            assert_eq!(habv.values.len(), 0);
1140
            assert!(habv.free_rows.is_empty());
1141
        }
1142
    }
1143
}
1144

1145
#[cfg(all(test, feature = "gpu_tests"))]
1146
mod gpu_tests {
1147
    use tests::*;
1148

1149
    use super::*;
1150
    use crate::test_utils::MockRenderer;
1151

1152
    #[test]
1153
    fn abv_write() {
1154
        let renderer = MockRenderer::new();
1155
        let device = renderer.device();
1156
        let queue = renderer.queue();
1157

1158
        // Create a dummy CommandBuffer to force the write_buffer() call to have any
1159
        // effect.
1160
        let encoder = device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
1161
            label: Some("test"),
1162
        });
1163
        let command_buffer = encoder.finish();
1164

1165
        let item_align = device.limits().min_storage_buffer_offset_alignment as u64;
1166
        let mut abv = AlignedBufferVec::<GpuDummyComposed>::new(
1167
            BufferUsages::STORAGE | BufferUsages::MAP_READ,
1168
            NonZeroU64::new(item_align),
1169
            None,
1170
        );
1171
        let final_align = item_align.max(<GpuDummyComposed as ShaderSize>::SHADER_SIZE.get());
1172
        assert_eq!(abv.aligned_size(), final_align as usize);
1173

1174
        const CAPACITY: usize = 42;
1175

1176
        // Write buffer (CPU -> GPU)
1177
        abv.push(GpuDummyComposed {
1178
            tag: 1,
1179
            ..Default::default()
1180
        });
1181
        abv.push(GpuDummyComposed {
1182
            tag: 2,
1183
            ..Default::default()
1184
        });
1185
        abv.push(GpuDummyComposed {
1186
            tag: 3,
1187
            ..Default::default()
1188
        });
1189
        abv.reserve(CAPACITY, &device);
1190
        abv.write_buffer(&device, &queue);
1191
        // need a submit() for write_buffer() to be processed
1192
        queue.submit([command_buffer]);
1193
        let (tx, rx) = futures::channel::oneshot::channel();
1194
        queue.on_submitted_work_done(move || {
1195
            tx.send(()).unwrap();
1196
        });
1197
        let _ = device.poll(wgpu::PollType::Wait);
1198
        let _ = futures::executor::block_on(rx);
1199
        println!("Buffer written");
1200

1201
        // Read back (GPU -> CPU)
1202
        let buffer = abv.buffer();
1203
        let buffer = buffer.as_ref().expect("Buffer was not allocated");
1204
        let buffer = buffer.slice(..);
1205
        let (tx, rx) = futures::channel::oneshot::channel();
1206
        buffer.map_async(wgpu::MapMode::Read, move |result| {
1207
            tx.send(result).unwrap();
1208
        });
1209
        let _ = device.poll(wgpu::PollType::Wait);
1210
        let _result = futures::executor::block_on(rx);
1211
        let view = buffer.get_mapped_range();
1212

1213
        // Validate content
1214
        assert_eq!(view.len(), final_align as usize * CAPACITY);
1215
        for i in 0..3 {
1216
            let offset = i * final_align as usize;
1217
            let dummy_composed: &[GpuDummyComposed] =
1218
                cast_slice(&view[offset..offset + std::mem::size_of::<GpuDummyComposed>()]);
1219
            assert_eq!(dummy_composed[0].tag, (i + 1) as u32);
1220
        }
1221
    }
1222
}