Split big Buffer writes into multiple chunks

This commit is contained in:
Héctor Ramón Jiménez 2024-03-29 14:29:31 +01:00
parent 0a97b9e37a
commit 5f1eb43161
No known key found for this signature in database
GPG key ID: 7CC46565708259A7
2 changed files with 38 additions and 9 deletions

View file

@@ -1,3 +1,4 @@
use crate::buffer;
use crate::core::{Color, Size, Transformation};
use crate::graphics::backend;
use crate::graphics::color;
@@ -66,7 +67,9 @@ impl Backend {
// TODO: Resize belt smartly (?)
// It would be great if the `StagingBelt` API exposed methods
// for introspection to detect when a resize may be worth it.
staging_belt: wgpu::util::StagingBelt::new(1024 * 100),
staging_belt: wgpu::util::StagingBelt::new(
buffer::MAX_WRITE_SIZE as u64,
),
}
}

View file

@@ -1,6 +1,8 @@
use std::marker::PhantomData;
use std::ops::RangeBounds;
pub const MAX_WRITE_SIZE: usize = 1024 * 100;
#[derive(Debug)]
pub struct Buffer<T> {
label: &'static str,
@@ -69,14 +71,38 @@ impl<T: bytemuck::Pod> Buffer<T> {
) -> usize {
let bytes: &[u8] = bytemuck::cast_slice(contents);
belt.write_buffer(
encoder,
&self.raw,
offset as u64,
(bytes.len() as u64).try_into().expect("Non-empty write"),
device,
)
.copy_from_slice(bytes);
if bytes.len() <= MAX_WRITE_SIZE {
belt.write_buffer(
encoder,
&self.raw,
offset as u64,
(bytes.len() as u64).try_into().expect("Non-empty write"),
device,
)
.copy_from_slice(bytes);
} else {
let mut bytes_written = 0;
let bytes_per_chunk = (bytes.len().min(MAX_WRITE_SIZE) as u64)
.try_into()
.expect("Non-empty write");
while bytes_written < bytes.len() {
belt.write_buffer(
encoder,
&self.raw,
(offset + bytes_written) as u64,
bytes_per_chunk,
device,
)
.copy_from_slice(
&bytes[bytes_written
..bytes_written + bytes_per_chunk.get() as usize],
);
bytes_written += bytes_per_chunk.get() as usize;
}
}
self.offsets.push(offset as u64);