Merge branch 'master' into non-uniform-border-radius-for-quads

This commit is contained in:
Héctor Ramón Jiménez 2022-12-02 18:53:21 +01:00
commit 4029a1cdaa
No known key found for this signature in database
GPG key ID: 140CC052C94F138E
147 changed files with 4828 additions and 2184 deletions

View file

@ -1,6 +1,6 @@
[package]
name = "iced_wgpu"
version = "0.5.1"
version = "0.6.1"
authors = ["Héctor Ramón Jiménez <hector0193@gmail.com>"]
edition = "2021"
description = "A wgpu renderer for Iced"
@ -8,19 +8,19 @@ license = "MIT AND OFL-1.1"
repository = "https://github.com/iced-rs/iced"
[features]
svg = ["resvg", "usvg", "tiny-skia"]
image = ["png", "jpeg", "jpeg_rayon", "gif", "webp", "bmp"]
png = ["image_rs/png"]
jpeg = ["image_rs/jpeg"]
jpeg_rayon = ["image_rs/jpeg_rayon"]
gif = ["image_rs/gif"]
webp = ["image_rs/webp"]
pnm = ["image_rs/pnm"]
ico = ["image_rs/ico"]
bmp = ["image_rs/bmp"]
hdr = ["image_rs/hdr"]
dds = ["image_rs/dds"]
farbfeld = ["image_rs/farbfeld"]
svg = ["iced_graphics/svg"]
image = ["iced_graphics/image"]
png = ["iced_graphics/png"]
jpeg = ["iced_graphics/jpeg"]
jpeg_rayon = ["iced_graphics/jpeg_rayon"]
gif = ["iced_graphics/gif"]
webp = ["iced_graphics/webp"]
pnm = ["iced_graphics/pnm"]
ico = ["iced_graphics/ico"]
bmp = ["iced_graphics/bmp"]
hdr = ["iced_graphics/hdr"]
dds = ["iced_graphics/dds"]
farbfeld = ["iced_graphics/farbfeld"]
canvas = ["iced_graphics/canvas"]
qr_code = ["iced_graphics/qr_code"]
default_system_font = ["iced_graphics/font-source"]
@ -35,7 +35,6 @@ raw-window-handle = "0.5"
log = "0.4"
guillotiere = "0.6"
futures = "0.3"
kamadak-exif = "0.5"
bitflags = "1.2"
[dependencies.bytemuck]
@ -43,32 +42,14 @@ version = "1.9"
features = ["derive"]
[dependencies.iced_native]
version = "0.5"
version = "0.6"
path = "../native"
[dependencies.iced_graphics]
version = "0.3"
version = "0.4"
path = "../graphics"
features = ["font-fallback", "font-icons"]
[dependencies.image_rs]
version = "0.23"
package = "image"
default-features = false
optional = true
[dependencies.resvg]
version = "0.18"
optional = true
[dependencies.usvg]
version = "0.18"
optional = true
[dependencies.tiny-skia]
version = "0.6"
optional = true
[dependencies.encase]
version = "0.3.0"
features = ["glam"]

View file

@ -10,7 +10,7 @@ use iced_graphics::{Primitive, Viewport};
use iced_native::alignment;
use iced_native::{Font, Size};
#[cfg(any(feature = "image_rs", feature = "svg"))]
#[cfg(any(feature = "image", feature = "svg"))]
use crate::image;
/// A [`wgpu`] graphics backend for [`iced`].
@ -23,7 +23,7 @@ pub struct Backend {
text_pipeline: text::Pipeline,
triangle_pipeline: triangle::Pipeline,
#[cfg(any(feature = "image_rs", feature = "svg"))]
#[cfg(any(feature = "image", feature = "svg"))]
image_pipeline: image::Pipeline,
default_text_size: u16,
@ -47,7 +47,7 @@ impl Backend {
let triangle_pipeline =
triangle::Pipeline::new(device, format, settings.antialiasing);
#[cfg(any(feature = "image_rs", feature = "svg"))]
#[cfg(any(feature = "image", feature = "svg"))]
let image_pipeline = image::Pipeline::new(device, format);
Self {
@ -55,7 +55,7 @@ impl Backend {
text_pipeline,
triangle_pipeline,
#[cfg(any(feature = "image_rs", feature = "svg"))]
#[cfg(any(feature = "image", feature = "svg"))]
image_pipeline,
default_text_size: settings.default_text_size,
@ -98,8 +98,8 @@ impl Backend {
);
}
#[cfg(any(feature = "image_rs", feature = "svg"))]
self.image_pipeline.trim_cache();
#[cfg(any(feature = "image", feature = "svg"))]
self.image_pipeline.trim_cache(device, encoder);
}
fn flush(
@ -148,7 +148,7 @@ impl Backend {
);
}
#[cfg(any(feature = "image_rs", feature = "svg"))]
#[cfg(any(feature = "image", feature = "svg"))]
{
if !layer.images.is_empty() {
let scaled = transformation
@ -294,9 +294,9 @@ impl backend::Text for Backend {
}
}
#[cfg(feature = "image_rs")]
#[cfg(feature = "image")]
impl backend::Image for Backend {
fn dimensions(&self, handle: &iced_native::image::Handle) -> (u32, u32) {
fn dimensions(&self, handle: &iced_native::image::Handle) -> Size<u32> {
self.image_pipeline.dimensions(handle)
}
}
@ -306,7 +306,7 @@ impl backend::Svg for Backend {
fn viewport_dimensions(
&self,
handle: &iced_native::svg::Handle,
) -> (u32, u32) {
) -> Size<u32> {
self.image_pipeline.viewport_dimensions(handle)
}
}

View file

@ -1,10 +1,13 @@
//! Utilities for uniform buffer operations.
use encase::private::WriteInto;
use encase::ShaderType;
use std::fmt;
use std::marker::PhantomData;
/// A dynamic buffer is any type of buffer which does not have a static offset.
pub(crate) struct Buffer<T: ShaderType> {
#[derive(Debug)]
pub struct Buffer<T: ShaderType> {
offsets: Vec<wgpu::DynamicOffset>,
cpu: Internal,
gpu: wgpu::Buffer,
@ -24,6 +27,7 @@ impl<T: ShaderType + WriteInto> Buffer<T> {
)
}
#[cfg(not(target_arch = "wasm32"))]
/// Creates a new dynamic storage buffer.
pub fn storage(device: &wgpu::Device, label: &'static str) -> Self {
Buffer::new(
@ -91,6 +95,7 @@ impl<T: ShaderType + WriteInto> Buffer<T> {
Internal::Uniform(_) => {
wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST
}
#[cfg(not(target_arch = "wasm32"))]
Internal::Storage(_) => {
wgpu::BufferUsages::STORAGE | wgpu::BufferUsages::COPY_DST
}
@ -154,6 +159,8 @@ impl<T: ShaderType + WriteInto> Buffer<T> {
// Currently supported dynamic buffers.
enum Internal {
Uniform(encase::DynamicUniformBuffer<Vec<u8>>),
#[cfg(not(target_arch = "wasm32"))]
//storage buffers are not supported on wgpu wasm target (yet)
Storage(encase::DynamicStorageBuffer<Vec<u8>>),
}
@ -168,6 +175,7 @@ impl Internal {
.write(value)
.expect("Error when writing to dynamic uniform buffer.")
as u32,
#[cfg(not(target_arch = "wasm32"))]
Internal::Storage(buf) => buf
.write(value)
.expect("Error when writing to dynamic storage buffer.")
@ -179,6 +187,7 @@ impl Internal {
pub(super) fn get_ref(&self) -> &Vec<u8> {
match self {
Internal::Uniform(buf) => buf.as_ref(),
#[cfg(not(target_arch = "wasm32"))]
Internal::Storage(buf) => buf.as_ref(),
}
}
@ -190,6 +199,7 @@ impl Internal {
buf.as_mut().clear();
buf.set_offset(0);
}
#[cfg(not(target_arch = "wasm32"))]
Internal::Storage(buf) => {
buf.as_mut().clear();
buf.set_offset(0);
@ -197,3 +207,13 @@ impl Internal {
}
}
}
impl fmt::Debug for Internal {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Self::Uniform(_) => write!(f, "Internal::Uniform(_)"),
#[cfg(not(target_arch = "wasm32"))]
Self::Storage(_) => write!(f, "Internal::Storage(_)"),
}
}
}

View file

@ -8,7 +8,7 @@ const DEFAULT_STATIC_BUFFER_COUNT: wgpu::BufferAddress = 128;
/// A generic buffer struct useful for items which have no alignment requirements
/// (e.g. Vertex, Index buffers) & no dynamic offsets.
#[derive(Debug)]
pub(crate) struct Buffer<T> {
pub struct Buffer<T> {
//stored sequentially per mesh iteration; refers to the offset index in the GPU buffer
offsets: Vec<wgpu::BufferAddress>,
label: &'static str,

View file

@ -1,22 +1,23 @@
mod atlas;
#[cfg(feature = "image_rs")]
mod raster;
#[cfg(feature = "image")]
use iced_graphics::image::raster;
#[cfg(feature = "svg")]
mod vector;
use iced_graphics::image::vector;
use crate::Transformation;
use atlas::Atlas;
use iced_graphics::layer;
use iced_native::Rectangle;
use iced_native::{Rectangle, Size};
use std::cell::RefCell;
use std::mem;
use bytemuck::{Pod, Zeroable};
#[cfg(feature = "image_rs")]
#[cfg(feature = "image")]
use iced_native::image;
#[cfg(feature = "svg")]
@ -24,10 +25,10 @@ use iced_native::svg;
#[derive(Debug)]
pub struct Pipeline {
#[cfg(feature = "image_rs")]
raster_cache: RefCell<raster::Cache>,
#[cfg(feature = "image")]
raster_cache: RefCell<raster::Cache<Atlas>>,
#[cfg(feature = "svg")]
vector_cache: RefCell<vector::Cache>,
vector_cache: RefCell<vector::Cache<Atlas>>,
pipeline: wgpu::RenderPipeline,
uniforms: wgpu::Buffer,
@ -242,11 +243,11 @@ impl Pipeline {
});
Pipeline {
#[cfg(feature = "image_rs")]
raster_cache: RefCell::new(raster::Cache::new()),
#[cfg(feature = "image")]
raster_cache: RefCell::new(raster::Cache::default()),
#[cfg(feature = "svg")]
vector_cache: RefCell::new(vector::Cache::new()),
vector_cache: RefCell::new(vector::Cache::default()),
pipeline,
uniforms: uniforms_buffer,
@ -261,8 +262,8 @@ impl Pipeline {
}
}
#[cfg(feature = "image_rs")]
pub fn dimensions(&self, handle: &image::Handle) -> (u32, u32) {
#[cfg(feature = "image")]
pub fn dimensions(&self, handle: &image::Handle) -> Size<u32> {
let mut cache = self.raster_cache.borrow_mut();
let memory = cache.load(handle);
@ -270,7 +271,7 @@ impl Pipeline {
}
#[cfg(feature = "svg")]
pub fn viewport_dimensions(&self, handle: &svg::Handle) -> (u32, u32) {
pub fn viewport_dimensions(&self, handle: &svg::Handle) -> Size<u32> {
let mut cache = self.vector_cache.borrow_mut();
let svg = cache.load(handle);
@ -290,7 +291,7 @@ impl Pipeline {
) {
let instances: &mut Vec<Instance> = &mut Vec::new();
#[cfg(feature = "image_rs")]
#[cfg(feature = "image")]
let mut raster_cache = self.raster_cache.borrow_mut();
#[cfg(feature = "svg")]
@ -298,12 +299,11 @@ impl Pipeline {
for image in images {
match &image {
#[cfg(feature = "image_rs")]
#[cfg(feature = "image")]
layer::Image::Raster { handle, bounds } => {
if let Some(atlas_entry) = raster_cache.upload(
handle,
device,
encoder,
&mut (device, encoder),
&mut self.texture_atlas,
) {
add_instances(
@ -314,7 +314,7 @@ impl Pipeline {
);
}
}
#[cfg(not(feature = "image_rs"))]
#[cfg(not(feature = "image"))]
layer::Image::Raster { .. } => {}
#[cfg(feature = "svg")]
@ -325,8 +325,7 @@ impl Pipeline {
handle,
size,
_scale,
device,
encoder,
&mut (device, encoder),
&mut self.texture_atlas,
) {
add_instances(
@ -446,12 +445,20 @@ impl Pipeline {
}
}
pub fn trim_cache(&mut self) {
#[cfg(feature = "image_rs")]
self.raster_cache.borrow_mut().trim(&mut self.texture_atlas);
pub fn trim_cache(
&mut self,
device: &wgpu::Device,
encoder: &mut wgpu::CommandEncoder,
) {
#[cfg(feature = "image")]
self.raster_cache
.borrow_mut()
.trim(&mut self.texture_atlas, &mut (device, encoder));
#[cfg(feature = "svg")]
self.vector_cache.borrow_mut().trim(&mut self.texture_atlas);
self.vector_cache
.borrow_mut()
.trim(&mut self.texture_atlas, &mut (device, encoder));
}
}
@ -509,15 +516,18 @@ fn add_instances(
add_instance(image_position, image_size, allocation, instances);
}
atlas::Entry::Fragmented { fragments, size } => {
let scaling_x = image_size[0] / size.0 as f32;
let scaling_y = image_size[1] / size.1 as f32;
let scaling_x = image_size[0] / size.width as f32;
let scaling_y = image_size[1] / size.height as f32;
for fragment in fragments {
let allocation = &fragment.allocation;
let [x, y] = image_position;
let (fragment_x, fragment_y) = fragment.position;
let (fragment_width, fragment_height) = allocation.size();
let Size {
width: fragment_width,
height: fragment_height,
} = allocation.size();
let position = [
x + fragment_x as f32 * scaling_x,
@ -543,7 +553,7 @@ fn add_instance(
instances: &mut Vec<Instance>,
) {
let (x, y) = allocation.position();
let (width, height) = allocation.size();
let Size { width, height } = allocation.size();
let layer = allocation.layer();
let instance = Instance {

View file

@ -4,8 +4,6 @@ mod allocation;
mod allocator;
mod layer;
use std::num::NonZeroU32;
pub use allocation::Allocation;
pub use entry::Entry;
pub use layer::Layer;
@ -14,6 +12,11 @@ use allocator::Allocator;
pub const SIZE: u32 = 2048;
use iced_graphics::image;
use iced_graphics::Size;
use std::num::NonZeroU32;
#[derive(Debug)]
pub struct Atlas {
texture: wgpu::Texture,
@ -35,7 +38,7 @@ impl Atlas {
mip_level_count: 1,
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: wgpu::TextureFormat::Bgra8UnormSrgb,
format: wgpu::TextureFormat::Rgba8UnormSrgb,
usage: wgpu::TextureUsages::COPY_DST
| wgpu::TextureUsages::COPY_SRC
| wgpu::TextureUsages::TEXTURE_BINDING,
@ -61,99 +64,6 @@ impl Atlas {
self.layers.len()
}
pub fn upload(
&mut self,
width: u32,
height: u32,
data: &[u8],
device: &wgpu::Device,
encoder: &mut wgpu::CommandEncoder,
) -> Option<Entry> {
use wgpu::util::DeviceExt;
let entry = {
let current_size = self.layers.len();
let entry = self.allocate(width, height)?;
// We grow the internal texture after allocating if necessary
let new_layers = self.layers.len() - current_size;
self.grow(new_layers, device, encoder);
entry
};
log::info!("Allocated atlas entry: {:?}", entry);
// It is a webgpu requirement that:
// BufferCopyView.layout.bytes_per_row % wgpu::COPY_BYTES_PER_ROW_ALIGNMENT == 0
// So we calculate padded_width by rounding width up to the next
// multiple of wgpu::COPY_BYTES_PER_ROW_ALIGNMENT.
let align = wgpu::COPY_BYTES_PER_ROW_ALIGNMENT;
let padding = (align - (4 * width) % align) % align;
let padded_width = (4 * width + padding) as usize;
let padded_data_size = padded_width * height as usize;
let mut padded_data = vec![0; padded_data_size];
for row in 0..height as usize {
let offset = row * padded_width;
padded_data[offset..offset + 4 * width as usize].copy_from_slice(
&data[row * 4 * width as usize..(row + 1) * 4 * width as usize],
)
}
let buffer =
device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("iced_wgpu::image staging buffer"),
contents: &padded_data,
usage: wgpu::BufferUsages::COPY_SRC,
});
match &entry {
Entry::Contiguous(allocation) => {
self.upload_allocation(
&buffer, width, height, padding, 0, allocation, encoder,
);
}
Entry::Fragmented { fragments, .. } => {
for fragment in fragments {
let (x, y) = fragment.position;
let offset = (y * padded_width as u32 + 4 * x) as usize;
self.upload_allocation(
&buffer,
width,
height,
padding,
offset,
&fragment.allocation,
encoder,
);
}
}
}
log::info!("Current atlas: {:?}", self);
Some(entry)
}
pub fn remove(&mut self, entry: &Entry) {
log::info!("Removing atlas entry: {:?}", entry);
match entry {
Entry::Contiguous(allocation) => {
self.deallocate(allocation);
}
Entry::Fragmented { fragments, .. } => {
for fragment in fragments {
self.deallocate(&fragment.allocation);
}
}
}
}
fn allocate(&mut self, width: u32, height: u32) -> Option<Entry> {
// Allocate one layer if texture fits perfectly
if width == SIZE && height == SIZE {
@ -204,7 +114,7 @@ impl Atlas {
}
return Some(Entry::Fragmented {
size: (width, height),
size: Size::new(width, height),
fragments,
});
}
@ -284,7 +194,7 @@ impl Atlas {
encoder: &mut wgpu::CommandEncoder,
) {
let (x, y) = allocation.position();
let (width, height) = allocation.size();
let Size { width, height } = allocation.size();
let layer = allocation.layer();
let extent = wgpu::Extent3d {
@ -336,7 +246,7 @@ impl Atlas {
mip_level_count: 1,
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: wgpu::TextureFormat::Bgra8UnormSrgb,
format: wgpu::TextureFormat::Rgba8UnormSrgb,
usage: wgpu::TextureUsages::COPY_DST
| wgpu::TextureUsages::COPY_SRC
| wgpu::TextureUsages::TEXTURE_BINDING,
@ -388,3 +298,100 @@ impl Atlas {
});
}
}
impl image::Storage for Atlas {
type Entry = Entry;
type State<'a> = (&'a wgpu::Device, &'a mut wgpu::CommandEncoder);
fn upload(
&mut self,
width: u32,
height: u32,
data: &[u8],
(device, encoder): &mut Self::State<'_>,
) -> Option<Self::Entry> {
use wgpu::util::DeviceExt;
let entry = {
let current_size = self.layers.len();
let entry = self.allocate(width, height)?;
// We grow the internal texture after allocating if necessary
let new_layers = self.layers.len() - current_size;
self.grow(new_layers, device, encoder);
entry
};
log::info!("Allocated atlas entry: {:?}", entry);
// It is a webgpu requirement that:
// BufferCopyView.layout.bytes_per_row % wgpu::COPY_BYTES_PER_ROW_ALIGNMENT == 0
// So we calculate padded_width by rounding width up to the next
// multiple of wgpu::COPY_BYTES_PER_ROW_ALIGNMENT.
let align = wgpu::COPY_BYTES_PER_ROW_ALIGNMENT;
let padding = (align - (4 * width) % align) % align;
let padded_width = (4 * width + padding) as usize;
let padded_data_size = padded_width * height as usize;
let mut padded_data = vec![0; padded_data_size];
for row in 0..height as usize {
let offset = row * padded_width;
padded_data[offset..offset + 4 * width as usize].copy_from_slice(
&data[row * 4 * width as usize..(row + 1) * 4 * width as usize],
)
}
let buffer =
device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("iced_wgpu::image staging buffer"),
contents: &padded_data,
usage: wgpu::BufferUsages::COPY_SRC,
});
match &entry {
Entry::Contiguous(allocation) => {
self.upload_allocation(
&buffer, width, height, padding, 0, allocation, encoder,
);
}
Entry::Fragmented { fragments, .. } => {
for fragment in fragments {
let (x, y) = fragment.position;
let offset = (y * padded_width as u32 + 4 * x) as usize;
self.upload_allocation(
&buffer,
width,
height,
padding,
offset,
&fragment.allocation,
encoder,
);
}
}
}
log::info!("Current atlas: {:?}", self);
Some(entry)
}
fn remove(&mut self, entry: &Entry, _: &mut Self::State<'_>) {
log::info!("Removing atlas entry: {:?}", entry);
match entry {
Entry::Contiguous(allocation) => {
self.deallocate(allocation);
}
Entry::Fragmented { fragments, .. } => {
for fragment in fragments {
self.deallocate(&fragment.allocation);
}
}
}
}
}

View file

@ -1,5 +1,7 @@
use crate::image::atlas::{self, allocator};
use iced_graphics::Size;
#[derive(Debug)]
pub enum Allocation {
Partial {
@ -19,10 +21,10 @@ impl Allocation {
}
}
pub fn size(&self) -> (u32, u32) {
pub fn size(&self) -> Size<u32> {
match self {
Allocation::Partial { region, .. } => region.size(),
Allocation::Full { .. } => (atlas::SIZE, atlas::SIZE),
Allocation::Full { .. } => Size::new(atlas::SIZE, atlas::SIZE),
}
}

View file

@ -46,10 +46,10 @@ impl Region {
(rectangle.min.x as u32, rectangle.min.y as u32)
}
pub fn size(&self) -> (u32, u32) {
pub fn size(&self) -> iced_graphics::Size<u32> {
let size = self.allocation.rectangle.size();
(size.width as u32, size.height as u32)
iced_graphics::Size::new(size.width as u32, size.height as u32)
}
}

View file

@ -1,17 +1,19 @@
use crate::image::atlas;
use iced_graphics::image;
use iced_graphics::Size;
#[derive(Debug)]
pub enum Entry {
Contiguous(atlas::Allocation),
Fragmented {
size: (u32, u32),
size: Size<u32>,
fragments: Vec<Fragment>,
},
}
impl Entry {
#[cfg(feature = "image_rs")]
pub fn size(&self) -> (u32, u32) {
impl image::storage::Entry for Entry {
fn size(&self) -> Size<u32> {
match self {
Entry::Contiguous(allocation) => allocation.size(),
Entry::Fragmented { size, .. } => *size,

View file

@ -1,222 +0,0 @@
use crate::image::atlas::{self, Atlas};
use iced_native::image;
use std::collections::{HashMap, HashSet};
use bitflags::bitflags;
#[derive(Debug)]
pub enum Memory {
Host(::image_rs::ImageBuffer<::image_rs::Bgra<u8>, Vec<u8>>),
Device(atlas::Entry),
NotFound,
Invalid,
}
impl Memory {
pub fn dimensions(&self) -> (u32, u32) {
match self {
Memory::Host(image) => image.dimensions(),
Memory::Device(entry) => entry.size(),
Memory::NotFound => (1, 1),
Memory::Invalid => (1, 1),
}
}
}
#[derive(Debug)]
pub struct Cache {
map: HashMap<u64, Memory>,
hits: HashSet<u64>,
}
impl Cache {
pub fn new() -> Self {
Self {
map: HashMap::new(),
hits: HashSet::new(),
}
}
pub fn load(&mut self, handle: &image::Handle) -> &mut Memory {
if self.contains(handle) {
return self.get(handle).unwrap();
}
let memory = match handle.data() {
image::Data::Path(path) => {
if let Ok(image) = image_rs::open(path) {
let operation = std::fs::File::open(path)
.ok()
.map(std::io::BufReader::new)
.and_then(|mut reader| {
Operation::from_exif(&mut reader).ok()
})
.unwrap_or_else(Operation::empty);
Memory::Host(operation.perform(image.to_bgra8()))
} else {
Memory::NotFound
}
}
image::Data::Bytes(bytes) => {
if let Ok(image) = image_rs::load_from_memory(bytes) {
let operation =
Operation::from_exif(&mut std::io::Cursor::new(bytes))
.ok()
.unwrap_or_else(Operation::empty);
Memory::Host(operation.perform(image.to_bgra8()))
} else {
Memory::Invalid
}
}
image::Data::Pixels {
width,
height,
pixels,
} => {
if let Some(image) = image_rs::ImageBuffer::from_vec(
*width,
*height,
pixels.to_vec(),
) {
Memory::Host(image)
} else {
Memory::Invalid
}
}
};
self.insert(handle, memory);
self.get(handle).unwrap()
}
pub fn upload(
&mut self,
handle: &image::Handle,
device: &wgpu::Device,
encoder: &mut wgpu::CommandEncoder,
atlas: &mut Atlas,
) -> Option<&atlas::Entry> {
let memory = self.load(handle);
if let Memory::Host(image) = memory {
let (width, height) = image.dimensions();
let entry = atlas.upload(width, height, image, device, encoder)?;
*memory = Memory::Device(entry);
}
if let Memory::Device(allocation) = memory {
Some(allocation)
} else {
None
}
}
pub fn trim(&mut self, atlas: &mut Atlas) {
let hits = &self.hits;
self.map.retain(|k, memory| {
let retain = hits.contains(k);
if !retain {
if let Memory::Device(entry) = memory {
atlas.remove(entry);
}
}
retain
});
self.hits.clear();
}
fn get(&mut self, handle: &image::Handle) -> Option<&mut Memory> {
let _ = self.hits.insert(handle.id());
self.map.get_mut(&handle.id())
}
fn insert(&mut self, handle: &image::Handle, memory: Memory) {
let _ = self.map.insert(handle.id(), memory);
}
fn contains(&self, handle: &image::Handle) -> bool {
self.map.contains_key(&handle.id())
}
}
bitflags! {
struct Operation: u8 {
const FLIP_HORIZONTALLY = 0b001;
const ROTATE_180 = 0b010;
const FLIP_DIAGONALLY = 0b100;
}
}
impl Operation {
// Meaning of the returned value is described e.g. at:
// https://magnushoff.com/articles/jpeg-orientation/
fn from_exif<R>(reader: &mut R) -> Result<Self, exif::Error>
where
R: std::io::BufRead + std::io::Seek,
{
let exif = exif::Reader::new().read_from_container(reader)?;
Ok(exif
.get_field(exif::Tag::Orientation, exif::In::PRIMARY)
.and_then(|field| field.value.get_uint(0))
.and_then(|value| u8::try_from(value).ok())
.and_then(|value| Self::from_bits(value.saturating_sub(1)))
.unwrap_or_else(Self::empty))
}
fn perform<P>(
self,
image: image_rs::ImageBuffer<P, Vec<P::Subpixel>>,
) -> image_rs::ImageBuffer<P, Vec<P::Subpixel>>
where
P: image_rs::Pixel + 'static,
{
use image_rs::imageops;
let mut image = if self.contains(Self::FLIP_DIAGONALLY) {
flip_diagonally(image)
} else {
image
};
if self.contains(Self::ROTATE_180) {
imageops::rotate180_in_place(&mut image);
}
if self.contains(Self::FLIP_HORIZONTALLY) {
imageops::flip_horizontal_in_place(&mut image);
}
image
}
}
fn flip_diagonally<I>(
image: I,
) -> image_rs::ImageBuffer<I::Pixel, Vec<<I::Pixel as image_rs::Pixel>::Subpixel>>
where
I: image_rs::GenericImage,
I::Pixel: 'static,
{
let (width, height) = image.dimensions();
let mut out = image_rs::ImageBuffer::new(height, width);
for x in 0..width {
for y in 0..height {
let p = image.get_pixel(x, y);
out.put_pixel(y, x, p);
}
}
out
}

View file

@ -1,173 +0,0 @@
use crate::image::atlas::{self, Atlas};
use iced_native::svg;
use std::collections::{HashMap, HashSet};
use std::fs;
pub enum Svg {
Loaded(usvg::Tree),
NotFound,
}
impl Svg {
pub fn viewport_dimensions(&self) -> (u32, u32) {
match self {
Svg::Loaded(tree) => {
let size = tree.svg_node().size;
(size.width() as u32, size.height() as u32)
}
Svg::NotFound => (1, 1),
}
}
}
#[derive(Debug)]
pub struct Cache {
svgs: HashMap<u64, Svg>,
rasterized: HashMap<(u64, u32, u32), atlas::Entry>,
svg_hits: HashSet<u64>,
rasterized_hits: HashSet<(u64, u32, u32)>,
}
impl Cache {
pub fn new() -> Self {
Self {
svgs: HashMap::new(),
rasterized: HashMap::new(),
svg_hits: HashSet::new(),
rasterized_hits: HashSet::new(),
}
}
pub fn load(&mut self, handle: &svg::Handle) -> &Svg {
if self.svgs.contains_key(&handle.id()) {
return self.svgs.get(&handle.id()).unwrap();
}
let svg = match handle.data() {
svg::Data::Path(path) => {
let tree = fs::read_to_string(path).ok().and_then(|contents| {
usvg::Tree::from_str(
&contents,
&usvg::Options::default().to_ref(),
)
.ok()
});
tree.map(Svg::Loaded).unwrap_or(Svg::NotFound)
}
svg::Data::Bytes(bytes) => {
match usvg::Tree::from_data(
bytes,
&usvg::Options::default().to_ref(),
) {
Ok(tree) => Svg::Loaded(tree),
Err(_) => Svg::NotFound,
}
}
};
let _ = self.svgs.insert(handle.id(), svg);
self.svgs.get(&handle.id()).unwrap()
}
pub fn upload(
&mut self,
handle: &svg::Handle,
[width, height]: [f32; 2],
scale: f32,
device: &wgpu::Device,
encoder: &mut wgpu::CommandEncoder,
texture_atlas: &mut Atlas,
) -> Option<&atlas::Entry> {
let id = handle.id();
let (width, height) = (
(scale * width).ceil() as u32,
(scale * height).ceil() as u32,
);
// TODO: Optimize!
// We currently rerasterize the SVG when its size changes. This is slow
// as heck. A GPU rasterizer like `pathfinder` may perform better.
// It would be cool to be able to smooth resize the `svg` example.
if self.rasterized.contains_key(&(id, width, height)) {
let _ = self.svg_hits.insert(id);
let _ = self.rasterized_hits.insert((id, width, height));
return self.rasterized.get(&(id, width, height));
}
match self.load(handle) {
Svg::Loaded(tree) => {
if width == 0 || height == 0 {
return None;
}
// TODO: Optimize!
// We currently rerasterize the SVG when its size changes. This is slow
// as heck. A GPU rasterizer like `pathfinder` may perform better.
// It would be cool to be able to smooth resize the `svg` example.
let mut img = tiny_skia::Pixmap::new(width, height)?;
resvg::render(
tree,
if width > height {
usvg::FitTo::Width(width)
} else {
usvg::FitTo::Height(height)
},
img.as_mut(),
)?;
let mut rgba = img.take();
rgba.chunks_exact_mut(4).for_each(|rgba| rgba.swap(0, 2));
let allocation = texture_atlas.upload(
width,
height,
bytemuck::cast_slice(rgba.as_slice()),
device,
encoder,
)?;
log::debug!("allocating {} {}x{}", id, width, height);
let _ = self.svg_hits.insert(id);
let _ = self.rasterized_hits.insert((id, width, height));
let _ = self.rasterized.insert((id, width, height), allocation);
self.rasterized.get(&(id, width, height))
}
Svg::NotFound => None,
}
}
pub fn trim(&mut self, atlas: &mut Atlas) {
let svg_hits = &self.svg_hits;
let rasterized_hits = &self.rasterized_hits;
self.svgs.retain(|k, _| svg_hits.contains(k));
self.rasterized.retain(|k, entry| {
let retain = rasterized_hits.contains(k);
if !retain {
atlas.remove(entry);
}
retain
});
self.svg_hits.clear();
self.rasterized_hits.clear();
}
}
impl std::fmt::Debug for Svg {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
Svg::Loaded(_) => write!(f, "Svg::Loaded"),
Svg::NotFound => write!(f, "Svg::NotFound"),
}
}
}

View file

@ -16,7 +16,7 @@
//! - Meshes of triangles, useful to draw geometry freely.
//!
//! [Iced]: https://github.com/iced-rs/iced
//! [`iced_native`]: https://github.com/iced-rs/iced/tree/0.4/native
//! [`iced_native`]: https://github.com/iced-rs/iced/tree/0.5/native
//! [`wgpu`]: https://github.com/gfx-rs/wgpu-rs
//! [WebGPU API]: https://gpuweb.github.io/gpuweb/
//! [`wgpu_glyph`]: https://github.com/hecrj/wgpu_glyph
@ -39,13 +39,13 @@
#![cfg_attr(docsrs, feature(doc_cfg))]
pub mod settings;
pub mod triangle;
pub mod window;
mod backend;
mod buffer;
mod quad;
mod text;
mod triangle;
pub use iced_graphics::{Antialiasing, Color, Error, Primitive, Viewport};
pub use iced_native::Theme;
@ -56,7 +56,7 @@ pub use settings::Settings;
pub(crate) use iced_graphics::Transformation;
#[cfg(any(feature = "image_rs", feature = "svg"))]
#[cfg(any(feature = "image", feature = "svg"))]
mod image;
/// A [`wgpu`] graphics renderer for [`iced`].

View file

@ -1,17 +1,30 @@
struct Uniforms {
struct Globals {
transform: mat4x4<f32>,
color: vec4<f32>
}
@group(0) @binding(0)
var<uniform> uniforms: Uniforms;
@group(0) @binding(0) var<uniform> globals: Globals;
struct VertexInput {
@location(0) position: vec2<f32>,
@location(1) color: vec4<f32>,
}
struct VertexOutput {
@builtin(position) position: vec4<f32>,
@location(0) color: vec4<f32>,
}
@vertex
fn vs_main(@location(0) input: vec2<f32>) -> @builtin(position) vec4<f32> {
return uniforms.transform * vec4<f32>(input.xy, 0.0, 1.0);
fn vs_main(input: VertexInput) -> VertexOutput {
var out: VertexOutput;
out.color = input.color;
out.position = globals.transform * vec4<f32>(input.position, 0.0, 1.0);
return out;
}
@fragment
fn fs_main() -> @location(0) vec4<f32> {
return uniforms.color;
fn fs_main(input: VertexOutput) -> @location(0) vec4<f32> {
return input.color;
}

View file

@ -1,30 +0,0 @@
struct Globals {
transform: mat4x4<f32>,
}
@group(0) @binding(0) var<uniform> globals: Globals;
struct VertexInput {
@location(0) position: vec2<f32>,
@location(1) color: vec4<f32>,
}
struct VertexOutput {
@builtin(position) position: vec4<f32>,
@location(0) color: vec4<f32>,
}
@vertex
fn vs_main(input: VertexInput) -> VertexOutput {
var out: VertexOutput;
out.color = input.color;
out.position = globals.transform * vec4<f32>(input.position, 0.0, 1.0);
return out;
}
@fragment
fn fs_main(input: VertexOutput) -> @location(0) vec4<f32> {
return input.color;
}

View file

@ -1,64 +1,27 @@
//! Draw meshes of triangles.
mod gradient;
mod msaa;
mod solid;
use crate::buffer::r#static::Buffer;
use crate::settings;
use crate::Transformation;
use iced_graphics::layer::mesh::{self, Mesh};
use iced_graphics::triangle::{self, Vertex2D};
use iced_graphics::triangle::ColoredVertex2D;
use iced_graphics::Size;
use core::fmt;
use std::fmt::Formatter;
/// Triangle pipeline for all mesh layers in a [`iced_graphics::Canvas`] widget.
#[derive(Debug)]
pub(crate) struct Pipeline {
pub struct Pipeline {
blit: Option<msaa::Blit>,
vertex_buffer: Buffer<Vertex2D>,
index_buffer: Buffer<u32>,
index_strides: Vec<u32>,
pipelines: PipelineList,
}
/// Supported triangle pipelines for different fills.
pub(crate) struct PipelineList {
solid: solid::Pipeline,
/// Gradients are currently not supported on WASM targets due to their need of storage buffers.
#[cfg(not(target_arch = "wasm32"))]
gradient: gradient::Pipeline,
}
impl fmt::Debug for PipelineList {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
f.debug_struct("TrianglePipelines").finish()
}
}
impl PipelineList {
/// Resets each pipeline's buffers.
fn clear(&mut self) {
self.solid.buffer.clear();
self.gradient.uniform_buffer.clear();
self.gradient.storage_buffer.clear();
}
/// Writes the contents of each pipeline's CPU buffer to the GPU, resizing the GPU buffer
/// beforehand if necessary.
fn write(
&mut self,
device: &wgpu::Device,
staging_belt: &mut wgpu::util::StagingBelt,
encoder: &mut wgpu::CommandEncoder,
) {
self.solid.write(device, staging_belt, encoder);
self.gradient.write(device, staging_belt, encoder);
}
}
impl Pipeline {
/// Creates supported pipelines, listed in [TrianglePipelines].
pub fn new(
device: &wgpu::Device,
format: wgpu::TextureFormat,
@ -66,25 +29,19 @@ impl Pipeline {
) -> Pipeline {
Pipeline {
blit: antialiasing.map(|a| msaa::Blit::new(device, format, a)),
vertex_buffer: Buffer::new(
device,
"iced_wgpu::triangle vertex buffer",
wgpu::BufferUsages::VERTEX | wgpu::BufferUsages::COPY_DST,
),
index_buffer: Buffer::new(
device,
"iced_wgpu::triangle vertex buffer",
wgpu::BufferUsages::INDEX | wgpu::BufferUsages::COPY_DST,
),
index_strides: Vec::new(),
pipelines: PipelineList {
solid: solid::Pipeline::new(device, format, antialiasing),
gradient: gradient::Pipeline::new(device, format, antialiasing),
},
solid: solid::Pipeline::new(device, format, antialiasing),
#[cfg(not(target_arch = "wasm32"))]
gradient: gradient::Pipeline::new(device, format, antialiasing),
}
}
/// Draws the contents of the current layer's meshes to the [target].
pub fn draw(
&mut self,
device: &wgpu::Device,
@ -96,67 +53,185 @@ impl Pipeline {
scale_factor: f32,
meshes: &[Mesh<'_>],
) {
//count the total amount of vertices & indices we need to handle
let (total_vertices, total_indices) = mesh::attribute_count_of(meshes);
// Count the total amount of vertices & indices we need to handle
let count = mesh::attribute_count_of(meshes);
// Then we ensure the current attribute buffers are big enough, resizing if necessary.
// We are not currently using the return value of these functions as we have no system in
// place to calculate mesh diff, or to know whether or not that would be more performant for
// the majority of use cases. Therefore we will write GPU data every frame (for now).
let _ = self.index_buffer.resize(device, count.indices);
let _ = self.solid.vertices.resize(device, count.solid_vertices);
//We are not currently using the return value of these functions as we have no system in
//place to calculate mesh diff, or to know whether or not that would be more performant for
//the majority of use cases. Therefore we will write GPU data every frame (for now).
let _ = self.vertex_buffer.resize(device, total_vertices);
let _ = self.index_buffer.resize(device, total_indices);
#[cfg(not(target_arch = "wasm32"))]
let _ = self
.gradient
.vertices
.resize(device, count.gradient_vertices);
//prepare dynamic buffers & data store for writing
// Prepare dynamic buffers & data store for writing
self.index_strides.clear();
self.pipelines.clear();
self.solid.vertices.clear();
self.solid.uniforms.clear();
let mut vertex_offset = 0;
#[cfg(not(target_arch = "wasm32"))]
{
self.gradient.uniforms.clear();
self.gradient.vertices.clear();
self.gradient.storage.clear();
}
let mut solid_vertex_offset = 0;
let mut index_offset = 0;
for mesh in meshes {
let transform = transformation
* Transformation::translate(mesh.origin.x, mesh.origin.y);
#[cfg(not(target_arch = "wasm32"))]
let mut gradient_vertex_offset = 0;
//write to both buffers
let new_vertex_offset = self.vertex_buffer.write(
device,
staging_belt,
encoder,
vertex_offset,
&mesh.buffers.vertices,
);
for mesh in meshes {
let origin = mesh.origin();
let indices = mesh.indices();
let transform =
transformation * Transformation::translate(origin.x, origin.y);
let new_index_offset = self.index_buffer.write(
device,
staging_belt,
encoder,
index_offset,
&mesh.buffers.indices,
indices,
);
vertex_offset += new_vertex_offset;
index_offset += new_index_offset;
self.index_strides.push(mesh.buffers.indices.len() as u32);
self.index_strides.push(indices.len() as u32);
//push uniform data to CPU buffers
match mesh.style {
triangle::Style::Solid(color) => {
self.pipelines.solid.push(transform, color);
match mesh {
Mesh::Solid { buffers, .. } => {
self.solid.uniforms.push(&solid::Uniforms::new(transform));
let written_bytes = self.solid.vertices.write(
device,
staging_belt,
encoder,
solid_vertex_offset,
&buffers.vertices,
);
solid_vertex_offset += written_bytes;
}
triangle::Style::Gradient(gradient) => {
self.pipelines.gradient.push(transform, gradient);
#[cfg(not(target_arch = "wasm32"))]
Mesh::Gradient {
buffers, gradient, ..
} => {
let written_bytes = self.gradient.vertices.write(
device,
staging_belt,
encoder,
gradient_vertex_offset,
&buffers.vertices,
);
gradient_vertex_offset += written_bytes;
match gradient {
iced_graphics::Gradient::Linear(linear) => {
use glam::{IVec4, Vec4};
let start_offset = self.gradient.color_stop_offset;
let end_offset = (linear.color_stops.len() as i32)
+ start_offset
- 1;
self.gradient.uniforms.push(&gradient::Uniforms {
transform: transform.into(),
direction: Vec4::new(
linear.start.x,
linear.start.y,
linear.end.x,
linear.end.y,
),
stop_range: IVec4::new(
start_offset,
end_offset,
0,
0,
),
});
self.gradient.color_stop_offset = end_offset + 1;
let stops: Vec<gradient::ColorStop> = linear
.color_stops
.iter()
.map(|stop| {
let [r, g, b, a] = stop.color.into_linear();
gradient::ColorStop {
offset: stop.offset,
color: Vec4::new(r, g, b, a),
}
})
.collect();
self.gradient
.color_stops_pending_write
.color_stops
.extend(stops);
}
}
}
#[cfg(target_arch = "wasm32")]
Mesh::Gradient { .. } => {}
}
}
//write uniform data to GPU
self.pipelines.write(device, staging_belt, encoder);
// Write uniform data to GPU
if count.solid_vertices > 0 {
let uniforms_resized = self.solid.uniforms.resize(device);
//configure the render pass now that the data is uploaded to the GPU
if uniforms_resized {
self.solid.bind_group = solid::Pipeline::bind_group(
device,
self.solid.uniforms.raw(),
&self.solid.bind_group_layout,
)
}
self.solid.uniforms.write(device, staging_belt, encoder);
}
#[cfg(not(target_arch = "wasm32"))]
if count.gradient_vertices > 0 {
// First write the pending color stops to the CPU buffer
self.gradient
.storage
.push(&self.gradient.color_stops_pending_write);
// Resize buffers if needed
let uniforms_resized = self.gradient.uniforms.resize(device);
let storage_resized = self.gradient.storage.resize(device);
if uniforms_resized || storage_resized {
self.gradient.bind_group = gradient::Pipeline::bind_group(
device,
self.gradient.uniforms.raw(),
self.gradient.storage.raw(),
&self.gradient.bind_group_layout,
);
}
// Write to GPU
self.gradient.uniforms.write(device, staging_belt, encoder);
self.gradient.storage.write(device, staging_belt, encoder);
// Cleanup
self.gradient.color_stop_offset = 0;
self.gradient.color_stops_pending_write.color_stops.clear();
}
// Configure render pass
{
//configure antialiasing pass
let (attachment, resolve_target, load) = if let Some(blit) =
&mut self.blit
{
@ -186,11 +261,12 @@ impl Pipeline {
});
let mut num_solids = 0;
#[cfg(not(target_arch = "wasm32"))]
let mut num_gradients = 0;
let mut last_is_solid = None;
for (index, mesh) in meshes.iter().enumerate() {
let clip_bounds = (mesh.clip_bounds * scale_factor).snap();
let clip_bounds = (mesh.clip_bounds() * scale_factor).snap();
render_pass.set_scissor_rect(
clip_bounds.x,
@ -199,46 +275,57 @@ impl Pipeline {
clip_bounds.height,
);
match mesh.style {
triangle::Style::Solid(_) => {
match mesh {
Mesh::Solid { .. } => {
if !last_is_solid.unwrap_or(false) {
self.pipelines
.solid
.set_render_pass_pipeline(&mut render_pass);
render_pass.set_pipeline(&self.solid.pipeline);
last_is_solid = Some(true);
}
self.pipelines.solid.configure_render_pass(
&mut render_pass,
num_solids,
render_pass.set_bind_group(
0,
&self.solid.bind_group,
&[self.solid.uniforms.offset_at_index(num_solids)],
);
render_pass.set_vertex_buffer(
0,
self.solid.vertices.slice_from_index(num_solids),
);
num_solids += 1;
}
triangle::Style::Gradient(_) => {
#[cfg(not(target_arch = "wasm32"))]
Mesh::Gradient { .. } => {
if last_is_solid.unwrap_or(true) {
self.pipelines
.gradient
.set_render_pass_pipeline(&mut render_pass);
render_pass.set_pipeline(&self.gradient.pipeline);
last_is_solid = Some(false);
}
self.pipelines.gradient.configure_render_pass(
&mut render_pass,
num_gradients,
render_pass.set_bind_group(
0,
&self.gradient.bind_group,
&[self
.gradient
.uniforms
.offset_at_index(num_gradients)],
);
render_pass.set_vertex_buffer(
0,
self.gradient
.vertices
.slice_from_index(num_gradients),
);
num_gradients += 1;
}
#[cfg(target_arch = "wasm32")]
Mesh::Gradient { .. } => {}
};
render_pass.set_vertex_buffer(
0,
self.vertex_buffer.slice_from_index(index),
);
render_pass.set_index_buffer(
self.index_buffer.slice_from_index(index),
wgpu::IndexFormat::Uint32,
@ -252,7 +339,6 @@ impl Pipeline {
}
}
self.vertex_buffer.clear();
self.index_buffer.clear();
if let Some(blit) = &mut self.blit {
@ -261,19 +347,6 @@ impl Pipeline {
}
}
// Utility functions shared by the individual pipelines below.
/// Returns the vertex buffer layout shared by the triangle pipelines:
/// tightly-packed `Vertex2D` values exposing a single two-component
/// float position at shader location 0.
fn vertex_buffer_layout<'a>() -> wgpu::VertexBufferLayout<'a> {
    const POSITION: wgpu::VertexAttribute = wgpu::VertexAttribute {
        format: wgpu::VertexFormat::Float32x2,
        offset: 0,
        shader_location: 0,
    };

    wgpu::VertexBufferLayout {
        array_stride: std::mem::size_of::<Vertex2D>() as u64,
        step_mode: wgpu::VertexStepMode::Vertex,
        attributes: &[POSITION],
    }
}
fn fragment_target(
texture_format: wgpu::TextureFormat,
) -> Option<wgpu::ColorTargetState> {
@ -301,3 +374,360 @@ fn multisample_state(
alpha_to_coverage_enabled: false,
}
}
mod solid {
    use crate::buffer::dynamic;
    use crate::buffer::r#static::Buffer;
    use crate::settings;
    use crate::triangle;

    use encase::ShaderType;
    use iced_graphics::Transformation;

    /// Render pipeline state for drawing solid-color triangle meshes.
    #[derive(Debug)]
    pub struct Pipeline {
        pub pipeline: wgpu::RenderPipeline,
        /// Vertex storage for solid meshes (position + color per vertex).
        pub vertices: Buffer<triangle::ColoredVertex2D>,
        /// Per-mesh uniforms, bound with a dynamic offset so one buffer
        /// serves every solid mesh in the frame.
        pub uniforms: dynamic::Buffer<Uniforms>,
        pub bind_group_layout: wgpu::BindGroupLayout,
        pub bind_group: wgpu::BindGroup,
    }

    /// Uniform data uploaded once per solid mesh.
    #[derive(Debug, Clone, Copy, ShaderType)]
    pub struct Uniforms {
        transform: glam::Mat4,
    }

    impl Uniforms {
        /// Packs the mesh transformation into shader-ready uniform data.
        pub fn new(transform: Transformation) -> Self {
            Self {
                transform: transform.into(),
            }
        }
    }

    impl Pipeline {
        /// Creates a new [SolidPipeline] using `solid.wgsl` shader.
        pub fn new(
            device: &wgpu::Device,
            format: wgpu::TextureFormat,
            antialiasing: Option<settings::Antialiasing>,
        ) -> Self {
            let vertices = Buffer::new(
                device,
                "iced_wgpu::triangle::solid vertex buffer",
                wgpu::BufferUsages::VERTEX | wgpu::BufferUsages::COPY_DST,
            );

            let uniforms = dynamic::Buffer::uniform(
                device,
                "iced_wgpu::triangle::solid uniforms",
            );

            // A single dynamically-offset uniform binding; each draw call
            // selects its own `Uniforms` slice via the dynamic offset.
            let bind_group_layout = device.create_bind_group_layout(
                &wgpu::BindGroupLayoutDescriptor {
                    label: Some("iced_wgpu::triangle::solid bind group layout"),
                    entries: &[wgpu::BindGroupLayoutEntry {
                        binding: 0,
                        visibility: wgpu::ShaderStages::VERTEX_FRAGMENT,
                        ty: wgpu::BindingType::Buffer {
                            ty: wgpu::BufferBindingType::Uniform,
                            has_dynamic_offset: true,
                            min_binding_size: Some(Uniforms::min_size()),
                        },
                        count: None,
                    }],
                },
            );

            let bind_group =
                Self::bind_group(device, uniforms.raw(), &bind_group_layout);

            let layout = device.create_pipeline_layout(
                &wgpu::PipelineLayoutDescriptor {
                    label: Some("iced_wgpu::triangle::solid pipeline layout"),
                    bind_group_layouts: &[&bind_group_layout],
                    push_constant_ranges: &[],
                },
            );

            let shader =
                device.create_shader_module(wgpu::ShaderModuleDescriptor {
                    label: Some(
                        "iced_wgpu::triangle::solid create shader module",
                    ),
                    source: wgpu::ShaderSource::Wgsl(
                        std::borrow::Cow::Borrowed(include_str!(
                            "shader/solid.wgsl"
                        )),
                    ),
                });

            let pipeline = device.create_render_pipeline(
                &wgpu::RenderPipelineDescriptor {
                    label: Some("iced_wgpu::triangle::solid pipeline"),
                    layout: Some(&layout),
                    vertex: wgpu::VertexState {
                        module: &shader,
                        entry_point: "vs_main",
                        buffers: &[wgpu::VertexBufferLayout {
                            array_stride: std::mem::size_of::<
                                triangle::ColoredVertex2D,
                            >()
                                as u64,
                            step_mode: wgpu::VertexStepMode::Vertex,
                            attributes: &wgpu::vertex_attr_array!(
                                // Position
                                0 => Float32x2,
                                // Color
                                1 => Float32x4,
                            ),
                        }],
                    },
                    fragment: Some(wgpu::FragmentState {
                        module: &shader,
                        entry_point: "fs_main",
                        targets: &[triangle::fragment_target(format)],
                    }),
                    primitive: triangle::primitive_state(),
                    depth_stencil: None,
                    multisample: triangle::multisample_state(antialiasing),
                    multiview: None,
                },
            );

            Self {
                pipeline,
                vertices,
                uniforms,
                bind_group_layout,
                bind_group,
            }
        }

        /// Creates the bind group exposing `buffer` as the uniform binding.
        ///
        /// Called again whenever the uniform buffer is resized, since the
        /// old bind group would still point at the old allocation.
        pub fn bind_group(
            device: &wgpu::Device,
            buffer: &wgpu::Buffer,
            layout: &wgpu::BindGroupLayout,
        ) -> wgpu::BindGroup {
            device.create_bind_group(&wgpu::BindGroupDescriptor {
                label: Some("iced_wgpu::triangle::solid bind group"),
                layout,
                entries: &[wgpu::BindGroupEntry {
                    binding: 0,
                    resource: wgpu::BindingResource::Buffer(
                        wgpu::BufferBinding {
                            buffer,
                            offset: 0,
                            // Bind one `Uniforms` worth of data; the dynamic
                            // offset picks which mesh's uniforms are visible.
                            size: Some(Uniforms::min_size()),
                        },
                    ),
                }],
            })
        }
    }
}
#[cfg(not(target_arch = "wasm32"))]
mod gradient {
    use crate::buffer::dynamic;
    use crate::buffer::r#static::Buffer;
    use crate::settings;
    use crate::triangle;

    use encase::ShaderType;
    use glam::{IVec4, Vec4};
    use iced_graphics::triangle::Vertex2D;

    /// Render pipeline state for drawing linear-gradient triangle meshes.
    #[derive(Debug)]
    pub struct Pipeline {
        pub pipeline: wgpu::RenderPipeline,
        /// Vertex storage for gradient meshes (position only; color is
        /// computed in the fragment shader from the gradient uniforms).
        pub vertices: Buffer<Vertex2D>,
        /// Per-mesh uniforms, bound with a dynamic offset.
        pub uniforms: dynamic::Buffer<Uniforms>,
        /// Storage buffer holding the color stops of every gradient drawn
        /// this frame, indexed via `Uniforms::stop_range`.
        pub storage: dynamic::Buffer<Storage>,
        /// Running index of the next free color stop slot; reset to 0 after
        /// each frame's data is written to the GPU.
        pub color_stop_offset: i32,
        // Color stops are accumulated here and written in a single push,
        // or else each push would be padded to 256 bytes and leave gaps
        // in the storage buffer.
        pub color_stops_pending_write: Storage,
        pub bind_group_layout: wgpu::BindGroupLayout,
        pub bind_group: wgpu::BindGroup,
    }

    /// Uniform data uploaded once per gradient mesh.
    #[derive(Debug, ShaderType)]
    pub struct Uniforms {
        pub transform: glam::Mat4,
        /// Gradient direction: xy = start point, zw = end point.
        pub direction: Vec4,
        /// x = first color stop index, y = last color stop index,
        /// zw = padding.
        pub stop_range: IVec4,
    }

    /// A single gradient color stop, in linear RGBA.
    #[derive(Debug, ShaderType)]
    pub struct ColorStop {
        pub color: Vec4,
        pub offset: f32,
    }

    /// Runtime-sized storage buffer contents: all color stops of the frame.
    #[derive(Debug, ShaderType)]
    pub struct Storage {
        #[size(runtime)]
        pub color_stops: Vec<ColorStop>,
    }

    impl Pipeline {
        /// Creates a new [GradientPipeline] using `gradient.wgsl` shader.
        pub(super) fn new(
            device: &wgpu::Device,
            format: wgpu::TextureFormat,
            antialiasing: Option<settings::Antialiasing>,
        ) -> Self {
            let vertices = Buffer::new(
                device,
                "iced_wgpu::triangle::gradient vertex buffer",
                wgpu::BufferUsages::VERTEX | wgpu::BufferUsages::COPY_DST,
            );

            let uniforms = dynamic::Buffer::uniform(
                device,
                "iced_wgpu::triangle::gradient uniforms",
            );

            // Note: with a WASM target storage buffers are not supported.
            // Will need to use UBOs & a statically sized array (e.g. like
            // the 32-sized array on the OpenGL side right now) to make
            // gradients work there — hence the module's cfg gate.
            let storage = dynamic::Buffer::storage(
                device,
                "iced_wgpu::triangle::gradient storage",
            );

            let bind_group_layout = device.create_bind_group_layout(
                &wgpu::BindGroupLayoutDescriptor {
                    label: Some(
                        "iced_wgpu::triangle::gradient bind group layout",
                    ),
                    entries: &[
                        // Binding 0: per-mesh uniforms, dynamically offset.
                        wgpu::BindGroupLayoutEntry {
                            binding: 0,
                            visibility: wgpu::ShaderStages::VERTEX_FRAGMENT,
                            ty: wgpu::BindingType::Buffer {
                                ty: wgpu::BufferBindingType::Uniform,
                                has_dynamic_offset: true,
                                min_binding_size: Some(Uniforms::min_size()),
                            },
                            count: None,
                        },
                        // Binding 1: read-only color stop storage.
                        wgpu::BindGroupLayoutEntry {
                            binding: 1,
                            visibility: wgpu::ShaderStages::FRAGMENT,
                            ty: wgpu::BindingType::Buffer {
                                ty: wgpu::BufferBindingType::Storage {
                                    read_only: true,
                                },
                                has_dynamic_offset: false,
                                min_binding_size: Some(Storage::min_size()),
                            },
                            count: None,
                        },
                    ],
                },
            );

            let bind_group = Pipeline::bind_group(
                device,
                uniforms.raw(),
                storage.raw(),
                &bind_group_layout,
            );

            let layout = device.create_pipeline_layout(
                &wgpu::PipelineLayoutDescriptor {
                    label: Some(
                        "iced_wgpu::triangle::gradient pipeline layout",
                    ),
                    bind_group_layouts: &[&bind_group_layout],
                    push_constant_ranges: &[],
                },
            );

            let shader =
                device.create_shader_module(wgpu::ShaderModuleDescriptor {
                    label: Some(
                        "iced_wgpu::triangle::gradient create shader module",
                    ),
                    source: wgpu::ShaderSource::Wgsl(
                        std::borrow::Cow::Borrowed(include_str!(
                            "shader/gradient.wgsl"
                        )),
                    ),
                });

            let pipeline = device.create_render_pipeline(
                &wgpu::RenderPipelineDescriptor {
                    label: Some("iced_wgpu::triangle::gradient pipeline"),
                    layout: Some(&layout),
                    vertex: wgpu::VertexState {
                        module: &shader,
                        entry_point: "vs_main",
                        buffers: &[wgpu::VertexBufferLayout {
                            array_stride: std::mem::size_of::<Vertex2D>()
                                as u64,
                            step_mode: wgpu::VertexStepMode::Vertex,
                            attributes: &wgpu::vertex_attr_array!(
                                // Position
                                0 => Float32x2,
                            ),
                        }],
                    },
                    fragment: Some(wgpu::FragmentState {
                        module: &shader,
                        entry_point: "fs_main",
                        targets: &[triangle::fragment_target(format)],
                    }),
                    primitive: triangle::primitive_state(),
                    depth_stencil: None,
                    multisample: triangle::multisample_state(antialiasing),
                    multiview: None,
                },
            );

            Self {
                pipeline,
                vertices,
                uniforms,
                storage,
                color_stop_offset: 0,
                color_stops_pending_write: Storage {
                    color_stops: vec![],
                },
                bind_group_layout,
                bind_group,
            }
        }

        /// Creates the bind group tying the uniform and storage buffers to
        /// the layout above.
        ///
        /// Recreated whenever either buffer is resized, since the old bind
        /// group would still reference the old allocations.
        pub fn bind_group(
            device: &wgpu::Device,
            uniform_buffer: &wgpu::Buffer,
            storage_buffer: &wgpu::Buffer,
            layout: &wgpu::BindGroupLayout,
        ) -> wgpu::BindGroup {
            device.create_bind_group(&wgpu::BindGroupDescriptor {
                label: Some("iced_wgpu::triangle::gradient bind group"),
                layout,
                entries: &[
                    wgpu::BindGroupEntry {
                        binding: 0,
                        resource: wgpu::BindingResource::Buffer(
                            wgpu::BufferBinding {
                                buffer: uniform_buffer,
                                offset: 0,
                                // One `Uniforms` worth of data; the dynamic
                                // offset selects the mesh being drawn.
                                size: Some(Uniforms::min_size()),
                            },
                        ),
                    },
                    wgpu::BindGroupEntry {
                        binding: 1,
                        resource: storage_buffer.as_entire_binding(),
                    },
                ],
            })
        }
    }
}

View file

@ -1,268 +0,0 @@
use crate::buffer::dynamic;
use crate::settings;
use crate::triangle;
use encase::ShaderType;
use glam::{IVec4, Vec4};
use iced_graphics::gradient::Gradient;
use iced_graphics::Transformation;
/// Render pipeline state for drawing linear-gradient triangle meshes.
pub struct Pipeline {
    pipeline: wgpu::RenderPipeline,
    /// Per-mesh uniforms, bound with a dynamic offset.
    pub(super) uniform_buffer: dynamic::Buffer<Uniforms>,
    /// Storage buffer holding the color stops of every gradient drawn this
    /// frame, indexed via `Uniforms::stop_range`.
    pub(super) storage_buffer: dynamic::Buffer<Storage>,
    /// Running index of the next free color stop slot; reset after each
    /// `write`.
    color_stop_offset: i32,
    // Color stops are accumulated here and written in a single push,
    // or else each push would be padded to 256 bytes and leave gaps
    // in the storage buffer.
    color_stops_pending_write: Storage,
    bind_group_layout: wgpu::BindGroupLayout,
    bind_group: wgpu::BindGroup,
}
/// Uniform data uploaded once per gradient mesh.
#[derive(Debug, ShaderType)]
pub(super) struct Uniforms {
    transform: glam::Mat4,
    /// Gradient direction: xy = start point, zw = end point.
    direction: Vec4,
    /// x = first color stop index, y = last color stop index, zw = padding.
    stop_range: IVec4,
}

/// A single gradient color stop, in linear RGBA.
#[derive(Debug, ShaderType)]
pub(super) struct ColorStop {
    color: Vec4,
    offset: f32,
}

/// Runtime-sized storage buffer contents: all color stops of the frame.
#[derive(ShaderType)]
pub(super) struct Storage {
    #[size(runtime)]
    pub color_stops: Vec<ColorStop>,
}
impl Pipeline {
    /// Creates a new [GradientPipeline] using `gradient.wgsl` shader.
    pub(super) fn new(
        device: &wgpu::Device,
        format: wgpu::TextureFormat,
        antialiasing: Option<settings::Antialiasing>,
    ) -> Self {
        let uniform_buffer = dynamic::Buffer::uniform(
            device,
            "iced_wgpu::triangle::gradient uniforms",
        );

        // Note: with a WASM target storage buffers are not supported. Will
        // need to use UBOs & a statically sized array (e.g. like the
        // 32-sized array on the OpenGL side right now) to make gradients
        // work there.
        let storage_buffer = dynamic::Buffer::storage(
            device,
            "iced_wgpu::triangle::gradient storage",
        );

        let bind_group_layout =
            device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
                label: Some("iced_wgpu::triangle::gradient bind group layout"),
                entries: &[
                    // Binding 0: per-mesh uniforms, dynamically offset.
                    wgpu::BindGroupLayoutEntry {
                        binding: 0,
                        visibility: wgpu::ShaderStages::VERTEX_FRAGMENT,
                        ty: wgpu::BindingType::Buffer {
                            ty: wgpu::BufferBindingType::Uniform,
                            has_dynamic_offset: true,
                            min_binding_size: Some(Uniforms::min_size()),
                        },
                        count: None,
                    },
                    // Binding 1: read-only color stop storage.
                    wgpu::BindGroupLayoutEntry {
                        binding: 1,
                        visibility: wgpu::ShaderStages::FRAGMENT,
                        ty: wgpu::BindingType::Buffer {
                            ty: wgpu::BufferBindingType::Storage {
                                read_only: true,
                            },
                            has_dynamic_offset: false,
                            min_binding_size: Some(Storage::min_size()),
                        },
                        count: None,
                    },
                ],
            });

        let bind_group = Pipeline::bind_group(
            device,
            uniform_buffer.raw(),
            storage_buffer.raw(),
            &bind_group_layout,
        );

        let layout =
            device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
                label: Some("iced_wgpu::triangle::gradient pipeline layout"),
                bind_group_layouts: &[&bind_group_layout],
                push_constant_ranges: &[],
            });

        let shader =
            device.create_shader_module(wgpu::ShaderModuleDescriptor {
                label: Some(
                    "iced_wgpu::triangle::gradient create shader module",
                ),
                source: wgpu::ShaderSource::Wgsl(std::borrow::Cow::Borrowed(
                    include_str!("../shader/gradient.wgsl"),
                )),
            });

        let pipeline =
            device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
                label: Some("iced_wgpu::triangle::gradient pipeline"),
                layout: Some(&layout),
                vertex: wgpu::VertexState {
                    module: &shader,
                    entry_point: "vs_main",
                    buffers: &[triangle::vertex_buffer_layout()],
                },
                fragment: Some(wgpu::FragmentState {
                    module: &shader,
                    entry_point: "fs_main",
                    targets: &[triangle::fragment_target(format)],
                }),
                primitive: triangle::primitive_state(),
                depth_stencil: None,
                multisample: triangle::multisample_state(antialiasing),
                multiview: None,
            });

        Self {
            pipeline,
            uniform_buffer,
            storage_buffer,
            color_stop_offset: 0,
            color_stops_pending_write: Storage {
                color_stops: vec![],
            },
            bind_group_layout,
            bind_group,
        }
    }

    /// Pushes a new gradient uniform to the CPU buffer.
    ///
    /// Records the gradient's slice of the shared color stop list in
    /// `stop_range` and queues the stops for the next `write`.
    pub fn push(&mut self, transform: Transformation, gradient: &Gradient) {
        match gradient {
            Gradient::Linear(linear) => {
                // This gradient's stops occupy [start_offset, end_offset]
                // in the frame-wide color stop storage.
                let start_offset = self.color_stop_offset;
                let end_offset =
                    (linear.color_stops.len() as i32) + start_offset - 1;

                self.uniform_buffer.push(&Uniforms {
                    transform: transform.into(),
                    direction: Vec4::new(
                        linear.start.x,
                        linear.start.y,
                        linear.end.x,
                        linear.end.y,
                    ),
                    stop_range: IVec4::new(start_offset, end_offset, 0, 0),
                });

                self.color_stop_offset = end_offset + 1;

                // Convert stops to linear RGBA for the shader.
                let stops: Vec<ColorStop> = linear
                    .color_stops
                    .iter()
                    .map(|stop| {
                        let [r, g, b, a] = stop.color.into_linear();
                        ColorStop {
                            offset: stop.offset,
                            color: Vec4::new(r, g, b, a),
                        }
                    })
                    .collect();

                self.color_stops_pending_write.color_stops.extend(stops);
            }
        }
    }

    /// Creates the bind group tying the uniform and storage buffers to the
    /// layout; recreated whenever either buffer is resized.
    fn bind_group(
        device: &wgpu::Device,
        uniform_buffer: &wgpu::Buffer,
        storage_buffer: &wgpu::Buffer,
        layout: &wgpu::BindGroupLayout,
    ) -> wgpu::BindGroup {
        device.create_bind_group(&wgpu::BindGroupDescriptor {
            label: Some("iced_wgpu::triangle::gradient bind group"),
            layout,
            entries: &[
                wgpu::BindGroupEntry {
                    binding: 0,
                    resource: wgpu::BindingResource::Buffer(
                        wgpu::BufferBinding {
                            buffer: uniform_buffer,
                            offset: 0,
                            // One `Uniforms` worth of data; the dynamic
                            // offset selects the mesh being drawn.
                            size: Some(Uniforms::min_size()),
                        },
                    ),
                },
                wgpu::BindGroupEntry {
                    binding: 1,
                    resource: storage_buffer.as_entire_binding(),
                },
            ],
        })
    }

    /// Writes the contents of the gradient CPU buffer to the GPU buffer, resizing the GPU buffer
    /// beforehand if necessary.
    pub fn write(
        &mut self,
        device: &wgpu::Device,
        staging_belt: &mut wgpu::util::StagingBelt,
        encoder: &mut wgpu::CommandEncoder,
    ) {
        // First write the pending color stops to the CPU buffer, so they
        // land as one contiguous run (see `color_stops_pending_write`).
        self.storage_buffer.push(&self.color_stops_pending_write);

        // Resize buffers if needed.
        let uniforms_resized = self.uniform_buffer.resize(device);
        let storage_resized = self.storage_buffer.resize(device);

        if uniforms_resized || storage_resized {
            // Recreate bind groups if any buffers were resized; the old
            // bind group still references the old allocations.
            self.bind_group = Pipeline::bind_group(
                device,
                self.uniform_buffer.raw(),
                self.storage_buffer.raw(),
                &self.bind_group_layout,
            );
        }

        // Write to GPU.
        self.uniform_buffer.write(device, staging_belt, encoder);
        self.storage_buffer.write(device, staging_belt, encoder);

        // Cleanup for the next frame.
        self.color_stop_offset = 0;
        self.color_stops_pending_write.color_stops.clear();
    }

    /// Makes this pipeline's render pipeline current on `render_pass`.
    pub fn set_render_pass_pipeline<'a>(
        &'a self,
        render_pass: &mut wgpu::RenderPass<'a>,
    ) {
        render_pass.set_pipeline(&self.pipeline);
    }

    /// Configures the current render pass to draw the gradient at its offset stored in the
    /// [DynamicBuffer] at [index].
    pub fn configure_render_pass<'a>(
        &'a self,
        render_pass: &mut wgpu::RenderPass<'a>,
        count: usize,
    ) {
        render_pass.set_bind_group(
            0,
            &self.bind_group,
            // Dynamic offset selecting the `count`-th mesh's uniforms.
            &[self.uniform_buffer.offset_at_index(count)],
        )
    }
}

View file

@ -1,170 +0,0 @@
use crate::buffer::dynamic;
use crate::triangle;
use crate::{settings, Color};
use encase::ShaderType;
use glam::Vec4;
use iced_graphics::Transformation;
/// Render pipeline state for drawing solid-color triangle meshes.
pub struct Pipeline {
    pipeline: wgpu::RenderPipeline,
    /// Per-mesh uniforms, bound with a dynamic offset.
    pub(super) buffer: dynamic::Buffer<Uniforms>,
    bind_group_layout: wgpu::BindGroupLayout,
    bind_group: wgpu::BindGroup,
}
/// Uniform data uploaded once per solid mesh: its transform and its color
/// (stored as linear RGBA).
#[derive(Debug, Clone, Copy, ShaderType)]
pub(super) struct Uniforms {
    transform: glam::Mat4,
    color: Vec4,
}
impl Uniforms {
    /// Builds per-mesh uniform data from a transformation and a solid fill
    /// color, converting the color to linear RGBA components first.
    pub fn new(transform: Transformation, color: Color) -> Self {
        let linear_rgba = color.into_linear();

        Self {
            transform: transform.into(),
            color: Vec4::from(linear_rgba),
        }
    }
}
impl Pipeline {
    /// Creates a new [SolidPipeline] using `solid.wgsl` shader.
    pub fn new(
        device: &wgpu::Device,
        format: wgpu::TextureFormat,
        antialiasing: Option<settings::Antialiasing>,
    ) -> Self {
        let buffer = dynamic::Buffer::uniform(
            device,
            "iced_wgpu::triangle::solid uniforms",
        );

        // A single dynamically-offset uniform binding; each draw call
        // selects its own `Uniforms` slice via the dynamic offset.
        let bind_group_layout =
            device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
                label: Some("iced_wgpu::triangle::solid bind group layout"),
                entries: &[wgpu::BindGroupLayoutEntry {
                    binding: 0,
                    visibility: wgpu::ShaderStages::VERTEX_FRAGMENT,
                    ty: wgpu::BindingType::Buffer {
                        ty: wgpu::BufferBindingType::Uniform,
                        has_dynamic_offset: true,
                        min_binding_size: Some(Uniforms::min_size()),
                    },
                    count: None,
                }],
            });

        let bind_group =
            Pipeline::bind_group(device, buffer.raw(), &bind_group_layout);

        let layout =
            device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
                label: Some("iced_wgpu::triangle::solid pipeline layout"),
                bind_group_layouts: &[&bind_group_layout],
                push_constant_ranges: &[],
            });

        let shader =
            device.create_shader_module(wgpu::ShaderModuleDescriptor {
                label: Some("iced_wgpu::triangle::solid create shader module"),
                source: wgpu::ShaderSource::Wgsl(std::borrow::Cow::Borrowed(
                    include_str!("../shader/solid.wgsl"),
                )),
            });

        let pipeline =
            device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
                label: Some("iced_wgpu::triangle::solid pipeline"),
                layout: Some(&layout),
                vertex: wgpu::VertexState {
                    module: &shader,
                    entry_point: "vs_main",
                    buffers: &[triangle::vertex_buffer_layout()],
                },
                fragment: Some(wgpu::FragmentState {
                    module: &shader,
                    entry_point: "fs_main",
                    targets: &[triangle::fragment_target(format)],
                }),
                primitive: triangle::primitive_state(),
                depth_stencil: None,
                multisample: triangle::multisample_state(antialiasing),
                multiview: None,
            });

        Self {
            pipeline,
            buffer,
            bind_group_layout,
            bind_group,
        }
    }

    /// Creates the bind group exposing `buffer` as the uniform binding;
    /// recreated whenever the uniform buffer is resized.
    fn bind_group(
        device: &wgpu::Device,
        buffer: &wgpu::Buffer,
        layout: &wgpu::BindGroupLayout,
    ) -> wgpu::BindGroup {
        device.create_bind_group(&wgpu::BindGroupDescriptor {
            label: Some("iced_wgpu::triangle::solid bind group"),
            layout,
            entries: &[wgpu::BindGroupEntry {
                binding: 0,
                resource: wgpu::BindingResource::Buffer(wgpu::BufferBinding {
                    buffer,
                    offset: 0,
                    // One `Uniforms` worth of data; the dynamic offset
                    // selects the mesh being drawn.
                    size: Some(Uniforms::min_size()),
                }),
            }],
        })
    }

    /// Pushes a new solid uniform to the CPU buffer.
    pub fn push(&mut self, transform: Transformation, color: &Color) {
        self.buffer.push(&Uniforms::new(transform, *color));
    }

    /// Writes the contents of the solid CPU buffer to the GPU buffer, resizing the GPU buffer
    /// beforehand if necessary.
    pub fn write(
        &mut self,
        device: &wgpu::Device,
        staging_belt: &mut wgpu::util::StagingBelt,
        encoder: &mut wgpu::CommandEncoder,
    ) {
        let uniforms_resized = self.buffer.resize(device);

        if uniforms_resized {
            // The old bind group still references the old allocation.
            self.bind_group = Pipeline::bind_group(
                device,
                self.buffer.raw(),
                &self.bind_group_layout,
            )
        }

        self.buffer.write(device, staging_belt, encoder);
    }

    /// Makes this pipeline's render pipeline current on `render_pass`.
    pub fn set_render_pass_pipeline<'a>(
        &'a self,
        render_pass: &mut wgpu::RenderPass<'a>,
    ) {
        render_pass.set_pipeline(&self.pipeline);
    }

    /// Configures the current render pass to draw the solid at its offset stored in the
    /// [DynamicBuffer] at [index].
    pub fn configure_render_pass<'a>(
        &'a self,
        render_pass: &mut wgpu::RenderPass<'a>,
        count: usize,
    ) {
        render_pass.set_bind_group(
            0,
            &self.bind_group,
            // Dynamic offset selecting the `count`-th mesh's uniforms.
            &[self.buffer.offset_at_index(count)],
        )
    }
}