Remove image abstractions in iced_graphics

This commit is contained in:
Héctor Ramón Jiménez 2023-03-07 03:47:49 +01:00
parent 9b4bcd287a
commit 3a26baa564
No known key found for this signature in database
GPG key ID: 140CC052C94F138E
11 changed files with 352 additions and 436 deletions

View file

@@ -119,7 +119,7 @@ impl Backend {
self.triangle_pipeline.end_frame();
#[cfg(any(feature = "image", feature = "svg"))]
self.image_pipeline.end_frame(device, queue, encoder);
self.image_pipeline.end_frame();
}
fn prepare_text(

View file

@ -1,5 +1,11 @@
mod atlas;
#[cfg(feature = "image")]
mod raster;
#[cfg(feature = "svg")]
mod vector;
use atlas::Atlas;
use crate::core::{Rectangle, Size};
@ -7,12 +13,6 @@ use crate::graphics::Transformation;
use crate::layer;
use crate::Buffer;
#[cfg(feature = "image")]
use crate::graphics::image::raster;
#[cfg(feature = "svg")]
use crate::graphics::image::vector;
use std::cell::RefCell;
use std::mem;
@ -30,9 +30,9 @@ use tracing::info_span;
#[derive(Debug)]
pub struct Pipeline {
#[cfg(feature = "image")]
raster_cache: RefCell<raster::Cache<Atlas>>,
raster_cache: RefCell<raster::Cache>,
#[cfg(feature = "svg")]
vector_cache: RefCell<vector::Cache<Atlas>>,
vector_cache: RefCell<vector::Cache>,
pipeline: wgpu::RenderPipeline,
vertices: wgpu::Buffer,
@ -368,8 +368,10 @@ impl Pipeline {
#[cfg(feature = "image")]
layer::Image::Raster { handle, bounds } => {
if let Some(atlas_entry) = raster_cache.upload(
device,
queue,
encoder,
handle,
&mut (device, queue, encoder),
&mut self.texture_atlas,
) {
add_instances(
@ -392,11 +394,13 @@ impl Pipeline {
let size = [bounds.width, bounds.height];
if let Some(atlas_entry) = vector_cache.upload(
device,
queue,
encoder,
handle,
*color,
size,
_scale,
&mut (device, queue, encoder),
&mut self.texture_atlas,
) {
add_instances(
@ -477,21 +481,12 @@ impl Pipeline {
}
}
pub fn end_frame(
&mut self,
device: &wgpu::Device,
queue: &wgpu::Queue,
encoder: &mut wgpu::CommandEncoder,
) {
pub fn end_frame(&mut self) {
#[cfg(feature = "image")]
self.raster_cache
.borrow_mut()
.trim(&mut self.texture_atlas, &mut (device, queue, encoder));
self.raster_cache.borrow_mut().trim(&mut self.texture_atlas);
#[cfg(feature = "svg")]
self.vector_cache
.borrow_mut()
.trim(&mut self.texture_atlas, &mut (device, queue, encoder));
self.vector_cache.borrow_mut().trim(&mut self.texture_atlas);
self.prepare_layer = 0;
}

View file

@ -13,7 +13,6 @@ use allocator::Allocator;
pub const SIZE: u32 = 2048;
use crate::core::Size;
use crate::graphics::image;
use std::num::NonZeroU32;
@ -64,6 +63,97 @@ impl Atlas {
self.layers.len()
}
/// Uploads a `width`×`height` RGBA8 image into the atlas, growing the
/// backing texture with extra layers when the allocation requires it.
///
/// Returns the atlas [`Entry`] describing where the pixels now live, or
/// `None` if no space could be allocated.
pub fn upload(
    &mut self,
    device: &wgpu::Device,
    queue: &wgpu::Queue,
    encoder: &mut wgpu::CommandEncoder,
    width: u32,
    height: u32,
    data: &[u8],
) -> Option<Entry> {
    let entry = {
        let layers_before = self.layers.len();
        let entry = self.allocate(width, height)?;

        // We grow the internal texture after allocating if necessary
        let added_layers = self.layers.len() - layers_before;
        self.grow(added_layers, device, encoder);

        entry
    };

    log::info!("Allocated atlas entry: {:?}", entry);

    // It is a webgpu requirement that:
    //   BufferCopyView.layout.bytes_per_row % wgpu::COPY_BYTES_PER_ROW_ALIGNMENT == 0
    // So every row is padded up to the next multiple of
    // wgpu::COPY_BYTES_PER_ROW_ALIGNMENT before copying.
    let align = wgpu::COPY_BYTES_PER_ROW_ALIGNMENT;
    let padding = (align - (4 * width) % align) % align;
    let padded_width = (4 * width + padding) as usize;
    let row_bytes = 4 * width as usize;

    let mut padded_data = vec![0; padded_width * height as usize];

    // Copy each source row into the wider, aligned destination row.
    for row in 0..height as usize {
        let dest = row * padded_width;

        padded_data[dest..dest + row_bytes]
            .copy_from_slice(&data[row * row_bytes..(row + 1) * row_bytes]);
    }

    match &entry {
        Entry::Contiguous(allocation) => {
            self.upload_allocation(
                &padded_data,
                width,
                height,
                padding,
                0,
                allocation,
                queue,
            );
        }
        Entry::Fragmented { fragments, .. } => {
            // Each fragment reads from its own offset inside the padded buffer.
            for fragment in fragments {
                let (x, y) = fragment.position;
                let offset = (y * padded_width as u32 + 4 * x) as usize;

                self.upload_allocation(
                    &padded_data,
                    width,
                    height,
                    padding,
                    offset,
                    &fragment.allocation,
                    queue,
                );
            }
        }
    }

    log::info!("Current atlas: {:?}", self);

    Some(entry)
}
/// Releases every allocation backing `entry` so its atlas space can be
/// reused by later uploads.
pub fn remove(&mut self, entry: &Entry) {
    log::info!("Removing atlas entry: {:?}", entry);

    match entry {
        Entry::Contiguous(allocation) => self.deallocate(allocation),
        Entry::Fragmented { fragments, .. } => {
            for fragment in fragments.iter() {
                self.deallocate(&fragment.allocation);
            }
        }
    }
}
fn allocate(&mut self, width: u32, height: u32) -> Option<Entry> {
// Allocate one layer if texture fits perfectly
if width == SIZE && height == SIZE {
@ -296,101 +386,3 @@ impl Atlas {
});
}
}
/// Legacy `iced_graphics` storage abstraction over the atlas (the side this
/// commit removes). The wgpu handles required for uploading are threaded
/// through the associated `State` tuple so backend-agnostic cache code can
/// drive the atlas without naming wgpu types directly.
impl image::Storage for Atlas {
    type Entry = Entry;
    type State<'a> = (
        &'a wgpu::Device,
        &'a wgpu::Queue,
        &'a mut wgpu::CommandEncoder,
    );

    /// Allocates space for a `width`×`height` RGBA8 image and copies `data`
    /// into the atlas, padding rows to wgpu's copy alignment.
    fn upload(
        &mut self,
        width: u32,
        height: u32,
        data: &[u8],
        (device, queue, encoder): &mut Self::State<'_>,
    ) -> Option<Self::Entry> {
        let entry = {
            let current_size = self.layers.len();
            let entry = self.allocate(width, height)?;

            // We grow the internal texture after allocating if necessary
            let new_layers = self.layers.len() - current_size;
            self.grow(new_layers, device, encoder);

            entry
        };

        log::info!("Allocated atlas entry: {:?}", entry);

        // It is a webgpu requirement that:
        //   BufferCopyView.layout.bytes_per_row % wgpu::COPY_BYTES_PER_ROW_ALIGNMENT == 0
        // So we calculate padded_width by rounding width up to the next
        // multiple of wgpu::COPY_BYTES_PER_ROW_ALIGNMENT.
        let align = wgpu::COPY_BYTES_PER_ROW_ALIGNMENT;
        let padding = (align - (4 * width) % align) % align;
        let padded_width = (4 * width + padding) as usize;
        let padded_data_size = padded_width * height as usize;

        let mut padded_data = vec![0; padded_data_size];

        // Copy each tightly-packed source row into its padded destination row.
        for row in 0..height as usize {
            let offset = row * padded_width;

            padded_data[offset..offset + 4 * width as usize].copy_from_slice(
                &data[row * 4 * width as usize..(row + 1) * 4 * width as usize],
            )
        }

        match &entry {
            Entry::Contiguous(allocation) => {
                self.upload_allocation(
                    &padded_data,
                    width,
                    height,
                    padding,
                    0,
                    allocation,
                    queue,
                );
            }
            Entry::Fragmented { fragments, .. } => {
                // Each fragment copies from its own offset in the padded buffer.
                for fragment in fragments {
                    let (x, y) = fragment.position;
                    let offset = (y * padded_width as u32 + 4 * x) as usize;

                    self.upload_allocation(
                        &padded_data,
                        width,
                        height,
                        padding,
                        offset,
                        &fragment.allocation,
                        queue,
                    );
                }
            }
        }

        log::info!("Current atlas: {:?}", self);

        Some(entry)
    }

    /// Frees the allocations referenced by `entry`; the GPU state is unused.
    fn remove(&mut self, entry: &Entry, _: &mut Self::State<'_>) {
        log::info!("Removing atlas entry: {:?}", entry);

        match entry {
            Entry::Contiguous(allocation) => {
                self.deallocate(allocation);
            }
            Entry::Fragmented { fragments, .. } => {
                for fragment in fragments {
                    self.deallocate(&fragment.allocation);
                }
            }
        }
    }
}

View file

@ -1,5 +1,4 @@
use crate::core::Size;
use crate::graphics::image;
use crate::image::atlas;
#[derive(Debug)]
@ -11,8 +10,9 @@ pub enum Entry {
},
}
impl image::storage::Entry for Entry {
fn size(&self) -> Size<u32> {
impl Entry {
#[cfg(feature = "image")]
pub fn size(&self) -> Size<u32> {
match self {
Entry::Contiguous(allocation) => allocation.size(),
Entry::Fragmented { size, .. } => *size,

121
wgpu/src/image/raster.rs Normal file
View file

@ -0,0 +1,121 @@
use crate::core::image;
use crate::core::Size;
use crate::graphics;
use crate::graphics::image::image_rs;
use crate::image::atlas::{self, Atlas};
use std::collections::{HashMap, HashSet};
/// Entry in cache corresponding to an image handle
#[derive(Debug)]
pub enum Memory {
    /// Image data on host (decoded pixels, not yet uploaded to the GPU)
    Host(image_rs::ImageBuffer<image_rs::Rgba<u8>, Vec<u8>>),
    /// Storage entry (pixels live in the texture atlas)
    Device(atlas::Entry),
    /// Image not found
    NotFound,
    /// Invalid image data
    Invalid,
}
impl Memory {
    /// Width and height of image
    pub fn dimensions(&self) -> Size<u32> {
        match self {
            Memory::Host(image) => {
                let (width, height) = image.dimensions();

                Size::new(width, height)
            }
            Memory::Device(entry) => entry.size(),
            // Missing or undecodable images act as a 1×1 placeholder.
            Memory::NotFound | Memory::Invalid => Size::new(1, 1),
        }
    }
}
/// Caches image raster data
#[derive(Debug, Default)]
pub struct Cache {
    // Decoded/uploaded state per `image::Handle` id
    map: HashMap<u64, Memory>,
    // Ids touched since the last `trim`; everything else gets evicted
    hits: HashSet<u64>,
}
impl Cache {
    /// Load image
    ///
    /// Decodes the handle's data on first use and memoizes the result —
    /// including failures, so bad handles are not re-decoded every frame.
    pub fn load(&mut self, handle: &image::Handle) -> &mut Memory {
        if self.contains(handle) {
            return self.get(handle).unwrap();
        }

        let memory = match graphics::image::load(handle) {
            Ok(image) => Memory::Host(image.to_rgba8()),
            // An I/O error means the source could not be read at all
            Err(image_rs::error::ImageError::IoError(_)) => Memory::NotFound,
            Err(_) => Memory::Invalid,
        };

        self.insert(handle, memory);

        self.get(handle).unwrap()
    }

    /// Load image and upload raster data
    ///
    /// Returns the atlas entry for `handle`, uploading the decoded pixels
    /// on first use; `None` if the image could not be decoded or allocated.
    pub fn upload(
        &mut self,
        device: &wgpu::Device,
        queue: &wgpu::Queue,
        encoder: &mut wgpu::CommandEncoder,
        handle: &image::Handle,
        atlas: &mut Atlas,
    ) -> Option<&atlas::Entry> {
        let memory = self.load(handle);

        // Promote host-side pixels into the atlas exactly once
        if let Memory::Host(image) = memory {
            let (width, height) = image.dimensions();

            let entry =
                atlas.upload(device, queue, encoder, width, height, image)?;

            *memory = Memory::Device(entry);
        }

        if let Memory::Device(allocation) = memory {
            Some(allocation)
        } else {
            None
        }
    }

    /// Trim cache misses from cache
    pub fn trim(&mut self, atlas: &mut Atlas) {
        let hits = &self.hits;

        // Drop everything not requested since the last trim, releasing its
        // atlas space when it had already been uploaded.
        self.map.retain(|k, memory| {
            let retain = hits.contains(k);

            if !retain {
                if let Memory::Device(entry) = memory {
                    atlas.remove(entry);
                }
            }

            retain
        });

        self.hits.clear();
    }

    // Looks up the memory for `handle`, recording a cache hit.
    fn get(&mut self, handle: &image::Handle) -> Option<&mut Memory> {
        let _ = self.hits.insert(handle.id());

        self.map.get_mut(&handle.id())
    }

    fn insert(&mut self, handle: &image::Handle, memory: Memory) {
        let _ = self.map.insert(handle.id(), memory);
    }

    fn contains(&self, handle: &image::Handle) -> bool {
        self.map.contains_key(&handle.id())
    }
}

181
wgpu/src/image/vector.rs Normal file
View file

@ -0,0 +1,181 @@
use crate::core::svg;
use crate::core::{Color, Size};
use crate::image::atlas::{self, Atlas};
use resvg::tiny_skia;
use resvg::usvg;
use std::collections::{HashMap, HashSet};
use std::fs;
/// Entry in cache corresponding to an svg handle
pub enum Svg {
    /// Parsed svg
    Loaded(usvg::Tree),
    /// Svg not found or failed to parse
    NotFound,
}
impl Svg {
    /// Viewport width and height
    pub fn viewport_dimensions(&self) -> Size<u32> {
        if let Svg::Loaded(tree) = self {
            let size = tree.size;

            Size::new(size.width() as u32, size.height() as u32)
        } else {
            // Unloadable svgs report a 1×1 placeholder viewport.
            Size::new(1, 1)
        }
    }
}
/// Caches svg vector and raster data
#[derive(Debug, Default)]
pub struct Cache {
    // Parsed svg trees per `svg::Handle` id
    svgs: HashMap<u64, Svg>,
    // Rasterizations keyed by (handle id, width, height, color filter)
    rasterized: HashMap<(u64, u32, u32, ColorFilter), atlas::Entry>,
    // Hit tracking since the last `trim`
    svg_hits: HashSet<u64>,
    rasterized_hits: HashSet<(u64, u32, u32, ColorFilter)>,
}

// Optional solid tint applied to opaque pixels, as premixed RGBA8 components
type ColorFilter = Option<[u8; 4]>;
impl Cache {
    /// Load svg
    ///
    /// Parses the svg on first use and memoizes the result; parse failures
    /// are cached as `Svg::NotFound` so they are not retried every frame.
    pub fn load(&mut self, handle: &svg::Handle) -> &Svg {
        if self.svgs.contains_key(&handle.id()) {
            return self.svgs.get(&handle.id()).unwrap();
        }

        let svg = match handle.data() {
            svg::Data::Path(path) => {
                let tree = fs::read_to_string(path).ok().and_then(|contents| {
                    usvg::Tree::from_str(&contents, &usvg::Options::default())
                        .ok()
                });

                tree.map(Svg::Loaded).unwrap_or(Svg::NotFound)
            }
            svg::Data::Bytes(bytes) => {
                match usvg::Tree::from_data(bytes, &usvg::Options::default()) {
                    Ok(tree) => Svg::Loaded(tree),
                    Err(_) => Svg::NotFound,
                }
            }
        };

        let _ = self.svgs.insert(handle.id(), svg);

        self.svgs.get(&handle.id()).unwrap()
    }

    /// Load svg and upload raster data
    ///
    /// Rasterizes the svg at `[width, height] * scale` physical pixels
    /// (optionally tinting opaque pixels with `color`), uploads the pixels
    /// to the atlas, and memoizes the entry per (id, size, color) key.
    pub fn upload(
        &mut self,
        device: &wgpu::Device,
        queue: &wgpu::Queue,
        encoder: &mut wgpu::CommandEncoder,
        handle: &svg::Handle,
        color: Option<Color>,
        [width, height]: [f32; 2],
        scale: f32,
        atlas: &mut Atlas,
    ) -> Option<&atlas::Entry> {
        let id = handle.id();

        // Physical pixel size of the rasterization
        let (width, height) = (
            (scale * width).ceil() as u32,
            (scale * height).ceil() as u32,
        );
        let color = color.map(Color::into_rgba8);
        let key = (id, width, height, color);

        // TODO: Optimize!
        // We currently rerasterize the SVG when its size changes. This is slow
        // as heck. A GPU rasterizer like `pathfinder` may perform better.
        // It would be cool to be able to smooth resize the `svg` example.
        if self.rasterized.contains_key(&key) {
            let _ = self.svg_hits.insert(id);
            let _ = self.rasterized_hits.insert(key);

            return self.rasterized.get(&key);
        }

        match self.load(handle) {
            Svg::Loaded(tree) => {
                if width == 0 || height == 0 {
                    return None;
                }

                let mut img = tiny_skia::Pixmap::new(width, height)?;

                // NOTE(review): fits to whichever target dimension is larger —
                // confirm this matches the intended aspect-ratio behavior.
                resvg::render(
                    tree,
                    if width > height {
                        usvg::FitTo::Width(width)
                    } else {
                        usvg::FitTo::Height(height)
                    },
                    tiny_skia::Transform::default(),
                    img.as_mut(),
                )?;

                let mut rgba = img.take();

                // Apply the tint to every pixel with non-zero alpha
                if let Some(color) = color {
                    rgba.chunks_exact_mut(4).for_each(|rgba| {
                        if rgba[3] > 0 {
                            rgba[0] = color[0];
                            rgba[1] = color[1];
                            rgba[2] = color[2];
                        }
                    });
                }

                let allocation = atlas
                    .upload(device, queue, encoder, width, height, &rgba)?;

                log::debug!("allocating {} {}x{}", id, width, height);

                let _ = self.svg_hits.insert(id);
                let _ = self.rasterized_hits.insert(key);
                let _ = self.rasterized.insert(key, allocation);

                self.rasterized.get(&key)
            }
            Svg::NotFound => None,
        }
    }

    /// Evict svgs and rasterizations that were not used since the last trim,
    /// releasing the atlas space held by evicted rasterizations.
    pub fn trim(&mut self, atlas: &mut Atlas) {
        let svg_hits = &self.svg_hits;
        let rasterized_hits = &self.rasterized_hits;

        self.svgs.retain(|k, _| svg_hits.contains(k));
        self.rasterized.retain(|k, entry| {
            let retain = rasterized_hits.contains(k);

            if !retain {
                atlas.remove(entry);
            }

            retain
        });
        self.svg_hits.clear();
        self.rasterized_hits.clear();
    }
}
impl std::fmt::Debug for Svg {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // `usvg::Tree` has no `Debug` impl, so only the variant name is shown.
        let name = match self {
            Svg::Loaded(_) => "Svg::Loaded",
            Svg::NotFound => "Svg::NotFound",
        };

        f.write_str(name)
    }
}