Move image/svg handling into iced_graphics

The `TextureStore` trait is implemented by the atlas, and can also be
implemented in the glow renderer or in a software renderer.

The API here may be improved in the future, but API stability is
presumably not a huge issue since these types will only be used by
renderer backends.
This commit is contained in:
Ian Douglas Scott 2022-10-31 13:37:56 -07:00 committed by Héctor Ramón Jiménez
parent 7b12991728
commit 2c7c42ee93
No known key found for this signature in database
GPG key ID: 140CC052C94F138E
10 changed files with 281 additions and 191 deletions

View file

@ -8,19 +8,20 @@ license = "MIT AND OFL-1.1"
repository = "https://github.com/iced-rs/iced"
[features]
svg = ["resvg", "usvg", "tiny-skia"]
image = ["png", "jpeg", "jpeg_rayon", "gif", "webp", "bmp"]
png = ["image_rs/png"]
jpeg = ["image_rs/jpeg"]
jpeg_rayon = ["image_rs/jpeg_rayon"]
gif = ["image_rs/gif"]
webp = ["image_rs/webp"]
pnm = ["image_rs/pnm"]
ico = ["image_rs/ico"]
bmp = ["image_rs/bmp"]
hdr = ["image_rs/hdr"]
dds = ["image_rs/dds"]
farbfeld = ["image_rs/farbfeld"]
svg = ["iced_graphics/svg"]
image = ["image_rs", "iced_graphics/image", "png", "jpeg", "jpeg_rayon", "gif", "webp", "bmp"]
image_rs = ["iced_graphics/image_rs"]
png = ["iced_graphics/png"]
jpeg = ["iced_graphics/jpeg"]
jpeg_rayon = ["iced_graphics/jpeg_rayon"]
gif = ["iced_graphics/gif"]
webp = ["iced_graphics/webp"]
pnm = ["iced_graphics/pnm"]
ico = ["iced_graphics/ico"]
bmp = ["iced_graphics/bmp"]
hdr = ["iced_graphics/hdr"]
dds = ["iced_graphics/dds"]
farbfeld = ["iced_graphics/farbfeld"]
canvas = ["iced_graphics/canvas"]
qr_code = ["iced_graphics/qr_code"]
default_system_font = ["iced_graphics/font-source"]
@ -35,7 +36,6 @@ raw-window-handle = "0.5"
log = "0.4"
guillotiere = "0.6"
futures = "0.3"
kamadak-exif = "0.5"
bitflags = "1.2"
[dependencies.bytemuck]
@ -51,24 +51,6 @@ version = "0.3"
path = "../graphics"
features = ["font-fallback", "font-icons"]
[dependencies.image_rs]
version = "0.23"
package = "image"
default-features = false
optional = true
[dependencies.resvg]
version = "0.18"
optional = true
[dependencies.usvg]
version = "0.18"
optional = true
[dependencies.tiny-skia]
version = "0.6"
optional = true
[dependencies.encase]
version = "0.3.0"
features = ["glam"]

View file

@ -99,7 +99,7 @@ impl Backend {
}
#[cfg(any(feature = "image_rs", feature = "svg"))]
self.image_pipeline.trim_cache();
self.image_pipeline.trim_cache(device, encoder);
}
fn flush(

View file

@ -1,10 +1,10 @@
mod atlas;
#[cfg(feature = "image_rs")]
mod raster;
use iced_graphics::image::raster;
#[cfg(feature = "svg")]
mod vector;
use iced_graphics::image::vector;
use crate::Transformation;
use atlas::Atlas;
@ -25,9 +25,9 @@ use iced_native::svg;
#[derive(Debug)]
pub struct Pipeline {
#[cfg(feature = "image_rs")]
raster_cache: RefCell<raster::Cache>,
raster_cache: RefCell<raster::Cache<Atlas>>,
#[cfg(feature = "svg")]
vector_cache: RefCell<vector::Cache>,
vector_cache: RefCell<vector::Cache<Atlas>>,
pipeline: wgpu::RenderPipeline,
uniforms: wgpu::Buffer,
@ -243,10 +243,10 @@ impl Pipeline {
Pipeline {
#[cfg(feature = "image_rs")]
raster_cache: RefCell::new(raster::Cache::new()),
raster_cache: RefCell::new(raster::Cache::default()),
#[cfg(feature = "svg")]
vector_cache: RefCell::new(vector::Cache::new()),
vector_cache: RefCell::new(vector::Cache::default()),
pipeline,
uniforms: uniforms_buffer,
@ -302,8 +302,7 @@ impl Pipeline {
layer::Image::Raster { handle, bounds } => {
if let Some(atlas_entry) = raster_cache.upload(
handle,
device,
encoder,
&mut (device, encoder),
&mut self.texture_atlas,
) {
add_instances(
@ -325,8 +324,7 @@ impl Pipeline {
handle,
size,
_scale,
device,
encoder,
&mut (device, encoder),
&mut self.texture_atlas,
) {
add_instances(
@ -446,12 +444,20 @@ impl Pipeline {
}
}
pub fn trim_cache(&mut self) {
pub fn trim_cache(
&mut self,
device: &wgpu::Device,
encoder: &mut wgpu::CommandEncoder,
) {
#[cfg(feature = "image_rs")]
self.raster_cache.borrow_mut().trim(&mut self.texture_atlas);
self.raster_cache
.borrow_mut()
.trim(&mut self.texture_atlas, &mut (device, encoder));
#[cfg(feature = "svg")]
self.vector_cache.borrow_mut().trim(&mut self.texture_atlas);
self.vector_cache
.borrow_mut()
.trim(&mut self.texture_atlas, &mut (device, encoder));
}
}

View file

@ -4,6 +4,7 @@ mod allocation;
mod allocator;
mod layer;
use iced_graphics::image::TextureStore;
use std::num::NonZeroU32;
pub use allocation::Allocation;
@ -61,99 +62,6 @@ impl Atlas {
self.layers.len()
}
pub fn upload(
&mut self,
width: u32,
height: u32,
data: &[u8],
device: &wgpu::Device,
encoder: &mut wgpu::CommandEncoder,
) -> Option<Entry> {
use wgpu::util::DeviceExt;
let entry = {
let current_size = self.layers.len();
let entry = self.allocate(width, height)?;
// We grow the internal texture after allocating if necessary
let new_layers = self.layers.len() - current_size;
self.grow(new_layers, device, encoder);
entry
};
log::info!("Allocated atlas entry: {:?}", entry);
// It is a webgpu requirement that:
// BufferCopyView.layout.bytes_per_row % wgpu::COPY_BYTES_PER_ROW_ALIGNMENT == 0
// So we calculate padded_width by rounding width up to the next
// multiple of wgpu::COPY_BYTES_PER_ROW_ALIGNMENT.
let align = wgpu::COPY_BYTES_PER_ROW_ALIGNMENT;
let padding = (align - (4 * width) % align) % align;
let padded_width = (4 * width + padding) as usize;
let padded_data_size = padded_width * height as usize;
let mut padded_data = vec![0; padded_data_size];
for row in 0..height as usize {
let offset = row * padded_width;
padded_data[offset..offset + 4 * width as usize].copy_from_slice(
&data[row * 4 * width as usize..(row + 1) * 4 * width as usize],
)
}
let buffer =
device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("iced_wgpu::image staging buffer"),
contents: &padded_data,
usage: wgpu::BufferUsages::COPY_SRC,
});
match &entry {
Entry::Contiguous(allocation) => {
self.upload_allocation(
&buffer, width, height, padding, 0, allocation, encoder,
);
}
Entry::Fragmented { fragments, .. } => {
for fragment in fragments {
let (x, y) = fragment.position;
let offset = (y * padded_width as u32 + 4 * x) as usize;
self.upload_allocation(
&buffer,
width,
height,
padding,
offset,
&fragment.allocation,
encoder,
);
}
}
}
log::info!("Current atlas: {:?}", self);
Some(entry)
}
pub fn remove(&mut self, entry: &Entry) {
log::info!("Removing atlas entry: {:?}", entry);
match entry {
Entry::Contiguous(allocation) => {
self.deallocate(allocation);
}
Entry::Fragmented { fragments, .. } => {
for fragment in fragments {
self.deallocate(&fragment.allocation);
}
}
}
}
fn allocate(&mut self, width: u32, height: u32) -> Option<Entry> {
// Allocate one layer if texture fits perfectly
if width == SIZE && height == SIZE {
@ -388,3 +296,100 @@ impl Atlas {
});
}
}
/// `TextureStore` implementation backed by the wgpu texture atlas.
///
/// `State` bundles the wgpu device and command encoder that `upload` needs
/// to create staging buffers and record buffer-to-texture copies.
impl TextureStore for Atlas {
type Entry = Entry;
type State<'a> = (&'a wgpu::Device, &'a mut wgpu::CommandEncoder);
// Uploads `width * height` pixel `data` (4 bytes per pixel) into the
// atlas, growing it with new layers if the allocation requires it.
fn upload(
&mut self,
width: u32,
height: u32,
data: &[u8],
(device, encoder): &mut Self::State<'_>,
) -> Option<Self::Entry> {
use wgpu::util::DeviceExt;
let entry = {
let current_size = self.layers.len();
let entry = self.allocate(width, height)?;
// We grow the internal texture after allocating if necessary
let new_layers = self.layers.len() - current_size;
self.grow(new_layers, device, encoder);
entry
};
log::info!("Allocated atlas entry: {:?}", entry);
// It is a webgpu requirement that:
// BufferCopyView.layout.bytes_per_row % wgpu::COPY_BYTES_PER_ROW_ALIGNMENT == 0
// So we calculate padded_width by rounding width up to the next
// multiple of wgpu::COPY_BYTES_PER_ROW_ALIGNMENT.
let align = wgpu::COPY_BYTES_PER_ROW_ALIGNMENT;
let padding = (align - (4 * width) % align) % align;
let padded_width = (4 * width + padding) as usize;
let padded_data_size = padded_width * height as usize;
// Copy each source row into the padded layout; the trailing padding
// bytes of every row stay zeroed.
let mut padded_data = vec![0; padded_data_size];
for row in 0..height as usize {
let offset = row * padded_width;
padded_data[offset..offset + 4 * width as usize].copy_from_slice(
&data[row * 4 * width as usize..(row + 1) * 4 * width as usize],
)
}
// Stage the padded pixels in a GPU buffer used as the copy source.
let buffer =
device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("iced_wgpu::image staging buffer"),
contents: &padded_data,
usage: wgpu::BufferUsages::COPY_SRC,
});
match &entry {
// Single allocation: one buffer-to-texture copy from offset 0.
Entry::Contiguous(allocation) => {
self.upload_allocation(
&buffer, width, height, padding, 0, allocation, encoder,
);
}
// The image did not fit in one region; copy each fragment from
// its own offset within the staging buffer.
Entry::Fragmented { fragments, .. } => {
for fragment in fragments {
let (x, y) = fragment.position;
let offset = (y * padded_width as u32 + 4 * x) as usize;
self.upload_allocation(
&buffer,
width,
height,
padding,
offset,
&fragment.allocation,
encoder,
);
}
}
}
log::info!("Current atlas: {:?}", self);
Some(entry)
}
// Frees the atlas regions covered by `entry`. No GPU commands are
// recorded, so the device/encoder state goes unused here.
fn remove(&mut self, entry: &Entry, _: &mut Self::State<'_>) {
log::info!("Removing atlas entry: {:?}", entry);
match entry {
Entry::Contiguous(allocation) => {
self.deallocate(allocation);
}
Entry::Fragmented { fragments, .. } => {
for fragment in fragments {
self.deallocate(&fragment.allocation);
}
}
}
}
}

View file

@ -1,4 +1,5 @@
use crate::image::atlas;
use iced_graphics::image::TextureStoreEntry;
#[derive(Debug)]
pub enum Entry {
@ -9,9 +10,8 @@ pub enum Entry {
},
}
impl Entry {
#[cfg(feature = "image_rs")]
pub fn size(&self) -> (u32, u32) {
impl TextureStoreEntry for Entry {
fn size(&self) -> (u32, u32) {
match self {
Entry::Contiguous(allocation) => allocation.size(),
Entry::Fragmented { size, .. } => *size,

View file

@ -1,222 +0,0 @@
use crate::image::atlas::{self, Atlas};
use iced_native::image;
use std::collections::{HashMap, HashSet};
use bitflags::bitflags;
/// The state of a raster image in the cache.
#[derive(Debug)]
pub enum Memory {
// Decoded on the CPU, not yet uploaded to the texture atlas.
Host(::image_rs::ImageBuffer<::image_rs::Bgra<u8>, Vec<u8>>),
// Uploaded to the texture atlas.
Device(atlas::Entry),
// The image file could not be opened.
NotFound,
// The image data could not be decoded.
Invalid,
}
impl Memory {
    /// Returns the `(width, height)` of the image.
    ///
    /// Images that are missing or invalid report a 1x1 placeholder size.
    pub fn dimensions(&self) -> (u32, u32) {
        match self {
            Memory::Host(image) => image.dimensions(),
            Memory::Device(entry) => entry.size(),
            Memory::NotFound | Memory::Invalid => (1, 1),
        }
    }
}
/// A cache of raster image data, keyed by image handle id.
#[derive(Debug)]
pub struct Cache {
// Cached memory per handle id.
map: HashMap<u64, Memory>,
// Handle ids touched since the last `trim`; everything else is evicted.
hits: HashSet<u64>,
}
impl Cache {
/// Creates an empty cache.
pub fn new() -> Self {
Self {
map: HashMap::new(),
hits: HashSet::new(),
}
}
/// Loads the image for `handle`, decoding it on first use and applying
/// any EXIF orientation correction. Subsequent calls return the cached
/// entry and mark it as recently used.
pub fn load(&mut self, handle: &image::Handle) -> &mut Memory {
if self.contains(handle) {
return self.get(handle).unwrap();
}
let memory = match handle.data() {
image::Data::Path(path) => {
if let Ok(image) = image_rs::open(path) {
// Re-open the file to read its EXIF metadata; a missing or
// unreadable EXIF block falls back to "no transformation".
let operation = std::fs::File::open(path)
.ok()
.map(std::io::BufReader::new)
.and_then(|mut reader| {
Operation::from_exif(&mut reader).ok()
})
.unwrap_or_else(Operation::empty);
Memory::Host(operation.perform(image.to_bgra8()))
} else {
Memory::NotFound
}
}
image::Data::Bytes(bytes) => {
if let Ok(image) = image_rs::load_from_memory(bytes) {
let operation =
Operation::from_exif(&mut std::io::Cursor::new(bytes))
.ok()
.unwrap_or_else(Operation::empty);
Memory::Host(operation.perform(image.to_bgra8()))
} else {
Memory::Invalid
}
}
image::Data::Pixels {
width,
height,
pixels,
} => {
if let Some(image) = image_rs::ImageBuffer::from_vec(
*width,
*height,
pixels.to_vec(),
) {
Memory::Host(image)
} else {
// The pixel buffer length did not match the dimensions.
Memory::Invalid
}
}
};
self.insert(handle, memory);
self.get(handle).unwrap()
}
/// Uploads the image for `handle` into `atlas` if it is still on the
/// host, and returns its atlas entry. Returns `None` when the image is
/// missing, invalid, or the atlas upload fails.
pub fn upload(
&mut self,
handle: &image::Handle,
device: &wgpu::Device,
encoder: &mut wgpu::CommandEncoder,
atlas: &mut Atlas,
) -> Option<&atlas::Entry> {
let memory = self.load(handle);
if let Memory::Host(image) = memory {
let (width, height) = image.dimensions();
let entry = atlas.upload(width, height, image, device, encoder)?;
*memory = Memory::Device(entry);
}
if let Memory::Device(allocation) = memory {
Some(allocation)
} else {
None
}
}
/// Evicts every entry not used since the last `trim`, freeing its atlas
/// allocation, then resets the usage tracking.
pub fn trim(&mut self, atlas: &mut Atlas) {
let hits = &self.hits;
self.map.retain(|k, memory| {
let retain = hits.contains(k);
if !retain {
if let Memory::Device(entry) = memory {
atlas.remove(entry);
}
}
retain
});
self.hits.clear();
}
// Looks up the cached memory for `handle`, marking it as used.
fn get(&mut self, handle: &image::Handle) -> Option<&mut Memory> {
let _ = self.hits.insert(handle.id());
self.map.get_mut(&handle.id())
}
// Stores `memory` under the handle's id.
fn insert(&mut self, handle: &image::Handle, memory: Memory) {
let _ = self.map.insert(handle.id(), memory);
}
// Whether an entry for `handle` is already cached.
fn contains(&self, handle: &image::Handle) -> bool {
self.map.contains_key(&handle.id())
}
}
bitflags! {
// Transformations used to undo an EXIF orientation. The bit layout is
// chosen so that (EXIF orientation value - 1) maps directly onto these
// flags (see `Operation::from_exif`).
struct Operation: u8 {
const FLIP_HORIZONTALLY = 0b001;
const ROTATE_180 = 0b010;
const FLIP_DIAGONALLY = 0b100;
}
}
impl Operation {
// Meaning of the returned value is described e.g. at:
// https://magnushoff.com/articles/jpeg-orientation/
//
// Reads the EXIF orientation tag from `reader` and converts it into the
// set of operations needed; a missing, non-integer, or out-of-range tag
// yields the empty set (no transformation).
fn from_exif<R>(reader: &mut R) -> Result<Self, exif::Error>
where
R: std::io::BufRead + std::io::Seek,
{
let exif = exif::Reader::new().read_from_container(reader)?;
Ok(exif
.get_field(exif::Tag::Orientation, exif::In::PRIMARY)
.and_then(|field| field.value.get_uint(0))
.and_then(|value| u8::try_from(value).ok())
.and_then(|value| Self::from_bits(value.saturating_sub(1)))
.unwrap_or_else(Self::empty))
}
// Applies the contained operations to `image`: the diagonal flip
// (a transpose, which swaps the dimensions) runs first, followed by the
// in-place rotation and horizontal flip.
fn perform<P>(
self,
image: image_rs::ImageBuffer<P, Vec<P::Subpixel>>,
) -> image_rs::ImageBuffer<P, Vec<P::Subpixel>>
where
P: image_rs::Pixel + 'static,
{
use image_rs::imageops;
let mut image = if self.contains(Self::FLIP_DIAGONALLY) {
flip_diagonally(image)
} else {
image
};
if self.contains(Self::ROTATE_180) {
imageops::rotate180_in_place(&mut image);
}
if self.contains(Self::FLIP_HORIZONTALLY) {
imageops::flip_horizontal_in_place(&mut image);
}
image
}
}
/// Transposes `image`: the pixel at `(x, y)` ends up at `(y, x)`, so a
/// `width x height` input produces a `height x width` output.
fn flip_diagonally<I>(
    image: I,
) -> image_rs::ImageBuffer<I::Pixel, Vec<<I::Pixel as image_rs::Pixel>::Subpixel>>
where
    I: image_rs::GenericImage,
    I::Pixel: 'static,
{
    let (width, height) = image.dimensions();
    let mut flipped = image_rs::ImageBuffer::new(height, width);

    for y in 0..height {
        for x in 0..width {
            flipped.put_pixel(y, x, image.get_pixel(x, y));
        }
    }

    flipped
}

View file

@ -1,173 +0,0 @@
use crate::image::atlas::{self, Atlas};
use iced_native::svg;
use std::collections::{HashMap, HashSet};
use std::fs;
/// A parsed (or failed-to-parse) SVG document.
pub enum Svg {
// Successfully parsed SVG tree.
Loaded(usvg::Tree),
// The SVG could not be read or parsed.
NotFound,
}
impl Svg {
    /// Returns the `(width, height)` of the SVG viewport.
    ///
    /// An SVG that failed to load reports a 1x1 placeholder size.
    pub fn viewport_dimensions(&self) -> (u32, u32) {
        match self {
            Svg::NotFound => (1, 1),
            Svg::Loaded(tree) => {
                let viewport = tree.svg_node().size;

                (viewport.width() as u32, viewport.height() as u32)
            }
        }
    }
}
/// A cache of parsed SVG trees and their rasterizations.
#[derive(Debug)]
pub struct Cache {
// Parsed SVG trees, keyed by handle id.
svgs: HashMap<u64, Svg>,
// Rasterized bitmaps in the atlas, keyed by (handle id, width, height).
rasterized: HashMap<(u64, u32, u32), atlas::Entry>,
// Handle ids touched since the last `trim`.
svg_hits: HashSet<u64>,
// Rasterization keys touched since the last `trim`.
rasterized_hits: HashSet<(u64, u32, u32)>,
}
impl Cache {
/// Creates an empty cache.
pub fn new() -> Self {
Self {
svgs: HashMap::new(),
rasterized: HashMap::new(),
svg_hits: HashSet::new(),
rasterized_hits: HashSet::new(),
}
}
/// Parses the SVG for `handle`, caching the resulting tree. Subsequent
/// calls return the cached entry without re-parsing.
pub fn load(&mut self, handle: &svg::Handle) -> &Svg {
if self.svgs.contains_key(&handle.id()) {
return self.svgs.get(&handle.id()).unwrap();
}
let svg = match handle.data() {
svg::Data::Path(path) => {
let tree = fs::read_to_string(path).ok().and_then(|contents| {
usvg::Tree::from_str(
&contents,
&usvg::Options::default().to_ref(),
)
.ok()
});
tree.map(Svg::Loaded).unwrap_or(Svg::NotFound)
}
svg::Data::Bytes(bytes) => {
match usvg::Tree::from_data(
bytes,
&usvg::Options::default().to_ref(),
) {
Ok(tree) => Svg::Loaded(tree),
Err(_) => Svg::NotFound,
}
}
};
let _ = self.svgs.insert(handle.id(), svg);
self.svgs.get(&handle.id()).unwrap()
}
/// Rasterizes the SVG for `handle` at `size * scale` physical pixels and
/// uploads the bitmap into `texture_atlas`, reusing a previous
/// rasterization of the same size when available. Returns `None` for
/// zero-sized targets, load failures, or failed uploads.
pub fn upload(
&mut self,
handle: &svg::Handle,
[width, height]: [f32; 2],
scale: f32,
device: &wgpu::Device,
encoder: &mut wgpu::CommandEncoder,
texture_atlas: &mut Atlas,
) -> Option<&atlas::Entry> {
let id = handle.id();
// Target size in physical pixels, rounded up.
let (width, height) = (
(scale * width).ceil() as u32,
(scale * height).ceil() as u32,
);
// TODO: Optimize!
// We currently rerasterize the SVG when its size changes. This is slow
// as heck. A GPU rasterizer like `pathfinder` may perform better.
// It would be cool to be able to smooth resize the `svg` example.
if self.rasterized.contains_key(&(id, width, height)) {
let _ = self.svg_hits.insert(id);
let _ = self.rasterized_hits.insert((id, width, height));
return self.rasterized.get(&(id, width, height));
}
match self.load(handle) {
Svg::Loaded(tree) => {
if width == 0 || height == 0 {
return None;
}
// TODO: Optimize!
// We currently rerasterize the SVG when its size changes. This is slow
// as heck. A GPU rasterizer like `pathfinder` may perform better.
// It would be cool to be able to smooth resize the `svg` example.
let mut img = tiny_skia::Pixmap::new(width, height)?;
resvg::render(
tree,
if width > height {
usvg::FitTo::Width(width)
} else {
usvg::FitTo::Height(height)
},
img.as_mut(),
)?;
// Swap the first and third channel of every pixel — presumably
// converting tiny-skia's RGBA output to the atlas pixel order.
let mut rgba = img.take();
rgba.chunks_exact_mut(4).for_each(|rgba| rgba.swap(0, 2));
let allocation = texture_atlas.upload(
width,
height,
bytemuck::cast_slice(rgba.as_slice()),
device,
encoder,
)?;
log::debug!("allocating {} {}x{}", id, width, height);
let _ = self.svg_hits.insert(id);
let _ = self.rasterized_hits.insert((id, width, height));
let _ = self.rasterized.insert((id, width, height), allocation);
self.rasterized.get(&(id, width, height))
}
Svg::NotFound => None,
}
}
/// Evicts SVGs and rasterizations not used since the last `trim`,
/// freeing their atlas allocations, then resets the usage tracking.
pub fn trim(&mut self, atlas: &mut Atlas) {
let svg_hits = &self.svg_hits;
let rasterized_hits = &self.rasterized_hits;
self.svgs.retain(|k, _| svg_hits.contains(k));
self.rasterized.retain(|k, entry| {
let retain = rasterized_hits.contains(k);
if !retain {
atlas.remove(entry);
}
retain
});
self.svg_hits.clear();
self.rasterized_hits.clear();
}
}
impl std::fmt::Debug for Svg {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Only the variant name is printed; the loaded tree itself is not
        // formatted.
        let name = match self {
            Svg::Loaded(_) => "Svg::Loaded",
            Svg::NotFound => "Svg::NotFound",
        };

        write!(f, "{}", name)
    }
}