Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Feat/webgl support #578

Merged
merged 6 commits into from
May 20, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 5 additions & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -30,6 +30,11 @@ Please only add new entries below the [Unreleased](#unreleased---releasedate) he
- **core**: The split functions in `StateReader::map_reader`, `StateWriter::map_writer`, and `StateWriter::split_writer` no longer need to return a reference. (#568, @M-Adoo)
- **core**: Introduced `StateWatcher` for watching state modifies, which was previously the responsibility of `StateReader`. This results in a cleaner and more compact `StateReader` implementation. (#556, @M-Adoo)
- **gpu**: Introduced `GPUBackendImpl::max_textures_per_draw` to set a limit on textures per draw phase (#562, @M-Adoo)
- **gpu**: Updated the `wgpu` implementation of the GPU backend to support WebGL. (#578, @M-Adoo)

### Fixed

- **gpu**: Retrieve the texture limit size from the GPU instead of using a hardcoded value. (#578, @M-Adoo)

### Changed

Expand Down
4 changes: 3 additions & 1 deletion Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,9 @@ resolver = "2"
debug = true

[profile.release]
debug = true
lto = true
strip = true
codegen-units = 1

[workspace.package]
authors = ["RibirX<[email protected]>"]
Expand Down
1 change: 0 additions & 1 deletion core/src/builtin_widgets.rs
Original file line number Diff line number Diff line change
Expand Up @@ -171,7 +171,6 @@ impl<T> FatObj<T> {

/// Maps an `FatObj<T>` to `FatObj<V>` by applying a function to the host
/// object.
#[inline]
#[track_caller]
pub fn map<V>(self, f: impl FnOnce(T) -> V) -> FatObj<V> {
FatObj {
Expand Down
86 changes: 48 additions & 38 deletions gpu/src/gpu_backend.rs
Original file line number Diff line number Diff line change
Expand Up @@ -107,24 +107,21 @@ where
self.begin_draw_phase();
let output_size = output.size();
for cmd in commands.into_iter() {
let max_tex_per_draw = self.gpu_impl.limits().max_tex_load;
let maybe_used = match cmd {
PaintCommand::ImgPath { .. } => 2,
PaintCommand::PopClip => 0,
_ => 1,
};
if self.tex_ids_map.all_textures().len() + maybe_used
>= self.gpu_impl.load_tex_limit_per_draw()
|| !self.continues_cmd(&cmd)
{
if !self.can_batch(&cmd) {
// if the next command may hit the texture limit, submit the current draw phase.
// And start a new draw phase.
self.draw_triangles(output);
self.end_draw_phase();
self.begin_draw_phase();

assert!(
self.tex_ids_map.all_textures().len() + maybe_used
< self.gpu_impl.load_tex_limit_per_draw(),
self.tex_ids_map.all_textures().len() + maybe_used < max_tex_per_draw,
"The GPUBackend implementation does not provide a sufficient texture limit per draw."
)
}
Expand Down Expand Up @@ -194,16 +191,15 @@ where
let img_slice = self.tex_mgr.store_image(&img, &mut self.gpu_impl);
let img_start = img_slice.rect.origin.to_f32().to_array();
let img_size = img_slice.rect.size.to_f32().to_array();
let img_tex_idx = self.tex_ids_map.tex_idx(img_slice.tex_id);
let mask_head_and_tex_idx =
mask_head << 16 | self.tex_ids_map.tex_idx(img_slice.tex_id) as i32;
let prim_idx = self.img_prims.len() as u32;
let prim = ImgPrimitive {
transform: ts.inverse().unwrap().to_array(),
img_start,
img_size,
img_tex_idx,
mask_head,
mask_head_and_tex_idx,
opacity,
_dummy: 0,
};
self.img_prims.push(prim);
let buffer = &mut self.img_vertices_buffer;
Expand Down Expand Up @@ -242,14 +238,14 @@ where
PaintCommand::LinearGradient { path, linear_gradient } => {
let ts = path.transform;
if let Some((rect, mask_head)) = self.new_mask_layer(path) {
let stop = (self.linear_gradient_stops.len() << 16 | linear_gradient.stops.len()) as u32;
let mask_head_and_spread = mask_head << 16 | linear_gradient.spread_method as i32;
let prim: LinearGradientPrimitive = LinearGradientPrimitive {
transform: ts.inverse().unwrap().to_array(),
stop_start: self.linear_gradient_stops.len() as u32,
stop_cnt: linear_gradient.stops.len() as u32,
stop,
start_position: linear_gradient.start.to_array(),
end_position: linear_gradient.end.to_array(),
mask_head,
spread: linear_gradient.spread_method as u32,
mask_head_and_spread,
};
self.linear_gradient_stops.extend(
linear_gradient
Expand Down Expand Up @@ -343,17 +339,29 @@ where
self.linear_gradient_stops.clear();
}

fn continues_cmd(&self, cmd: &PaintCommand) -> bool {
matches!(
(self.current_phase, cmd),
(CurrentPhase::None, _)
| (_, PaintCommand::Clip(_))
| (_, PaintCommand::PopClip)
| (CurrentPhase::Color, PaintCommand::ColorPath { .. })
| (CurrentPhase::Img, PaintCommand::ImgPath { .. })
| (CurrentPhase::RadialGradient, PaintCommand::RadialGradient { .. })
| (CurrentPhase::LinearGradient, PaintCommand::LinearGradient { .. })
)
fn can_batch(&self, cmd: &PaintCommand) -> bool {
let limits = self.gpu_impl.limits();
let tex_used = self.tex_ids_map.all_textures().len();
match (self.current_phase, cmd) {
(CurrentPhase::None, _) | (_, PaintCommand::PopClip) => true,
(_, PaintCommand::Clip(_)) | (CurrentPhase::Color, PaintCommand::ColorPath { .. }) => {
tex_used < limits.max_tex_load
}
(CurrentPhase::Img, PaintCommand::ImgPath { .. }) => {
tex_used < limits.max_tex_load - 1 && self.img_prims.len() < limits.max_image_primitives
}
(CurrentPhase::RadialGradient, PaintCommand::RadialGradient { .. }) => {
tex_used < limits.max_tex_load
&& self.radial_gradient_prims.len() < limits.max_radial_gradient_primitives
&& self.radial_gradient_stops.len() < limits.max_gradient_stop_primitives
}
(CurrentPhase::LinearGradient, PaintCommand::LinearGradient { .. }) => {
tex_used < limits.max_tex_load
&& self.linear_gradient_prims.len() < limits.max_linear_gradient_primitives
&& self.linear_gradient_stops.len() < limits.max_gradient_stop_primitives
}
_ => false,
}
}

fn current_clip_mask_index(&self) -> i32 {
Expand All @@ -373,18 +381,20 @@ where
fn new_mask_layer(&mut self, path: PaintPath) -> Option<([Point; 4], i32)> {
let paint_bounds = path.paint_bounds.round_out().to_i32().cast_unit();
let view = paint_bounds.intersection(self.viewport())?;
let prefer_cache_size = prefer_cache_size(&path.path, &path.transform);

let (mask, mask_to_view) =
if valid_cache_item(&prefer_cache_size) || view.contains_rect(&paint_bounds) {
self
.tex_mgr
.store_alpha_path(path.path, &path.transform, &mut self.gpu_impl)
} else {
self
.tex_mgr
.store_clipped_path(view, path, &mut self.gpu_impl)
};

let (mask, mask_to_view) = if self
.tex_mgr
.is_good_for_cache(path.paint_bounds.size.to_i32().cast_unit())
|| view.contains_rect(&paint_bounds)
{
self
.tex_mgr
.store_alpha_path(path.path, &path.transform, &mut self.gpu_impl)
} else {
self
.tex_mgr
.store_clipped_path(view, path, &mut self.gpu_impl)
};

let mut points = rect_corners(&mask.rect.to_f32().cast_unit());
for p in points.iter_mut() {
Expand Down Expand Up @@ -412,7 +422,7 @@ where
gpu_impl.load_mask_layers(&self.mask_layers);

let textures = self.tex_ids_map.all_textures();
let max_textures = gpu_impl.load_tex_limit_per_draw();
let max_textures = gpu_impl.limits().max_tex_load;
let mut tex_buffer = Vec::with_capacity(max_textures);
textures.iter().take(max_textures).for_each(|id| {
tex_buffer.push(self.tex_mgr.texture(*id));
Expand Down
86 changes: 61 additions & 25 deletions gpu/src/gpu_backend/atlas.rs
Original file line number Diff line number Diff line change
Expand Up @@ -9,10 +9,6 @@ use slab::Slab;
use super::Texture;
use crate::GPUBackendImpl;

pub const ATLAS_MAX_ITEM: DeviceSize = DeviceSize::new(512, 512);
pub const ATLAS_MIN_SIZE: DeviceSize = DeviceSize::new(1024, 1024);
pub const ATLAS_MAX_SIZE: DeviceSize = DeviceSize::new(4096, 4096);

#[derive(Copy, Clone, Debug, PartialEq)]
enum AtlasDist {
Atlas(Allocation),
Expand All @@ -25,10 +21,16 @@ pub struct AtlasHandle<Attr> {
atlas_dist: AtlasDist,
}

pub(crate) struct AtlasConfig {
label: &'static str,
min_size: DeviceSize,
max_size: DeviceSize,
}

pub(crate) struct Atlas<T: Texture, K, Attr> {
config: AtlasConfig,
atlas_allocator: AtlasAllocator,
texture: T,
label: &'static str,
cache: FrameCache<K, AtlasHandle<Attr>>,
extras: Slab<T>,
islands: Vec<AtlasHandle<Attr>>,
Expand All @@ -53,13 +55,14 @@ where
K: Hash + Eq,
{
pub fn new(
label: &'static str, format: ColorFormat, anti_aliasing: AntiAliasing, gpu_impl: &mut T::Host,
config: AtlasConfig, format: ColorFormat, anti_aliasing: AntiAliasing, gpu_impl: &mut T::Host,
) -> Self {
let texture = gpu_impl.new_texture(ATLAS_MIN_SIZE, anti_aliasing, format);
let min_size = config.min_size;
let texture = gpu_impl.new_texture(min_size, anti_aliasing, format);
Self {
label,
config,
texture,
atlas_allocator: AtlasAllocator::new(ATLAS_MIN_SIZE.cast_unit()),
atlas_allocator: AtlasAllocator::new(min_size.cast_unit()),
cache: FrameCache::new(),
extras: Slab::default(),
islands: vec![],
Expand Down Expand Up @@ -92,7 +95,7 @@ where
if alloc.is_none() {
let expand_size = (current_size * 2)
.max(current_size)
.min(ATLAS_MAX_SIZE);
.min(self.config.max_size);
if expand_size != self.texture.size() {
self.atlas_allocator.grow(expand_size.cast_unit());
let mut new_tex = gpu_impl.new_texture(
Expand Down Expand Up @@ -155,16 +158,24 @@ where

pub fn size(&self) -> DeviceSize { self.texture.size() }

/// The max size of the atlas can be.
pub fn max_size(&self) -> DeviceSize { self.config.max_size }

pub fn clear(&mut self) {
self.cache.clear();
self.atlas_allocator.clear();
self.extras.clear();
}

pub fn is_good_size_to_alloc(&self, size: DeviceSize) -> bool {
(!size.greater_than(self.config.max_size).any())
&& size.area() <= self.config.max_size.area() / 4
}

pub(crate) fn end_frame(&mut self) {
self
.cache
.end_frame(self.label)
.end_frame(self.config.label)
.for_each(|h| release_handle!(self, h));
self
.islands
Expand All @@ -173,6 +184,12 @@ where
}
}

impl AtlasConfig {
pub fn new(label: &'static str, max_size: DeviceSize) -> Self {
Self { label, min_size: max_size / 4, max_size }
}
}

impl<Attr> AtlasHandle<Attr> {
pub fn tex_id(&self) -> usize {
match &self.atlas_dist {
Expand Down Expand Up @@ -202,9 +219,14 @@ mod tests {
#[test]
fn atlas_grow_to_alloc() {
let mut gpu_impl = block_on(WgpuImpl::headless());
let mut atlas =
Atlas::<WgpuTexture, _, _>::new("_", ColorFormat::Alpha8, AntiAliasing::None, &mut gpu_impl);
let size = DeviceSize::new(ATLAS_MIN_SIZE.width + 1, 16);
let mut atlas = Atlas::<WgpuTexture, _, _>::new(
AtlasConfig::new("", DeviceSize::new(4096, 4096)),
ColorFormat::Alpha8,
AntiAliasing::None,
&mut gpu_impl,
);

let size = DeviceSize::new(atlas.config.min_size.width + 1, 16);
let h = atlas.allocate(1, (), size, &mut gpu_impl);
gpu_impl.end_frame();
assert_eq!(h.tex_id(), 0);
Expand All @@ -213,10 +235,15 @@ mod tests {
#[test]
fn resource_clear() {
let mut wgpu = block_on(WgpuImpl::headless());
let mut atlas =
Atlas::<WgpuTexture, _, _>::new("_", ColorFormat::Rgba8, AntiAliasing::None, &mut wgpu);
let size = wgpu.limits().texture_size;
let mut atlas = Atlas::<WgpuTexture, _, _>::new(
AtlasConfig::new("", size),
ColorFormat::Rgba8,
AntiAliasing::None,
&mut wgpu,
);
atlas.allocate(1, (), DeviceSize::new(32, 32), &mut wgpu);
atlas.allocate(2, (), DeviceSize::new(4097, 16), &mut wgpu);
atlas.allocate(2, (), size, &mut wgpu);
atlas.end_frame();
atlas.end_frame();
wgpu.end_frame();
Expand All @@ -228,8 +255,12 @@ mod tests {
#[test]
fn fix_scale_path_cache_miss() {
let mut wgpu = block_on(WgpuImpl::headless());
let mut atlas =
Atlas::<WgpuTexture, _, _>::new("_", ColorFormat::Rgba8, AntiAliasing::None, &mut wgpu);
let mut atlas = Atlas::<WgpuTexture, _, _>::new(
AtlasConfig::new("", DeviceSize::new(4096, 4096)),
ColorFormat::Rgba8,
AntiAliasing::None,
&mut wgpu,
);
atlas.allocate(1, (), DeviceSize::new(32, 32), &mut wgpu);
atlas.allocate(1, (), DeviceSize::new(512, 512), &mut wgpu); // before the frame end, two allocation for key(1) should keep.
let mut alloc_count = 0;
Expand All @@ -251,22 +282,27 @@ mod tests {
#[test]
fn fix_atlas_expand_overlap() {
let mut wgpu = block_on(WgpuImpl::headless());
let mut atlas =
Atlas::<WgpuTexture, _, _>::new("_", ColorFormat::Alpha8, AntiAliasing::None, &mut wgpu);
let mut atlas = Atlas::<WgpuTexture, _, _>::new(
AtlasConfig::new("", DeviceSize::new(4096, 4096)),
ColorFormat::Alpha8,
AntiAliasing::None,
&mut wgpu,
);
let icon = DeviceSize::new(32, 32);
atlas.allocate(1, (), icon, &mut wgpu);

atlas
.texture
.write_data(&DeviceRect::from_size(icon), &[1; 32 * 32], &mut wgpu);

let min_size = atlas.config.min_size;
// force atlas to expand
let h = atlas.allocate(2, (), ATLAS_MIN_SIZE, &mut wgpu);
let h = atlas.allocate(2, (), min_size, &mut wgpu);
let second_rect = h.tex_rect(&atlas);
const SECOND_AREA: usize = (ATLAS_MIN_SIZE.width * ATLAS_MIN_SIZE.height) as usize;
let second_area: usize = (min_size.width * min_size.height) as usize;
atlas
.texture
.write_data(&second_rect, &[2; SECOND_AREA], &mut wgpu);
.write_data(&second_rect, &vec![2; second_area], &mut wgpu);
let img = atlas
.texture
.copy_as_image(&DeviceRect::from_size(atlas.size()), &mut wgpu);
Expand All @@ -281,7 +317,7 @@ mod tests {
.iter()
.map(|v| *v as usize)
.sum::<usize>(),
icon.area() as usize + SECOND_AREA * 2
icon.area() as usize + second_area * 2
)
}
}
Loading
Loading