diff --git a/crates/eframe/src/web/web_painter_wgpu.rs b/crates/eframe/src/web/web_painter_wgpu.rs
index 7a4cf0e79e60..8f7b5644b8ea 100644
--- a/crates/eframe/src/web/web_painter_wgpu.rs
+++ b/crates/eframe/src/web/web_painter_wgpu.rs
@@ -1,62 +1,14 @@
 use std::sync::{mpsc, Arc};
 
+use super::web_painter::WebPainter;
+use crate::epaint::ColorImage;
+use crate::{epaint, WebOptions};
 use egui::{Event, UserData, ViewportId};
+use egui_wgpu::capture::CaptureState;
 use egui_wgpu::{RenderState, SurfaceErrorAction, WgpuSetup};
 use wasm_bindgen::JsValue;
 use web_sys::HtmlCanvasElement;
 
-use super::web_painter::WebPainter;
-use crate::epaint::ColorImage;
-use crate::{epaint, WebOptions};
-
-/// A texture and a buffer for reading the rendered frame back to the cpu.
-/// The texture is required since [`wgpu::TextureUsages::COPY_DST`] is not an allowed
-/// flag for the surface texture on all platforms. This means that anytime we want to
-/// capture the frame, we first render it to this texture, and then we can copy it to
-/// both the surface texture and the buffer, from where we can pull it back to the cpu.
-struct CaptureState {
-    texture: wgpu::Texture,
-    padding: BufferPadding,
-}
-
-impl CaptureState {
-    fn new(device: &Arc<wgpu::Device>, surface_texture: &wgpu::Texture) -> Self {
-        let texture = device.create_texture(&wgpu::TextureDescriptor {
-            label: Some("egui_screen_capture_texture"),
-            size: surface_texture.size(),
-            mip_level_count: surface_texture.mip_level_count(),
-            sample_count: surface_texture.sample_count(),
-            dimension: surface_texture.dimension(),
-            format: surface_texture.format(),
-            usage: wgpu::TextureUsages::RENDER_ATTACHMENT | wgpu::TextureUsages::COPY_SRC,
-            view_formats: &[],
-        });
-
-        let padding = BufferPadding::new(surface_texture.width());
-
-        Self { texture, padding }
-    }
-}
-
-#[derive(Copy, Clone)]
-struct BufferPadding {
-    unpadded_bytes_per_row: u32,
-    padded_bytes_per_row: u32,
-}
-
-impl BufferPadding {
-    fn new(width: u32) -> Self {
-        let bytes_per_pixel = std::mem::size_of::<u32>() as u32;
-        let unpadded_bytes_per_row = width * bytes_per_pixel;
-        let padded_bytes_per_row =
-            wgpu::util::align_to(unpadded_bytes_per_row, wgpu::COPY_BYTES_PER_ROW_ALIGNMENT);
-        Self {
-            unpadded_bytes_per_row,
-            padded_bytes_per_row,
-        }
-    }
-}
-
 pub(crate) struct WebPainterWgpu {
     canvas: HtmlCanvasElement,
     surface: wgpu::Surface<'static>,
@@ -66,7 +18,6 @@ pub(crate) struct WebPainterWgpu {
     depth_format: Option<wgpu::TextureFormat>,
     depth_texture_view: Option<wgpu::TextureView>,
     screen_capture_state: Option<CaptureState>,
-    capture_supported: bool,
     capture_rx: mpsc::Receiver<(Vec<UserData>, ColorImage)>,
     capture_tx: mpsc::Sender<(Vec<UserData>, ColorImage)>,
     ctx: egui::Context,
@@ -105,125 +56,6 @@ impl WebPainterWgpu {
         })
     }
 
-    // CaptureState only needs to be updated when the size of the two textures don't match, and we want to
-    // capture a frame
-    fn update_capture_state(
-        screen_capture_state: &mut Option<CaptureState>,
-        surface_texture: &wgpu::SurfaceTexture,
-        render_state: &RenderState,
-    ) {
-        let surface_texture = &surface_texture.texture;
-        match screen_capture_state {
-            Some(capture_state) => {
-                if capture_state.texture.size() != surface_texture.size() {
-                    *capture_state = CaptureState::new(&render_state.device, surface_texture);
-                }
-            }
-            None => {
-                *screen_capture_state =
-                    Some(CaptureState::new(&render_state.device, surface_texture));
-            }
-        }
-    }
-
-    // Handles copying from the CaptureState texture to the surface texture and the cpu
-    fn read_screen_rgba(
-        ctx: egui::Context,
-        screen_capture_state: &mut CaptureState,
-        render_state: &RenderState,
-        output_frame: Option<&wgpu::SurfaceTexture>,
-        data: Vec<UserData>,
-        tx: mpsc::Sender<(Vec<UserData>, ColorImage)>,
-    ) {
-        // It would be more efficient to reuse the Buffer, e.g. via some kind of ring buffer, but
-        // for most screenshot use cases this should be fine. When taking many screenshots (e.g. for a video)
-        // it might make sense to revisit this and implement a more efficient solution.
-        #[allow(clippy::arc_with_non_send_sync)]
-        let buffer = Arc::new(render_state.device.create_buffer(&wgpu::BufferDescriptor {
-            label: Some("egui_screen_capture_buffer"),
-            size: (screen_capture_state.padding.padded_bytes_per_row
-                * screen_capture_state.texture.height()) as u64,
-            usage: wgpu::BufferUsages::COPY_DST | wgpu::BufferUsages::MAP_READ,
-            mapped_at_creation: false,
-        }));
-        let padding = screen_capture_state.padding;
-        let tex = &mut screen_capture_state.texture;
-
-        let device = &render_state.device;
-        let queue = &render_state.queue;
-
-        let tex_extent = tex.size();
-
-        let mut encoder = device.create_command_encoder(&Default::default());
-        encoder.copy_texture_to_buffer(
-            tex.as_image_copy(),
-            wgpu::ImageCopyBuffer {
-                buffer: &buffer,
-                layout: wgpu::ImageDataLayout {
-                    offset: 0,
-                    bytes_per_row: Some(padding.padded_bytes_per_row),
-                    rows_per_image: None,
-                },
-            },
-            tex_extent,
-        );
-
-        if let Some(texture) = output_frame {
-            encoder.copy_texture_to_texture(
-                tex.as_image_copy(),
-                texture.texture.as_image_copy(),
-                tex.size(),
-            );
-        }
-
-        let id = queue.submit(Some(encoder.finish()));
-        let buffer_clone = buffer.clone();
-        let buffer_slice = buffer_clone.slice(..);
-        let format = tex.format();
-        buffer_slice.map_async(wgpu::MapMode::Read, move |result| {
-            if let Err(err) = result {
-                log::error!("Failed to map buffer for reading: {:?}", err);
-                return;
-            }
-            let to_rgba = match format {
-                wgpu::TextureFormat::Rgba8Unorm => [0, 1, 2, 3],
-                wgpu::TextureFormat::Bgra8Unorm => [2, 1, 0, 3],
-                _ => {
-                    log::error!("Screen can't be captured unless the surface format is Rgba8Unorm or Bgra8Unorm. Current surface format is {:?}", format);
-                    return;
-                }
-            };
-            let buffer_slice = buffer.slice(..);
-
-            let mut pixels = Vec::with_capacity((tex_extent.width * tex_extent.height) as usize);
-            for padded_row in buffer_slice
-                .get_mapped_range()
-                .chunks(padding.padded_bytes_per_row as usize)
-            {
-                let row = &padded_row[..padding.unpadded_bytes_per_row as usize];
-                for color in row.chunks(4) {
-                    pixels.push(epaint::Color32::from_rgba_premultiplied(
-                        color[to_rgba[0]],
-                        color[to_rgba[1]],
-                        color[to_rgba[2]],
-                        color[to_rgba[3]],
-                    ));
-                }
-            }
-            buffer.unmap();
-
-            tx.send((
-                data,
-                ColorImage {
-                    size: [tex_extent.width as usize, tex_extent.height as usize],
-                    pixels,
-                },
-            )).ok();
-            ctx.request_repaint();
-        });
-        device.poll(wgpu::Maintain::WaitForSubmissionIndex(id));
-    }
-
     #[allow(unused)] // only used if `wgpu` is the only active feature.
     pub async fn new(
         ctx: egui::Context,
@@ -296,21 +128,10 @@ impl WebPainterWgpu {
             .get_default_config(&render_state.adapter, 0, 0) // Width/height is set later.
.ok_or("The surface isn't supported by this adapter")?; - let capture_supported = surface - .get_capabilities(&render_state.adapter) - .usages - .contains(wgpu::TextureUsages::COPY_DST); - let usage = if capture_supported { - default_configuration.usage | wgpu::TextureUsages::COPY_DST - } else { - default_configuration.usage - }; - let surface_configuration = wgpu::SurfaceConfiguration { format: render_state.target_format, present_mode: options.wgpu_options.present_mode, view_formats: vec![render_state.target_format], - usage, ..default_configuration }; @@ -326,7 +147,6 @@ impl WebPainterWgpu { depth_format, depth_texture_view: None, on_surface_error: options.wgpu_options.on_surface_error.clone(), - capture_supported, screen_capture_state: None, capture_rx: rx, capture_tx: tx, @@ -354,11 +174,7 @@ impl WebPainter for WebPainterWgpu { textures_delta: &egui::TexturesDelta, capture_data: Vec, ) -> Result<(), JsValue> { - let capture = !capture_data.is_empty() && self.capture_supported; - - if !capture_data.is_empty() && !self.capture_supported { - log::warn!("Capture requested, but the surface doesn't support it. (Screenshots don't work with egui_wgpu and the wgpu gl backend)"); - } + let capture = !capture_data.is_empty(); let size_in_pixels = [self.canvas.width(), self.canvas.height()]; @@ -438,7 +254,7 @@ impl WebPainter for WebPainterWgpu { let renderer = render_state.renderer.read(); let frame_view = if capture { - Self::update_capture_state( + CaptureState::update_capture_state( &mut self.screen_capture_state, &output_frame, render_state, @@ -514,7 +330,7 @@ impl WebPainter for WebPainterWgpu { if capture { if let Some(capture_state) = &mut self.screen_capture_state { - Self::read_screen_rgba( + CaptureState::read_screen_rgba( self.ctx.clone(), capture_state, render_state, diff --git a/crates/egui-wgpu/src/blit.wgsl b/crates/egui-wgpu/src/blit.wgsl new file mode 100644 index 000000000000..ad1d972da32b --- /dev/null +++ b/crates/egui-wgpu/src/blit.wgsl @@ -0,0 +1,52 @@ +struct VertexOutput { + @builtin(position) position: vec4, + @location(0) tex_coords: vec2, +}; + +// meant to be called with 3 vertex indices: 0, 1, 2 +// draws one large triangle over the clip space like this: +// (the asterisks represent the clip space bounds) +//-1,1 1,1 +// --------------------------------- +// | * . +// | * . +// | * . +// | * . +// | * . +// | * . +// |*************** +// | . 1,-1 +// | . +// | . +// | . +// | . +// |. +@vertex +fn vs_main(@builtin(vertex_index) vertex_index: u32) -> VertexOutput { + var result: VertexOutput; + let x = i32(vertex_index) / 2; + let y = i32(vertex_index) & 1; + let tc = vec2( + f32(x) * 2.0, + f32(y) * 2.0 + ); + result.position = vec4( + tc.x * 2.0 - 1.0, + 1.0 - tc.y * 2.0, + 0.0, 1.0 + ); + result.tex_coords = tc; + return result; +} + +@group(0) +@binding(0) +var r_color: texture_2d; +@group(0) +@binding(1) +var r_sampler: sampler; + +@fragment +fn fs_main(vertex: VertexOutput) -> @location(0) vec4 { + return textureSample(r_color, r_sampler, vertex.tex_coords); +} diff --git a/crates/egui-wgpu/src/capture.rs b/crates/egui-wgpu/src/capture.rs new file mode 100644 index 000000000000..1c660eb49831 --- /dev/null +++ b/crates/egui-wgpu/src/capture.rs @@ -0,0 +1,251 @@ +use crate::RenderState; +use egui::UserData; +use epaint::ColorImage; +use std::sync::{mpsc, Arc}; +use wgpu::{MultisampleState, StoreOp}; + +/// A texture and a buffer for reading the rendered frame back to the cpu. 
+/// The texture is required since [`wgpu::TextureUsages::COPY_DST`] is not an allowed
+/// flag for the surface texture on all platforms. This means that anytime we want to
+/// capture the frame, we first render it to this texture, and then we can copy it to
+/// both the surface texture and the buffer, from where we can pull it back to the cpu.
+pub struct CaptureState {
+    pub texture: wgpu::Texture,
+    padding: BufferPadding,
+    pipeline: wgpu::RenderPipeline,
+    bind_group: wgpu::BindGroup,
+}
+
+impl CaptureState {
+    pub fn new(device: &Arc<wgpu::Device>, surface_texture: &wgpu::Texture) -> Self {
+        let texture = device.create_texture(&wgpu::TextureDescriptor {
+            label: Some("egui_screen_capture_texture"),
+            size: surface_texture.size(),
+            mip_level_count: surface_texture.mip_level_count(),
+            sample_count: surface_texture.sample_count(),
+            dimension: surface_texture.dimension(),
+            format: surface_texture.format(),
+            usage: wgpu::TextureUsages::RENDER_ATTACHMENT
+                | wgpu::TextureUsages::TEXTURE_BINDING
+                | wgpu::TextureUsages::COPY_SRC,
+            view_formats: &[],
+        });
+
+        let padding = BufferPadding::new(surface_texture.width());
+
+        let shader = device.create_shader_module(wgpu::include_wgsl!("blit.wgsl"));
+
+        let pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
+            label: Some("blit"),
+            layout: None,
+            vertex: wgpu::VertexState {
+                module: &shader,
+                entry_point: Some("vs_main"),
+                compilation_options: Default::default(),
+                buffers: &[],
+            },
+            fragment: Some(wgpu::FragmentState {
+                module: &shader,
+                entry_point: Some("fs_main"),
+                compilation_options: Default::default(),
+                targets: &[Some(surface_texture.format().into())],
+            }),
+            primitive: wgpu::PrimitiveState {
+                topology: wgpu::PrimitiveTopology::TriangleList,
+                ..Default::default()
+            },
+            depth_stencil: None,
+            multisample: MultisampleState::default(),
+            multiview: None,
+            cache: None,
+        });
+
+        let bind_group_layout = pipeline.get_bind_group_layout(0);
+
+        let sampler = device.create_sampler(&wgpu::SamplerDescriptor {
+            label: Some("mip"),
+            address_mode_u: wgpu::AddressMode::ClampToEdge,
+            address_mode_v: wgpu::AddressMode::ClampToEdge,
+            address_mode_w: wgpu::AddressMode::ClampToEdge,
+            mag_filter: wgpu::FilterMode::Linear,
+            min_filter: wgpu::FilterMode::Linear,
+            mipmap_filter: wgpu::FilterMode::Nearest,
+            ..Default::default()
+        });
+
+        let view = texture.create_view(&Default::default());
+
+        let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
+            layout: &bind_group_layout,
+            entries: &[
+                wgpu::BindGroupEntry {
+                    binding: 0,
+                    resource: wgpu::BindingResource::TextureView(&view),
+                },
+                wgpu::BindGroupEntry {
+                    binding: 1,
+                    resource: wgpu::BindingResource::Sampler(&sampler),
+                },
+            ],
+            label: None,
+        });
+
+        Self {
+            texture,
+            padding,
+            pipeline,
+            bind_group,
+        }
+    }
+
+    // CaptureState only needs to be updated when the size of the two textures don't match, and we want to
+    // capture a frame
+    pub fn update_capture_state(
+        screen_capture_state: &mut Option<CaptureState>,
+        surface_texture: &wgpu::SurfaceTexture,
+        render_state: &RenderState,
+    ) {
+        let surface_texture = &surface_texture.texture;
+        match screen_capture_state {
+            Some(capture_state) => {
+                if capture_state.texture.size() != surface_texture.size() {
+                    *capture_state = CaptureState::new(&render_state.device, surface_texture);
+                }
+            }
+            None => {
+                *screen_capture_state =
+                    Some(CaptureState::new(&render_state.device, surface_texture));
+            }
+        }
+    }
+
+    // Handles copying from the CaptureState texture to the surface texture and the cpu
+    pub fn read_screen_rgba(
+        ctx: egui::Context,
+        screen_capture_state: &mut CaptureState,
+        render_state: &RenderState,
+        output_frame: Option<&wgpu::SurfaceTexture>,
+        data: Vec<UserData>,
+        tx: mpsc::Sender<(Vec<UserData>, ColorImage)>,
+    ) {
+        // It would be more efficient to reuse the Buffer, e.g. via some kind of ring buffer, but
+        // for most screenshot use cases this should be fine. When taking many screenshots (e.g. for a video)
+        // it might make sense to revisit this and implement a more efficient solution.
+        #[allow(clippy::arc_with_non_send_sync)]
+        let buffer = Arc::new(render_state.device.create_buffer(&wgpu::BufferDescriptor {
+            label: Some("egui_screen_capture_buffer"),
+            size: (screen_capture_state.padding.padded_bytes_per_row
+                * screen_capture_state.texture.height()) as u64,
+            usage: wgpu::BufferUsages::COPY_DST | wgpu::BufferUsages::MAP_READ,
+            mapped_at_creation: false,
+        }));
+        let padding = screen_capture_state.padding;
+        let tex = &mut screen_capture_state.texture;
+
+        let device = &render_state.device;
+        let queue = &render_state.queue;
+
+        let tex_extent = tex.size();
+
+        let mut encoder = device.create_command_encoder(&Default::default());
+        encoder.copy_texture_to_buffer(
+            tex.as_image_copy(),
+            wgpu::ImageCopyBuffer {
+                buffer: &buffer,
+                layout: wgpu::ImageDataLayout {
+                    offset: 0,
+                    bytes_per_row: Some(padding.padded_bytes_per_row),
+                    rows_per_image: None,
+                },
+            },
+            tex_extent,
+        );
+
+        if let Some(texture) = output_frame {
+            let mut pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
+                label: Some("blit"),
+                color_attachments: &[Some(wgpu::RenderPassColorAttachment {
+                    view: &texture.texture.create_view(&Default::default()),
+                    resolve_target: None,
+                    ops: wgpu::Operations {
+                        load: wgpu::LoadOp::Clear(wgpu::Color::TRANSPARENT),
+                        store: StoreOp::Store,
+                    },
+                })],
+                depth_stencil_attachment: None,
+                occlusion_query_set: None,
+                timestamp_writes: None,
+            });
+
+            pass.set_pipeline(&screen_capture_state.pipeline);
+            pass.set_bind_group(0, &screen_capture_state.bind_group, &[]);
+            pass.draw(0..3, 0..1);
+        }
+
+        let id = queue.submit(Some(encoder.finish()));
+        let buffer_clone = buffer.clone();
+        let buffer_slice = buffer_clone.slice(..);
+        let format = tex.format();
+        buffer_slice.map_async(wgpu::MapMode::Read, move |result| {
+            if let Err(err) = result {
+                log::error!("Failed to map buffer for reading: {:?}", err);
+                return;
+            }
+            let to_rgba = match format {
+                wgpu::TextureFormat::Rgba8Unorm => [0, 1, 2, 3],
+                wgpu::TextureFormat::Bgra8Unorm => [2, 1, 0, 3],
+                _ => {
+                    log::error!("Screen can't be captured unless the surface format is Rgba8Unorm or Bgra8Unorm. Current surface format is {:?}", format);
+                    return;
+                }
+            };
+            let buffer_slice = buffer.slice(..);
+
+            let mut pixels = Vec::with_capacity((tex_extent.width * tex_extent.height) as usize);
+            for padded_row in buffer_slice
+                .get_mapped_range()
+                .chunks(padding.padded_bytes_per_row as usize)
+            {
+                let row = &padded_row[..padding.unpadded_bytes_per_row as usize];
+                for color in row.chunks(4) {
+                    pixels.push(epaint::Color32::from_rgba_premultiplied(
+                        color[to_rgba[0]],
+                        color[to_rgba[1]],
+                        color[to_rgba[2]],
+                        color[to_rgba[3]],
+                    ));
+                }
+            }
+            buffer.unmap();
+
+            tx.send((
+                data,
+                ColorImage {
+                    size: [tex_extent.width as usize, tex_extent.height as usize],
+                    pixels,
+                },
+            )).ok();
+            ctx.request_repaint();
+        });
+        device.poll(wgpu::Maintain::WaitForSubmissionIndex(id));
+    }
+}
+
+#[derive(Copy, Clone)]
+struct BufferPadding {
+    unpadded_bytes_per_row: u32,
+    padded_bytes_per_row: u32,
+}
+
+impl BufferPadding {
+    fn new(width: u32) -> Self {
+        let bytes_per_pixel = std::mem::size_of::<u32>() as u32;
+        let unpadded_bytes_per_row = width * bytes_per_pixel;
+        let padded_bytes_per_row =
+            wgpu::util::align_to(unpadded_bytes_per_row, wgpu::COPY_BYTES_PER_ROW_ALIGNMENT);
+        Self {
+            unpadded_bytes_per_row,
+            padded_bytes_per_row,
+        }
+    }
+}
diff --git a/crates/egui-wgpu/src/lib.rs b/crates/egui-wgpu/src/lib.rs
index e2c22a5cffc0..72dafae95b2b 100644
--- a/crates/egui-wgpu/src/lib.rs
+++ b/crates/egui-wgpu/src/lib.rs
@@ -26,6 +26,7 @@ mod renderer;
 pub use renderer::*;
 use wgpu::{Adapter, Device, Instance, Queue};
 
+pub mod capture;
 /// Module for painting [`egui`](https://github.com/emilk/egui) with [`wgpu`] on [`winit`].
 #[cfg(feature = "winit")]
 pub mod winit;
diff --git a/crates/egui_demo_lib/src/demo/screenshot.rs b/crates/egui_demo_lib/src/demo/screenshot.rs
index a2e4727560da..eb62611c8634 100644
--- a/crates/egui_demo_lib/src/demo/screenshot.rs
+++ b/crates/egui_demo_lib/src/demo/screenshot.rs
@@ -5,6 +5,7 @@ use std::sync::Arc;
 #[derive(PartialEq, Eq, Default)]
 pub struct Screenshot {
     image: Option<(Arc<ColorImage>, egui::TextureHandle)>,
+    continuous: bool,
 }
 
 impl crate::Demo for Screenshot {
@@ -39,7 +40,9 @@ impl crate::View for Screenshot {
         });
 
         ui.horizontal_top(|ui| {
-            if ui.button("📷 Take Screenshot").clicked() {
+            let capture = ui.button("📷 Take Screenshot").clicked();
+            ui.checkbox(&mut self.continuous, "Capture continuously");
+            if capture || self.continuous {
                 ui.ctx()
                     .send_viewport_cmd(ViewportCommand::Screenshot(UserData::default()));
             }
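
Usage note: since this patch makes `CaptureState` public in `egui_wgpu::capture`, a native (non-web) integration could drive it roughly as sketched below. This is a minimal sketch, not part of the diff: the `CaptureDriver` wrapper and its call sites are hypothetical, and it assumes the frame has already been rendered into `CaptureState::texture` (as the web painter does) so that `read_screen_rgba` can blit it to the surface and read it back.

```rust
// Hedged sketch of driving egui_wgpu::capture::CaptureState from a native
// painter. Only `CaptureState`, `update_capture_state`, and `read_screen_rgba`
// come from this patch; everything else here is illustrative.
use std::sync::mpsc;

use egui::{ColorImage, UserData};
use egui_wgpu::{capture::CaptureState, wgpu, RenderState};

struct CaptureDriver {
    state: Option<CaptureState>,
    tx: mpsc::Sender<(Vec<UserData>, ColorImage)>,
    rx: mpsc::Receiver<(Vec<UserData>, ColorImage)>,
}

impl CaptureDriver {
    fn new() -> Self {
        let (tx, rx) = mpsc::channel();
        Self { state: None, tx, rx }
    }

    /// Kick off a capture of the current frame; the result arrives on `rx` later.
    /// Assumes the frame was already rendered into `self.state`'s texture.
    fn capture_frame(
        &mut self,
        ctx: &egui::Context,
        render_state: &RenderState,
        output_frame: &wgpu::SurfaceTexture,
        data: Vec<UserData>,
    ) {
        // (Re)creates the intermediate texture only when the surface size changed.
        CaptureState::update_capture_state(&mut self.state, output_frame, render_state);
        if let Some(state) = &mut self.state {
            // Copies the capture texture into a mappable buffer, blits it to the
            // surface, and sends the pixels through `tx` once the map resolves.
            CaptureState::read_screen_rgba(
                ctx.clone(),
                state,
                render_state,
                Some(output_frame),
                data,
                self.tx.clone(),
            );
        }
    }

    /// Poll once per frame for finished screenshots.
    fn poll(&self) -> Option<(Vec<UserData>, ColorImage)> {
        self.rx.try_recv().ok()
    }
}
```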