diff --git a/CHANGELOG.md b/CHANGELOG.md index 08d20c09fa..d424038c9b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -101,6 +101,7 @@ By @wumpf in [#4147](https://github.com/gfx-rs/wgpu/pull/4147) - `wgpu::CreateSurfaceError` and `wgpu::RequestDeviceError` now give details of the failure, but no longer implement `PartialEq` and cannot be constructed. By @kpreid in [#4066](https://github.com/gfx-rs/wgpu/pull/4066) and [#4145](https://github.com/gfx-rs/wgpu/pull/4145) - Make `WGPU_POWER_PREF=none` a valid value. By @fornwall in [4076](https://github.com/gfx-rs/wgpu/pull/4076) - Support dual source blending in OpenGL ES, Metal, Vulkan & DX12. By @freqmod in [4022](https://github.com/gfx-rs/wgpu/pull/4022) +- Add stub support for device destroy and device validity. By @bradwerth in [4163](https://github.com/gfx-rs/wgpu/pull/4163) #### Vulkan diff --git a/tests/tests/device.rs b/tests/tests/device.rs index 7964f2afdb..991170e739 100644 --- a/tests/tests/device.rs +++ b/tests/tests/device.rs @@ -1,6 +1,6 @@ use wasm_bindgen_test::*; -use wgpu_test::{initialize_test, FailureCase, TestParameters}; +use wgpu_test::{fail, initialize_test, FailureCase, TestParameters}; #[test] #[wasm_bindgen_test] @@ -91,3 +91,355 @@ async fn request_device_error_message() { } assert!(device_error.contains(expected), "{device_error}"); } + +#[test] +fn device_destroy_then_more() { + // This is a test of device behavior after device.destroy. Specifically, all operations + // should trigger errors since the device is lost. + // + // On DX12 this test fails with a validation error in the very artificial actions taken + // after losing the device. The error is "ID3D12CommandAllocator::Reset: The command + // allocator cannot be reset because a command list is currently being recorded with the + // allocator." That may indicate that DX12 doesn't like opened command buffers staying + // open even after they return an error. For now, this test is skipped on DX12. + // + // The DX12 issue may be related to https://github.com/gfx-rs/wgpu/issues/3193. + initialize_test( + TestParameters::default() + .features(wgpu::Features::CLEAR_TEXTURE) + .skip(FailureCase::backend(wgpu::Backends::DX12)), + |ctx| { + // Create some resources on the device that we will attempt to use *after* losing + // the device. + + // Create some 512 x 512 2D textures. + let texture_extent = wgpu::Extent3d { + width: 512, + height: 512, + depth_or_array_layers: 1, + }; + let texture_for_view = ctx.device.create_texture(&wgpu::TextureDescriptor { + label: None, + size: texture_extent, + mip_level_count: 2, + sample_count: 1, + dimension: wgpu::TextureDimension::D2, + format: wgpu::TextureFormat::Rg8Uint, + usage: wgpu::TextureUsages::RENDER_ATTACHMENT, + view_formats: &[], + }); + let target_view = texture_for_view.create_view(&wgpu::TextureViewDescriptor::default()); + + let texture_for_read = ctx.device.create_texture(&wgpu::TextureDescriptor { + label: None, + size: texture_extent, + mip_level_count: 2, + sample_count: 1, + dimension: wgpu::TextureDimension::D2, + format: wgpu::TextureFormat::Rg8Uint, + usage: wgpu::TextureUsages::COPY_SRC, + view_formats: &[], + }); + + let texture_for_write = ctx.device.create_texture(&wgpu::TextureDescriptor { + label: None, + size: texture_extent, + mip_level_count: 2, + sample_count: 1, + dimension: wgpu::TextureDimension::D2, + format: wgpu::TextureFormat::Rg8Uint, + usage: wgpu::TextureUsages::COPY_DST, + view_formats: &[], + }); + + // Create some buffers.
+ let buffer_source = ctx.device.create_buffer(&wgpu::BufferDescriptor { + label: None, + size: 256, + usage: wgpu::BufferUsages::MAP_WRITE | wgpu::BufferUsages::COPY_SRC, + mapped_at_creation: false, + }); + let buffer_dest = ctx.device.create_buffer(&wgpu::BufferDescriptor { + label: None, + size: 256, + usage: wgpu::BufferUsages::MAP_READ | wgpu::BufferUsages::COPY_DST, + mapped_at_creation: false, + }); + + // Create a bind group layout. + let bind_group_layout = + ctx.device + .create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor { + label: None, + entries: &[], + }); + + // Create a shader module. + let shader_module = ctx + .device + .create_shader_module(wgpu::ShaderModuleDescriptor { + label: None, + source: wgpu::ShaderSource::Wgsl(std::borrow::Cow::Borrowed("")), + }); + + // Create some command encoders. + let mut encoder_for_clear = ctx + .device + .create_command_encoder(&wgpu::CommandEncoderDescriptor::default()); + + let mut encoder_for_compute_pass = ctx + .device + .create_command_encoder(&wgpu::CommandEncoderDescriptor::default()); + + let mut encoder_for_render_pass = ctx + .device + .create_command_encoder(&wgpu::CommandEncoderDescriptor::default()); + + let mut encoder_for_buffer_buffer_copy = ctx + .device + .create_command_encoder(&wgpu::CommandEncoderDescriptor::default()); + + let mut encoder_for_buffer_texture_copy = ctx + .device + .create_command_encoder(&wgpu::CommandEncoderDescriptor::default()); + + let mut encoder_for_texture_buffer_copy = ctx + .device + .create_command_encoder(&wgpu::CommandEncoderDescriptor::default()); + + let mut encoder_for_texture_texture_copy = ctx + .device + .create_command_encoder(&wgpu::CommandEncoderDescriptor::default()); + + // Destroy the device. This will cause all other requests to return some variation of + // a device invalid error. + ctx.device.destroy(); + + // TODO: verify the following operations will return an invalid device error: + // * Run a compute pass + // * Run a render pass + // * Finish a render bundle encoder + // * Create a texture from HAL + // * Create a buffer from HAL + // * Create a sampler + // * Validate a surface configuration + // * Start capture + // * Stop capture + // * Buffer map + + // TODO: figure out how to structure a test around these operations which panic when + // the device is invalid: + // * device.features() + // * device.limits() + // * device.downlevel_properties() + // * device.create_query_set() + + // TODO: change these fail calls to check for the specific errors which indicate that + // the device is not valid. + + // Creating a command encoder should fail. + fail(&ctx.device, || { + ctx.device + .create_command_encoder(&wgpu::CommandEncoderDescriptor::default()); + }); + + // Creating a buffer should fail. + fail(&ctx.device, || { + ctx.device.create_buffer(&wgpu::BufferDescriptor { + label: None, + size: 256, + usage: wgpu::BufferUsages::MAP_WRITE | wgpu::BufferUsages::COPY_SRC, + mapped_at_creation: false, + }); + }); + + // Creating a texture should fail. + fail(&ctx.device, || { + ctx.device.create_texture(&wgpu::TextureDescriptor { + label: None, + size: wgpu::Extent3d { + width: 512, + height: 512, + depth_or_array_layers: 1, + }, + mip_level_count: 2, + sample_count: 1, + dimension: wgpu::TextureDimension::D2, + format: wgpu::TextureFormat::Rg8Uint, + usage: wgpu::TextureUsages::COPY_SRC, + view_formats: &[], + }); + }); + + // Texture clear should fail.
+ fail(&ctx.device, || { + encoder_for_clear.clear_texture( + &texture_for_write, + &wgpu::ImageSubresourceRange { + aspect: wgpu::TextureAspect::All, + base_mip_level: 0, + mip_level_count: None, + base_array_layer: 0, + array_layer_count: None, + }, + ); + }); + + // Creating a compute pass should fail. + fail(&ctx.device, || { + encoder_for_compute_pass.begin_compute_pass(&wgpu::ComputePassDescriptor { + label: None, + timestamp_writes: None, + }); + }); + + // Creating a render pass should fail. + fail(&ctx.device, || { + encoder_for_render_pass.begin_render_pass(&wgpu::RenderPassDescriptor { + label: None, + color_attachments: &[Some(wgpu::RenderPassColorAttachment { + ops: wgpu::Operations::default(), + resolve_target: None, + view: &target_view, + })], + depth_stencil_attachment: None, + timestamp_writes: None, + occlusion_query_set: None, + }); + }); + + // Copying a buffer to a buffer should fail. + fail(&ctx.device, || { + encoder_for_buffer_buffer_copy.copy_buffer_to_buffer( + &buffer_source, + 0, + &buffer_dest, + 0, + 256, + ); + }); + + // Copying a buffer to a texture should fail. + fail(&ctx.device, || { + encoder_for_buffer_texture_copy.copy_buffer_to_texture( + wgpu::ImageCopyBuffer { + buffer: &buffer_source, + layout: wgpu::ImageDataLayout { + offset: 0, + bytes_per_row: Some(4), + rows_per_image: None, + }, + }, + texture_for_write.as_image_copy(), + texture_extent, + ); + }); + + // Copying a texture to a buffer should fail. + fail(&ctx.device, || { + encoder_for_texture_buffer_copy.copy_texture_to_buffer( + texture_for_read.as_image_copy(), + wgpu::ImageCopyBuffer { + buffer: &buffer_source, + layout: wgpu::ImageDataLayout { + offset: 0, + bytes_per_row: Some(4), + rows_per_image: None, + }, + }, + texture_extent, + ); + }); + + // Copying a texture to a texture should fail. + fail(&ctx.device, || { + encoder_for_texture_texture_copy.copy_texture_to_texture( + texture_for_read.as_image_copy(), + texture_for_write.as_image_copy(), + texture_extent, + ); + }); + + // Creating a bind group layout should fail. + fail(&ctx.device, || { + ctx.device + .create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor { + label: None, + entries: &[], + }); + }); + + // Creating a bind group should fail. + fail(&ctx.device, || { + ctx.device.create_bind_group(&wgpu::BindGroupDescriptor { + label: None, + layout: &bind_group_layout, + entries: &[wgpu::BindGroupEntry { + binding: 0, + resource: wgpu::BindingResource::Buffer( + buffer_source.as_entire_buffer_binding(), + ), + }], + }); + }); + + // Creating a pipeline layout should fail. + fail(&ctx.device, || { + ctx.device + .create_pipeline_layout(&wgpu::PipelineLayoutDescriptor { + label: None, + bind_group_layouts: &[], + push_constant_ranges: &[], + }); + }); + + // Creating a shader module should fail. + fail(&ctx.device, || { + ctx.device + .create_shader_module(wgpu::ShaderModuleDescriptor { + label: None, + source: wgpu::ShaderSource::Wgsl(std::borrow::Cow::Borrowed("")), + }); + }); + + // Creating a shader module spirv should fail. + fail(&ctx.device, || unsafe { + ctx.device + .create_shader_module_spirv(&wgpu::ShaderModuleDescriptorSpirV { + label: None, + source: std::borrow::Cow::Borrowed(&[]), + }); + }); + + // Creating a render pipeline should fail. 
+ fail(&ctx.device, || { + ctx.device + .create_render_pipeline(&wgpu::RenderPipelineDescriptor { + label: None, + layout: None, + vertex: wgpu::VertexState { + module: &shader_module, + entry_point: "", + buffers: &[], + }, + primitive: wgpu::PrimitiveState::default(), + depth_stencil: None, + multisample: wgpu::MultisampleState::default(), + fragment: None, + multiview: None, + }); + }); + + // Creating a compute pipeline should fail. + fail(&ctx.device, || { + ctx.device + .create_compute_pipeline(&wgpu::ComputePipelineDescriptor { + label: None, + layout: None, + module: &shader_module, + entry_point: "", + }); + }); + }, + ) +} diff --git a/wgpu-core/src/command/clear.rs b/wgpu-core/src/command/clear.rs index ceceb2ba58..c39d5fbcd9 100644 --- a/wgpu-core/src/command/clear.rs +++ b/wgpu-core/src/command/clear.rs @@ -219,6 +219,9 @@ impl Global { } let device = &device_guard[cmd_buf.device_id.value]; + if !device.is_valid() { + return Err(ClearError::InvalidDevice(cmd_buf.device_id.value.0)); + } clear_texture( &*texture_guard, diff --git a/wgpu-core/src/command/compute.rs b/wgpu-core/src/command/compute.rs index c9b20c5a0e..fbb53bdf85 100644 --- a/wgpu-core/src/command/compute.rs +++ b/wgpu-core/src/command/compute.rs @@ -16,6 +16,7 @@ use crate::{ hal_api::HalApi, hub::Token, id, + id::DeviceId, identity::GlobalIdentityHandlerFactory, init_tracker::MemoryInitKind, pipeline, @@ -193,6 +194,8 @@ pub enum ComputePassErrorInner { Encoder(#[from] CommandEncoderError), #[error("Bind group {0:?} is invalid")] InvalidBindGroup(id::BindGroupId), + #[error("Device {0:?} is invalid")] + InvalidDevice(DeviceId), #[error("Bind group index {index} is greater than the device's requested `max_bind_group` limit {max}")] BindGroupIndexOutOfRange { index: u32, max: u32 }, #[error("Compute pipeline {0:?} is invalid")] @@ -390,6 +393,12 @@ impl Global { let raw = cmd_buf.encoder.open(); let device = &device_guard[cmd_buf.device_id.value]; + if !device.is_valid() { + return Err(ComputePassErrorInner::InvalidDevice( + cmd_buf.device_id.value.0, + )) + .map_pass_err(init_scope); + } #[cfg(feature = "trace")] if let Some(ref mut list) = cmd_buf.commands { diff --git a/wgpu-core/src/command/render.rs b/wgpu-core/src/command/render.rs index f9202ee3f5..06a9cee1d2 100644 --- a/wgpu-core/src/command/render.rs +++ b/wgpu-core/src/command/render.rs @@ -18,6 +18,7 @@ use crate::{ hal_api::HalApi, hub::Token, id, + id::DeviceId, identity::GlobalIdentityHandlerFactory, init_tracker::{MemoryInitKind, TextureInitRange, TextureInitTrackerAction}, pipeline::{self, PipelineFlags}, @@ -523,6 +524,8 @@ pub enum RenderPassErrorInner { ColorAttachment(#[from] ColorAttachmentError), #[error(transparent)] Encoder(#[from] CommandEncoderError), + #[error("Device {0:?} is invalid")] + InvalidDevice(DeviceId), #[error("Attachment texture view {0:?} is invalid")] InvalidAttachment(id::TextureViewId), #[error("The format of the depth-stencil attachment ({0:?}) is not a depth-stencil format")] @@ -1346,6 +1349,12 @@ impl Global { } let device = &device_guard[cmd_buf.device_id.value]; + if !device.is_valid() { + return Err(RenderPassErrorInner::InvalidDevice( + cmd_buf.device_id.value.0, + )) + .map_pass_err(init_scope); + } cmd_buf.encoder.open_pass(base.label); let (bundle_guard, mut token) = hub.render_bundles.read(&mut token); diff --git a/wgpu-core/src/command/transfer.rs b/wgpu-core/src/command/transfer.rs index 58f88a70e1..10eb80f426 100644 --- a/wgpu-core/src/command/transfer.rs +++ b/wgpu-core/src/command/transfer.rs @@ -8,7 
+8,7 @@ use crate::{ global::Global, hal_api::HalApi, hub::Token, - id::{BufferId, CommandEncoderId, TextureId, Valid}, + id::{BufferId, CommandEncoderId, DeviceId, TextureId, Valid}, identity::GlobalIdentityHandlerFactory, init_tracker::{ has_copy_partial_init_tracker_coverage, MemoryInitKind, TextureInitRange, @@ -40,6 +40,8 @@ pub enum CopySide { #[derive(Clone, Debug, Error)] #[non_exhaustive] pub enum TransferError { + #[error("Device {0:?} is invalid")] + InvalidDevice(DeviceId), #[error("Buffer {0:?} is invalid or destroyed")] InvalidBuffer(BufferId), #[error("Texture {0:?} is invalid or destroyed")] @@ -569,6 +571,9 @@ impl Global { let (buffer_guard, _) = hub.buffers.read(&mut token); let device = &device_guard[cmd_buf.device_id.value]; + if !device.is_valid() { + return Err(TransferError::InvalidDevice(cmd_buf.device_id.value.0).into()); + } #[cfg(feature = "trace")] if let Some(ref mut list) = cmd_buf.commands { @@ -714,6 +719,9 @@ impl Global { let (texture_guard, _) = hub.textures.read(&mut token); let device = &device_guard[cmd_buf.device_id.value]; + if !device.is_valid() { + return Err(TransferError::InvalidDevice(cmd_buf.device_id.value.0).into()); + } #[cfg(feature = "trace")] if let Some(ref mut list) = cmd_buf.commands { @@ -858,6 +866,9 @@ impl Global { let (texture_guard, _) = hub.textures.read(&mut token); let device = &device_guard[cmd_buf.device_id.value]; + if !device.is_valid() { + return Err(TransferError::InvalidDevice(cmd_buf.device_id.value.0).into()); + } #[cfg(feature = "trace")] if let Some(ref mut list) = cmd_buf.commands { @@ -1020,6 +1031,9 @@ impl Global { let (texture_guard, _) = hub.textures.read(&mut token); let device = &device_guard[cmd_buf.device_id.value]; + if !device.is_valid() { + return Err(TransferError::InvalidDevice(cmd_buf.device_id.value.0).into()); + } #[cfg(feature = "trace")] if let Some(ref mut list) = cmd_buf.commands { diff --git a/wgpu-core/src/device/global.rs b/wgpu-core/src/device/global.rs index 632c83e37f..eee1f6e054 100644 --- a/wgpu-core/src/device/global.rs +++ b/wgpu-core/src/device/global.rs @@ -102,6 +102,9 @@ impl Global { let mut token = Token::root(); let (device_guard, _) = hub.devices.read(&mut token); let device = device_guard.get(device_id).map_err(|_| InvalidDevice)?; + if !device.valid { + return Err(InvalidDevice); + } Ok(device.features) } @@ -114,6 +117,9 @@ impl Global { let mut token = Token::root(); let (device_guard, _) = hub.devices.read(&mut token); let device = device_guard.get(device_id).map_err(|_| InvalidDevice)?; + if !device.valid { + return Err(InvalidDevice); + } Ok(device.limits.clone()) } @@ -126,6 +132,9 @@ impl Global { let mut token = Token::root(); let (device_guard, _) = hub.devices.read(&mut token); let device = device_guard.get(device_id).map_err(|_| InvalidDevice)?; + if !device.valid { + return Err(InvalidDevice); + } Ok(device.downlevel.clone()) } @@ -148,6 +157,9 @@ impl Global { Ok(device) => device, Err(_) => break DeviceError::Invalid.into(), }; + if !device.valid { + break DeviceError::Invalid.into(); + } if desc.usage.is_empty() { // Per spec, `usage` must not be zero. 
@@ -586,6 +598,9 @@ impl Global { Ok(device) => device, Err(_) => break DeviceError::Invalid.into(), }; + if !device.valid { + break DeviceError::Invalid.into(); + } #[cfg(feature = "trace")] if let Some(ref trace) = device.trace { trace @@ -641,6 +656,9 @@ impl Global { Ok(device) => device, Err(_) => break DeviceError::Invalid.into(), }; + if !device.valid { + break DeviceError::Invalid.into(); + } // NB: Any change done through the raw texture handle will not be // recorded in the replay @@ -717,6 +735,9 @@ impl Global { Ok(device) => device, Err(_) => break DeviceError::Invalid.into(), }; + if !device.valid { + break DeviceError::Invalid.into(); + } // NB: Any change done through the raw buffer handle will not be // recorded in the replay @@ -978,6 +999,10 @@ impl Global { Ok(device) => device, Err(_) => break DeviceError::Invalid.into(), }; + if !device.valid { + break DeviceError::Invalid.into(); + } + #[cfg(feature = "trace")] if let Some(ref trace) = device.trace { trace @@ -1056,6 +1081,10 @@ impl Global { Ok(device) => device, Err(_) => break DeviceError::Invalid.into(), }; + if !device.valid { + break DeviceError::Invalid.into(); + } + #[cfg(feature = "trace")] if let Some(ref trace) = device.trace { trace @@ -1170,6 +1199,10 @@ impl Global { Ok(device) => device, Err(_) => break DeviceError::Invalid.into(), }; + if !device.valid { + break DeviceError::Invalid.into(); + } + #[cfg(feature = "trace")] if let Some(ref trace) = device.trace { trace @@ -1249,6 +1282,10 @@ impl Global { Ok(device) => device, Err(_) => break DeviceError::Invalid.into(), }; + if !device.valid { + break DeviceError::Invalid.into(); + } + #[cfg(feature = "trace")] if let Some(ref trace) = device.trace { trace @@ -1351,6 +1388,10 @@ impl Global { Ok(device) => device, Err(_) => break DeviceError::Invalid.into(), }; + if !device.valid { + break DeviceError::Invalid.into(); + } + #[cfg(feature = "trace")] if let Some(ref trace) = device.trace { let mut trace = trace.lock(); @@ -1416,6 +1457,10 @@ impl Global { Ok(device) => device, Err(_) => break DeviceError::Invalid.into(), }; + if !device.valid { + break DeviceError::Invalid.into(); + } + #[cfg(feature = "trace")] if let Some(ref trace) = device.trace { let mut trace = trace.lock(); @@ -1486,6 +1531,10 @@ impl Global { Ok(device) => device, Err(_) => break DeviceError::Invalid, }; + if !device.valid { + break DeviceError::Invalid; + } + let dev_stored = Stored { value: id::Valid(device_id), ref_count: device.life_guard.add_ref(), @@ -1579,6 +1628,10 @@ impl Global { Ok(device) => device, Err(_) => break command::RenderBundleError::INVALID_DEVICE, }; + if !device.valid { + break command::RenderBundleError::INVALID_DEVICE; + } + #[cfg(feature = "trace")] if let Some(ref trace) = device.trace { trace.lock().add(trace::Action::CreateRenderBundle { @@ -1661,6 +1714,10 @@ impl Global { Ok(device) => device, Err(_) => break DeviceError::Invalid.into(), }; + if !device.valid { + break DeviceError::Invalid.into(); + } + #[cfg(feature = "trace")] if let Some(ref trace) = device.trace { trace.lock().add(trace::Action::CreateQuerySet { @@ -1750,6 +1807,10 @@ impl Global { Ok(device) => device, Err(_) => break DeviceError::Invalid.into(), }; + if !device.valid { + break DeviceError::Invalid.into(); + } + let adapter = &adapter_guard[device.adapter_id.value]; #[cfg(feature = "trace")] if let Some(ref trace) = device.trace { @@ -1893,6 +1954,10 @@ impl Global { Ok(device) => device, Err(_) => break DeviceError::Invalid.into(), }; + if !device.valid { + break 
DeviceError::Invalid.into(); + } + #[cfg(feature = "trace")] if let Some(ref trace) = device.trace { trace.lock().add(trace::Action::CreateComputePipeline { @@ -2141,6 +2206,10 @@ impl Global { Ok(device) => device, Err(_) => break DeviceError::Invalid.into(), }; + if !device.valid { + break DeviceError::Invalid.into(); + } + #[cfg(feature = "trace")] if let Some(ref trace) = device.trace { trace @@ -2405,6 +2474,9 @@ impl Global { let mut token = Token::root(); let (device_guard, _) = hub.devices.read(&mut token); if let Ok(device) = device_guard.get(id) { + if !device.valid { + return; + } unsafe { device.raw.start_capture() }; } } @@ -2414,6 +2486,9 @@ impl Global { let mut token = Token::root(); let (device_guard, _) = hub.devices.read(&mut token); if let Ok(device) = device_guard.get(id) { + if !device.valid { + return; + } unsafe { device.raw.stop_capture() }; } } @@ -2435,6 +2510,46 @@ impl Global { } } + pub fn device_destroy(&self, device_id: DeviceId) { + let hub = A::hub(self); + let mut token = Token::root(); + + let (mut device_guard, _) = hub.devices.write(&mut token); + if let Ok(device) = device_guard.get_mut(device_id) { + // Follow the steps at + // https://gpuweb.github.io/gpuweb/#dom-gpudevice-destroy. + + // It's legal to call destroy multiple times, but if the device + // is already invalid, there's nothing more to do. There's also + // no need to return an error. + if !device.valid { + return; + } + + // The last part of destroy is to lose the device. The spec says + // delay that until all "currently-enqueued operations on any + // queue on this device are completed." + + // TODO: implement this delay. + + // Finish by losing the device. + + // TODO: associate this "destroyed" reason more tightly with + // the GPUDeviceLostReason defined in webgpu.idl. + device.lose(Some("destroyed")); + } + } + + pub fn device_lose(&self, device_id: DeviceId, reason: Option<&str>) { + let hub = A::hub(self); + let mut token = Token::root(); + + let (mut device_guard, _) = hub.devices.write(&mut token); + if let Ok(device) = device_guard.get_mut(device_id) { + device.lose(reason); + } + } + /// Exit the unreferenced, inactive device `device_id`. 
fn exit_device(&self, device_id: DeviceId) { let hub = A::hub(self); @@ -2519,6 +2634,11 @@ impl Global { } }; + let device = &device_guard[buffer.device_id.value]; + if !device.valid { + return Err((op, BufferAccessError::Invalid)); + } + if let Err(e) = check_buffer_usage(buffer.usage, pub_usage) { return Err((op, e.into())); } @@ -2559,8 +2679,6 @@ impl Global { }; log::debug!("Buffer {:?} map state -> Waiting", buffer_id); - let device = &device_guard[buffer.device_id.value]; - let ret = (buffer.device_id.value, buffer.life_guard.add_ref()); let mut trackers = device.trackers.lock(); @@ -2573,7 +2691,6 @@ impl Global { }; let device = &device_guard[device_id]; - device .lock_life(&mut token) .map(id::Valid(buffer_id), ref_count); diff --git a/wgpu-core/src/device/mod.rs b/wgpu-core/src/device/mod.rs index 9a77bf9536..3e2a245127 100644 --- a/wgpu-core/src/device/mod.rs +++ b/wgpu-core/src/device/mod.rs @@ -292,11 +292,11 @@ pub struct InvalidDevice; #[derive(Clone, Debug, Error)] pub enum DeviceError { - #[error("Parent device is invalid")] + #[error("Parent device is invalid.")] Invalid, #[error("Parent device is lost")] Lost, - #[error("Not enough memory left")] + #[error("Not enough memory left.")] OutOfMemory, #[error("Creation of a resource failed for a reason other than running out of memory.")] ResourceCreationFailed, @@ -305,7 +305,7 @@ pub enum DeviceError { impl From for DeviceError { fn from(error: hal::DeviceError) -> Self { match error { - hal::DeviceError::Lost => DeviceError::Lost, + hal::DeviceError::Lost => DeviceError::Invalid, hal::DeviceError::OutOfMemory => DeviceError::OutOfMemory, hal::DeviceError::ResourceCreationFailed => DeviceError::ResourceCreationFailed, } diff --git a/wgpu-core/src/device/resource.rs b/wgpu-core/src/device/resource.rs index 8acc34acf4..8f37f68be9 100644 --- a/wgpu-core/src/device/resource.rs +++ b/wgpu-core/src/device/resource.rs @@ -72,6 +72,19 @@ pub struct Device { pub(crate) active_submission_index: SubmissionIndex, pub(super) fence: A::Fence, + /// Is this device valid? Valid is closely associated with "lose the device", + /// which can be triggered by various methods, including at the end of device + /// destroy, and by any GPU errors that cause us to no longer trust the state + /// of the device. Ideally we would like to fold valid into the storage of + /// the device itself (for example as an Error enum), but unfortunately we + /// need to continue to be able to retrieve the device in poll_devices to + /// determine if it can be dropped. If our internal accesses of devices were + /// done through ref-counted references and external accesses checked for + /// Error enums, we wouldn't need this. For now, we need it. All the call + /// sites where we check it are areas that should be revisited if we start + /// using ref-counted references for internal access. + pub(crate) valid: bool, + /// All live resources allocated with this [`Device`]. 
/// /// Has to be locked temporarily only (locked last) @@ -189,6 +202,7 @@ impl Device { command_allocator: Mutex::new(com_alloc), active_submission_index: 0, fence, + valid: true, trackers: Mutex::new(Tracker::new()), life_tracker: Mutex::new(life::LifetimeTracker::new()), temp_suspected: life::SuspectedResources::default(), @@ -214,6 +228,10 @@ impl Device { }) } + + pub fn is_valid(&self) -> bool { + self.valid + } + pub(super) fn lock_life<'this, 'token: 'this>( &'this self, //TODO: fix this - the token has to be borrowed for the lock @@ -3155,6 +3173,27 @@ impl Device { desc: desc.map_label(|_| ()), }) } + + pub(crate) fn lose(&mut self, _reason: Option<&str>) { + // Follow the steps at https://gpuweb.github.io/gpuweb/#lose-the-device. + + // Mark the device explicitly as invalid. This is checked in various + // places to prevent new work from being submitted. + self.valid = false; + + // The following steps remain in "lose the device": + // 1) Resolve the GPUDevice device.lost promise. + + // TODO: trigger this passively or actively, and supply the reason. + + // 2) Complete any outstanding mapAsync() steps. + // 3) Complete any outstanding onSubmittedWorkDone() steps. + + // These parts are passively accomplished by setting valid to false, + // since that will prevent any new work from being added to the queues. + // Future calls to poll_devices will continue to check the work queues + // until they are cleared, and then drop the device. + } } impl Device { diff --git a/wgpu-core/src/present.rs b/wgpu-core/src/present.rs index 7366934d27..9a6d0e32a6 100644 --- a/wgpu-core/src/present.rs +++ b/wgpu-core/src/present.rs @@ -138,6 +138,9 @@ impl Global { let (device, config) = match surface.presentation { Some(ref present) => { let device = &device_guard[present.device_id.value]; + if !device.is_valid() { + return Err(DeviceError::Invalid.into()); + } (device, present.config.clone()) } None => return Err(SurfaceError::NotConfigured), @@ -290,6 +293,9 @@ impl Global { }; let device = &mut device_guard[present.device_id.value]; + if !device.is_valid() { + return Err(DeviceError::Invalid.into()); + } #[cfg(feature = "trace")] if let Some(ref trace) = device.trace { @@ -376,6 +382,9 @@ impl Global { }; let device = &mut device_guard[present.device_id.value]; + if !device.is_valid() { + return Err(DeviceError::Invalid.into()); + } #[cfg(feature = "trace")] if let Some(ref trace) = device.trace { diff --git a/wgpu/src/backend/direct.rs b/wgpu/src/backend/direct.rs index 3d3028d334..e8776f881a 100644 --- a/wgpu/src/backend/direct.rs +++ b/wgpu/src/backend/direct.rs @@ -1439,6 +1439,15 @@ impl crate::Context for Context { wgc::gfx_select!(device => global.device_drop(*device)); } + fn device_destroy(&self, device: &Self::DeviceId, _device_data: &Self::DeviceData) { + let global = &self.0; + wgc::gfx_select!(device => global.device_destroy(*device)); + } + fn device_lose(&self, device: &Self::DeviceId, _device_data: &Self::DeviceData) { + // TODO: accept a reason, and pass it to device_lose.
+ let global = &self.0; + wgc::gfx_select!(device => global.device_lose(*device, None)); + } fn device_poll( &self, device: &Self::DeviceId, diff --git a/wgpu/src/backend/web.rs b/wgpu/src/backend/web.rs index b649d41ebb..3868a01fcd 100644 --- a/wgpu/src/backend/web.rs +++ b/wgpu/src/backend/web.rs @@ -1914,6 +1914,16 @@ impl crate::context::Context for Context { // Device is dropped automatically } + fn device_destroy(&self, _buffer: &Self::DeviceId, device_data: &Self::DeviceData) { + device_data.0.destroy(); + } + + fn device_lose(&self, _device: &Self::DeviceId, _device_data: &Self::DeviceData) { + // TODO: figure out the GPUDevice implementation of this, including resolving + // the device.lost promise, which will require a different invocation pattern + // with a callback. + } + fn device_poll( &self, _device: &Self::DeviceId, diff --git a/wgpu/src/context.rs b/wgpu/src/context.rs index 9d0bdd9100..bfcd4ae2ec 100644 --- a/wgpu/src/context.rs +++ b/wgpu/src/context.rs @@ -269,6 +269,8 @@ pub trait Context: Debug + WasmNotSend + WasmNotSync + Sized { desc: &RenderBundleEncoderDescriptor, ) -> (Self::RenderBundleEncoderId, Self::RenderBundleEncoderData); fn device_drop(&self, device: &Self::DeviceId, device_data: &Self::DeviceData); + fn device_destroy(&self, device: &Self::DeviceId, device_data: &Self::DeviceData); + fn device_lose(&self, device: &Self::DeviceId, device_data: &Self::DeviceData); fn device_poll( &self, device: &Self::DeviceId, @@ -1363,6 +1365,8 @@ pub(crate) trait DynContext: Debug + WasmNotSend + WasmNotSync { desc: &RenderBundleEncoderDescriptor, ) -> (ObjectId, Box); fn device_drop(&self, device: &ObjectId, device_data: &crate::Data); + fn device_destroy(&self, device: &ObjectId, device_data: &crate::Data); + fn device_lose(&self, device: &ObjectId, device_data: &crate::Data); fn device_poll(&self, device: &ObjectId, device_data: &crate::Data, maintain: Maintain) -> bool; fn device_on_uncaptured_error( @@ -2424,6 +2428,18 @@ where Context::device_drop(self, &device, device_data) } + fn device_destroy(&self, device: &ObjectId, device_data: &crate::Data) { + let device = ::from(*device); + let device_data = downcast_ref(device_data); + Context::device_destroy(self, &device, device_data) + } + + fn device_lose(&self, device: &ObjectId, device_data: &crate::Data) { + let device = ::from(*device); + let device_data = downcast_ref(device_data); + Context::device_lose(self, &device, device_data) + } + fn device_poll( &self, device: &ObjectId, diff --git a/wgpu/src/lib.rs b/wgpu/src/lib.rs index 88b852eaa4..c2090c7f4f 100644 --- a/wgpu/src/lib.rs +++ b/wgpu/src/lib.rs @@ -2764,6 +2764,11 @@ impl Device { ) } } + + /// Destroy this device. + pub fn destroy(&self) { + DynContext::device_destroy(&*self.context, &self.id, self.data.as_ref()) + } } impl Drop for Device {
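Usage sketch (not part of the patch above): this shows how the new `wgpu::Device::destroy` entry point is expected to behave from the API side, assuming a `device` already obtained from `Adapter::request_device` on this branch. The function name and error text below are illustrative only; the patch guarantees just that work issued after `destroy` is rejected with some variation of an invalid-device error rather than being submitted.

fn destroy_then_observe_errors(device: &wgpu::Device) {
    // Route validation errors somewhere visible; by default wgpu panics on
    // uncaptured errors, which would end the program before we could inspect them.
    device.on_uncaptured_error(Box::new(|error| {
        eprintln!("error after destroy: {error}");
    }));

    // Mark the device as destroyed. Per the patch, this sets `valid = false`
    // in wgpu-core and "loses" the device, so later work is rejected.
    device.destroy();

    // This request should now surface an invalid-device error through the
    // handler above instead of producing a usable buffer.
    let _buffer = device.create_buffer(&wgpu::BufferDescriptor {
        label: Some("created after destroy"),
        size: 256,
        usage: wgpu::BufferUsages::COPY_DST,
        mapped_at_creation: false,
    });
}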